From d4ffc362e140efce0b04ddb68e95778178d78e2e Mon Sep 17 00:00:00 2001 From: Hendrik Wouters Date: Wed, 8 Nov 2017 16:57:31 +0100 Subject: [PATCH 001/129] adding routines for reading ground and air profile data --- data_air.py | 393 +++++++++++++++++++++++++++++++++++++++++++++++ data_ground.py | 393 +++++++++++++++++++++++++++++++++++++++++++++++ data_sounding.py | 393 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1179 insertions(+) create mode 100644 data_air.py create mode 100644 data_ground.py create mode 100644 data_sounding.py diff --git a/data_air.py b/data_air.py new file mode 100644 index 0000000..011afe6 --- /dev/null +++ b/data_air.py @@ -0,0 +1,393 @@ +import numpy as np + +from bs4 import BeautifulSoup +import pandas as pd +import datetime as dt +import pylab as pl +import io +import os +import calendar +import Pysolar +import Pysolar.util +#from urllib import request +def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25): + + #initialize error BLH + BLHe = 0. + eps = 2.#security limit + iTHTV_0 = np.where(~np.isnan(THTV))[0] + if len(iTHTV_0) > 0: + iTHTV_0 = iTHTV_0[0] + THTV_0 = THTV[iTHTV_0] + else: + THTV_0 = np.nan + RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2 + + + #RiB = 9.81/THTV_0 * ( THTV[i-1] + (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2 + #RiB - RiBc = 0 + + #best guess of BLH + + #print("RiB: ",RiB) + #print("RiBc: ",RiBc) + + + + BLHi = np.where(RiB > RiBc)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + #print("BLHi: ",BLHi) + BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + + # possible error is calculated as the difference height levels used for the interpolation + BLHu = np.max([BLH,HAGL[BLHi]-eps]) + BLHd = np.min([BLH,HAGL[BLHi-1]+eps]) + # calculate an alternative BLH based on another critical Richardson number (RiBce): + BLHi =np.where(RiB > RiBce)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + + BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * 
(RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + BLHu = np.max([BLHu,HAGL[BLHi]-eps]) + BLHd = np.min([BLHd,HAGL[BLHi-1]+eps]) + + BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)]) + BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)]) + + else: + BLH,BLHu,BLHd = np.nan, np.nan,np.nan + + else: + BLH,BLHu,BLHd = np.nan, np.nan,np.nan + + return BLH,BLHu,BLHd + +def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)): + STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds()) + return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)] + + +#from os import listdir +#from os.path import isfile #,join +import glob + + +class wyoming(object): + def __init__(self): + self.status = 'init' + self.found = False + self.DT = None + self.current = None + self.mode = 'b' + self.profile_type = 'wyoming' + self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] + self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/" + + def set_STNM(self,STNM): + self.__init__() + self.STNM = STNM + self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html") + self.FILES = [os.path.realpath(FILE) for FILE in self.FILES] + self.current = None + self.found = False + self.FILES.sort() + + def find_first(self,year=None,get_atm=False): + self.found = False + + # check first file/year or specified year + if year == None: + self.iFN = 0 + self.FN = self.FILES[self.iFN] + else: + self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html") + self.iFN = self.FILES.index(self.FN) + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)" + + # go through other files and find first sounding when year is not specified + self.iFN=self.iFN+1 + while keepsearching: + self.FN = self.FILES[self.iFN] + self.sounding_series = 
BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + self.iFN=self.iFN+1 + keepsearching = (self.current is None) and (self.iFN < len(self.FILES)) + self.found = (self.current is not None) + + self.status = 'fetch' + if self.found: + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + + if self.found and get_atm: + self.get_values_one_column_atm() + + + def find(self,DT,get_atm=False): + + self.found = False + keepsearching = True + print(DT) + # we open a new file only when it's needed. Otherwise we just scroll to the right sounding. + if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)): + self.DT = DT + self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html") + self.iFN = self.FILES.index(self.FN) + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + + keepsearching = (self.current is not None) + while keepsearching: + DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + if DTcurrent == DT: + self.found = True + keepsearching = False + if get_atm: + self.get_values_one_column_atm() + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + elif DTcurrent > DT: + keepsearching = False + self.current = None + else: + self.current = self.current.find_next('h2') + if self.current is None: + keepsearching = False + self.found = (self.current is not None) + self.status = 'fetch' + + def find_next(self,get_atm=False): + self.found = False + self.DT = None + if self.current is None: + self.find_first() + else: + self.current = 
self.current.find_next('h2') + self.found = (self.current is not None) + keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES))) + while keepsearching: + self.iFN=self.iFN+1 + self.FN = self.FILES[self.iFN] + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + + self.found = (self.current is not None) + keepsearching = ((self.current is None) and (self.iFN < len(self.FILES))) + if self.found: + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + if self.found and get_atm: + self.get_values_one_column_atm() + + + + def get_values_one_column_atm(self): + + # for iDT,DT in enumerate(DTS): + + #websource = urllib.request.urlopen(webpage) + #soup = BeautifulSoup(open(webpage), "html.parser") + + + #workaround for ...last line has
 which results in stringlike first column
+        string = self.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = self.current.find_next('pre').find_next('pre').text
+
+        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
+        #PARAMS.insert(0,'date',DT)
+
+        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
+        
+        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
+        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
+        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
+        HAGL = HGHT - np.float(PARAMS['Station elevation'])
+        ONE_COLUMN.insert(0,'HAGL',HAGL)
+
+        
+        
+        
+        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
+        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
+        ONE_COLUMN.insert(0,'QABS',QABS)
+        
+        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
+
+        #mixed layer potential temperature
+        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
+
+        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
+        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
+        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
+
+        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
+        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
+
+        #security values for mixed-layer jump values dthetav, dtheta and dq
+        
+        # fit new profiles taking the above-estimated mixed-layer height
+        ONE_COLUMNNEW = []
+        for BLH in [BLHV,BLHVu,BLHVd]:
+            ONE_COLUMNNEW.append(pd.DataFrame())
+            
+            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
+            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
+            
+            listHAGLNEW = list(HAGLNEW)
+            for icol,col in enumerate(['THTV','THTA','QABS']):
+                if len(np.where(HAGL <= BLH)[0]) >= 4:
+                    meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL <= BLH][2:-1],dtype=np.float))
+                else:
+                    meanabl = np.nanmean(ONE_COLUMN[col][1:2],dtype=np.float)
+            
+                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
+                #THTVM = np.nanmean(THTV[HAGL <= BLH])
+                #print("new_pro_h",new_pro_h)
+                # calculate jump ath the top of the mixed layer
+                if col in ['THTA','THTV']:
+                    #for moisture
+                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
+                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
+                    if len(listHAGLNEW) > 4:
+                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
+                        dtheta = np.max((0.1,(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) )
+                    else:
+                        dtheta = np.nan
+                else:
+                    if len(listHAGLNEW) > 4:
+                        #for moisture (it can have both negative and positive slope)
+                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
+                    else:
+                        dtheta = np.nan
+                #print('dtheta',dtheta)
+                
+                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
+            
+                
+                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
+                
+            #QABSM = np.nanmean(QABS[HAGL <= BLH])
+            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
+            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
+            
+        # we just make a copy of the fields, so that it can be read correctly by CLASS 
+        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
+            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
+            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
+            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+
+        # assign fields adopted by CLASS
+        if self.mode == 'o': #original 
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'b':
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'u':
+            PARAMS.insert(0,'h',   BLHVu)
+        elif self.mode == 'd':
+            PARAMS.insert(0,'h',   BLHVd)
+        else:
+            PARAMS.insert(0,'h',   BLHV)
+            
+        PARAMS.insert(0,'day', PARAMS['datetime'][0].day)
+        PARAMS.insert(0,'tstart', PARAMS['datetime'][0].hour)
+        PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
+        PARAMS.insert(0,'lon', np.float(PARAMS['Station longitude'][0]))
+        PARAMS['ldatetime'] = PARAMS.datetime.value - dt.timedelta(hours=PARAMS.lon.value/360.*24.) 
+        PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+        PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+        PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+           
+        ONE_COLUMNb = ONE_COLUMNNEW[0]
+        ONE_COLUMNu = ONE_COLUMNNEW[1]
+        ONE_COLUMNd = ONE_COLUMNNEW[2]
+        
+
+        THTVM = np.nanmean(THTV[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
+        
+        QABSM = np.nanmean(QABS[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
+        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
+        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
+
+        BLHVe = abs(BLHV - BLHVu)
+        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
+        PARAMS.insert(0,'OK',((BLHVe < 100.) and (len(np.where(~pd.isnull(ONE_COLUMN['THTV'][ONE_COLUMN['HAGL'] > 5000.]))[0]) >0 )))
+        
+        #PARAMS.insert(0,'dq',0.)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
+        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
+        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
+        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
+        
+        if self.mode == 'o': #original 
+            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
+            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
+            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
+            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
+            
+        elif self.mode == 'b': # best BLH
+            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
+            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
+            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
+            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
+            
+        elif self.mode == 'u':# upper BLH
+            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNu['THTA'])[1]))
+            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNu['QABS'])[1]))
+            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNu['THTA'])[2]-list(ONE_COLUMNu['THTA'])[1]))
+            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNu['QABS'])[2]-list(ONE_COLUMNu['QABS'])[1]))
+            
+        elif self.mode == 'd': # lower BLH
+            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNd['THTA'])[1]))
+            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNd['QABS'])[1]))
+            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNd['THTA'])[2]-list(ONE_COLUMNd['THTA'])[1]))
+            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNd['QABS'])[2]-list(ONE_COLUMNd['QABS'])[1]))
+            
+        
+        PARAMS = PARAMS.T
+
+        
+        self.PARAMS = PARAMS
+        if self.mode == 'o': #original 
+            self.ONE_COLUMN = ONE_COLUMN
+        elif self.mode == 'b': # best BLH
+            self.ONE_COLUMN = ONE_COLUMNb
+        elif self.mode == 'u':# upper BLH
+            self.ONE_COLUMN = ONE_COLUMNu
+        elif self.mode == 'd': # lower BLH
+            self.ONE_COLUMN=ONE_COLUMNd
+        else:
+            self.ONE_COLUMN = ONE_COLUMN
+
+class one_column(object):
+    """Adapter that flattens a fetched sounding into CLASS-ready attributes.
+
+    Takes an object exposing .PARAMS and .ONE_COLUMN (e.g. a `wyoming`
+    instance after get_values_one_column_atm) and copies their contents
+    onto this instance as plain attributes / numpy arrays.
+    """
+    def __init__(self):
+        # 'init' means nothing has been loaded yet; set_one_column_atm()
+        # switches this to 'filled' once data has been copied in.
+        self.status = 'init'
+        
+        
+    def set_one_column_atm(self,INPUT):
+        """Copy profile data from INPUT onto self.
+
+        INPUT.PARAMS is expected to be a transposed one-row DataFrame whose
+        single data column is named 'value' (scalar station/settings fields);
+        INPUT.ONE_COLUMN is a DataFrame of per-level profile columns.
+        NOTE(review): assumes the 'value' column exists — this matches what
+        wyoming.get_values_one_column_atm produces after its final PARAMS.T.
+        """
+        PARAMS,ONE_COLUMN = INPUT.PARAMS,INPUT.ONE_COLUMN
+
+        self.PARAMS = PARAMS
+        self.ONE_COLUMN = ONE_COLUMN
+        # Flatten both tables into instance attributes: scalars from PARAMS,
+        # per-level profiles (as lists) from ONE_COLUMN.
+        self.__dict__.update(PARAMS.to_dict()['value'])
+        self.__dict__.update(ONE_COLUMN.to_dict('list'))
+        self.status = 'filled'
+        
+        #convert all list to arrays for CLASS
+        # (values are reassigned in place; the key set does not change, so
+        # iterating __dict__.keys() while assigning is safe here)
+        for key in self.__dict__.keys():
+            if type(self.__dict__[key]).__name__ == 'list':
+                self.__dict__[key] = np.array(self.__dict__[key])
+
+
diff --git a/data_ground.py b/data_ground.py
new file mode 100644
index 0000000..d4e0b5a
--- /dev/null
+++ b/data_ground.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: vsc42247
+
+Purpose: Set surface conditions for the CLASS boundary-layer model
+"""
+
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+import pynacolada as pcd
+import pandas as pd
+
+def get_class4gl_ground(class_settings,**kwargs):   
+    
+    key = "IGBPDIS"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+    
+        
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
+        print('reading soil water saturation from '+input_fn)
+
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        class_settings.__dict__['wsat'] = input_nc.variables['wsat'][ilon,ilat]
+        input_nc.close()
+
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc"
+        print('reading soil water field capacity from '+input_fn)
+    
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        class_settings.__dict__['wfc'] = input_nc.variables['wfc'][ilon,ilat]
+        input_nc.close()
+        
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc"
+        print('reading soil wilting point from '+input_fn)
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        class_settings.__dict__['wwilt'] = input_nc.variables['wwp'][ilon,ilat]
+        input_nc.close()
+        
+    key = "GLEAM"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+        
+        #INPUT_gleam = gleam() 
+        #INPUT_gleam.path = "/kyukon/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/" 
+        
+        gleam_path = "/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/"
+        print('reading soil-water content for "+str(class_settings,datetime.year)+" from '+gleam_path)
+        
+        gleam_files = {}
+        
+        gleam_vars = ['SMroot','SMsurf']
+        
+        for VAR in gleam_vars:
+            gleam_files[VAR] = nc4.Dataset(gleam_path+'/'+str(class_settings.datetime.year)+'/'+VAR+'_'+str(class_settings.datetime.year)+'_GLEAM_v3.1a.nc','r')
+        
+
+        year = class_settings.datetime.year
+        day = class_settings.datetime.day
+        hour = class_settings.datetime.hour
+  
+        ilat = np.where(gleam_files['SMsurf'].variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(gleam_files['SMsurf'].variables['lon'][:] >= class_settings.lon)[0][0]
+        
+        VAR = 'SMsurf'; class_settings.wg = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
+        VAR = 'SMroot'; class_settings.w2 = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
+        
+        for VAR in gleam_vars:
+            gleam_files[VAR].close()
+    
+    key = "MOD44B"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+    
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc"
+        print('initializing vegetation fraction from '+input_fn)
+        var = 'cveg'
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        class_settings.__dict__[var] = input_nc.variables['fv'][ilon,ilat]
+        input_nc.close()
+        
+    key = "DSMW"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+         # Procedure of the thermal properties:
+         # 1. determine soil texture from DSMW
+         # 2. soil type with look-up table (according to DWD/EXTPAR)
+         # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987) 
+         #    with parameter look-up table from Noilhan and Planton (1989). 
+         #    Note: The look-up table is inspired on DWD/COSMO
+                 
+       
+        #preparing for soil thermal properties
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc"
+        
+        print("deriving soil thermal properties for the force-restore methodes from the soil texture file "+ input_fn)
+        
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        DSMW = input_nc.variables['DSMW'][ilat,ilon]
+        
+        
+        #EXTPAR: zfine   = soil_texslo(soil_unit)%tex_fine
+        SP = {}; SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code']
+        for SPKEY in SPKEYS: 
+            SP[SPKEY] = np.array(input_nc.variables[SPKEY][DSMW])
+        input_nc.close()
+        
+        SP['texture'] = (0.5*SP['tex_medium']+1.0*SP['tex_coarse']) /(SP['tex_coarse']+SP['tex_medium']+SP['tex_fine'])
+        
+        if pd.isnull(SP['texture']):
+            print('Warning, texture is invalid> Setting to Ocean')
+            SP['itex'] = 9
+        
+        else:
+            SP['itex'] = int(SP['texture']*100)
+        
+        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+        SP['isoil'] = np.zeros_like(SP['itex'],dtype=np.int)
+        LOOKUP = [
+                  [0 ,7],# fine textured, clay (soil type 7)
+                  [20,6],# medium to fine textured, loamy clay (soil type 6)
+                  [40,5],# medium textured, loam (soil type 5)
+                  [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                  [80,3],# coarse textured, sand (soil type 3)
+                ]
+        for iitex,iisoil in LOOKUP: 
+            SP['isoil'][SP['itex'] >= iitex ] = iisoil 
+        
+        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+        LOOKUP = [
+                  [9001, 1 ], # ice, glacier (soil type 1) 
+                  [9002, 2 ], # rock, lithosols (soil type 2)
+                  [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                  [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                  [9,    9 ], # undefined (ocean)
+                  [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                  [9000, 9 ], # undefined (inland lake)
+                  [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                  [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                ]
+        # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+        for icode,iisoil in LOOKUP: 
+            SP['isoil'][SP['code'] == icode] = iisoil 
+        
+        #adopted from data_soil.f90 (COSMO5.0)
+        SP_LOOKUP = { 
+          # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea  
+          # (by index)                                           loam                    loam                                water      ice
+          'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+          'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+          'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+          'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+          'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+          'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+          'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+          'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+          'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+          'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+          'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+          'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+          'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+          'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+          'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+          'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+          'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+          'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+          'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+          #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
+          'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , np.nan    , np.nan   ,  np.nan  ],
+          #error in table 2 of NP89: values need to be multiplied by e-6
+          'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+          'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , np.nan    , np.nan   ,  np.nan  ],
+          'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , np.nan    , np.nan   ,  np.nan  ],
+          'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , np.nan    , np.nan   ,  np.nan  ],
+          'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , np.nan    , np.nan   ,  np.nan  ],
+        }
+        
+        for SPKEY in SP_LOOKUP.keys(): 
+            SP[SPKEY] = np.zeros_like(SP['isoil'],dtype=np.float)
+        
+        for i in range(11):
+            SELECT = (SP['isoil'] == i)
+            for SPKEY in SP_LOOKUP.keys(): 
+                SP[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+        
+        for SPKEY in list(SP_LOOKUP.keys())[-6:]: 
+            var = SPKEY
+            class_settings.__dict__[var] = np.float(SP[SPKEY])
+            
+        # only print the last parameter value in the plot
+        
+        #inputs.append(cp.deepcopy(class_settings))
+        #var = 'cala'
+        #class_settings.__dict__[var] = np.float(SP['cala0'])
+        #valnew = class_settings.__dict__[var]
+        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+        
+        #inputs.append(cp.deepcopy(class_settings))
+        #var = 'crhoc'
+        #class_settings.__dict__[var] = np.float(SP['crhoc'])
+        #valnew = class_settings.__dict__[var]
+        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+        
+    key = "CERES"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
+        
+        CERES_start_date = dt.datetime(2000,3,1)
+        DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+        DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+        
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+        print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+            
+        var = 'cc'
+        
+        input_nc = nc4.Dataset(input_fn,'r')
+        
+        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+        
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        print(class_settings.lat,class_settings.lon)
+        
+        class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:(idatetime+class_settings.runtime),ilat,ilon])/100.
+   
+        input_nc.close()
+    
+    key = "GIMMS"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
+       
+    
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
+        print("Reading Leag Area Index from "+input_fn)
+        var = 'LAI'
+        
+        #plt.plot
+        
+        input_nc = nc4.Dataset(input_fn,'r')
+        
+        #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+        
+        ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        
+        # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+        
+        print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+        tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+        
+        if np.isnan(tarray[idatetime]):
+            print("interpolating GIMMS cveg nan value")
+            
+            mask = np.isnan(tarray)
+            if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+                tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+            else:
+                print("Warning. Could not interpolate GIMMS cveg nan value")
+                
+        class_settings.__dict__[var] = tarray[idatetime]
+        
+        input_nc.close()
+ 
+    key = "IGBPDIS_ALPHA"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
+       
+        var = 'alpha'
+        
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+        print("Reading albedo from "+input_fn)
+    
+        input_nc = nc4.Dataset(input_fn,'r')
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        
+        
+        landfr = {}
+        for ltype in ['W','B','H','TC']:   
+            landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+        
+        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+        
+        alpha=0.
+        for ltype in landfr.keys():
+            alpha += landfr[ltype]*aweights[ltype]
+        
+        
+        class_settings.__dict__[var] = alpha
+        input_nc.close()        
+        
+        
+    key = "ERAINT_ST"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
+       
+        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+        print("Reading soil temperature from "+input_fn)
+        
+        var = 'Tsoil'
+        input_nc = nc4.Dataset(input_fn,'r')
+        
+        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+        
+        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+        
+        
+        class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+        
+        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+        var = 'T2'
+        
+        input_nc = nc4.Dataset(input_fn,'r')
+        
+        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+        
+        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+        
+        
+        class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+        
+        
+        input_nc.close()
+        
+        
+    
+    #inputs.append(cp.deepcopy(class_settings))
+    #var = 'T2'
+    #valold = class_settings.__dict__[var]
+    #
+    #class_settings.__dict__[var] = 305.
+    #class_settings.__dict__['Tsoil'] = 302.
+    #valnew = class_settings.__dict__[var]
+    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+    
+    
+    
+    #inputs.append(cp.deepcopy(class_settings))
+    #
+    #var = 'Lambda'
+    #valold = class_settings.__dict__[var]
+    
+    ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book. 
+    ## I need to ask Chiel.
+    ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+    #
+    #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg'] 
+    #class_settings.__dict__[var] = valnew
+    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+    
+    
+    
+    key = "GLAS"
+    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
+       
+        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+        print("Reading canopy height for determining roughness length from "+input_fn)
+        var = 'z0m'
+    
+        
+        #plt.plot
+        
+        input_nc = nc4.Dataset(input_fn,'r')
+        
+        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+        
+        testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+        
+        lowerlimit = 0.01
+        if testval < lowerlimit:
+            print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+            class_settings.__dict__[var] = lowerlimit
+        else:
+            class_settings.__dict__[var] = testval
+        
+        class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+        
+        
+        input_nc.close()
+        
diff --git a/data_sounding.py b/data_sounding.py
new file mode 100644
index 0000000..011afe6
--- /dev/null
+++ b/data_sounding.py
@@ -0,0 +1,393 @@
+import numpy as np
+
+from bs4 import BeautifulSoup
+import pandas as pd
+import datetime as dt
+import pylab as pl
+import io
+import os
+import calendar
+import Pysolar
+import Pysolar.util
+#from urllib import request
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """Estimate the boundary-layer height from a sounding profile with the
    bulk-Richardson-number method.

    Parameters
    ----------
    HAGL : numpy array
        Height above ground level of each profile level [m].
    THTV : numpy array
        Virtual potential temperature at each level [K]; may contain NaNs.
    WSPD : numpy array
        Wind speed at each level [m/s].
    RiBc : float, optional
        Critical bulk Richardson number for the best-guess BLH.
    RiBce : float, optional
        Alternative (lower) critical value used only to widen the
        upper/lower uncertainty bounds.

    Returns
    -------
    (BLH, BLHu, BLHd) : tuple of floats
        Best-guess BLH plus upper and lower bound estimates [m AGL].
        All three are NaN when no level exceeds the critical value.
    """
    eps = 2.  # security margin [m] around the interpolation levels

    # surface reference: first non-NaN virtual potential temperature
    iTHTV_0 = np.where(~np.isnan(THTV))[0]
    if len(iTHTV_0) > 0:
        iTHTV_0 = iTHTV_0[0]
        THTV_0 = THTV[iTHTV_0]
    else:
        THTV_0 = np.nan

    # bulk Richardson number profile (NaN where inputs are NaN)
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2

    # best guess of BLH: first level where RiB exceeds the critical value
    BLHi = np.where(RiB > RiBc)[0]
    if len(BLHi) > 0:
        BLHi = BLHi[0]
        # linear interpolation between the two levels bracketing RiBc
        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]

        # possible error: the height levels used for the interpolation
        BLHu = np.max([BLH,HAGL[BLHi]-eps])
        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])

        # alternative BLH based on another critical Richardson number (RiBce)
        BLHi = np.where(RiB > RiBce)[0]
        if len(BLHi) > 0:
            BLHi = BLHi[0]
            # BUGFIX: interpolate towards RiBce here. The original used RiBc,
            # which contradicts the stated intent and can extrapolate above
            # the bracketing level found with RiBce.
            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBce - RiB[BLHi-1]) + HAGL[BLHi-1]
            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])

            # widen the bounds by the spread between the two estimates
            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
        else:
            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
    else:
        BLH,BLHu,BLHd = np.nan, np.nan,np.nan

    return BLH,BLHu,BLHd
+
def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
    """Return the list of datetimes from STARTTIME (inclusive) towards
    ENDTIME (exclusive), spaced TIMEJUMP apart.

    The step count is the span divided by the step, truncated towards zero,
    so any partial trailing interval is dropped; an ENDTIME before
    STARTTIME yields an empty list.
    """
    span_seconds = (ENDTIME - STARTTIME).total_seconds()
    nsteps = int(span_seconds / TIMEJUMP.total_seconds())
    result = []
    for istep in range(nsteps):
        result.append(STARTTIME + istep * TIMEJUMP)
    return result
+
+
+#from os import listdir
+#from os.path import isfile #,join
+import glob
+
+
class wyoming(object):
    """Reader for University-of-Wyoming sounding pages archived locally as
    yearly HTML files (one file per station per year under self.PATH).

    Usage: set_STNM() selects a station and builds the file list, then
    find_first()/find()/find_next() position the cursor (self.current, a
    BeautifulSoup <h2> element) on a sounding; get_values_one_column_atm()
    parses the current sounding into self.PARAMS / self.ONE_COLUMN.
    """
    def __init__(self):
       # 'init' -> nothing located yet; set to 'fetch' after a find
       self.status = 'init'
       # True when the last search positioned on a sounding
       self.found = False
       # datetime of the currently selected sounding (None when unset)
       self.DT = None
       # BeautifulSoup <h2> element of the currently selected sounding
       self.current = None
       # BLH variant exposed by get_values_one_column_atm():
       # 'o' original profile, 'b' best guess, 'u' upper bound, 'd' lower bound
       self.mode = 'b'
       self.profile_type = 'wyoming'
       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
       # root of the sounding archive (one subdirectory per year)
       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"

    def set_STNM(self,STNM):
        """Select station STNM (WMO number) and build the sorted list of
        yearly archive files for it; resets all search state."""
        self.__init__()
        self.STNM = STNM
        # file layout: <PATH>/<year>/SOUNDINGS_<year>_<stnm>.html
        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
        self.current = None
        self.found = False
        self.FILES.sort()

    def find_first(self,year=None,get_atm=False):
        """Position the cursor on the first sounding of the archive (or of
        the given year).  Sets self.found and self.DT; when get_atm is True
        the profile is parsed immediately."""
        self.found = False

        # check first file/year or specified year
        if year == None:
            self.iFN = 0
            self.FN = self.FILES[self.iFN]
        else:
            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
        # each sounding in the archive page is introduced by an <h2> header
        self.current = self.sounding_series.find('h2')
        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"

        # go through other files and find first sounding when year is not specified
        self.iFN=self.iFN+1
        while keepsearching:
            self.FN = self.FILES[self.iFN]
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')
            self.iFN=self.iFN+1
            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
        self.found = (self.current is not None)

        self.status = 'fetch'
        if self.found:
            # the <h2> text ends like "... 00Z 01 Jan 2005": hour, day,
            # month name and year are sliced from fixed end positions
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))

        if self.found and get_atm:
            self.get_values_one_column_atm()


    def find(self,DT,get_atm=False):
        """Position the cursor on the sounding at exactly datetime DT.

        Scrolls forward within the already-open file when possible; only
        reopens when DT lies before the current position or in another
        year.  self.found reports success."""
        self.found = False
        keepsearching = True
        print(DT)
        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.
        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
            self.DT = DT
            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')

        keepsearching = (self.current is not None)
        while keepsearching:
            # datetime parsed from the fixed end positions of the <h2> text
            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            if DTcurrent == DT:
                self.found = True
                keepsearching = False
                if get_atm:
                    self.get_values_one_column_atm()
                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            elif DTcurrent > DT:
                # soundings are chronological: passed DT without a match
                keepsearching = False
                self.current = None
            else:
                self.current = self.current.find_next('h2')
                if self.current is None:
                    keepsearching = False
        self.found = (self.current is not None)
        self.status = 'fetch'

    def find_next(self,get_atm=False):
        """Advance the cursor to the next sounding, rolling over into the
        following yearly file(s) when the current one is exhausted."""
        self.found = False
        self.DT = None
        if self.current is None:
            # no cursor yet: behave like find_first()
            self.find_first()
        else:
            self.current = self.current.find_next('h2')
            self.found = (self.current is not None)
            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
            while keepsearching:
                self.iFN=self.iFN+1
                self.FN = self.FILES[self.iFN]
                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
                self.current = self.sounding_series.find('h2')

                self.found = (self.current is not None)
                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
        if self.found:
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
        if self.found and get_atm:
            self.get_values_one_column_atm()



    def get_values_one_column_atm(self):
        """Parse the sounding under self.current into two pandas tables:

        ONE_COLUMN : per-level profile (pressure, height, temperatures, ...)
                     with derived columns HAGL, QABS and CLASS aliases
                     z_pro / theta_pro / q_pro;
        PARAMS     : single-row metadata table augmented with derived
                     quantities (BLH estimates and error bars, mixed-layer
                     means, solar geometry) and transposed at the end so
                     parameters are rows under the single column 'value'.

        Depending on self.mode, self.ONE_COLUMN is the original profile
        ('o') or one re-fitted with the best/upper/lower mixed-layer
        height ('b'/'u'/'d').
        """
        # workaround: the last line of the data <pre> block ends in a stray
        # carriage-return entity which would otherwise make the first column
        # string-like, so drop that final line before parsing
        string = self.current.find_next('pre').text
        string = string.split('\n')[:-1]
        string =  '\n'.join(string)
        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']
        # fixed-width table: 11 columns of 7 characters; iloc[5:-1] skips the
        # header/units rows and the trailing line
        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])

        # the second <pre> block holds "description: value" metadata pairs
        string = self.current.find_next('pre').find_next('pre').text

        # NOTE(review): .drop("dummy",1) uses the deprecated positional axis
        # argument -- needs axis=1 (or columns=) on modern pandas
        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
        #PARAMS.insert(0,'date',DT)

        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))

        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
        # height above ground = reported height minus station elevation
        # NOTE(review): np.float is removed in NumPy >= 1.24 -- migrate to float
        HAGL = HGHT - np.float(PARAMS['Station elevation'])
        ONE_COLUMN.insert(0,'HAGL',HAGL)

        # specific humidity [kg/kg] from mixing ratio [g/kg]: q = w/(w+1)
        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
        ONE_COLUMN.insert(0,'QABS',QABS)

        # wind speed: knots -> m/s
        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')

        # boundary-layer height: best guess plus upper/lower bounds from the
        # bulk-Richardson method (module-level blh())
        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)

        # fit new profiles taking the above-estimated mixed-layer height
        ONE_COLUMNNEW = []
        for BLH in [BLHV,BLHVu,BLHVd]:
            ONE_COLUMNNEW.append(pd.DataFrame())

            # new vertical grid: screen level (2 m), the mixed-layer top twice
            # (just below / just above the jump), then the levels above the BLH
            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)

            listHAGLNEW = list(HAGLNEW)
            for icol,col in enumerate(['THTV','THTA','QABS']):
                # mixed-layer mean: with >= 4 levels below the BLH the lowest
                # two and the topmost are excluded, otherwise fall back to the
                # second level only
                if len(np.where(HAGL <= BLH)[0]) >= 4:
                    meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL <= BLH][2:-1],dtype=np.float))
                else:
                    meanabl = np.nanmean(ONE_COLUMN[col][1:2],dtype=np.float)

                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
                # calculate the jump at the top of the mixed layer by
                # extrapolating the free-troposphere gradient down to the BLH
                if col in ['THTA','THTV']:
                    if len(listHAGLNEW) > 4:
                        # temperature jump is kept positive (minimum 0.1 K)
                        dtheta = np.max((0.1,(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) )
                    else:
                        dtheta = np.nan
                else:
                    if len(listHAGLNEW) > 4:
                        #for moisture (it can have both negative and positive slope)
                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl )
                    else:
                        dtheta = np.nan

                # well-mixed below the BLH, jump at the top, original above
                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)

                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)

        # we just make a copy of the fields, so that it can be read correctly by CLASS
        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))

        # assign fields adopted by CLASS: mixed-layer height per mode
        if self.mode == 'o': #original
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'b':
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'u':
            PARAMS.insert(0,'h',   BLHVu)
        elif self.mode == 'd':
            PARAMS.insert(0,'h',   BLHVd)
        else:
            PARAMS.insert(0,'h',   BLHV)

        PARAMS.insert(0,'day', PARAMS['datetime'][0].day)
        PARAMS.insert(0,'tstart', PARAMS['datetime'][0].hour)
        PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
        PARAMS.insert(0,'lon', np.float(PARAMS['Station longitude'][0]))
        # the single row of PARAMS is labelled 'value' (from the transpose
        # above), hence the ".value" attribute lookups picking scalars
        PARAMS['ldatetime'] = PARAMS.datetime.value - dt.timedelta(hours=PARAMS.lon.value/360.*24.)
        # NOTE(review): legacy Pysolar (< 0.7) Get* API -- confirm installed version
        PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
        PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
        PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)

        # refitted profiles for best / upper / lower BLH
        ONE_COLUMNb = ONE_COLUMNNEW[0]
        ONE_COLUMNu = ONE_COLUMNNEW[1]
        ONE_COLUMNd = ONE_COLUMNNEW[2]

        # mixed-layer mean virtual potential temperature and humidity
        THTVM = np.nanmean(THTV[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)

        QABSM = np.nanmean(QABS[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)

        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)

        # quality flag: BLH uncertainty below 100 m and at least one valid
        # THTV value above 5000 m AGL
        BLHVe = abs(BLHV - BLHVu)
        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
        PARAMS.insert(0,'OK',((BLHVe < 100.) and (len(np.where(~pd.isnull(ONE_COLUMN['THTV'][ONE_COLUMN['HAGL'] > 5000.]))[0]) >0 )))

        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)
        # surface pressure [Pa] from the lowest reported level [hPa]
        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)

        # mixed-layer state and inversion jumps taken from the refitted
        # profile matching self.mode (index 1 = mixed layer, 2 = above jump)
        if self.mode == 'o': #original
            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))

        elif self.mode == 'b': # best BLH
            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))

        elif self.mode == 'u':# upper BLH
            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNu['THTA'])[1]))
            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNu['QABS'])[1]))
            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNu['THTA'])[2]-list(ONE_COLUMNu['THTA'])[1]))
            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNu['QABS'])[2]-list(ONE_COLUMNu['QABS'])[1]))

        elif self.mode == 'd': # lower BLH
            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNd['THTA'])[1]))
            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNd['QABS'])[1]))
            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNd['THTA'])[2]-list(ONE_COLUMNd['THTA'])[1]))
            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNd['QABS'])[2]-list(ONE_COLUMNd['QABS'])[1]))

        # transpose: parameters as rows under the single column 'value'
        PARAMS = PARAMS.T

        self.PARAMS = PARAMS
        if self.mode == 'o': #original
            self.ONE_COLUMN = ONE_COLUMN
        elif self.mode == 'b': # best BLH
            self.ONE_COLUMN = ONE_COLUMNb
        elif self.mode == 'u':# upper BLH
            self.ONE_COLUMN = ONE_COLUMNu
        elif self.mode == 'd': # lower BLH
            self.ONE_COLUMN=ONE_COLUMNd
        else:
            self.ONE_COLUMN = ONE_COLUMN
+
class one_column(object):
    """Container that flattens a wyoming-style PARAMS/ONE_COLUMN pair into
    plain attributes so the CLASS model can consume them directly."""

    def __init__(self):
        # nothing loaded yet
        self.status = 'init'


    def set_one_column_atm(self,INPUT):
        """Copy INPUT.PARAMS (a table with the single column 'value') and
        INPUT.ONE_COLUMN (per-level table) onto this object, exposing every
        parameter and profile column as an attribute; profile lists are
        converted to numpy arrays for CLASS."""
        self.PARAMS = INPUT.PARAMS
        self.ONE_COLUMN = INPUT.ONE_COLUMN
        # scalar parameters: one attribute per row of the 'value' column
        self.__dict__.update(self.PARAMS.to_dict()['value'])
        # profile columns: one attribute per column, as python lists for now
        self.__dict__.update(self.ONE_COLUMN.to_dict('list'))
        self.status = 'filled'

        # convert all list attributes to arrays for CLASS
        for attr_name in list(self.__dict__):
            attr_value = self.__dict__[attr_name]
            if type(attr_value).__name__ == 'list':
                self.__dict__[attr_name] = np.array(attr_value)
+
+

From 73aa61b24fc21a83dbaeea951b57d0b9751ad714 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 8 Nov 2017 17:09:33 +0100
Subject: [PATCH 002/129] make change to the main code to be able to read air
 profiles

---
 data_sounding.py | 393 -----------------------------------------------
 model.py         | 283 ++++++++++++++++++++++++++++++----
 2 files changed, 256 insertions(+), 420 deletions(-)
 delete mode 100644 data_sounding.py

diff --git a/data_sounding.py b/data_sounding.py
deleted file mode 100644
index 011afe6..0000000
--- a/data_sounding.py
+++ /dev/null
@@ -1,393 +0,0 @@
-import numpy as np
-
-from bs4 import BeautifulSoup
-import pandas as pd
-import datetime as dt
-import pylab as pl
-import io
-import os
-import calendar
-import Pysolar
-import Pysolar.util
-#from urllib import request
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHd
-
-def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
-    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
-    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
-
-
-#from os import listdir
-#from os.path import isfile #,join
-import glob
-
-
-class wyoming(object):
-    def __init__(self):
-       self.status = 'init'
-       self.found = False
-       self.DT = None
-       self.current = None
-       self.mode = 'b'
-       self.profile_type = 'wyoming'  
-       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
-       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-         
-    def set_STNM(self,STNM):
-        self.__init__()
-        self.STNM = STNM
-        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
-        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
-        self.current = None
-        self.found = False
-        self.FILES.sort()
-        
-    def find_first(self,year=None,get_atm=False):
-        self.found = False    
-                
-        # check first file/year or specified year
-        if year == None:
-            self.iFN = 0
-            self.FN = self.FILES[self.iFN]
-        else:
-            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-        self.current = self.sounding_series.find('h2')
-        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
-        
-        # go through other files and find first sounding when year is not specified
-        self.iFN=self.iFN+1
-        while keepsearching:
-            self.FN = self.FILES[self.iFN]
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            self.iFN=self.iFN+1
-            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
-        self.found = (self.current is not None)
-
-        self.status = 'fetch'
-        if self.found:
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        
-        if self.found and get_atm:
-            self.get_values_one_column_atm()
-        
-    
-    def find(self,DT,get_atm=False):
-        
-        self.found = False
-        keepsearching = True
-        print(DT)
-        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
-        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
-            self.DT = DT
-            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            
-        keepsearching = (self.current is not None)
-        while keepsearching:
-            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            if DTcurrent == DT:
-                self.found = True
-                keepsearching = False
-                if get_atm:
-                    self.get_values_one_column_atm()
-                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            elif DTcurrent > DT:
-                keepsearching = False
-                self.current = None
-            else:
-                self.current = self.current.find_next('h2')
-                if self.current is None:
-                    keepsearching = False
-        self.found = (self.current is not None)
-        self.status = 'fetch'
-
-    def find_next(self,get_atm=False):
-        self.found = False
-        self.DT = None
-        if self.current is None:
-            self.find_first()
-        else:                
-            self.current = self.current.find_next('h2')
-            self.found = (self.current is not None)
-            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
-            while keepsearching:
-                self.iFN=self.iFN+1
-                self.FN = self.FILES[self.iFN]
-                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-                self.current = self.sounding_series.find('h2')
-                
-                self.found = (self.current is not None)
-                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
-        if self.found:        
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        if self.found and get_atm:
-            self.get_values_one_column_atm()
-       
-
-
-    def get_values_one_column_atm(self):
-
-        # for iDT,DT in enumerate(DTS):
-        
-            #websource = urllib.request.urlopen(webpage)
-        #soup = BeautifulSoup(open(webpage), "html.parser")
-        
-       
-        #workaround for ...last line has 
 which results in stringlike first column
-        string = self.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = self.current.find_next('pre').find_next('pre').text
-
-        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
-        #PARAMS.insert(0,'date',DT)
-
-        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
-        
-        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
-        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
-        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
-        HAGL = HGHT - np.float(PARAMS['Station elevation'])
-        ONE_COLUMN.insert(0,'HAGL',HAGL)
-
-        
-        
-        
-        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
-        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
-        ONE_COLUMN.insert(0,'QABS',QABS)
-        
-        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
-
-        #mixed layer potential temperature
-        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
-
-        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
-        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
-        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
-
-        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
-        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
-
-        #security values for mixed-layer jump values dthetav, dtheta and dq
-        
-        # fit new profiles taking the above-estimated mixed-layer height
-        ONE_COLUMNNEW = []
-        for BLH in [BLHV,BLHVu,BLHVd]:
-            ONE_COLUMNNEW.append(pd.DataFrame())
-            
-            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
-            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
-            
-            listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTV','THTA','QABS']):
-                if len(np.where(HAGL <= BLH)[0]) >= 4:
-                    meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL <= BLH][2:-1],dtype=np.float))
-                else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][1:2],dtype=np.float)
-            
-                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
-                #THTVM = np.nanmean(THTV[HAGL <= BLH])
-                #print("new_pro_h",new_pro_h)
-                # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV']:
-                    #for moisture
-                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
-                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
-                    if len(listHAGLNEW) > 4:
-                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta = np.max((0.1,(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) )
-                    else:
-                        dtheta = np.nan
-                else:
-                    if len(listHAGLNEW) > 4:
-                        #for moisture (it can have both negative and positive slope)
-                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
-                    else:
-                        dtheta = np.nan
-                #print('dtheta',dtheta)
-                
-                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
-            
-                
-                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
-                
-            #QABSM = np.nanmean(QABS[HAGL <= BLH])
-            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
-            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
-            
-        # we just make a copy of the fields, so that it can be read correctly by CLASS 
-        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
-
-        # assign fields adopted by CLASS
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'b':
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'u':
-            PARAMS.insert(0,'h',   BLHVu)
-        elif self.mode == 'd':
-            PARAMS.insert(0,'h',   BLHVd)
-        else:
-            PARAMS.insert(0,'h',   BLHV)
-            
-        PARAMS.insert(0,'day', PARAMS['datetime'][0].day)
-        PARAMS.insert(0,'tstart', PARAMS['datetime'][0].hour)
-        PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-        PARAMS.insert(0,'lon', np.float(PARAMS['Station longitude'][0]))
-        PARAMS['ldatetime'] = PARAMS.datetime.value - dt.timedelta(hours=PARAMS.lon.value/360.*24.) 
-        PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-        PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-        PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-           
-        ONE_COLUMNb = ONE_COLUMNNEW[0]
-        ONE_COLUMNu = ONE_COLUMNNEW[1]
-        ONE_COLUMNd = ONE_COLUMNNEW[2]
-        
-
-        THTVM = np.nanmean(THTV[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
-        
-        QABSM = np.nanmean(QABS[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
-        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
-        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
-
-        BLHVe = abs(BLHV - BLHVu)
-        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-        PARAMS.insert(0,'OK',((BLHVe < 100.) and (len(np.where(~pd.isnull(ONE_COLUMN['THTV'][ONE_COLUMN['HAGL'] > 5000.]))[0]) >0 )))
-        
-        #PARAMS.insert(0,'dq',0.)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
-        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
-        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
-        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
-        
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
-            
-        elif self.mode == 'b': # best BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
-            
-        elif self.mode == 'u':# upper BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNu['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNu['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNu['THTA'])[2]-list(ONE_COLUMNu['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNu['QABS'])[2]-list(ONE_COLUMNu['QABS'])[1]))
-            
-        elif self.mode == 'd': # lower BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNd['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNd['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNd['THTA'])[2]-list(ONE_COLUMNd['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNd['QABS'])[2]-list(ONE_COLUMNd['QABS'])[1]))
-            
-        
-        PARAMS = PARAMS.T
-
-        
-        self.PARAMS = PARAMS
-        if self.mode == 'o': #original 
-            self.ONE_COLUMN = ONE_COLUMN
-        elif self.mode == 'b': # best BLH
-            self.ONE_COLUMN = ONE_COLUMNb
-        elif self.mode == 'u':# upper BLH
-            self.ONE_COLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # lower BLH
-            self.ONE_COLUMN=ONE_COLUMNd
-        else:
-            self.ONE_COLUMN = ONE_COLUMN
-
-class one_column(object):
-    def __init__(self):
-        self.status = 'init'
-        
-        
-    def set_one_column_atm(self,INPUT):
-        PARAMS,ONE_COLUMN = INPUT.PARAMS,INPUT.ONE_COLUMN
-
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = ONE_COLUMN
-        self.__dict__.update(PARAMS.to_dict()['value'])
-        self.__dict__.update(ONE_COLUMN.to_dict('list'))
-        self.status = 'filled'
-        
-        #convert all list to arrays for CLASS
-        for key in self.__dict__.keys():
-            if type(self.__dict__[key]).__name__ == 'list':
-                self.__dict__[key] = np.array(self.__dict__[key])
-
-
diff --git a/model.py b/model.py
index bb01df7..8e8efdc 100644
--- a/model.py
+++ b/model.py
@@ -1,4 +1,4 @@
-# 
+#_ 
 # CLASS
 # Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
 # Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
@@ -9,7 +9,7 @@
 # This file is part of CLASS
 # 
 # CLASS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
+# it under the terms of the GNU General Public License as published bygamma
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 # 
@@ -43,7 +43,8 @@ def run(self):
         self.init()
   
         # time integrate model 
-        for self.t in range(self.tsteps):
+        #for self.t in range(self.tsteps):
+        while self.t < self.tsteps:
           
             # time integrate components
             self.timestep()
@@ -115,6 +116,13 @@ def init(self):
        
          # Temperature 
         self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
+        
+        
+        self.substep    = False
+        self.substeps   = 0
+
+
+
         self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
         self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
         self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
@@ -152,8 +160,14 @@ def init(self):
         self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
         self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
        
+        
+        
+        
+        
+        
         # Moisture 
         self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
+
         self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
         self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
         self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
@@ -167,6 +181,107 @@ def init(self):
         self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
         self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
       
+  # BEGIN -- HW 20170606
+
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        self.z_pro      = self.input.z_pro  # initial profile of potential temperature [K]
+
+        self.theta_pro  = self.input.theta_pro  # initial profile of potential temperature [K]
+
+
+        if ((self.theta_pro is not None) and (self.z_pro is not None)):
+
+            
+            indextheta = np.where(self.z_pro == self.h)
+            if len(indextheta) == 0:
+                raise RuntimeError("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
+                
+                
+            if indextheta[0][0] !=1:
+                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
+                stop
+
+            
+            if self.theta_pro[1] != self.theta_pro[0]:
+                print("error input profile consistency: two lowest profile levels should be equal.")
+            
+            # initialize theta from its profile when available
+            theta_old = self.theta
+            theta_new = self.theta_pro[indextheta[0][0]]
+            
+            
+            
+            if ((theta_old is not None) & (theta_old != theta_new)):
+                print("Warning: theta input was provided ("+str(theta_old)+\
+                    "kg kg-1), but it is now overwritten by the first level (index 0) of theta_pro which is different ("\
+                    +str(theta_new)+"K).")
+                                    
+            self.theta = theta_new
+
+            # make a profile of the stratification 
+            # please note that the stratification between z_pro[i] and z_pro[i+1] 
+            # is given by gammatheta_pro[i]
+
+            # self.gammatheta_pro = np.gradient(self.theta_pro) / np.gradient(self.z_pro)
+            with np.errstate(divide='ignore'):
+                self.gammatheta_pro = np.array(self.theta_pro[1:] - self.theta_pro[:-1]) \
+                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
+                           
+            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro)[0][-1]]
+        else:
+            self.gammatheta_pro = None
+
+
+        self.q_pro  = self.input.q_pro  # initial profile of potential temperature [K]
+
+        if ((self.q_pro is not None) and (self.z_pro is not None)):
+            
+            indexq = np.where(self.z_pro == self.h)
+            if len(indexq) == 0:
+                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
+                stop   
+                
+            if indexq[0][0] !=1:
+                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
+                stop
+
+            
+            if self.q_pro[1] != self.q_pro[0]:
+                print("error inpuy ptogilr consistency: two lowest profile levels should be equal.")
+            
+            # initialize q from its profile when available
+            q_old = self.q
+            q_new = self.q_pro[indexq[0][0]]
+            
+            
+            
+            if ((q_old is not None) & (q_old != q_new)):
+                print("Warning: q input was provided ("+str(q_old)+\
+                    "kg kg-1), but it is now overwritten by the first level (index 0) of q_pro which is different ("\
+                    +str(q_new)+"kg kg-1).")
+                                    
+            self.q = q_new
+
+            # make a profile of the stratification 
+            # please note that the stratification between z_pro[i] and z_pro[i+1] 
+            # is given by gammaq_pro[i]
+
+            # self.gammaq_pro = np.gradient(self.q_pro) / np.gradient(self.z_pro)
+            with np.errstate(divide='ignore'):
+                self.gammaq_pro = np.array(self.q_pro[1:] - self.q_pro[:-1]) \
+                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
+                           
+            self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
+        else:
+            self.gammaq_pro = None
+
+# END -- HW 20170606      
+        
+        
+        
+        
+        
         # CO2
         fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
         self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
@@ -282,7 +397,7 @@ def init(self):
 
         # initialize A-Gs surface scheme
         self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
- 
+
         # initialize cumulus parameterization
         self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
         self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
@@ -293,6 +408,8 @@ def init(self):
         # initialize time variables
         self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
         self.dt     = self.input.dt
+        self.dtcur      = self.dt
+        self.firsttime = True
         self.t      = 0
  
         # Some sanity checks for valid input
@@ -346,8 +463,23 @@ def timestep(self):
         if(self.sw_ml):
             self.run_mixed_layer()
  
+        #get first profile data point above mixed layer
+        zidx_first = np.where(self.z_pro > self.h)[0][0]
+        
+        if self.htend != 0.:
+            dtmax = ( self.z_pro[zidx_first] - self.h)/self.htend
+        else:
+            dtmax = +np.inf
+        
+        
+        self.substep =  (self.dtcur > dtmax)
+        if self.substep:
+            dtnext = self.dtcur - dtmax
+            self.dtcur = dtmax
+        # HW: this will be done multiple times in case of a substep is needed
         # store output before time integration
-        self.store()
+        if self.firsttime:
+            self.store()
   
         # time integrate land surface model
         if(self.sw_ls):
@@ -356,13 +488,25 @@ def timestep(self):
         # time integrate mixed-layer model
         if(self.sw_ml):
             self.integrate_mixed_layer()
+        
+        if self.substep:
+            self.dtcur = dtnext
+            self.firsttime = False
+            self.substeps += 1
+        else:
+            self.dtcur = self.dt
+            self.t += 1 
+            self.firsttime = True
+            self.substeps = 0
+        
+        
+        
   
     def statistics(self):
         # Calculate virtual temperatures 
         self.thetav   = self.theta  + 0.61 * self.theta * self.q
         self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
         self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
-
         # Mixed-layer top properties
         self.P_h    = self.Ps - self.rho * self.g * self.h
         self.T_h    = self.theta - self.g/self.cp * self.h
@@ -390,7 +534,7 @@ def statistics(self):
 
         if(it == itmax):
             print("LCL calculation not converged!!")
-            print("RHlcl = %f, zlcl=%f"%(RHlcl, self.lcl))
+            print("RHlcl = %f, zlcl=%f, theta=%f, q=%f"%(RHlcl, self.lcl,self.theta,self.q))
 
     def run_cumulus(self):
         # Calculate mixed-layer top relative humidity variance (Neggers et. al 2006/7)
@@ -417,10 +561,12 @@ def run_mixed_layer(self):
             # decompose ustar along the wind components
             self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
             self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
-      
+
+
+
         # calculate large-scale vertical velocity (subsidence)
         self.ws = -self.divU * self.h
-      
+              
         # calculate compensation to fix the free troposphere in case of subsidence 
         if(self.sw_fixft):
             w_th_ft  = self.gammatheta * self.ws
@@ -448,7 +594,6 @@ def run_mixed_layer(self):
             self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
         else:
             self.we    = -self.wthetave / self.dthetav
-
         # Don't allow boundary layer shrinking if wtheta < 0 
         if(self.we < 0):
             self.we = 0.
@@ -457,9 +602,9 @@ def run_mixed_layer(self):
         self.wthetae     = -self.we * self.dtheta
         self.wqe         = -self.we * self.dq
         self.wCO2e       = -self.we * self.dCO2
-  
+        
         self.htend       = self.we + self.ws + self.wf - self.M
-       
+        
         self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
         self.qtend       = (self.wq     - self.wqe     - self.wqM  ) / self.h + self.advq
         self.CO2tend     = (self.wCO2   - self.wCO2e   - self.wCO2M) / self.h + self.advCO2
@@ -501,14 +646,19 @@ def integrate_mixed_layer(self):
         dz0     = self.dz_h
   
         # integrate mixed-layer equations
-        self.h        = h0      + self.dt * self.htend
-        self.theta    = theta0  + self.dt * self.thetatend
-        self.dtheta   = dtheta0 + self.dt * self.dthetatend
-        self.q        = q0      + self.dt * self.qtend
-        self.dq       = dq0     + self.dt * self.dqtend
-        self.CO2      = CO20    + self.dt * self.CO2tend
-        self.dCO2     = dCO20   + self.dt * self.dCO2tend
-        self.dz_h     = dz0     + self.dt * self.dztend
+        
+            
+
+# END -- HW 20170606        
+        
+        self.h        = h0      + self.dtcur * self.htend
+        self.theta    = theta0  + self.dtcur * self.thetatend
+        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
+        self.q        = q0      + self.dtcur * self.qtend
+        self.dq       = dq0     + self.dtcur * self.dqtend
+        self.CO2      = CO20    + self.dtcur * self.CO2tend
+        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
+        self.dz_h     = dz0     + self.dtcur * self.dztend
 
         # Limit dz to minimal value
         dz0 = 50
@@ -516,10 +666,65 @@ def integrate_mixed_layer(self):
             self.dz_h = dz0 
   
         if(self.sw_wind):
-            self.u        = u0      + self.dt * self.utend
-            self.du       = du0     + self.dt * self.dutend
-            self.v        = v0      + self.dt * self.vtend
-            self.dv       = dv0     + self.dt * self.dvtend
+            self.u        = u0      + self.dtcur * self.utend
+            self.du       = du0     + self.dtcur * self.dutend
+            self.v        = v0      + self.dtcur * self.vtend
+            self.dv       = dv0     + self.dtcur * self.dvtend
+
+
+        
+        # note that theta and q itself are updatet by class itself
+
+        #Afterwards, update the vertical profiles by removing any data points below the new h
+        if self.theta_pro is not None:
+            self.theta_pro = np.array([self.theta,self.theta,self.theta+self.dtheta]+list(self.theta_pro[self.z_pro > self.h]))
+
+        if self.q_pro is not None:
+            self.q_pro = np.array([self.q,self.q,self.q+self.dq]+list(self.q_pro[self.z_pro > self.h]))
+
+
+
+        if self.z_pro is not None:
+            self.z_pro = np.array([2.,self.h,self.h]+list(self.z_pro[self.z_pro > self.h]))
+
+        
+        if self.gammatheta_pro is not None:
+            
+                        # self.gammatheta_pro = np.gradient(self.theta_pro) / np.gradient(self.z_pro)
+            with np.errstate(divide='ignore'):
+                self.gammatheta_pro = np.array(self.theta_pro[1:] - self.theta_pro[:-1]) \
+                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
+                           
+            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro)[0][-1]]
+            
+            
+            
+            #self.gammatheta_pro = np.array([self.gammatheta,self.gammatheta,self.gammatheta]+list(self.gammatheta_pro[self.z_pro[:-1] > self.h]))
+
+        if self.gammaq_pro is not None:            
+                        
+            #self.gammaq_pro = np.gradient(self.q_pro) / np.gradient(self.z_pro)
+            with np.errstate(divide='ignore'):
+                self.gammaq_pro = (self.q_pro[1:] -self.q_pro[:-1])  / (self.z_pro[1:] -self.z_pro[:-1])
+            
+            self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
+            
+            
+            self.gammaq_pro = np.array([self.gammaq,self.gammaq,self.gammaq]+list(self.gammaq_pro[self.z_pro[:-1] > self.h]))
+
+
+#	# BEGIN -- HW 20170606
+#        # get new gammatheta(h) from its vertical profile when available:
+#        if(self.gammatheta_pro != None):
+#            #self.gammatheta = np.interp(self.h, self.z_pro, self.gammatheta_pro) 
+#            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro )[0][-1]]
+#
+#        # get gammaq(h) from its vertical profile when available:
+ #       if(self.gammaq_pro != None):
+ #           #self.gammaq = np.interp(self.h, self.z_pro, self.gammaq_pro) 
+ #           self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
+#
+        
  
     def run_radiation(self):
         sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
@@ -532,6 +737,8 @@ def run_radiation(self):
   
         self.Swin  = self.S0 * Tr * sinlea
         self.Swout = self.alpha * self.S0 * Tr * sinlea
+        
+        
         self.Lwin  = 0.8 * self.bolz * Ta ** 4.
         self.Lwout = self.bolz * self.Ts ** 4.
           
@@ -804,9 +1011,9 @@ def integrate_land_surface(self):
         wg0           = self.wg
         Wl0           = self.Wl
   
-        self.Tsoil    = Tsoil0  + self.dt * self.Tsoiltend
-        self.wg       = wg0     + self.dt * self.wgtend
-        self.Wl       = Wl0     + self.dt * self.Wltend
+        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
+        self.wg       = wg0     + self.dtcur * self.wgtend
+        self.Wl       = Wl0     + self.dtcur * self.Wltend
   
     # store model output
     def store(self):
@@ -814,6 +1021,12 @@ def store(self):
         self.out.t[t]          = t * self.dt / 3600. + self.tstart
         self.out.h[t]          = self.h
         
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the following loop:
+        #   for key in list(self.out.__dict__.keys()):
+        #       self.out.__dict__[key][t] = self[key]
+        
+        self.out.gammatheta[t] = self.gammatheta
+        self.out.gammaq[t]     = self.gammaq
         self.out.theta[t]      = self.theta
         self.out.thetav[t]     = self.thetav
         self.out.dtheta[t]     = self.dtheta
@@ -888,6 +1101,7 @@ def store(self):
         self.out.ac[t]         = self.ac
         self.out.M[t]          = self.M
         self.out.dz[t]         = self.dz_h
+        self.out.substeps[t]   = self.substeps
   
     # delete class variables to facilitate analysis in ipython
     def exitmodel(self):
@@ -1048,6 +1262,8 @@ def __init__(self, tsteps):
         self.h          = np.zeros(tsteps)    # ABL height [m]
         
         self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
+        self.gammatheta = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
+        self.gammaq     = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
         self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
         self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
         self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
@@ -1129,6 +1345,8 @@ def __init__(self, tsteps):
         self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
         self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
         self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
+        
+        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
 
 # class for storing mixed-layer model input data
 class model_input:
@@ -1147,6 +1365,10 @@ def __init__(self):
         self.fc         = None  # Coriolis parameter [s-1]
         
         self.theta      = None  # initial mixed-layer potential temperature [K]
+        self.theta_pro  = None  # optional/initial profile of potential temperature [K]
+
+        self.z_pro      = None  # height coordinate of the optional input profiles [m]
+
         self.dtheta     = None  # initial temperature jump at h [K]
         self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
         self.advtheta   = None  # advection of heat [K s-1]
@@ -1154,6 +1376,8 @@ def __init__(self):
         self.wtheta     = None  # surface kinematic heat flux [K m s-1]
         
         self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
+        self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+
         self.dq         = None  # initial specific humidity jump at h [kg kg-1]
         self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
         self.advq       = None  # advection of moisture [kg kg-1 s-1]
@@ -1238,3 +1462,8 @@ def __init__(self):
         # Cumulus parameters
         self.sw_cu      = None  # Cumulus parameterization switch
         self.dz_h       = None  # Transition layer thickness [m]
+        
+# BEGIN -- HW 20171027
+        self.cala       = None      # soil heat conductivity [W/(K*m)]
+        self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+# END -- HW 20171027
\ No newline at end of file

From f9c88f9b01f9dc7b9b7b863574f596870da9fcd5 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Fri, 10 Nov 2017 10:49:57 +0100
Subject: [PATCH 003/129] improve structure by providing a class4gl.py wrapper
 around model.py

---
 class4gl.py | 28 ++++++++++++++++++++++++++++
 data_air.py | 31 +++++++------------------------
 model.py    |  2 +-
 3 files changed, 36 insertions(+), 25 deletions(-)
 create mode 100644 class4gl.py

diff --git a/class4gl.py b/class4gl.py
new file mode 100644
index 0000000..147c934
--- /dev/null
+++ b/class4gl.py
@@ -0,0 +1,28 @@
+# class4gl extends the standard 'model' environment to be able to take global air profiles as input 
+
+from model import model as class4gl
+from model import model_output as class4gl_output
+import numpy as np
+
+class air_input(object):
+    def __init__(self):
+        self.status = 'init'
+        
+        
+    def set_air_input(self,INPUT):
+        PARAMS,ONE_COLUMN = INPUT.PARAMS,INPUT.ONE_COLUMN
+
+        self.PARAMS = PARAMS
+        self.ONE_COLUMN = ONE_COLUMN
+        self.__dict__.update(PARAMS.to_dict()['value'])
+        self.__dict__.update(ONE_COLUMN.to_dict('list'))
+        self.status = 'filled'
+        
+        #convert all list to arrays for CLASS
+        for key in self.__dict__.keys():
+            if type(self.__dict__[key]).__name__ == 'list':
+                self.__dict__[key] = np.array(self.__dict__[key])
+
+from model import model_input
+class4gl_input = type('class4gl_input', (model_input,air_input), dict(c='c'))
+
diff --git a/data_air.py b/data_air.py
index 011afe6..3427915 100644
--- a/data_air.py
+++ b/data_air.py
@@ -9,6 +9,9 @@
 import calendar
 import Pysolar
 import Pysolar.util
+
+
+
 #from urllib import request
 def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
     
@@ -122,7 +125,7 @@ def find_first(self,year=None,get_atm=False):
             self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
         
         if self.found and get_atm:
-            self.get_values_one_column_atm()
+            self.get_values_air_input()
         
     
     def find(self,DT,get_atm=False):
@@ -145,7 +148,7 @@ def find(self,DT,get_atm=False):
                 self.found = True
                 keepsearching = False
                 if get_atm:
-                    self.get_values_one_column_atm()
+                    self.get_values_air_input()
                     self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
             elif DTcurrent > DT:
                 keepsearching = False
@@ -177,11 +180,11 @@ def find_next(self,get_atm=False):
         if self.found:        
             self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
         if self.found and get_atm:
-            self.get_values_one_column_atm()
+            self.get_values_air_input()
        
 
 
-    def get_values_one_column_atm(self):
+    def get_values_air_input(self):
 
         # for iDT,DT in enumerate(DTS):
         
@@ -371,23 +374,3 @@ def get_values_one_column_atm(self):
         else:
             self.ONE_COLUMN = ONE_COLUMN
 
-class one_column(object):
-    def __init__(self):
-        self.status = 'init'
-        
-        
-    def set_one_column_atm(self,INPUT):
-        PARAMS,ONE_COLUMN = INPUT.PARAMS,INPUT.ONE_COLUMN
-
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = ONE_COLUMN
-        self.__dict__.update(PARAMS.to_dict()['value'])
-        self.__dict__.update(ONE_COLUMN.to_dict('list'))
-        self.status = 'filled'
-        
-        #convert all list to arrays for CLASS
-        for key in self.__dict__.keys():
-            if type(self.__dict__[key]).__name__ == 'list':
-                self.__dict__[key] = np.array(self.__dict__[key])
-
-
diff --git a/model.py b/model.py
index 8e8efdc..077e6c1 100644
--- a/model.py
+++ b/model.py
@@ -1466,4 +1466,4 @@ def __init__(self):
 # BEGIN -- HW 20171027
         self.cala       = None      # soil heat conductivity [W/(K*m)]
         self.crhoc      = None      # soil heat capacity  [J/K*m**3]
-# END -- HW 20171027
\ No newline at end of file
+# END -- HW 20171027

From a1ce7c02b06457d1ef80e4f210cd420b5701f60c Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 20 Aug 2018 14:02:06 +0200
Subject: [PATCH 004/129] this is the beta version of class4gl

---
 class4gl.py            | 1615 ++++++++++++++++++++++++++++++-
 data_air.py            |  209 ++--
 data_global.py         |  936 ++++++++++++++++++
 interface_functions.py |  506 ++++++++++
 interface_multi.py     | 2061 ++++++++++++++++++++++++++++++++++++++++
 model.py               | 1155 ++++++++++++++++++----
 ribtol_hw.py           |  165 ++++
 7 files changed, 6370 insertions(+), 277 deletions(-)
 create mode 100644 data_global.py
 create mode 100644 interface_functions.py
 create mode 100644 interface_multi.py
 create mode 100644 ribtol_hw.py

diff --git a/class4gl.py b/class4gl.py
index 147c934..7baaa51 100644
--- a/class4gl.py
+++ b/class4gl.py
@@ -1,28 +1,1611 @@
-# class4gl extends the standard 'model' environment to be able to take global air profiles as input 
+# -*- coding: utf-8 -*-
 
-from model import model as class4gl
+"""
+
+Created on Mon Jan 29 12:33:51 2018
+
+Module file for class4gl, which extends the class-model to be able to take
+global air profiles as input. It exists of:
+
+CLASSES:
+    - an input object, namely class4gl_input. It includes:
+        - a function to read Wyoming sounding data from a wyoming stream object
+        - a function to read global data from a globaldata library object 
+    - the model object: class4gl
+    - ....    
+
+DEPENDENCIES:
+    - xarray
+    - numpy
+    - data_global
+    - Pysolar
+    - yaml
+
+@author: Hendrik Wouters
+
+"""
+
+
+
+""" Setup of environment """
+
+# Standard modules of the stand class-boundary-layer model
+from model import model
 from model import model_output as class4gl_output
+from model import model_input
+from model import qsat
+#from data_soundings import wyoming 
+import Pysolar
+import yaml
+import logging
+import warnings
+import pytz
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
+
+# Generic Python Packages
 import numpy as np
+import datetime as dt
+import pandas as pd
+import xarray as xr
+import io
+#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
+from data_global import data_global
+grav = 9.81
 
-class air_input(object):
+# this is just a generic input object
+class generic_input(object):
     def __init__(self):
-        self.status = 'init'
+        self.init = True
+
+
+# all units from all variables in CLASS(4GL) should be defined here!
+units = {
+         'h':'m',
+         'theta':'K', 
+         'q':'kg/kg',
+         'cc': '-',
+         'cveg': '-',
+         'wg': 'm3 m-3',
+         'w2': 'm3 m-3',
+         #'wg': 'kg/kg',
+         'Tsoil': 'K',
+         'T2': 'K',
+         'z0m': 'm',
+         'alpha': '-',
+         'LAI': '-',
+         'dhdt':'m/h',
+         'dthetadt':'K/h',
+         'dqdt':'kg/kg/h',
+         'BR': '-',
+         'EF': '-',
+}
+
+class class4gl_input(object):
+# this was the way it was defined previously.
+#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+
+    def __init__(self,set_pars_defaults=True,debug_level=None):
+
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        print('hello')
+        self.logger = logging.getLogger('class4gl_input')
+        print(self.logger)
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # # create logger
+        # self.logger = logging.getLogger('class4gl_input')
+        # self.logger.setLevel(debug_level)
+
+        # # create console handler and set level to debug
+        # ch = logging.StreamHandler()
+        # ch.setLevel(debug_level)
+
+        # # create formatter
+        # formatter = logging.Formatter('%(asctime)s - \
+        #                                %(name)s - \
+        #                                %(levelname)s - \
+        #                                %(message)s')
+        # add formatter to ch
+        # ch.setFormatter(formatter)
+     
+        # # add ch to logger
+        # self.logger.addHandler(ch)
+
+        # """ end set up logger """
+
+
+
+        # these are the standard model input single-value parameters for class
+        self.pars = model_input()
+
+        # diagnostic parameters of the initial profile
+        self.diag = dict()
+
+        # In this variable, we keep track of the different parameters from where it originates from. 
+        self.sources = {}
+
+        if set_pars_defaults:
+            self.set_pars_defaults()
+
+    def set_pars_defaults(self):
+
+        """ 
+        Create empty model_input and set up case
+        """
+        defaults = dict( 
+        dt         = 60.    , # time step [s] 
+        runtime    = 6*3600 ,  # total run time [s]
+        
+        # mixed-layer input
+        sw_ml      = True   ,  # mixed-layer model switch
+        sw_shearwe = False  ,  # shear growth mixed-layer switch
+        sw_fixft   = False  ,  # Fix the free-troposphere switch
+        h          = 200.   ,  # initial ABL height [m]
+        Ps         = 101300.,  # surface pressure [Pa]
+        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
+        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
+        
+        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
+        dtheta     = 1.     ,  # initial temperature jump at h [K]
+        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
+        advtheta   = 0.     ,  # advection of heat [K s-1]
+        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
+        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
+        
+        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
+        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
+        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
+        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
+        
+        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
+        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
+        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
+        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
+        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
+        sw_wind    = True  ,  # prognostic wind switch
+        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
+        du         = 0.     ,  # initial u-wind jump at h [m s-1]
+        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
+        advu       = 0.     ,  # advection of u-wind [m s-2]
+        v          = 0.0    , # initial mixed-layer u-wind speed [m s-1]
+        dv         = 0.0    ,  # initial u-wind jump at h [m s-1]
+        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
+        advv       = 0.     ,  # advection of v-wind [m s-2]
+        sw_sl      = True   , # surface layer switch
+        ustar      = 0.3    ,  # surface friction velocity [m s-1]
+        z0m        = 0.02   ,  # roughness length for momentum [m]
+        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
+        sw_rad     = True   , # radiation switch
+        lat        = 51.97  ,  # latitude [deg]
+        lon        = -4.93  ,  # longitude [deg]
+        doy        = 268.   ,  # day of the year [-]
+        tstart     = 6.8    ,  # time of the day [h UTC]
+        cc         = 0.0    ,  # cloud cover fraction [-]
+        Q          = 400.   ,  # net radiation [W m-2] 
+        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
+        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
+        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
+        cveg       = 0.85   ,  # vegetation fraction [-]
+        Tsoil      = 295.   ,  # temperature top soil layer [K]
+        Ts         = 295.   ,    # initial surface temperature [K]
+        T2         = 296.   ,  # temperature deeper soil layer [K]
+        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
+        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
+        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
+        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
+        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
+        wfc        = 0.323  ,  # volumetric water content field capacity [-]
+        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
+        C1sat      = 0.132  ,  
+        C2ref      = 1.8    ,
+        LAI        = 2.     ,  # leaf area index [-]
+        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
+        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
+        rssoilmin  = 50.    ,  # minimun resistance soil evaporation [s m-1]
+        alpha      = 0.25   ,  # surface albedo [-]
+        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
+        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
+        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
+        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
+        sw_cu      = False  ,  # Cumulus parameterization switch
+        dz_h       = 150.   ,  # Transition layer thickness [m]
+        cala       = None   ,  # soil heat conductivity [W/(K*m)]
+        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
+        sw_ls      = True   ,
+        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsidence) fields  as input from eg., ERA-INTERIM
+        sw_lit     = False,
+        )
+        pars = model_input()
+        for key in defaults:
+            pars.__dict__[key] = defaults[key]
+        
+        self.update(source='defaults',pars=pars)
+        
+    def clear(self):
+        """ this procedure clears the class4gl_input """
+
+        for key in list(self.__dict__.keys()):
+            del(self.__dict__[key])
+        self.__init__()
+
+    def dump(self,file):
+        """ this procedure dumps the class4gl_input object into a yaml file
+            
+            Input: 
+                - self.__dict__ (internal): the dictionary from which we read 
+            Output:
+                - file: All the parameters in self.__init__() are written to
+                the yaml file, including pars, air_ap, sources etc.
+        """
+        file.write('---\n')
+        index = file.tell()
+        file.write('# CLASS4GL input; format version: 0.1\n')
+
+        # write out the position of the current record
+        yaml.dump({'index':index}, file, default_flow_style=False)
+
+        # we do not include the none values
+        for key,data in self.__dict__.items():
+            #if ((type(data) == model_input) or (type(class4gl_input):
+            if key == 'pars':
+
+                pars = {'pars' : self.__dict__['pars'].__dict__}
+                parsout = {}
+                for key in pars.keys():
+                    if pars[key] is not None:
+                        parsout[key] = pars[key]
+
+                yaml.dump(parsout, file, default_flow_style=False)
+            elif type(data) == dict:
+                if key == 'sources':
+                    # in case of sources, we want to have a
+                    # condensed list format as well, so we leave out
+                    # 'default_flow_style=False'
+                    yaml.dump({key : data}, file)
+                else: 
+                    yaml.dump({key : data}, file,
+                              default_flow_style=False)
+            elif type(data) == pd.DataFrame:
+                # in case of dataframes (for profiles), we want to have a
+                # condensed list format as well, so we leave out
+                # 'default_flow_style=False'
+                yaml.dump({key: data.to_dict(orient='list')},file)
+
+                # # these are trials to get it into a more human-readable
+                # fixed-width format, but it is too complex
+                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
+                #file.write(stream)
+                
+                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
+                #file.write(key+': !!str |\n')
+                #file.write(str(data)+'\n')
+       
+    def load_yaml_dict(self,yaml_dict,reset=True):
+        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
+            
+            Input: 
+                - yaml_dict: the dictionary from which we read 
+                - reset: reset data before reading        
+            Output:
+                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
+        """
+        
+        if reset:
+            for key in list(self.__dict__.keys()):
+                del(self.__dict__[key])
+            self.__init__()
+
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                self.__dict__[key] = model_input()
+                self.__dict__[key].__dict__ = data
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            elif key == 'sources':
+                self.__dict__[key] = data
+            elif key == 'diag':
+                self.__dict__[key] = data
+            else: 
+                warnings.warn("Key '"+key+"' may not be implemented.")
+                self.__dict__[key] = data
+
+    def update(self,source,**kwargs):
+        """ this procedure is to make updates of input parameters and tracking
+        of their source more convenient. It implements the assignment of
+        parameter source/sensitivity experiment IDs ('eg.,
+        'defaults', 'sounding balloon', any satellite information, climate
+        models, sensitivity tests etc.). These are all stored in a convenient
+        way as class4gl_input.sources.  This way, the user can always consult
+        from where parameters data originates from.  
+        
+        Input:
+            - source:    name of the underlying dataset
+            - **kwargs: a dictionary of data input, for which the key values
+            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
+            the values is a again a dictionary/dataframe of datakeys/columns
+            ('wg','PRES','datetime', ...) and datavalues (either single values,
+            profiles ...), eg., 
+
+                pars = {'wg': 0.007  , 'w2', 0.005}
+                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
+                                     300.,...]}
+            
+        Output:
+            - self.__dict__[datatype] : object to which the parameters are
+                                        assigned. They can be consulted with
+                                        self.pars, self.profiles, etc.
+                                        
+            - self.sources[source] : It supplements the overview of
+                                     data sources can be consulted with
+                                     self.sources. The structure is as follows:
+                                     as:
+                self.sources = { 
+                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
+                'GLEAM' :  ['pars:wg','pars:w2', ...],
+                 ...
+                }
+        
+        """
+
+        #print(source,kwargs)
+
+        for key,data in kwargs.items():
+
+            #print(key)
+            # if the key is not in class4gl_input object, then just add it. In
+            # that case, the update procedures below will just overwrite it 
+            if key not in self.__dict__:
+                self.__dict__[key] = data
+
+
+            
+
+            #... we do an additional check to see whether there is a type
+            # match. If not, then raise a TypeError
+            if (type(data) != type(self.__dict__[key]) \
+                # we allow dict input for model_input pars
+                and not ((key == 'pars') and (type(data) == dict) and \
+                (type(self.__dict__[key]) == model_input))):
+
+                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
+
+
+            # This variable keeps track of the added data that is supplemented
+            # by the current source. We add this to class4gl_input.sources
+            datakeys = []
+
+            #... and we update the class4gl_input data, and this depends on the
+            # data type
+
+            if type(self.__dict__[key]) == pd.DataFrame:
+                # If the data type is a dataframe, then we update the columns
+                for column in list(data.columns):
+                    #print(column)
+                    self.__dict__[key][column] = data[column]
+                    datakeys.append(column)
+                    
+
+            elif type(self.__dict__[key]) == model_input:
+                # if the data type is a model_input, then we update its internal
+                # dictionary of parameters
+                if type(data) == model_input:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data.__dict__}
+                    datakeys = list(data.__dict__.keys())
+                elif type(data) == dict:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data}
+                    datakeys = list(data.keys())
+                else:
+                    raise TypeError('input key '+key+' is not of the same type\
+                                    as the one in the class4gl_object')
+
+            elif type(self.__dict__[key]) == dict:
+                # if the data type is a dictionary, we update the
+                # dictionary 
+                self.__dict__[key] = {self.__dict__[key] , data}
+                datakeys = list(data.keys())
+
+
+            # if source entry is not existing yet, we add it
+            if source not in self.sources.keys():
+                self.sources[source] = []
+
+
+            # self.logger.debug('updating section "'+\
+            #                  key+' ('+' '.join(datakeys)+')'\
+            #                  '" from source \
+            #                  "'+source+'"')
+
+            # Update the source dictionary: add the provided data keys to the
+            # specified source list
+            for datakey in datakeys:
+                # At first, remove the occurences of the keys in the other
+                # source lists
+                for sourcekey,sourcelist in self.sources.items():
+                    if key+':'+datakey in sourcelist:
+                        self.sources[sourcekey].remove(key+':'+datakey)
+                # Afterwards, add it to the current source list
+                self.sources[source].append(key+':'+datakey)
+
+
+        # # in case the datatype is a class4gl_input_pars, we update its keys
+        # # according to **kwargs dictionary
+        # if type(self.__dict__[datatype]) == class4gl_input_pars:
+        #     # add the data parameters to the datatype object dictionary of the
+        #     # datatype
+        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
+        #                                        **kwargs}
+        # # in case, the datatype reflects a dataframe, we update the columns according
+        # # to the *args list
+        # elif type(self.__dict__[datatype]) == pd.DataFrame:
+        #     for dataframe in args:
+        #         for column in list(dataframe.columns):
+        #             self.__dict__[datatype][column] = dataframe[column]
+        
+
+    def get_profile(self,IOBJ, *args, **argv):
+        # if type(IOBJ) == wyoming:
+        self.get_profile_wyoming(IOBJ,*args,**argv)
+        # else:
+        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
+        
+    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
+        """ 
+            Purpose: 
+                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
+
+            Input:
+                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
+                function will take the profile at the stream's current
+                position. 
+                2. air_ap_mode: which air profile do we take? 
+                    - b : best
+                    - l : according to lower limit for the mixed-layer height
+                            estimate
+                    - u : according to upper limit for the mixed-layer height
+                            estimate
+
+
+            Output:
+                1. all single-value parameters are stored in the
+                   class4gl_input.pars object
+                2. the sounding profiles are stored in the
+                   class4gl_input.air_balloon dataframe
+                3. modified sounding profiles for which the mixed layer height
+                   is fitted
+                4. ...
+
+        """
+
+
+        # Raise an error in case the input stream is not the correct object
+        # if type(wy_strm) is not wyoming:
+        #    raise TypeError('Not a wyoming type input stream')
+
+        # Let's tell the class_input object that it is a Wyoming fit type
+        self.air_ap_type = 'wyoming'
+        # ... and which mode of fitting we apply
+        self.air_ap_mode = air_ap_mode
+
+        """ Temporary variables used for output """
+        # single value parameters derived from the sounding profile
+        dpars = dict()
+        # profile values
+        air_balloon = pd.DataFrame()
+        # fitted profile values
+        air_ap = pd.DataFrame()
+        
+        string = wy_strm.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = wy_strm.current.find_next('pre').find_next('pre').text
+        
+        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
+        dpars = {**dpars,
+                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
+               }
+        
+        # we get weird output when it's a numpy Timestamp, so we convert it to
+        # pd.datetime type
+
+        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
+        dpars['STNID'] = dpars['Station number']
+
+        # altitude above ground level
+        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
+        # absolute humidity in g/kg
+        air_balloon['q']= (air_balloon.MIXR/1000.) \
+                              / \
+                             (air_balloon.MIXR/1000.+1.)
+        # convert wind speed from knots to m/s
+        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
+        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+        
+        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
+        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
+
+        
+
+        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+
+        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
+        air_balloon['p'] = air_balloon.PRES*100.
+
+
+        # Therefore, determine the sounding that are valid for 'any' column 
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        #is_valid = (air_balloon.z >= 0)
+        # # this is an alternative pipe/numpy method
+        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
+        valid_indices = air_balloon.index[is_valid].values
+        print(valid_indices)
+
+        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+
+        air_balloon['t'] = air_balloon['TEMP']+273.15
+        air_balloon['theta'] = (air_balloon.t) * \
+                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
+        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
+
+        if len(valid_indices) > 0:
+            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
+            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            
+            # the final mixed-layer height that will be used by class. We round it
+            # to 1 decimal so that we get a clean yaml output format
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+
+
+        if np.isnan(dpars['h']):
+            dpars['Ps'] = np.nan
+
+
+
+
+        if ~np.isnan(dpars['h']):
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u 
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+            
+
+
+
+        # First 3 data points of the mixed-layer fit. We create a empty head
+        # first
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+        
+        #calculate mixed-layer jump ( this should be larger than 0.1)
+        
+        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        air_ap_head['HGHT'] = air_ap_head['z'] \
+                                + \
+                                np.round(dpars[ 'Station elevation'],1)
+        
+        # make a row object for defining the jump
+        jump = air_ap_head.iloc[0] * np.nan
+            
+        if air_ap_tail.shape[0] > 1:
+
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = dpars['theta']
+        z_low =     dpars['h']
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                (z_mean > (z_low+10.)) and \
+                (theta_mean > (theta_low+0.2) ) and \
+                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+
+
+
+
+
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
         
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        #print(air_ap['PRES'].iloc[0])
+
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+
+        
+        dpars['lat'] = dpars['Station latitude']
+        dpars['latitude'] = dpars['lat']
+        
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        dpars['longitude'] = dpars['Station longitude']
+        
+        dpars['ldatetime'] = dpars['datetime'] \
+                            + \
+                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
         
-    def set_air_input(self,INPUT):
-        PARAMS,ONE_COLUMN = INPUT.PARAMS,INPUT.ONE_COLUMN
 
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = ONE_COLUMN
-        self.__dict__.update(PARAMS.to_dict()['value'])
-        self.__dict__.update(ONE_COLUMN.to_dict('list'))
-        self.status = 'filled'
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+
+        # # we make a pars object that is similar to the destination object
+        # pars = model_input()
+        # for key,value in dpars.items():
+        #     pars.__dict__[key] = value
+
+
+        # we round the columns to a specified decimal, so that we get a clean
+        # output format for yaml
+        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
+                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
+                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
+# 
+        for column,decimal in decimals.items():
+            air_balloon[column] = air_balloon[column].round(decimal)
+            air_ap[column] = air_ap[column].round(decimal)
+
+        self.update(source='wyoming',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+
+        
+    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
+    
+        """
+        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
+                 according to the position (lat lon) and the class datetime and timespan
+                 globaldata should be a globaldata multifile object
         
-        #convert all list to arrays for CLASS
+        Input: 
+            - globaldata: this is the library object
+            - only_keys: only extract specified keys
+            - exclude_keys: do not inherit specified keys
+        """
+        classdatetime      = np.datetime64(self.pars.datetime_daylight)
+        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
+                                           + \
+                                           dt.timedelta(seconds=self.pars.runtime)\
+                                          )
+
+
+        # # list of variables that we get from global ground data
+        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
+        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
+        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
+        #                 'texture', 'itex', 'isoil', 'BR',
+        #                 'b', 'cveg',
+        #                 'C1sat', 
+        #                 'C2ref', 'p', 'a',
+        #                 ] #globaldata.datasets.keys():
+
+        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
+        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
+
+
+        if type(globaldata) is not data_global:
+            raise TypeError("Wrong type of input library") 
+
+        # by default, we get all dataset keys
+        keys = list(globaldata.datasets.keys())
+
+        # We add LAI manually, because it is not listed in the datasets and
+        #they its retreival is hard coded below based on LAIpixel and cveg
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            keys.append('LAI')
+
+        # # In case there is surface pressure, we also calculate the half-level
+        # # and full-level pressure fields
+        # if ('sp' in keys):
+        #     keys.append('pfull')
+        #     keys.append('phalf')
+
+        # If specified, we only take the keys that are in only_keys
+        if only_keys is not None:
+            for key in keys:
+                if key not in only_keys:
+                    keys.remove(key)
+                
+        # If specified, we take out keys that are in exclude keys
+        if exclude_keys is not None:
+            for key in keys:
+                if key in exclude_keys:
+                    keys.remove(key)
+
+        # we set everything to nan first in the pars section (non-profile parameters
+        # without lev argument), so that we can check afterwards whether the
+        # data is well-fetched or not.
+        for key in keys:
+            if not ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None) and \
+                ('lev' in globaldata.datasets[key].page[key].dims)):
+                self.update(source='globaldata',pars={key:np.nan})
+            # # we do not check profile input for now. We assume it is
+            # # available
+            #else:
+            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
+
+        self.logger.debug('getting keys "'+', '.join(keys)+'\
+                          from global data')
+
+        for key in keys:
+            # If we find it, then we obtain the variables
+            if ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None)):
+
+                # check first whether the dataset has a height coordinate (3d space)
+                if 'lev' in globaldata.datasets[key].page[key].dims:
+
+                    # first, we browse to the correct file that has the current time
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+                        globaldata.datasets[key].browse_page(time=classdatetime)
+
+                    
+                    if (globaldata.datasets[key].page is not None):
+                        # find longitude and latitude coordinates
+                        ilats = (np.abs(globaldata.datasets[key].page.lat -
+                                        self.pars.latitude) < 0.5)
+                        ilons = (np.abs(globaldata.datasets[key].page.lon -
+                                        self.pars.longitude) < 0.5)
+                        
+                        # if we have a time dimension, then we look up the required timesteps during the class simulation
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            itimes = ((globaldata.datasets[key].page.time >= \
+                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
+
+                            # In case we didn't find any correct time, we take the
+                            # closest one.
+                            if np.sum(itimes) == 0.:
+
+
+                                classdatetimemean = \
+                                    np.datetime64(self.pars.datetime_daylight + \
+                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
+                                                ))
+
+                                dstimes = globaldata.datasets[key].page.time
+                                time = dstimes.sel(time=classdatetimemean,method='nearest')
+                                itimes = (globaldata.datasets[key].page.time ==
+                                          time)
+                                
+                        else:
+                            # we don't have a time coordinate so it doesn't matter
+                            # what itimes is
+                            itimes = 0
+
+                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
+
+                        # over which dimensions we take a mean:
+                        dims = globaldata.datasets[key].page[key].dims
+                        namesmean = list(dims)
+                        namesmean.remove('lev')
+                        idxmean = [dims.index(namemean) for namemean in namesmean]
+                        
+                        value = \
+                        globaldata.datasets[key].page[key].isel(time=itimes,
+                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
+
+                        # Ideally, source should be equal to the datakey of globaldata.library 
+                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
+                        #  but therefore the globaldata class requires a revision to make this work
+                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
+
+                else:
+                    # this procedure is for reading the ground fields (2d space). 
+                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
+
+    
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+    
+                       # first, we browse to the correct file
+                       #print(key)
+                       globaldata.datasets[key].browse_page(time=classdatetime)
+    
+                    if globaldata.datasets[key].page is not None:
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - self.pars.latitude))
+                        ilat = np.where((DIST) == np.min(DIST))[0][0]
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - self.pars.longitude))
+                        ilon = np.where((DIST) == np.min(DIST))[0][0]
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - (self.pars.latitude + 0.5)))
+                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmax = ilat
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - (self.pars.longitude  + 0.5)))
+                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmax = ilon
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lat.values\
+                                - (self.pars.latitude - 0.5)))
+                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmin = ilat
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lon.values\
+                                - (self.pars.longitude  - 0.5)))
+                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmin = ilon        
+                        
+                        if ilatmin < ilatmax:
+                            ilatrange = range(ilatmin,ilatmax+1)
+                        else:
+                            ilatrange = range(ilatmax,ilatmin+1)
+                            
+                        if ilonmin < ilonmax:
+                            ilonrange = range(ilonmin,ilonmax+1)
+                        else:
+                            ilonrange = range(ilonmax,ilonmin+1)     
+                            
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                            
+                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                idatetime += 1
+                            
+                            classdatetimeend = np.datetime64(\
+                                                             self.pars.datetime +\
+                                                             dt.timedelta(seconds=self.pars.runtime)\
+                                                            ) 
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
+                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
+                                idatetimeend -= 1
+                            idatetime = np.min((idatetime,idatetimeend))
+                            #for gleam, we take the previous day values
+                            if key in ['wg', 'w2']:
+                                idatetime = idatetime - 1
+                                idatetimeend = idatetimeend - 1
+
+                            # in case of soil temperature, we take the exact
+                            # timing (which is the morning)
+                            if key in ['Tsoil','T2']:
+                                idatetimeend = idatetime
+                            
+                            idts = range(idatetime,idatetimeend+1)
+                            
+                            count = 0
+                            self.__dict__[key] = 0.
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    for iidts in idts:
+                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
+                                        count += 1
+                            value = value/count
+                            self.update(source='globaldata',pars={key:value.item()})
+                                
+                        else:
+                                
+                            count = 0
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
+                                    count += 1
+                            value = value/count                        
+
+                            self.update(source='globaldata',pars={key:value.item()})
+
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            self.logger.debug('also update LAI based on LAIpixel and cveg') 
+            # I suppose LAI pixel is already determined in the previous
+            # procedure. Anyway...
+            key = 'LAIpixel'
+
+            if globaldata.datasets[key].page is not None:
+                # first, we browse to the correct file that has the current time
+                if 'time' in list(globaldata.datasets[key].page[key].dims):
+                    globaldata.datasets[key].browse_page(time=classdatetime)
+            
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - self.pars.latitude))
+                ilat = np.where((DIST) == np.min(DIST))[0][0]
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - self.pars.longitude))
+                ilon = np.where((DIST) == np.min(DIST))[0][0]
+                 
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude + 0.5)))
+                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmax = ilat
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values \
+                        - (self.pars.longitude  + 0.5)))
+                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmax = ilon
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude - 0.5)))
+                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmin = ilat
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - (self.pars.longitude  - 0.5)))
+                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmin = ilon        
+                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                
+                
+                if ilatmin < ilatmax:
+                    ilatrange = range(ilatmin,ilatmax+1)
+                else:
+                    ilatrange = range(ilatmax,ilatmin+1)
+                    
+                if ilonmin < ilonmax:
+                    ilonrange = range(ilonmin,ilonmax+1)
+                else:
+                    ilonrange = range(ilonmax,ilonmin+1)           
+                
+                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
+                LAIpixel = 0.
+                count = 0
+                for iilat in [ilat]: #ilatrange
+                    for iilon in [ilon]: #ilonrange
+                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
+                        
+                                        
+                        # if np.isnan(tarray[idatetime]):
+                        #     print("interpolating GIMMS LAIpixel nan value")
+                        #     
+                        #     mask = np.isnan(tarray)
+                        #     
+                        #     #replace each nan value with a interpolated value
+                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+                        #         
+                        #     else:
+                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
+                    
+                        #         tarray *= np.nan 
+                        
+                        count += 1
+                        #tarray_res += tarray
+                LAIpixel = LAIpixel/count
+                
+                count = 0
+                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
+  
+                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
+                #print('LAIpixel:',self.__dict__['LAIpixel'])
+                #print('cveg:',self.__dict__['cveg'])
+                
+                # finally, we rescale the LAI according to the vegetation
+                # fraction
+                value = 0. 
+                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
+                   value =self.pars.LAIpixel/self.pars.cveg
+                else:
+                    # in case of small vegetation fraction, we take just a standard 
+                    # LAI value. It doesn't have a big influence anyway for
+                    # small vegetation
+                    value = 2.
+                #print('LAI:',self.__dict__['LAI'])
+                self.update(source='globaldata',pars={'LAI':value}) 
+
+
+        # in case we have 'sp', we also calculate the 3d pressure fields at
+        # full level and half level
+        if ('sp' in keys) and ('sp' in self.pars.__dict__):
+            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
+
+            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # hydrostatic thickness of each model layer
+            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
+            # # dz = rhodz/(R * T / pfull)
+
+
+            # # subsidence multiplied by density. We calculate the subsidence of
+            # # the in class itself
+            # wrho = np.zeros_like(phalf)
+            # wrho[-1] = 0. 
+
+            # for ihlev in range(0,wrho.shape[0]-1):
+            #     # subsidence multiplied by density is the integral of
+            #     # divergences multiplied by the layer thicknessies
+            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
+            #                     self.air_ac['divU_y'][ihlev:]) * \
+            #                    delpdgrav[ihlev:]).sum()
+
+
+            
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'p':list(pfull)}))
+            self.update(source='globaldata',\
+                        air_ach=pd.DataFrame({'p':list(phalf)}))
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
+            # self.update(source='globaldata',\
+            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
+
+    def check_source(self,source,check_only_sections=None):
+        """ this procedure checks whether data of a specified source is valid.
+
+        INPUT:
+            source: the data source we want to check
+            check_only_sections: a string or list with sections to be checked
+        OUTPUT:
+            returns True or False
+        """
+
+        # we set source ok to false as soon as we find a invalid input
+        source_ok = True
+
+        # convert to a single-item list in case of a string
+        check_only_sections_def = (([check_only_sections]) if \
+                                   type(check_only_sections) is str else \
+                                    check_only_sections)
+                                  
+        if source not in self.sources.keys():
+            self.logger.info('Source '+source+' does not exist')
+            source_ok = False
+
+        for sectiondatakey in self.sources[source]:                             
+            section,datakey = sectiondatakey.split(':')                         
+            if ((check_only_sections_def is None) or \
+                (section in check_only_sections_def)):                          
+                checkdatakeys = []
+                if type(self.__dict__[section]) is pd.DataFrame:
+                    checkdata = self.__dict__[section]
+                elif type(self.__dict__[section]) is model_input:
+                    checkdata = self.__dict__[section].__dict__
+
+                if (datakey not in checkdata):                              
+                    # self.logger.info('Expected key '+datakey+\
+                    #                  ' is not in parameter input')                        
+                    source_ok = False                                           
+                elif (checkdata[datakey] is None) or \
+                     (pd.isnull(checkdata[datakey]) is True):                    
+        
+                    # self.logger.info('Key value of "'+datakey+\
+                    #                  '" is invalid: ('+ \
+                    # str(self.__dict__[section].__dict__[datakey])+')')         
+                    source_ok = False
+
+        return source_ok
+
+    def check_source_globaldata(self):
+        """ this procedure checks whether all global parameter data is
+        available, according to the keys in the self.sources"""
+
+        source_globaldata_ok = True
+
+        #self.get_values_air_input()
+
+        # and now we can get the surface values
+        #class_settings = class4gl_input()
+        #class_settings.set_air_input(input_atm)
+        
+        # we only allow non-polar stations
+        if not (self.pars.lat <= 60.):
+            source_globaldata_ok = False
+            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+        
+        # check lat and lon
+        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
+            source_globaldata_ok = False
+            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
+            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
+        else:
+            # we only check the ground parameter data (pars section). The 
+            # profile data (air_ap section) are supposed to be valid in any 
+            # case.
+            source_ok = self.check_source(source='globaldata',\
+                                          check_only_sections=['air_ac',\
+                                                               'air_ap',\
+                                                               'pars'])
+            if not source_ok:
+                source_globaldata_ok = False
+        
+            # Additional check: we exclude desert-like
+            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
+                source_globaldata_ok = False
+                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
+                source_globaldata_ok = False
+                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
+            elif self.pars.cveg < 0.02:
+                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
+                source_globaldata_ok = False
+
+        return source_globaldata_ok
+
+
class c4gli_iterator():
    """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
    
        for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
    """
    def __init__(self,file):
        # take file as IO stream
        self.file = file
        # NOTE: yaml.load_all is *lazy* -- it only consumes the stream when
        # the generator is advanced, so the two readline() calls below still
        # read the first two lines of the file before any yaml document is
        # parsed. Do not reorder these statements.
        # NOTE(review): yaml.load_all without an explicit Loader is unsafe on
        # untrusted input -- consider safe_load_all; confirm file provenance.
        self.yaml_generator = yaml.load_all(file)
        self.current_dict = {}
        # a single class4gl_input instance is reused for every record
        self.current_class4gl_input = class4gl_input()
        separator = self.file.readline() # this is just dummy
        self.header = file.readline()
        # fail fast on records written by an incompatible format version
        if self.header != '# CLASS4GL record; format version: 0.1\n':
            raise NotImplementedError("Wrong format version: '"+self.header+"'")
    def __iter__(self):
        return self
    def __next__(self):
        # advance to the next yaml document (StopIteration is propagated at
        # end of file) and load it into the reused class4gl_input object
        self.current_dict = self.yaml_generator.__next__()
        self.current_class4gl_input.load_yaml_dict(self.current_dict)
        return self.current_class4gl_input
+
+
+
+#get_cape and lift_parcel are adapted from the SkewT package
+    
class gl_dia(object):
    """ Mixin providing boundary-layer diagnostics for a model run object
        that exposes self.input, self.out and the p_pro/theta_pro profiles. """

    def get_lifted_index(self, timestep=-1):
        """ Compute the lifted index at the given output timestep (default:
            the last one) and store the result in self.LI. """
        surface_pressure = self.input.Ps
        temperature_2m = self.out.T2m[timestep]
        specific_humidity = self.out.q[timestep]
        # delegates to the module-level get_lifted_index helper
        self.LI = get_lifted_index(
            surface_pressure,
            temperature_2m,
            specific_humidity,
            self.p_pro,
            self.theta_pro,
            endp=50000.,
        )
+    
+#from SkewT
+#def get_lcl(startp,startt,startdp,nsteps=101):
+#    from numpy import interp
+#    #--------------------------------------------------------------------
+#    # Lift a parcel dry adiabatically from startp to LCL.
+#    # Init temp is startt in K, Init dew point is stwrtdp,
+#    # pressure levels are in Pa    
+#    #--------------------------------------------------------------------
+#
+#    assert startdp<=startt
+#
+#    if startdp==startt:
+#        return np.array([startp]),np.array([startt]),np.array([startdp]),
+#
+#    # Pres=linspace(startp,60000.,nsteps)
+#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
+#
+#    # Lift the dry parcel
+#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
+#    # Mixing ratio isopleth
+#    starte=VaporPressure(startdp)
+#    startw=MixRatio(starte,startp)
+#    e=Pres*startw/(.622+startw)
+#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
+#
+#    # Solve for the intersection of these lines (LCL).
+#    # interp requires the x argument (argument 2)
+#    # to be ascending in order!
+#    P_lcl=interp(0.,T_iso-T_dry,Pres)
+#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
+#
+#    # # presdry=linspace(startp,P_lcl)
+#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
+#
+#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
+#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
+#
+#    return P_lcl,T_lcl
+
+
+
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """ Calculate mixed-layer height from temperature and wind speed profiles
        with the bulk-Richardson-number method.

        Input:
            HAGL: height coordinates above ground level [m]
            THTV: virtual potential temperature profile [K]
            WSPD: wind speed profile [m/s]
            RiBc: critical bulk Richardson number for the best guess
            RiBce: alternative critical value used for the error estimate

        Output:
            BLH: best-guess mixed-layer height [m]
            BLHu: upper limit of mixed-layer height [m]
            BLHl: lower limit of mixed-layer height [m]

        All three outputs are NaN when RiB never exceeds the critical value
        or the profile contains no valid data.
    """
    eps = 2.  # security limit [m] around the interpolation levels

    # reference virtual potential temperature: first non-NaN level
    ivalid = np.where(~np.isnan(THTV))[0]
    if len(ivalid) > 0:
        THTV_0 = THTV[ivalid[0]]
    else:
        THTV_0 = np.nan

    # bulk Richardson number; wind speed is clipped from below to avoid
    # division by zero in calm conditions
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.

    # best guess: interpolate linearly to the height where RiB crosses RiBc
    # NOTE(review): if the very first level already exceeds RiBc, index i-1
    # wraps around to the profile top -- confirm such profiles cannot occur
    icross = np.where(RiB > RiBc)[0]
    if len(icross) > 0:
        i = icross[0]
        BLH = (HAGL[i] - HAGL[i-1])/(RiB[i] - RiB[i-1]) * (RiBc - RiB[i-1]) + HAGL[i-1]

        # possible error is derived from the height levels used for the
        # interpolation
        BLHu = np.max([BLH, HAGL[i] - eps])
        BLHl = np.min([BLH, HAGL[i-1] + eps])

        # alternative BLH based on the other critical Richardson number
        icross = np.where(RiB > RiBce)[0]
        if len(icross) > 0:
            i = icross[0]
            # bugfix: interpolate towards RiBce here (the original used RiBc,
            # contradicting the stated purpose of this branch and possibly
            # placing BLHa outside the bracketing interval)
            BLHa = (HAGL[i] - HAGL[i-1])/(RiB[i] - RiB[i-1]) * (RiBce - RiB[i-1]) + HAGL[i-1]
            BLHu = np.max([BLHu, HAGL[i] - eps])
            BLHl = np.min([BLHl, HAGL[i-1] + eps])

            # widen the limits with the spread between both estimates
            BLHu = np.max([BLHu, BLH + abs(BLH - BLHa)])
            BLHl = np.min([BLHl, BLH - abs(BLH - BLHa)])
        else:
            BLH,BLHu,BLHl = np.nan, np.nan, np.nan
    else:
        BLH,BLHu,BLHl = np.nan, np.nan, np.nan

    return BLH,BLHu,BLHl
+
+
+
+#from class
+def get_lcl(startp,startt,startqv):
+        # Find lifting condensation level iteratively
+    lcl = 20.
+    RHlcl = 0.5
+    
+    itmax = 30
+    it = 0
+    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it= self.DT) and (self.DT.year == DT.year)):
             self.DT = DT
@@ -184,7 +184,7 @@ def find_next(self,get_atm=False):
        
 
 
-    def get_values_air_input(self):
+    def get_values_air_input(self,latitude=None,longitude=None):
 
         # for iDT,DT in enumerate(DTS):
         
@@ -208,6 +208,7 @@ def get_values_air_input(self):
         #PARAMS.insert(0,'date',DT)
 
         PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
+        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
         
         THTV = np.array(ONE_COLUMN.THTV,dtype='float')
         #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
@@ -232,6 +233,9 @@ def get_values_air_input(self):
         #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
 
         BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
+        BLHV = np.max((BLHV,10.))
+        BLHVu = np.max((BLHVu,10.))
+        BLHVd = np.max((BLHVd,10.))
         #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
 
         #security values for mixed-layer jump values dthetav, dtheta and dq
@@ -245,23 +249,42 @@ def get_values_air_input(self):
             ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
             
             listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTV','THTA','QABS']):
-                if len(np.where(HAGL <= BLH)[0]) >= 4:
-                    meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL <= BLH][2:-1],dtype=np.float))
+            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
+                
+                # get index of lowest valid observation. This seems to vary
+                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
+                if len(idxvalid) > 0:
+                    #print('idxvalid',idxvalid)
+                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
+                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
+                    else:
+                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
                 else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][1:2],dtype=np.float)
+                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
+                    #print(col,meanabl)
+               
+                
+                # if col == 'PRES':
+                #     meanabl =  
             
                 new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
                 #THTVM = np.nanmean(THTV[HAGL <= BLH])
                 #print("new_pro_h",new_pro_h)
                 # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV']:
+                if col in ['THTA','THTV',]:
                     #for moisture
                     #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
                     #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
                     if len(listHAGLNEW) > 4:
                         #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta = np.max((0.1,(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) )
+                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
+                        dtheta = np.max((0.1,dtheta_pre))
+                        #meanabl = meanabl - (dtheta - dtheta_pre)
+                        #print('dtheta_pre',dtheta_pre)
+                        #print('dtheta',dtheta)
+                        #print('meanabl',meanabl)
+                        #stop
+                        
                     else:
                         dtheta = np.nan
                 else:
@@ -283,9 +306,18 @@ def get_values_air_input(self):
             
         # we just make a copy of the fields, so that it can be read correctly by CLASS 
         for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(ONE_COLUMNNEW[-1].columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
+            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+            
+            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
+        
+            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
+            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
+
 
         # assign fields adopted by CLASS
         if self.mode == 'o': #original 
@@ -299,14 +331,56 @@ def get_values_air_input(self):
         else:
             PARAMS.insert(0,'h',   BLHV)
             
-        PARAMS.insert(0,'day', PARAMS['datetime'][0].day)
-        PARAMS.insert(0,'tstart', PARAMS['datetime'][0].hour)
-        PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-        PARAMS.insert(0,'lon', np.float(PARAMS['Station longitude'][0]))
-        PARAMS['ldatetime'] = PARAMS.datetime.value - dt.timedelta(hours=PARAMS.lon.value/360.*24.) 
-        PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-        PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-        PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+
+        try:
+            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
+            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
+        except:
+            print("could not convert latitude coordinate")
+            PARAMS.insert(0,'latitude', np.nan)
+            PARAMS.insert(0,'lat', np.nan)
+        try:
+            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
+            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwich) 
+            PARAMS.insert(0,'lon', 0.)
+        except:
+            print("could not convert longitude coordinate")
+            PARAMS.insert(0,'longitude', np.nan)
+            PARAMS.insert(0,'lon', 0.)
+
+        if latitude is not None:
+            print('overwriting latitude with specified value')
+            PARAMS['latitude'] = np.float(latitude)
+            PARAMS['lat'] = np.float(latitude)
+        if longitude is not None:
+            print('overwriting longitude with specified value')
+            PARAMS['longitude'] = np.float(longitude)
+        try:
+            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in Greenwich hence taking lon=0)
+            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
+            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            # This is the nearest datetime when sun is up (for class)
+            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
+            # apply the same time shift for UTC datetime
+            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
+            
+        except:
+            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
+            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
+            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
+            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
+
+        
+
+        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
+        # as we are forcing lon equal to zero this is is expressed in local suntime
+        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
+
            
         ONE_COLUMNb = ONE_COLUMNNEW[0]
         ONE_COLUMNu = ONE_COLUMNNEW[1]
@@ -325,8 +399,7 @@ def get_values_air_input(self):
 
         BLHVe = abs(BLHV - BLHVu)
         BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-        PARAMS.insert(0,'OK',((BLHVe < 100.) and (len(np.where(~pd.isnull(ONE_COLUMN['THTV'][ONE_COLUMN['HAGL'] > 5000.]))[0]) >0 )))
-        
+
         #PARAMS.insert(0,'dq',0.)
         
         PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
@@ -335,42 +408,66 @@ def get_values_air_input(self):
         #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
         
         if self.mode == 'o': #original 
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
-            
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
         elif self.mode == 'b': # best BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNb['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNb['THTA'])[2]-list(ONE_COLUMNb['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNb['QABS'])[2]-list(ONE_COLUMNb['QABS'])[1]))
-            
-        elif self.mode == 'u':# upper BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNu['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNu['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNu['THTA'])[2]-list(ONE_COLUMNu['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNu['QABS'])[2]-list(ONE_COLUMNu['QABS'])[1]))
-            
-        elif self.mode == 'd': # lower BLH
-            PARAMS.insert(0,'theta',np.float(list(ONE_COLUMNd['THTA'])[1]))
-            PARAMS.insert(0,'q',np.float(list(ONE_COLUMNd['QABS'])[1]))
-            PARAMS.insert(0,'dtheta',np.float(list(ONE_COLUMNd['THTA'])[2]-list(ONE_COLUMNd['THTA'])[1]))
-            PARAMS.insert(0,'dq',np.float(list(ONE_COLUMNd['QABS'])[2]-list(ONE_COLUMNd['QABS'])[1]))
-            
+            USE_ONECOLUMN = ONE_COLUMNb
+            BLCOLUMN = ONE_COLUMNb
+        elif self.mode == 'u': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNu
+            BLCOLUMN = ONE_COLUMNu
+        elif self.mode == 'd': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNd
+            BLCOLUMN = ONE_COLUMNd
+        else:
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb
+
+        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
+        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
+        # print(BLCOLUMN['HAGL'][lt6000])
+        # print(BLCOLUMN['HAGL'][lt2500])
+        # 
+        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
+
+        #print(BLCOLUMN['HAGL'][lt2500])
+        PARAMS.insert(0,'OK',
+                      ((BLHVe < 200.) and 
+                       ( len(np.where(lt6000)[0]) > 5) and
+                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate reaches a height of at least 6000 m
+                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
+                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
+                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
+                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
+                      )
+                     )
+
+        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
+        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
+        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
+        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
+        
         
         PARAMS = PARAMS.T
 
         
         self.PARAMS = PARAMS
-        if self.mode == 'o': #original 
-            self.ONE_COLUMN = ONE_COLUMN
-        elif self.mode == 'b': # best BLH
-            self.ONE_COLUMN = ONE_COLUMNb
-        elif self.mode == 'u':# upper BLH
-            self.ONE_COLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # lower BLH
-            self.ONE_COLUMN=ONE_COLUMNd
-        else:
-            self.ONE_COLUMN = ONE_COLUMN
+        self.ONE_COLUMN = USE_ONECOLUMN
+        # if self.mode == 'o': #original 
+        #     self.ONE_COLUMN = ONE_COLUMN
+        # elif self.mode == 'b': # best BLH
+        #     self.ONE_COLUMN = ONE_COLUMNb
+        # elif self.mode == 'u':# upper BLH
+        #     self.ONE_COLUMN = ONE_COLUMNu
+        # elif self.mode == 'd': # lower BLH
+        #     self.ONE_COLUMN=ONE_COLUMNd
+        # else:
+        #     self.ONE_COLUMN = ONE_COLUMN
 
diff --git a/data_global.py b/data_global.py
new file mode 100644
index 0000000..9c3d9b5
--- /dev/null
+++ b/data_global.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: Hendrik Wouters
+
+Purpose: provides class routines for ground and atmosphere conditions used for
+the CLASS miced-layer model
+
+Usage:
+    from data_global import data_global
+    from class4gl import class4gl_input
+    from data_soundings import wyoming
+
+    # create a data_global object and load initial data pages
+    globaldata = data_global()
+    globaldata.load_datasets()
+    # create a class4gl_input object
+    c4gli = class4gl_input()
+    # Initialize it with profile data. We need to do this first. Actually this
+    # will set the coordinate parameters (datetime, latitude, longitude) in
+    # class4gl_input.pars.__dict__, which is required to read point data from
+    # the data_global object.
+
+    # open a Wyoming stream for a specific station
+    wy_strm = wyoming(STNM=91376)
+    # load the first profile
+    wy_strm.find_first()
+    # load the profile data into the class4gl_input object
+    c4gli.get_profile_wyoming(wy_strm)
+    
+    # and finally, read the global input data for this profile
+    c4gli.get_global_input(globaldata)
+
+
+"""
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+#import pynacolada as pcd
+import pandas as pd
+import xarray as xr
+import os
+import glob
+import sys
+import errno
+import warnings
+import logging
+
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
class book(object):
    """ A dataset spread over multiple files. It has a similar purpose to
    xarray.open_mfdataset, but only one file (called the current 'page') is
    loaded at a time. This saves precious memory. """

    def __init__(self,fn,concat_dim = None,debug_level=None):
        """ Open a multi-file dataset.

            Input:
                fn: glob pattern matching the files ('pages') of the dataset
                concat_dim: dimension along which the pages are stacked;
                    defaults to the first dimension of the first page
                debug_level: optional logging level for this object's logger

            Raises:
                FileNotFoundError: when no file matches fn.
        """
        self.logger = logging.getLogger('book')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        # filenames are expanded as a list and sorted by filename
        self.pages = glob.glob(fn)
        self.pages.sort()
        # an empty list means that no file matches fn; raise in that case
        if len(self.pages) == 0:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
        self.ipage = -1
        self.page = None
        self.renames = {} # renaming applied each time a file is opened
        self.set_page(0)

        # we consider that the outer dimension is the one we concatenate
        # (bugfix: the original contained a redundant double assignment here)
        if concat_dim is not None:
            self.concat_dim = concat_dim
        else:
            self.concat_dim = list(self.page.dims.keys())[0]

    def sel(self, *args, **kwargs):
        """ Wrap xarray's sel command: when selecting along the
            concatenation dimension, browse to the page containing the
            requested value first, then delegate to the current page.

            bugfix: 'self' was missing from the signature and 'page' was
            referenced without 'self.', so any call raised a NameError.
        """
        for dim in kwargs.keys():
            if dim == self.concat_dim:
                self.browse_page(**{dim: kwargs[dim]})
        return self.page.sel(*args, **kwargs)

    def set_renames(self,renames):
        """ Replace the renaming dictionary. The current page (if any) is
            first converted back to its original names, after which the new
            renaming is applied. """
        reverse_renames = dict((v,k) for k,v in self.renames.items())
        self.renames = renames
        if self.page is not None:
            self.page = self.page.rename(reverse_renames)
            self.page = self.page.rename(self.renames)

    def set_page(self,ipage,page=None):
        """ this sets the right page according to ipage:
                - We do not switch the page if we are already at the right one
                - we set the correct renamings (level -> lev, latitude -> lat,
                etc.)
                - The dataset is also squeezed.
        """
        if ((ipage != self.ipage) or (page is not None)):

            if self.page is not None:
                self.page.close()

            self.ipage = ipage
            if page is not None:
                self.page = page
            elif self.ipage == -1:
                self.page = None
            else:
                self.logger.info("Switching to page "+str(self.ipage)+': '\
                                 +self.pages[self.ipage])
                self.page = xr.open_dataset(self.pages[self.ipage])

            # do some final corrections to the dataset to make them uniform
            if self.page is not None:
                if 'latitude' in self.page.dims:
                    self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
                if 'level' in self.page.dims:
                    self.page = self.page.rename({'level':'lev'})

                self.page = self.page.rename(self.renames)
                self.page = self.page.squeeze(drop=True)

    def browse_page(self,rewind=2,**args):
        """ Find the page whose coordinate range contains the requested
            value(s) and make it the current page; page -1 (no page) is set
            when no page matches.

            Input:
                rewind: number of pages to step back before starting the
                    (wrapping) forward search
                args: {dimension: value} pairs to locate

            NOTE: at the moment, this is only tested with files that are
            stacked according to the time dimension.
        """
        if self.ipage == -1:
            self.set_page(0)

        found = False
        iipage = 0
        startipage = self.ipage - rewind
        while (iipage < len(self.pages)) and not found:
            ipage = (iipage+startipage) % len(self.pages)
            # bugfix: this flag must be initialised once per candidate page;
            # it was previously (re)set inside the dimension loop, so only
            # the last dimension's range check was taken into account
            this_file = True
            for dim in args.keys():
                # the coordinates of each page are cached in self.dims so
                # that they need not be reloaded on every search
                if 'dims' not in self.__dict__:
                    self.dims = {}
                if dim not in self.dims.keys():
                    self.dims[dim] = [None]*len(self.pages)

                if self.dims[dim][ipage] is None:
                    self.logger.info('Loading coordinates of dimension "'+dim+\
                                     '" of page "' +str(ipage)+'".')
                    self.set_page(ipage)
                    self.dims[dim][ipage] = self.page[dim].values

                # coordinate range of this page, extended by half a grid
                # spacing on either side
                # NOTE(review): assumes every page has at least two
                # coordinate values along 'dim' -- confirm for all datasets
                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.

                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
                    this_file = False

            if this_file:
                found = True
                self.set_page(ipage)
            else:
                iipage += 1

        if not found:
            self.logger.info("Page not found. Setting to page -1")
            self.set_page(-1)

        if self.ipage != -1:
            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
        else:
            self.logger.debug("I'm now at page "+ str(self.ipage))
+
+
+class data_global(object):
+    """Registry/loader for global gridded input datasets (GLEAM, ERA-Interim,
+    IGBP-DIS, GLAS, DSMW, GIMMS, ...).
+
+    Each key of ``sources`` is '<DATAKEY>:<vardest>' and each value is a file
+    pattern, optionally suffixed ':<varsource>' naming the variable inside the
+    netCDF file.  Opened sources are cached in ``self.library`` keyed by
+    filename (or by a derived cache key) so the same file is never opened
+    twice; ``self.datasets``/``self.datarefs`` map destination variable names
+    to the cached entries.
+
+    NOTE(review): this class uses ``book``, ``xr`` (xarray), ``logging`` and
+    ``warnings``, none of which appear in the imports visible in this chunk --
+    confirm they are imported/defined elsewhere in the module.
+    """
+    def __init__(self,sources= {
+        # # old gleam
+        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
+        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
+        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
+        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
+        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
+        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
+        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
+        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
+        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
+        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
+        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
+        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
+        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
+        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
+        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
+        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
+        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
+        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
+        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
+        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
+        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
+        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
+        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
+        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
+        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
+        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
+        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
+        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
+        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
+        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
+        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
+        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
+        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
+        },debug_level=None):
+        """Set up empty caches and a logger.
+
+        Parameters
+        ----------
+        sources : dict, optional
+            Mapping '<DATAKEY>:<vardest>' -> 'path_pattern[:varsource]'.
+            NOTE(review): this default is a mutable dict shared by all
+            instances; mutating ``self.sources`` in place would leak state
+            between instances -- pass a fresh dict to be safe.
+        debug_level : int or None, optional
+            Logging level forwarded to the 'data_global' logger and to each
+            ``book`` instance; None keeps the logger's inherited level.
+        """
+        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
+        self.sources = sources
+        self.datarefs = {}
+        self.datasets = {}
+        # default reference time; presumably start of the GIMMS record -- TODO confirm
+        self.datetime = dt.datetime(1981,1,1)
+
+        self.logger = logging.getLogger('data_global')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+        self.debug_level = debug_level
+
+        warnings.warn('omitting pressure field p and advection')
+
+    def in_library(self,fn):
+        """Return True (with a warning print) if *fn* is already cached in
+        ``self.library``, else False."""
+        if fn not in self.library.keys():
+            return False
+        else:
+            print("Warning: "+fn+" is already in the library.")
+            return True
+
+    def add_to_library(self,fn):
+        """Open file pattern *fn* as a ``book`` (time-concatenated multi-file
+        dataset) and cache it under key *fn*; no-op if already cached."""
+        if not self.in_library(fn):
+            print("opening: "+fn)
+            self.library[fn] = \
+                book(fn,concat_dim='time',debug_level=self.debug_level)
+
+            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
+            #if 'latitude' in self.library[fn].variables:
+            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+
+    # default procedure for loading datasets into the globaldata library
+    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
+        """Register variables of *input_fn* under destination names.
+
+        ``varssource``/``varsdest`` may be a str or list; they are zipped
+        pairwise.  When source and destination names differ, a separate
+        ``book`` with a rename mapping is cached under the composite key
+        'input_fn.varsource.vardest'.
+        """
+        if type(varssource) is str:
+            varssource = [varssource]
+        if type(varsdest) is str:
+            varsdest = [varsdest]
+
+        self.add_to_library(input_fn)
+
+        if varssource is None:
+            varssource = []
+            # NOTE(review): self.sources values are path strings, which have
+            # no .variables attribute -- this branch (varssource is None)
+            # looks broken; possibly self.library[input_fn] was intended.
+            for var in self.sources[input_fn].variables:
+                avoid = \
+                ['lat','lon','latitude','longitude','time','lev','level']
+                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
+                    varssource.append(var)
+
+        if varsdest is None:
+            varsdest = varssource
+
+        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
+        for ivar,vardest in enumerate(varsdest):
+            varsource = varssource[ivar]
+            print('setting '+vardest+' as '+varsource+' from '+input_fn)
+
+            if vardest in self.datarefs.keys():
+                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
+            #self.add_to_library(fn,varsource,vardest)
+            if vardest != varsource:
+                libkey = input_fn+'.'+varsource+'.'+vardest
+                if libkey not in self.library.keys():
+                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
+                    self.library[libkey] = book(input_fn,\
+                                                debug_level=self.debug_level)
+                    self.library[libkey].set_renames({varsource: vardest})
+
+                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
+                self.datasets[vardest] =self.library[self.datarefs[vardest]]
+            else:
+                self.datarefs[vardest] = input_fn
+                self.datasets[vardest] =self.library[self.datarefs[vardest]]
+
+            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
+            #     print('Warning: '+ vardest "not in " + input_fn)
+
+
+
+    def load_datasets(self,sources = None,recalc=0):
+        """Parse every '<DATAKEY>:<vardest>' entry of *sources* (defaults to
+        ``self.sources``) and dispatch each to :meth:`load_dataset`.
+
+        ``recalc`` thresholds control which cached intermediate files are
+        recomputed (higher value = recompute more stages).
+        """
+        if sources is None:
+            sources = self.sources
+        for key in sources.keys():
+            #datakey,vardest,*args = key.split(':')
+            datakey,vardest = key.split(':')
+            #print(datakey)
+
+            fnvarsource = sources[key].split(':')
+            if len(fnvarsource) > 2:
+                #fn,varsource,*fnargs = fnvarsource
+                # NOTE(review): this 3-way unpack raises ValueError when the
+                # value contains more than two ':' separators (the
+                # commented-out starred unpacking above would not).
+                fn,varsource,fnargs = fnvarsource
+                fnargs = [fnargs]
+            elif len(fnvarsource) > 1:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource = fnvarsource
+                fnargs = []
+            else:
+                fn = sources[key]
+                varsource = vardest
+            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
+
+    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
+            """Load one dataset, with per-DATAKEY special handling.
+
+            CERES/GLEAM/ERAINT/GIMMS use the default loader; IGBPDIS derives
+            surface albedo 'alpha' from land-cover fractions; GLAS rescales
+            canopy height to roughness lengths; DSMW derives soil texture,
+            soil type and force-restore soil parameters, caching each heavy
+            intermediate as a netCDF file whose name embeds ':'-separated
+            cache keys (unusual but deliberate file naming).
+
+            ``recalc`` gates each cached stage: a stage is recomputed when its
+            cache file is missing or when recalc exceeds the stage threshold.
+            """
+            # the default way of loading a 2d dataset
+            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
+                self.load_dataset_default(fn,varsource,vardest)
+            elif datakey == 'IGBPDIS':
+                if vardest == 'alpha':
+                    # albedo is a weighted mix of water/bare/herbaceous/tall-canopy fractions
+                    ltypes = ['W','B','H','TC']
+                    for ltype in ltypes:
+                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
+                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
+
+
+                    # landfr = {}
+                    # for ltype in ['W','B','H','TC']:
+                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
+
+
+
+                    keytemp = 'alpha'
+                    fnkeytemp = fn+':IGBPDIS:alpha'
+                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
+                        self.library[fnkeytemp]  = book(fnkeytemp,
+                                                        debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+                    else:
+                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
+                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
+                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
+                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
+                        # NOTE(review): np.float is deprecated/removed in NumPy >= 1.20; use float
+                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
+                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
+                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
+
+                        # albedo per land type: water, tall canopy, herbaceous, bare -- TODO cite source
+                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+
+                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
+                        for ltype in ltypes:
+                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
+
+                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
+                        print('writing file to: '+fnkeytemp)
+                        # NOTE(review): shell rm on an unquoted path; os.remove would be safer
+                        os.system('rm '+fnkeytemp)
+                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
+                        self.library[fnkeytemp].close()
+
+
+                        # reopen the freshly written cache file as the live source
+                        self.library[fnkeytemp]  = \
+                            book(fnkeytemp,debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+
+
+                else:
+                    self.load_dataset_default(fn,varsource,vardest)
+
+
+            elif datakey == 'GLAS':
+                self.load_dataset_default(fn,varsource,vardest)
+                # canopy height -> roughness length: z0m = h/10 (min 1 cm), z0h = h/100 (min 1 mm)
+                if vardest == 'z0m':
+                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
+                elif vardest == 'z0h':
+                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
+            elif datakey == 'DSMW':
+
+
+                # Procedure of the thermal properties:
+                # 1. determine soil texture from DSMW/10.
+                # 2. soil type with look-up table (according to DWD/EXTPAR)
+                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
+                #    with parameter look-up table from Noilhan and Planton (1989).
+                #    Note: The look-up table is inspired on DWD/COSMO
+
+                # TODO: implement inheritance, so that the preliminary output of DSMW or any other dataset can be calculated first
+
+
+
+                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating every time
+                self.load_dataset_default(fn,'DSMW')
+                print('calculating texture')
+                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
+                TEMP  = {}
+                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
+                TEMP3 = {}
+                for SPKEY in SPKEYS:
+
+
+                    keytemp = SPKEY+'_values'
+                    fnoutkeytemp = fnout+':DSMW:'+keytemp
+                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
+                        # NOTE(review): cache file is fnoutkeytemp (wildcard
+                        # stripped) but the library key uses fn; the two
+                        # strings differ whenever fn contains '*' -- confirm
+                        # intended.
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                    else:
+                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
+                        # NOTE(review): np.int is deprecated/removed in NumPy >= 1.20; use int
+                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
+                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+                        # for faster computation, we need to get it to memory out of Dask.
+                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
+                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
+
+                # yes, I know I only check the last file.
+                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
+                    # scatter the per-soil-unit lookup values onto the DSMW grid
+                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
+                        # NOTE(review): SPKEY here is a leftover from the previous loop (debug print)
+                        print('idx',idx,SPKEY)
+                        SEL = (TEMP2 == idx)
+                    #     print(idx,len(TEMP3))
+                        for SPKEY in SPKEYS:
+                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
+
+                    for SPKEY in SPKEYS:
+                        keytemp = SPKEY+'_values'
+                        fnoutkeytemp = fnout+':DSMW:'+keytemp
+                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
+                        os.system('rm '+fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].close()
+
+
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                keytemp = 'texture'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    # NOTE(review): reads library key fn+':DSMW:texture' though the
+                    # entry above was stored under fnoutkeytemp; these differ when
+                    # fn contains '*' (KeyError risk) -- confirm.
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+                else:
+                    self.library[fn+':DSMW:texture'] = xr.Dataset()
+                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
+                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
+                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
+                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+
+                    # texture index: weighted coarse/medium fraction of the total texture sum
+                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
+
+                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
+                    zundef[zundef < 0] = np.nan
+                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
+                    # elementwise '*' acts as logical AND on the boolean arrays
+                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
+
+                    # 9012 = "dominant part undefined" sentinel (see isoil LOOKUP below)
+                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+
+
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+                print('calculating texture type')
+
+
+
+                keytemp = 'itex'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+                else:
+                    self.library[fnoutkeytemp] = xr.Dataset()
+                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+                    # integer texture in percent; NaN -> -9 sentinel
+                    X = self.datasets['texture'].page['texture'].values*100
+                    X[pd.isnull(X)] = -9
+
+
+                    self.datasets[keytemp][keytemp].values = X
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
+                    self.datasets['itex'].close()
+
+
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+
+                keytemp = 'isoil'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                isoil_reprocessed = False
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+                else:
+                    isoil_reprocessed = True
+                    print('calculating soil type')
+                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    ITEX = self.datasets['itex'].page['itex'].values
+                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
+                    # thresholds applied in ascending order, so the last matching row wins
+                    LOOKUP = [
+                              [-10 ,9],# ocean
+                              [0 ,7],# fine textured, clay (soil type 7)
+                              [20,6],# medium to fine textured, loamy clay (soil type 6)
+                              [40,5],# medium textured, loam (soil type 5)
+                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                              [80,3],# coarse textured, sand (soil type 3)
+                              # NOTE(review): comment said soil type 3 but the mapping gives 9 -- confirm intended
+                              [100,9],# coarse textured, sand (soil type 3)
+                            ]
+                    for iitex,iisoil in LOOKUP:
+                        ISOIL[ITEX > iitex] = iisoil
+                        print('iitex,iisoil',iitex,iisoil)
+
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    LOOKUP = [
+                              [9001, 1 ], # ice, glacier (soil type 1)
+                              [9002, 2 ], # rock, lithosols (soil type 2)
+                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                              [9,    9 ], # undefined (ocean)
+                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                              [9000, 9 ], # undefined (inland lake)
+                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                            ]
+                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
+
+                    # 901200 = 9012 sentinel scaled by the *100 applied to itex above
+                    CODE_VALUES[ITEX == 901200] = 9012
+                    for icode,iisoil in LOOKUP:
+                        ISOIL[CODE_VALUES == icode] = iisoil
+
+                    self.datasets['isoil']['isoil'].values = ISOIL
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+                    print('saved inbetween file to: '+fnoutkeytemp)
+
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                #adopted from data_soil.f90 (COSMO5.0)
+                SP_LOOKUP = {
+                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
+                  # (by index)                                           loam                    loam                                water      ice
+                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+                  # Important note: For peat, the unknown values below are set equal to that of loam
+                  # supplement Noilhan and Planton (1989) soil texture parameters for the force-restore method.
+                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
+                  #error in table 2 of NP89: values need to be multiplied by e-6
+                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
+
+                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
+                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
+                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
+                }
+
+
+                # isoil_reprocessed = False
+                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+
+                #     self.library[fn+':DSMW:isoil'] = \
+                #             book(fnoutkeytemp,debug_level=self.debug_level)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+                # else:
+                #     isoil_reprocessed = True
+                #     print('calculating soil type')
+                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+
+
+
+                # this should become cleaner in future but let's hard code it for now.
+                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
+                print('calculating soil parameter')
+                DATATEMPSPKEY = {}
+                if (recalc < 1) and (isoil_reprocessed == False): 
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        keytemp = SPKEY
+                        fnoutkeytemp=fnout+':DSMW:'+keytemp
+                        self.library[fn+':DSMW:'+SPKEY] =\
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
+                        self.datarefs[SPKEY] =fnoutkeytemp
+                else:
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+
+                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
+                    ISOIL = self.datasets['isoil'].page['isoil'].values
+                    # NOTE(review): debug print left in -- dumps full index arrays
+                    print(np.where(ISOIL>0.))
+                    # map each of the 11 soil-type indices to its NP89 parameter value
+                    for i in range(11):
+                        SELECT = (ISOIL == i)
+                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
+
+                        # NOTE(review): writes to fn+':DSMW:'+SPKEY (wildcard kept)
+                        # while the cached-read branch above reads fnout-based
+                        # names (wildcard stripped) -- the cache round-trip breaks
+                        # when fn contains '*'; confirm.
+                        os.system('rm '+fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].close()
+                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
+
+                        self.library[fn+':DSMW:'+SPKEY] = \
+                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+
+
+            else:
+                self.load_dataset_default(fn,varsource,vardest)
+
+
+
+
+
+
+#
+#                 # only print the last parameter value in the plot
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'cala'
+#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'crhoc'
+#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#     key = "CERES"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         CERES_start_date = dt.datetime(2000,3,1)
+#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+#
+#         var = 'cc'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#         print(class_settings.lat,class_settings.lon)
+#
+#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
+#
+#         input_nc.close()
+#
+
+
+#     key = "GIMMS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
#         print("Reading Leaf Area Index from "+input_fn)
+#         var = 'LAI'
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+#
+#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+#
+#         if np.isnan(tarray[idatetime]):
+#             print("interpolating GIMMS cveg nan value")
+#
+#             mask = np.isnan(tarray)
+#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+#             else:
+#                 print("Warning. Could not interpolate GIMMS cveg nan value")
+#
+#         class_settings.__dict__[var] = tarray[idatetime]
+#
+#         input_nc.close()
+#
+#     key = "IGBPDIS_ALPHA"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         var = 'alpha'
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+#         print("Reading albedo from "+input_fn)
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#
+#         landfr = {}
+#         for ltype in ['W','B','H','TC']:
+#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+#
+#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+#
+#         alpha=0.
+#         for ltype in landfr.keys():
+#             alpha += landfr[ltype]*aweights[ltype]
+#
+#
+#         class_settings.__dict__[var] = alpha
+#         input_nc.close()
+#
+#
+#     key = "ERAINT_ST"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         print("Reading soil temperature from "+input_fn)
+#
+#         var = 'Tsoil'
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         var = 'T2'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+#
+#
+#         input_nc.close()
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #var = 'T2'
+#     #valold = class_settings.__dict__[var]
+#     #
+#     #class_settings.__dict__[var] = 305.
+#     #class_settings.__dict__['Tsoil'] = 302.
+#     #valnew = class_settings.__dict__[var]
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #
+#     #var = 'Lambda'
+#     #valold = class_settings.__dict__[var]
+#
+#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
+#     ## I need to ask Chiel.
+#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+#     #
+#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
+#     #class_settings.__dict__[var] = valnew
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     key = "GLAS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+#         print("Reading canopy height for determining roughness length from "+input_fn)
+#         var = 'z0m'
+#
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+#
+#         lowerlimit = 0.01
+#         if testval < lowerlimit:
+#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+#             class_settings.__dict__[var] = lowerlimit
+#         else:
+#             class_settings.__dict__[var] = testval
+#
+#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+#
+#
+#         input_nc.close()
+
+
+
+
+
diff --git a/interface_functions.py b/interface_functions.py
new file mode 100644
index 0000000..3e483f3
--- /dev/null
+++ b/interface_functions.py
@@ -0,0 +1,506 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+#from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+
class records_iterator(object):
    """Forward/backward iterator over the rows of a records table.

    Yields ``(index_label, row)`` pairs from a pandas DataFrame, similar to
    ``DataFrame.iterrows`` but with an explicit cursor so the caller can
    also step backwards with :meth:`__prev__`.
    """

    def __init__(self, records):
        # records: pandas DataFrame of sounding/model record entries
        self.records = records
        # cursor starts one position before the first row
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance the cursor by *jump* rows; return (index_label, row)."""
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration

        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # BUG FIX: the original called self.__next__(self, jump=-1), which
        # passes `self` twice and raises TypeError ("multiple values for
        # argument 'jump'"). Step the cursor back one row instead.
        return self.__next__(jump=-1)
+
+
+#'_afternoon.yaml'
def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
    """Read a single record from an open multi-record yaml archive.

    The record spans the byte range [index_start, index_end) of *yaml_file*
    (offsets as produced by get_records). The snippet is written to a
    temporary buffer file and converted to json with an external ruby
    one-liner (presumably chosen because it is faster than PyYAML — TODO
    confirm), then loaded back with json.load.

    INPUT:
        yaml_file: open file object of the station yaml archive
        index_start, index_end: byte offsets delimiting one record
        mode: 'mod' returns a class4gl model-output object,
              'ini' returns a class4gl_input initial-condition object
    OUTPUT:
        class4gl ('mod') or class4gl_input ('ini') instance
    """
    filename = yaml_file.name
    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
    #yaml_file = open(filename)

    #print('going to next observation',filename)
    yaml_file.seek(index_start)

    # Sanitize values the ruby/json round-trip cannot digest: 'inf' and
    # 'nan' become '9e19'. Note that yaml spells nan as '.nan', so it turns
    # into '.9e19' == 0.9e19 — exactly the sentinel the checks below look
    # for. The '---' document separator is dropped so the buffer holds a
    # single yaml document.
    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')

    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
    filebuffer.write(buf)
    filebuffer.close()
    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
    
    # NOTE(review): shell command assembled from file paths and executed via
    # os.system; acceptable only for trusted local filenames. The ruby
    # interpreter path is hard-coded to the HPC cluster's module tree.
    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '

    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
    print(command)
    os.system(command)
    jsonstream = open(filename+'.buffer.json.'+str(index_start))
    record_dict = json.load(jsonstream)
    jsonstream.close()
    # remove the temporary yaml buffer; the json buffer is removed below
    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))


    if mode =='mod':
        modelout = class4gl()
        modelout.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))

        return modelout
    elif mode == 'ini':

 
        # datetimes are incorrectly converted to strings. We need to convert them
        # again to datetimes
        for key,value in record_dict['pars'].items():
            # we don't want the key with columns that have none values
            if value is not None: 
                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
               # elif (type(value) == str):
                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")

            # 0.9e19 / '.9e19' are the nan sentinels introduced by the
            # replace() calls above; restore them to proper NaN
            if (value == 0.9e19) or (value == '.9e19'):
                record_dict['pars'][key] = np.nan
        for key in record_dict.keys():
            #print(key)
            if key in ['air_ap','air_balloon',]:
                #NNprint('check')
                # restore the nan sentinels inside the profile data columns
                for datakey,datavalue in record_dict[key].items():
                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]

        #os.system('rm '+filename+'.buffer.json.'+str(index_start))

        c4gli = class4gl_input()
        print(c4gli.logger,'hello')
        c4gli.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))
        return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
+
class stations(object):
    """Inventory of sounding stations found in a directory of yaml files.

    The station list (STNID, latitude, longitude, filename) is cached in
    ``<path>/stations_list.csv``; pass ``refetch_stations=True`` to rebuild
    the cache by scanning the yaml files again. The resulting table is
    exposed as ``self.table``, indexed by STNID.
    """

    def __init__(self, path, suffix='ini', refetch_stations=False):
        self.path = path

        self.file = self.path+'/stations_list.csv'
        if (os.path.isfile(self.file)) and (not refetch_stations):
            # cached station list exists: take the cheap path
            self.table = pd.read_csv(self.file)
        else:
            # scan the yaml files and cache the result for next time
            self.table = self.get_stations(suffix=suffix)
            self.table.to_csv(self.file)

        self.table = self.table.set_index('STNID')
        #print(self.table)

    def get_stations(self, suffix):
        """Scan the yaml files in ``self.path`` and build the station table.

        Reads only the first yaml record of each file to extract STNID,
        latitude and longitude. Raises ValueError when no matching files
        are found.
        """
        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
        if len(stations_list_files) == 0:
            # fall back to the old single-chunk filename convention
            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
        stations_list_files.sort()
        print(stations_list_files)
        if len(stations_list_files) == 0:
            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
        stations_list = []
        for stations_list_file in stations_list_files:
            thisfile = open(stations_list_file,'r')
            # NOTE(review): yaml.load_all without an explicit Loader is
            # deprecated and unsafe on untrusted input; consider SafeLoader.
            yamlgen = yaml.load_all(thisfile)
            try:
                first_record = yamlgen.__next__()
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. An empty or unreadable file
                # still simply yields no record.
                first_record = None
            if first_record is not None:
                stations_list.append({})
                for column in ['STNID','latitude','longitude']:
                    #print(first_record['pars'].keys())
                    stations_list[-1][column] = first_record['pars'][column]
                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
            yamlgen.close()
            thisfile.close()

        print(stations_list)
        return pd.DataFrame(stations_list)
+
class stations_iterator(object):
    """Iterator over the rows of a :class:`stations` table.

    Yields ``(STNID, row)`` pairs; also supports random access by row
    position (:meth:`set_row`) or by station id (:meth:`set_STNID`), and
    stepping backwards (:meth:`__prev__`).
    """

    def __init__(self, stations):
        # stations: object exposing a pandas DataFrame as .table
        self.stations = stations
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance the cursor by *jump* rows; return (STNID, row)."""
        self.ix = self.ix + jump
        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0)):
            raise StopIteration
        # no-op after the bounds check above; kept for parity with the
        # original implementation
        self.ix = np.mod(self.ix, len(self.stations.table))
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_row(self, row):
        """Jump to positional *row* and return (STNID, row)."""
        self.ix = row
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_STNID(self, STNID):
        """Jump to the station with the given STNID; return (STNID, row)."""
        self.ix = np.where((self.stations.table.index == STNID))[0][0]
        print(self.ix)
        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def __prev__(self):
        # BUG FIX: was self.__next__(self, jump=-1), which passes `self`
        # twice and raises TypeError. Step back one row instead.
        return self.__next__(jump=-1)

    def close(self):
        # BUG FIX: was `def close():` without `self`, so any instance call
        # raised TypeError. Drop the cursor to mark the iterator closed.
        del(self.ix)
+
class records_iterator(object):
    """Iterator over a records table yielding (index_label, row) pairs.

    Re-definition of the class earlier in this module; being later, this is
    the one that is effectively used. Unlike the earlier variant, negative
    cursor positions wrap around to the end of the table, so stepping back
    from the first row yields the last row.
    """

    def __init__(self, records):
        # records: pandas DataFrame of record entries
        self.records = records
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance the cursor by *jump* rows; return (index_label, row)."""
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration
        # wrap negative positions (reachable via __prev__ from row 0)
        self.ix = np.mod(self.ix, len(self.records))
        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # BUG FIX: was self.__next__(self, jump=-1), which passes `self`
        # twice and raises TypeError ("multiple values for argument 'jump'").
        return self.__next__(jump=-1)
+
+
+# #'_afternoon.yaml'
+# def get_record_yaml(yaml_file,index_start,index_end):
+#     filename = yaml_file.name
+#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
+#     #yaml_file = open(filename)
+# 
+#     #print('going to next observation',filename)
+#     yaml_file.seek(index_start)
+# 
+#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
+# 
+#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+#     filebuffer.write(buf)
+#     filebuffer.close()
+#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
+#     
+#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+# 
+#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
+#     print(command)
+#     os.system(command)
+#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
+#     record_dict = json.load(jsonstream)
+#     jsonstream.close()
+#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+#  
+#     # datetimes are incorrectly converted to strings. We need to convert them
+#     # again to datetimes
+#     for key,value in record_dict['pars'].items():
+#         # we don't want the key with columns that have none values
+#         if value is not None: 
+#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
+#            # elif (type(value) == str):
+#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+#                 
+#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
+# 
+#         if (value == 0.9e19) or (value == '.9e19'):
+#             record_dict['pars'][key] = np.nan
+#     for key in record_dict.keys():
+#         print(key)
+#         if key in ['air_ap','air_balloon',]:
+#             print('check')
+#             for datakey,datavalue in record_dict[key].items():
+#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
+# 
+#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
+# 
+#     c4gli = class4gl_input()
+#     c4gli.load_yaml_dict(record_dict)
+#     return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
    """Collect the record index tables for a set of stations.

    For every station, the yaml profile files are located — first with the
    old single-chunk naming ``SSSSS_<subset>.yaml``, otherwise with the
    chunked naming ``SSSSS_<chunk>_<subset>.yaml`` — and for each file a
    pandas table is built with one row per yaml record: the scalar 'pars'
    entries plus the byte offsets (index_start/index_end) of the record in
    the file, which get_record_yaml uses for direct access later. The table
    is cached next to the yaml file as a ``.pkl`` and only regenerated when
    the yaml file is newer or ``refetch_records`` is set.

    INPUT:
        stations: DataFrame of stations indexed by STNID (iterrows is used)
        path_yaml: directory holding the yaml (and cached pkl) files
        getchunk: 'all' to gather every chunk, or one specific chunk number
        subset: filename tag ('morning', 'afternoon', 'ini', 'mod', ...)
        refetch_records: force regeneration of the cached pkl tables
    OUTPUT:
        DataFrame indexed by (STNID, chunk, index), one row per record
    """
    records = pd.DataFrame()
    for STNID,station in stations.iterrows():
        dictfnchunks = []
        # BUG FIX: was `getchunk is 'all'`, an identity comparison against a
        # string literal; interning makes that implementation-dependent (and
        # a SyntaxWarning on modern CPython). Compare by equality.
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually for
            # original profile pairs)
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn,chunk=chunk))

            # otherwise, we use the new multi-chunk filename format
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1

        else:
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
            dictfnchunks.append(dict(fn=fn,chunk=getchunk))

        if len(dictfnchunks) > 0:
            for dictfnchunk in dictfnchunks:
                yamlfilename = dictfnchunk['fn']
                chunk = dictfnchunk['chunk']
                print(chunk)

                # the cached per-chunk table lives next to the yaml file
                pklfilename = yamlfilename.replace('.yaml','.pkl')

                generate_pkl = False
                if not os.path.isfile(pklfilename):
                    print('pkl file does not exist. I generate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                elif not (os.path.getmtime(yamlfilename) <  \
                    os.path.getmtime(pklfilename)):
                    print('pkl file older than yaml file, so I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True

                if refetch_records:
                    print('refetch_records flag is True. I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                if not generate_pkl:
                    records = pd.concat([records,pd.read_pickle(pklfilename)])
                else:
                    with open(yamlfilename) as yaml_file:

                        dictout = {}

                        # skip ahead to the first record separator ('---')
                        next_record_found = False
                        end_of_file = False
                        while (not next_record_found) and (not end_of_file):
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        next_tell = yaml_file.tell()

                        while not end_of_file:

                            print(' next record:',next_tell)
                            current_tell = next_tell
                            next_record_found = False
                            yaml_file.seek(current_tell)
                            # copy one record into a temporary buffer file,
                            # zeroing inf/nan which the ruby yaml->json
                            # round-trip cannot digest
                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                            linebuffer = ''
                            while ( (not next_record_found) and (not end_of_file)):
                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
                                linebuffer = yaml_file.readline()
                                next_record_found = (linebuffer == '---\n')
                                end_of_file = (linebuffer == '')
                            filebuffer.close()

                            next_tell = yaml_file.tell()
                            index_start = current_tell
                            index_end = next_tell

                            # NOTE(review): shell command built from file
                            # paths and run via os.system; safe only for
                            # trusted local filenames. Ruby path hard-coded.
                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' '
                            print(command)

                            os.system(command)
                            #jsonoutput = subprocess.check_output(command,shell=True)
                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
                            record = json.load(jsonstream)
                            dictouttemp = {}
                            for key,value in record['pars'].items():
                                # we don't want the key with columns that have none values
                                if value is not None:
                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
                                   if (type(value) in regular_numeric_types):
                                        dictouttemp[key] = value
                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                            recordindex = record['index']
                            dictouttemp['chunk'] = chunk
                            dictouttemp['index_start'] = index_start
                            dictouttemp['index_end'] = index_end
                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
                            for key,value in dictouttemp.items():
                                if key not in dictout.keys():
                                    dictout[key] = {}
                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                            print(' obs record registered')
                            jsonstream.close()
                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
                    records_station = pd.DataFrame.from_dict(dictout)
                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                    print('writing table file ('+pklfilename+') for station '\
                          +str(STNID))
                    records_station.to_pickle(pklfilename)
                    records = pd.concat([records,records_station])
    return records
+
def stdrel(mod,obs,columns):
    """Station-standardised deviation of model values from observations.

    For each column, the result combines (i) the bias of the station-mean
    model value against the station-mean observed value and (ii) the model
    anomaly from its own station mean, both scaled by the per-station
    standard deviation of the observations.
    """
    result = pd.DataFrame(columns=columns)
    for column in columns:
        obs_mean = obs.groupby('STNID')[column].transform('mean')
        obs_std = obs.groupby('STNID')[column].transform('std')
        mod_mean = mod.groupby('STNID')[column].transform('mean')
        station_bias = (mod_mean - obs_mean) / obs_std
        local_anomaly = (mod[column] - mod_mean) / obs_std
        result[column] = station_bias + local_anomaly
    return result
+
def pct(obs,columns):
    """Percentile rank (in (0, 1]) of every value within its own column."""
    return pd.DataFrame(
        {column: obs[column].rank(pct=True) for column in columns},
        columns=columns,
    )
+
def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
    """Hourly tendencies between the morning observation and afternoon model.

    For each key, computes d<key>/dt = (afternoon model value minus morning
    observed value) divided by the elapsed local time, in units per hour.
    """
    stats = pd.DataFrame()
    for key in keys:
        # elapsed time between the two soundings, in seconds
        # NOTE(review): .dt.seconds ignores the days component of the
        # timedelta; assumes both soundings fall on the same day — confirm.
        elapsed_s = (obs_afternoon.ldatetime - obs_morning.ldatetime).dt.seconds
        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key]) / elapsed_s * 3600.
    return stats
+
diff --git a/interface_multi.py b/interface_multi.py
new file mode 100644
index 0000000..83148e5
--- /dev/null
+++ b/interface_multi.py
@@ -0,0 +1,2061 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+# from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+# Segment data for a red->grey->blue diverging colormap used by the stats
+# view.  Each channel maps (position, value-left-of-position,
+# value-right-of-position) as required by LinearSegmentedColormap.
+cdictpres = {'blue': (\
+                   (0.,    0.,  0.),
+                   (0.25,  0.25, 0.25),
+                   (0.5,  .70, 0.70),
+                   (0.75, 1.0, 1.0),
+                   (1,     1.,  1.),
+                   ),
+       'green': (\
+                   (0. ,   0., 0.0),
+                   (0.25,  0.50, 0.50),
+                   (0.5,  .70, 0.70),
+                   (0.75,  0.50, 0.50),
+                   (1  ,    0,  0.),
+                   ),
+       'red':  (\
+                  (0 ,  1.0, 1.0),
+                  (0.25 ,  1.0, 1.0),
+                   (0.5,  .70, 0.70),
+                  (0.75 , 0.25, 0.25),
+                  (1,    0., 0.),
+                  )}
+
+# module-level colormap instance (a per-figure copy is also built in plot())
+statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
+
+
+# NOTE(review): 'module load' runs in a child shell spawned by os.system,
+# so it cannot change this Python process' environment -- this line is
+# likely a leftover with no effect; confirm and remove.
+os.system('module load Ruby')
+
+class c4gl_interface_soundings(object):
+    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
+        """ creates an interactive interface for analysing class4gl experiments
+
+        INPUT:
+            path_exp : path of the experiment output
+            path_obs : path of the observations 
+            globaldata: global data that is being shown on the map
+            refetch_stations: do we need to build the list of the stations again?
+        OUTPUT:
+            the procedure returns an interface object with interactive plots
+
+        """
+        
+        # set the ground
+        self.globaldata = globaldata
+
+ 
+        self.path_exp = path_exp
+        self.path_obs = path_obs
+        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
+
+        # # get the list of stations
+        # stationsfile = self.path_exp+'/stations_list.csv'
+        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
+        #     stations = pd.read_csv(stationsfile)
+        # else:
+        #     stations = get_stations(self.path_exp)
+        #     stations.to_csv(stationsfile)
+
+        # stations = stations.set_index('STNID')
+
+        self.frames = {}
+
+        self.frames['stats'] = {}
+        self.frames['worldmap'] = {}
+                
+        self.frames['profiles'] = {}
+        self.frames['profiles'] = {}
+        self.frames['profiles']['DT'] = None
+        self.frames['profiles']['STNID'] = None
+
+        #self.frames['worldmap']['stationsfile'] = stationsfile
+        self.frames['worldmap']['stations'] = stations(self.path_exp, \
+                                                       suffix='ini',\
+                                                       refetch_stations=refetch_stations)
+
+        # Initially, the stats frame inherets the values/iterators of
+        # worldmap
+        for key in self.frames['worldmap'].keys():
+            self.frames['stats'][key] = self.frames['worldmap'][key]
+
+        # get its records and load it into the stats frame
+        self.frames['stats']['records_all_stations_ini'] =\
+                        get_records(self.frames['stats']['stations'].table,\
+                                           self.path_exp,\
+                                           subset='ini',\
+                                           refetch_records=refetch_records
+                                           )
+        # get its records and load it into the stats frame
+        self.frames['stats']['records_all_stations_mod'] =\
+                        get_records(self.frames['stats']['stations'].table,\
+                                           self.path_exp,\
+                                           subset='mod',\
+                                           refetch_records=refetch_records
+                                           )
+
+        # get its records and load it into the stats frame
+        self.frames['stats']['records_all_stations_obs_afternoon'] =\
+                        get_records(self.frames['stats']['stations'].table,\
+                                           self.path_obs,\
+                                           subset='afternoon',\
+                                           refetch_records=refetch_records
+                                           )
+
+        self.frames['stats']['records_all_stations_mod'].index = \
+            self.frames['stats']['records_all_stations_ini'].index 
+
+        self.frames['stats']['records_all_stations_ini']['dates'] = \
+            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
+
+        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
+            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
+
+        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
+
+
+        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
+
+        self.frames['stats']['records_all_stations_obs_afternoon'] = \
+            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
+
+        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
+            self.frames['stats']['records_all_stations_ini'].index 
+
+        self.frames['stats']['viewkeys'] = ['h','theta','q']
+        print('Calculating table statistics')
+        self.frames['stats']['records_all_stations_mod_stats'] = \
+                tendencies(self.frames['stats']['records_all_stations_mod'],\
+                           self.frames['stats']['records_all_stations_obs_afternoon'],\
+                           self.frames['stats']['records_all_stations_ini'],\
+                           self.frames['stats']['viewkeys']\
+                          )
+        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
+                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
+                           self.frames['stats']['records_all_stations_obs_afternoon'],\
+                           self.frames['stats']['records_all_stations_ini'],\
+                           self.frames['stats']['viewkeys']\
+                          )
+
+        self.frames['stats']['inputkeys'] = inputkeys
+        
+        # self.frames['stats']['inputkeys'] = \
+        #     [ key for key in \
+        #       self.globaldata.datasets.keys() \
+        #       if key in \
+        #       list(self.frames['stats']['records_all_stations_obs'].columns)]
+
+
+        # get units from the class4gl units database
+        self.units = dict(units)
+        # for those that don't have a definition yet, we just ask a question
+        # mark
+        for var in self.frames['stats']['inputkeys']:
+            self.units[var] = '?'
+
+        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
+        self.frames['stats']['records_all_stations_ini_pct'] = \
+                  pct(self.frames['stats']['records_all_stations_ini'], \
+                      columns = self.frames['stats']['inputkeys'])
+
+        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
+        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+        #     mod['
+
+        # 
+        # 
+        # \
+        #        self.frames['stats']['records_all_stations_mod'], \
+
+
+
+        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
+        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
+        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
+        #               columns = [ 'd'+key+'dt' for key in \
+        #                           self.frames['stats']['viewkeys']], \
+        #              )
+
+        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
+        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
+        #               obs = self.frames['stats']['records_all_stations_ini'], \
+        #               columns = self.frames['stats']['viewkeys'], \
+        #              )
+        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
+        
+        print('filtering pathological data')
+        # some observational sounding still seem problematic, which needs to be
+        # investigated. In the meantime, we filter them
+        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
+                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
+                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
+
+        # we filter ALL data frames!!!
+        for key in self.frames['stats'].keys():
+            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
+               (self.frames['stats'][key].index.names == indextype):
+                self.frames['stats'][key] = self.frames['stats'][key][valid]
+        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
+
+        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
+
+
+        print("filtering stations from interface that have no records")
+        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
+            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                    == STNID).sum() == 0):
+                print("dropping", STNID)
+                self.frames['worldmap']['stations'].table = \
+                        self.frames['worldmap']['stations'].table.drop(STNID)
+                    
+        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+        
+        # TO TEST: should be removed, since it's is also done just below
+        self.frames['stats']['stations_iterator'] = \
+            self.frames['worldmap']['stations_iterator'] 
+
+        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
+        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
+        self.next_station()
+
+        # self.goto_datetime_worldmap(
+        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
+        #     'after')
+    def sel_station(self,STNID=None,rownumber=None):
+
+        if (STNID is not None) and (rownumber is not None):
+            raise ValueError('Please provide either STNID or rownumber, not both.')
+
+        if (STNID is None) and (rownumber is None):
+            raise ValueError('Please provide either STNID or rownumber.')
+            
+        if STNID is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
+            print(
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+            )
+            self.update_station()
+        elif rownumber is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
+            self.update_station()
+
+
+
+    def next_station(self,event=None,jump=1):
+        """ advance the worldmap station iterator by ``jump`` entries
+        (negative jumps step backwards) and refresh the interface.
+        If the iterator is exhausted, StopIteration is suppressed and the
+        current station is kept.  ``event`` is the (unused) matplotlib
+        button event. """
+        with suppress(StopIteration):
+            # project iterator protocol: __next__(jump) returns
+            # (STNID, station_row)
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+                = self.frames['worldmap']['stations_iterator'].__next__(jump)
+            # self.frames['worldmap']['stations_iterator'].close()
+            # del(self.frames['worldmap']['stations_iterator'])
+            # self.frames['worldmap']['stations_iterator'] = \
+            #                 selfself.frames['worldmap']['stations'].iterrows()
+            # self.frames['worldmap']['STNID'],\
+            # self.frames['worldmap']['current_station'] \
+            #     = self.frames['worldmap']['stations_iterator'].__next__()
+
+        self.update_station()
+
+    def prev_station(self,event=None):
+        """ step back to the previous station (see next_station) """
+        self.next_station(jump = -1,event=event)
+    def update_station(self):
+        """ refresh all per-station state after the current station changed:
+        copy the worldmap selection into the stats frame, slice the
+        all-station record tables down to the current station, (re)open the
+        station's yaml files, rebuild the record iterators and finally
+        refresh the current record. """
+        # stats frame follows the worldmap selection
+        for key in ['STNID','current_station','stations_iterator']: 
+            self.frames['stats'][key] = self.frames['worldmap'][key] 
+
+
+
+        # generate index of the current station
+        self.frames['stats']['records_current_station_index'] = \
+            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+             == \
+             self.frames['stats']['current_station'].name)
+
+        # create the value table of the records of the current station
+        tab_suffixes = \
+                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        for tab_suffix in tab_suffixes:
+            self.frames['stats']['records_current_station'+tab_suffix] = \
+                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+        # go to first record of current station
+        self.frames['stats']['records_iterator'] = \
+                        records_iterator(self.frames['stats']['records_current_station_mod'])
+        (self.frames['stats']['STNID'] , \
+        self.frames['stats']['current_record_chunk'] , \
+        self.frames['stats']['current_record_index']) , \
+        self.frames['stats']['current_record_mod'] = \
+                        self.frames['stats']['records_iterator'].__next__()
+
+        # profiles frame starts out in sync with the stats frame
+        for key in self.frames['stats'].keys():
+            self.frames['profiles'][key] = self.frames['stats'][key]
+
+        # (re)open the yaml files of the selected station/chunk, closing
+        # any files left open from the previously selected station
+        STNID = self.frames['profiles']['STNID']
+        chunk = self.frames['profiles']['current_record_chunk']
+        if 'current_station_file_ini' in self.frames['profiles'].keys():
+            self.frames['profiles']['current_station_file_ini'].close()
+        self.frames['profiles']['current_station_file_ini'] = \
+            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+        if 'current_station_file_mod' in self.frames['profiles'].keys():
+            self.frames['profiles']['current_station_file_mod'].close()
+        self.frames['profiles']['current_station_file_mod'] = \
+            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+            self.frames['profiles']['current_station_file_afternoon'].close()
+        self.frames['profiles']['current_station_file_afternoon'] = \
+            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+        # for the profiles we make a distinct record iterator, so that the
+        # stats iterator can move independently
+        self.frames['profiles']['records_iterator'] = \
+                        records_iterator(self.frames['profiles']['records_current_station_mod'])
+        (self.frames['profiles']['STNID'] , \
+        self.frames['profiles']['current_record_chunk'] , \
+        self.frames['profiles']['current_record_index']) , \
+        self.frames['profiles']['current_record_mod'] = \
+                        self.frames['profiles']['records_iterator'].__next__()
+
+
+        # for the profiles we make a distinct record iterator, so that the
+        # stats iterator can move independently
+
+        self.update_record()
+
+    def next_record(self,event=None,jump=1):
+        """ advance the profiles record iterator by ``jump`` records
+        (negative jumps step backwards), mirror the profiles frame into the
+        stats frame and refresh the plots.  StopIteration at the end of the
+        record list is suppressed (current record kept). """
+        with suppress(StopIteration):
+            # project iterator protocol: __next__(jump) returns
+            # ((STNID, chunk, index), record_row)
+            (self.frames['profiles']['STNID'] , \
+            self.frames['profiles']['current_record_chunk'] , \
+            self.frames['profiles']['current_record_index']) , \
+            self.frames['profiles']['current_record_mod'] = \
+                      self.frames['profiles']['records_iterator'].__next__(jump)
+        # except (StopIteration):
+        #     self.frames['profiles']['records_iterator'].close()
+        #     del( self.frames['profiles']['records_iterator'])
+        #     self.frames['profiles']['records_iterator'] = \
+        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
+        #     (self.frames['profiles']['STNID'] , \
+        #     self.frames['profiles']['current_record_index']) , \
+        #     self.frames['profiles']['current_record_mod'] = \
+        #                     self.frames['profiles']['records_iterator'].__next__()
+
+        # keep the stats frame in sync with the profiles frame
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        self.update_record()
+
+    def prev_record(self,event=None):
+        """ step back to the previous record (see next_record) """
+        self.next_record(jump=-1,event=event)
+
+    def update_record(self):
+        """ refresh all per-record state for the currently selected
+        (STNID, chunk, index): look up the matching rows in the record
+        tables, load the corresponding yaml profiles from the already-open
+        station files, and redraw the interface. """
+        # row lookups keyed by the (STNID, chunk, index) multi-index
+        self.frames['profiles']['current_record_ini'] =  \
+            self.frames['profiles']['records_current_station_ini'].loc[\
+                  (self.frames['profiles']['STNID'] , \
+                  self.frames['profiles']['current_record_chunk'],\
+                  self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_obs_afternoon'] =  \
+            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
+                  (self.frames['profiles']['STNID'] , \
+                  self.frames['profiles']['current_record_chunk'] , \
+                  self.frames['profiles']['current_record_index'])]
+
+        self.frames['profiles']['current_record_mod_stats'] = \
+                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
+                    self.frames['profiles']['STNID'], \
+                    self.frames['profiles']['current_record_chunk'], \
+                    self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
+                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
+                    self.frames['profiles']['STNID'],\
+                    self.frames['profiles']['current_record_chunk'],\
+                    self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_ini_pct'] = \
+                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
+                    self.frames['profiles']['STNID'],\
+                    self.frames['profiles']['current_record_chunk'],\
+                    self.frames['profiles']['current_record_index'])]
+
+        # keep the stats frame in sync with the profiles frame
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        # frame
+        # note that the current station, record is the same as the stats frame for initialization
+
+        # select first 
+        #self.frames['profiles']['current_record_index'], \
+        #self.frames['profiles']['record_yaml_mod'] = \
+        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
+        #                   self.frames['stats']['current_record_index'])
+        # load the yaml payloads using the byte offsets (index_start/index_end)
+        # stored with each record
+        self.frames['profiles']['record_yaml_mod'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_mod'], \
+               self.frames['profiles']['current_record_mod'].index_start,
+               self.frames['profiles']['current_record_mod'].index_end,
+               mode='mod')
+                                
+        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
+                       (self.frames['stats']['STNID'] , \
+                        self.frames['stats']['current_record_chunk'] , \
+                        self.frames['stats']['current_record_index'])]
+
+        self.frames['profiles']['record_yaml_ini'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_ini'], \
+               record_ini.index_start,
+               record_ini.index_end,
+                mode='ini')
+
+        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
+                       (self.frames['stats']['STNID'] , \
+                        self.frames['stats']['current_record_chunk'] , \
+                        self.frames['stats']['current_record_index'])]
+
+        self.frames['profiles']['record_yaml_obs_afternoon'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_afternoon'], \
+               record_afternoon.index_start,
+               record_afternoon.index_end,
+                mode='ini')
+
+
+        key = self.frames['worldmap']['inputkey']
+        # only redraw the map if the current world map has a time
+        # dimension
+        if 'time' in self.globaldata.datasets[key].page[key].dims:
+            self.goto_datetime_worldmap(
+                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                'after')
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only=['stats_lightupdate',
+                                                  'worldmap',
+                                                  'profiles'])
+        else:
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only=['stats_lightupdate',
+                                                  'worldmap_stations',
+                                                  'profiles'])
+
+    def abline(self,slope, intercept,axis):
+        """Plot a line from slope and intercept.
+
+        Evaluates intercept + slope*x at the current x-limits of ``axis``
+        and draws the segment as a dashed black line ('k--')."""
+        #axis = plt.gca()
+        x_vals = np.array(axis.get_xlim())
+        y_vals = intercept + slope * x_vals
+        axis.plot(x_vals, y_vals, 'k--')
+
+    def plot(self):
+        """ build the full interactive figure: one axes per stats column on
+        the left, the world map and its navigation buttons in the middle,
+        and the profile/output axes on the right; then draw everything via
+        refresh_plot_interface(). """
+        import pylab as pl
+        from matplotlib.widgets import Button
+        import matplotlib.pyplot as plt
+        import matplotlib as mpl
+        '''
+        Definition of the axes for the sounding table stats
+        '''
+        
+        fig = pl.figure(figsize=(14,9))
+        axes = {} #axes
+        btns = {} #buttons
+
+        # frames, which sets attributes for a group of axes, buttens, 
+        # one subplot per statistics column, stacked in the left-most column
+        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
+            label = 'stats_'+str(key)
+            axes[label] = fig.add_subplot(\
+                            len(self.frames['stats']['viewkeys']),\
+                            5,\
+                            5*ikey+1,label=label)
+            # Actually, the axes should be a part of the frame!
+            #self.frames['stats']['axes'] = axes[
+
+            # pointer to the axes' point data
+            axes[label].data = {}
+
+            # pointer to the axes' color fields
+            axes[label].fields = {}
+
+
+        fig.tight_layout()
+        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)
+
+        label ='stats_colorbar'
+        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
+        axes[label].fields = {}
+
+        # figure-local copy of the module-level diverging colormap
+        from matplotlib.colors import LinearSegmentedColormap
+        cdictpres = {'blue': (\
+                           (0.,    0.,  0.),
+                           (0.25,  0.25, 0.25),
+                           (0.5,  .70, 0.70),
+                           (0.75, 1.0, 1.0),
+                           (1,     1.,  1.),
+                           ),
+               'green': (\
+                           (0. ,   0., 0.0),
+                           (0.25,  0.50, 0.50),
+                           (0.5,  .70, 0.70),
+                           (0.75,  0.50, 0.50),
+                           (1  ,    0,  0.),
+                           ),
+               'red':  (\
+                          (0 ,  1.0, 1.0),
+                          (0.25 ,  1.0, 1.0),
+                           (0.5,  .70, 0.70),
+                          (0.75 , 0.25, 0.25),
+                          (1,    0., 0.),
+                          )}
+        
+        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
+
+
+        label = 'times'
+               
+        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
+        # add pointers to the data of the axes
+        axes[label].data = {}
+        # add pointers to color fields (for maps and colorbars) in the axes
+        axes[label].fields = {}
+
+
+        label = 'worldmap'
+               
+        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
+        # add pointers to the data of the axes
+        axes[label].data = {}
+        # add pointers to color fields (for maps and colorbars) in the axes
+        axes[label].fields = {}
+        axes[label].lat = None
+        axes[label].lon = None
+
+        label = 'worldmap_colorbar'
+        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
+        axes[label].fields = {}
+
+        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
+        label = 'worldmap_stations'
+        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
+        axes[label].data = {}
+
+        # wire up mouse interaction (station picking and hover)
+        fig.canvas.mpl_connect('pick_event', self.on_pick)
+        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)
+
+
+        """ buttons definitions """
+        
+        label = 'bprev_dataset'
+        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous dataset')
+        btns[label].on_clicked(self.prev_dataset)
+
+        label = 'bnext_dataset'
+        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next dataset')
+        btns[label].on_clicked(self.next_dataset)
+
+        label = 'bprev_datetime'
+        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous datetime')
+        btns[label].on_clicked(self.prev_datetime)
+
+        label = 'bnext_datetime'
+        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next datetime')
+        btns[label].on_clicked(self.next_datetime)
+
+
+        label = 'bprev_station'
+        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous station')
+        btns[label].on_clicked(self.prev_station)
+
+        label = 'bnext_station'
+        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next station')
+        btns[label].on_clicked(self.next_station)
+
+        label = 'bprev_record'
+        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous record')
+        btns[label].on_clicked(self.prev_record)
+
+        label = 'bnext_record'
+        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next record')
+        btns[label].on_clicked(self.next_record)
+
+
+        # self.nstatsview = nstatsview
+        # self.statsviewcmap = statsviewcmap
+        self.fig = fig
+        self.axes = axes
+        self.btns = btns
+        self.tbox = {}
+        # self.hover_active = False
+
+        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
+        #                                transform=plt.gcf().transFigure)
+
+        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
+                                          transform=plt.gcf().transFigure)
+
+        # right-hand side: profile axes (theta, q), model output time series
+        # (h, theta, q) and the surface energy balance (SEB)
+        label = 'air_ap:theta'
+        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)
+
+        label = 'air_ap:q'
+        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
+
+        label = 'out:h'
+        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
+
+        label = 'out:theta'
+        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
+
+        label = 'out:q'
+        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
+
+        label = 'SEB'
+        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
+
+
+        self.hover_active = False
+        self.fig = fig
+        self.fig.show()
+        self.fig.canvas.draw()
+        self.refresh_plot_interface()
+
+
+    # def scan_stations(self):
+    #     blabla
+        
+
+
+    # def get_records(current_file):
+    #     records = pd.DataFrame()
+
+    #     # initial position
+    #     next_record_found = False
+    #     while(not next_record_found):
+    #         next_record_found = (current_file.readline() == '---\n')
+    #     next_tell = current_file.tell() 
+    #     end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #     while not end_of_file:
+    #         current_tell = next_tell
+    #         next_record_found = False
+    #         current_file.seek(current_tell)
+    #         while ( (not next_record_found) and (not end_of_file)):
+    #             current_line = current_file.readline()
+    #             next_record_found = (currentline == '---\n')
+    #             end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #         # we store the position of the next record
+    #         next_tell = current_file.tell() 
+    #         
+    #         # we get the current record. Unfortunately we need to reset the
+    #         # yaml record generator first.
+    #         current_yamlgen.close()
+    #         current_yamlgen = yaml.load_all(current_file)
+    #         current_file.seek(current_tell)
+    #         current_record_mod = current_yamlgen.__next__()
+    #     current_yamlgen.close()
+
+    #     return records
+
+       #      next_record_found = False
+       #      while(not record):
+       #          next_record_found = (self.current_file.readline() == '---\n')
+       #      self.current_tell0 = self.current_file.tell() 
+
+       #  
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell0 = self.current_file.tell() 
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell1 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell0)
+       #  self.r0 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell1)
+       #  next_record_found = False
+       #  while ( (not next_record_found) and (not end_of_file):
+       #      current_line = self.current_file.readline()
+       #      next_record_found = (currentline == '---\n')
+       #      end_of_file = (currentline == '') # an empty line means we are at the end
+
+       #  self.current_tell2 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell1)
+       #  self.r1 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell2)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell3 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell2)
+       #  self.r2 = self.current_yamlgen.__next__()
+
+       #  # go to position of next record in file
+       #  self.current_file.seek(self.current_tell3)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell4 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell3)
+       #  self.r3 = self.current_yamlgen.__next__()
+ 
+       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
+
+    def goto_datetime_worldmap(self,DT,shift=None):
+        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                self.frames['worldmap']['iDT'] += 1
+            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                self.frames['worldmap']['iDT'] -= 1 
+            # for gleam, we take the values of the previous day
+            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                self.frames['worldmap']['iDT'] -= 2 
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+        #else:
+        #    self.frames['worldmap'].pop('DT')
+
+    def next_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def prev_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def next_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+    def prev_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+
+
+    def sel_dataset(self,inputkey):
+        self.frames['worldmap']['inputkey'] = inputkey
+        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
+        self.goto_datetime_worldmap(
+            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+            'after')# get nearest datetime of the current dataset to the profile
+        if "fig" in self.__dict__.keys():
+            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+       
+    # def prev_station(self,event=None):
+    #     self.istation = (self.istation - 1) % self.stations.shape[0]
+    #     self.update_station()
+
+
+
+
+    #def update_datetime(self):
+    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
+    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
+    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
+    #        print(self.worldmapfocus['DT'])
+    #        self.refresh_plot_interface(only='worldmap')
+
+    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
+
+        #print('r1')
+        for argkey in args.keys():
+            self.__dict__[arg] = args[argkey]
+
+        axes = self.axes
+        tbox = self.tbox
+        frames = self.frames
+        fig = self.fig
+ 
+        if (only is None) or ('worldmap' in only):
+            globaldata = self.globaldata
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+            else:
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+            keystotranspose = ['lat','lon']
+            for key in dict(datasetxr.dims).keys():
+                if key not in keystotranspose:
+                    keystotranspose.append(key)
+
+            datasetxr = datasetxr.transpose(*keystotranspose)
+            datasetxr = datasetxr.sortby('lat',ascending=False)
+
+            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+            lonleft = lonleft - 360.
+            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+            label = 'worldmap'
+            axes[label].clear()
+            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+
+        if (only is None) or ('worldmap' in only):
+            #if 'axmap' not in self.__dict__ :
+            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
+            #else:
+
+            #stations = self.stations
+
+
+            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
+            #     resolution = 'l', 
+            # area_thresh = 0.1,
+            #     llcrnrlon=-180., llcrnrlat=-90.0,
+            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
+            # 
+            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
+            # self.gmap.drawcountries(color='white',linewidth=0.3)
+            # #self.gmap.fillcontinents(color = 'gray')
+            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
+            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
+            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
+            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # #self.ax5.shadedrelief()
+
+           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
+
+
+            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+
+            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
+            if 'lev' in field.dims:
+                field = field.isel(lev=-1)
+
+            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+            axes[label].axis('off')
+
+            from matplotlib import cm
+            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+            
+            
+            title=frames['worldmap']['inputkey']
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            axes[label].set_title(title)
+
+            label ='worldmap_colorbar'
+            axes[label].clear()
+            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+
+
+            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+            # x,y = self.gmap(lons,lats)
+            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+
+        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+
+            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            store_xlim = {}
+            store_ylim = {}
+            for ikey, key in enumerate(statskeys_out):
+                if (only is not None) and ('stats_lightupdate' in only):
+                    store_xlim[key] = axes['stats_'+key].get_xlim()
+                    store_ylim[key] = axes['stats_'+key].get_ylim()
+                self.axes['stats_'+key].clear()    
+
+            label = 'times'
+            self.axes[label].clear()
+
+            key = 'dthetadt'
+            x = self.frames['stats']['records_all_stations_ini']['datetime']
+            #print(x)
+            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            #print(y)
+            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            #print(z)
+
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            self.axes[label].data[label] = self.axes[label].scatter(x.values,
+                                                                    y.values,
+                                                                    c=z.values,
+                                                                    cmap=self.statsviewcmap,
+                                                                    s=2,
+                                                                    vmin=0.,
+                                                                    vmax=1.,
+                                                                    alpha=alpha_cloud_pixels)
+
+            
+            x = self.frames['stats']['records_current_station_ini']['datetime']
+            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+
+            x = self.frames['profiles']['records_current_station_ini']['datetime']
+            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
+            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
+
+            for ikey, key in enumerate(statskeys_out):
+
+                # show data of all stations
+                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                qvalmax = x.quantile(0.999)
+                qvalmin = x.quantile(0.001)
+                print('applying extra filter over extreme values for plotting stats')
+                selx = (x >= qvalmin) & (x < qvalmax)
+                sely = (x >= qvalmin) & (x < qvalmax)
+                x = x[selx & sely]
+                y = y[selx & sely]
+                z = z[selx & sely]
+                self.axes['stats_'+key].data['stats_'+key] = \
+                       self.axes['stats_'+key].scatter(x,y, c=z,\
+                                cmap=self.statsviewcmap,\
+                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
+
+                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
+
+                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
+                y = self.frames['stats']['current_record_mod_stats'][key]
+                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
+                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
+                    axes['stats_'+key].annotate(text, \
+                                               xy=(x,y),\
+                                               xytext=(0.05,0.05),\
+                                               textcoords='axes fraction',\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               color='white',\
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                # self.axes['stats_'+key].data[key+'_current_record'] = \
+                #        self.axes['stats_'+key].scatter(x,y, c=z,\
+                #                 cmap=self.statsviewcmap,\
+                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
+
+                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
+                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
+                # # highlight data for curent station
+                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+
+                if ikey == len(statskeys_out)-1:
+                    self.axes['stats_'+key].set_xlabel('external')
+                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
+                axes['stats_'+key].set_ylabel('model')
+
+
+                if (only is not None) and ('stats_lightupdate' in only):
+                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
+                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
+                else:
+                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
+                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
+                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
+                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
+                self.abline(1,0,axis=self.axes['stats_'+key])
+
+        if (only is None) or ('stats_colorbar' in only):
+            label ='stats_colorbar'
+            axes[label].clear()
+            import matplotlib as mpl
+            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
+            self.axes[label].fields[label] = \
+             mpl.colorbar.ColorbarBase(self.axes[label],\
+                        orientation='horizontal',\
+                        label="percentile of "+self.frames['worldmap']['inputkey'],
+                        alpha=1.,
+                                cmap=self.statsviewcmap,\
+                                       norm=norm
+                         )
+
+        #print('r1')
+        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
+            #print('r2')
+            label = 'worldmap_stations'
+            axes[label].clear()
+            
+            stations = self.frames['worldmap']['stations'].table
+            globaldata = self.globaldata
+            
+            key = label
+
+            #print('r3')
+            if (stations is not None):
+                xlist = []
+                ylist = []
+                #print('r4')
+                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
+            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    xlist.append(x)
+                    ylist.append(y)
+                #picker is needed to make it clickable (pick_event)
+                axes[label].data[label] = axes[label].scatter(xlist,ylist,
+                                                              c='r', s=15,
+                                                              picker = 15,
+                                                              label=key,
+                                                              edgecolor='k',
+                                                              linewidth=0.8)
+
+            # cb.set_label('Wilting point [kg kg-3]')
+                #print('r5')
+
+                
+            #     xseries = []
+            #     yseries = []
+            #     for iSTN,STN in stations.iterrows():
+            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
+            #         xseries.append(x)                    
+            #         yseries.append(y)
+            #         
+            #         
+            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
+                    
+                if ('current_station' in frames['worldmap']):
+                    #print('r5')
+                    STN = frames['stats']['current_station']
+                    STNID = frames['stats']['STNID']
+                    #print('r5')
+
+                    x,y = len(axes['worldmap'].lon)* \
+                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
+                          len(axes['worldmap'].lat)* \
+                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    #print('r6')
+                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
+                    #                          == \
+                    #                          self.frames['worldmap']['STNID'])\
+                    #                         & \
+                    #                         (self.seltablestats['DT'] \
+                    #                          == self.axes['statsview0].focus['DT']) \
+                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
+                    #print('r7')
+                    text = 'STNID: '+ format(STNID,'10.0f') + \
+                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
+                            ', LON: '+format(STN['longitude'],'3.3f')+ \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+
+                            #+', VAL: '+format(VAL,'.3e')
+
+                    axes[label].scatter(x,y, c='r', s=30,\
+                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
+                    #print('r8')
+            
+                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
+                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
+                    #colorstation = max((min((1.,colorstation)),0.))
+                    colorstation =0.2
+                    from matplotlib import cm
+                    axes[label].annotate(text,
+                                         xy=(x,y),
+                                         xytext=(0.05,0.05),
+                                         textcoords='axes fraction', 
+                                         bbox=dict(boxstyle="round",
+                                         fc = cm.viridis(colorstation)),
+                                         arrowprops=dict(arrowstyle="->",
+                                                         linewidth=1.1),
+                                         color='white' if colorstation < 0.5 else 'black')
+                    #print('r9')
+
+                    # #pos = sc.get_offsets()[ind["ind"][0]]
+                    # 
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label].data[label+'statannotate'].set_text(text)
+                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
+                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
+            #print('r9')
+            axes[label].axis('off')
+            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
+            axes[label].set_ylim((len(axes['worldmap'].lat),0))
+            #print('r10')
+
+        if (only is None) or ('profiles' in only): 
+            #print('r11')
+
+            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
+            # # self.update_station(goto_first_sounding=False)
+            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
+            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
+            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
+            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
+
+            label = 'air_ap:theta'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # +\
+                # ' -> '+ \
+                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+            
+            
+            
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+            #print('r12')
+
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
+            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
+            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            #print('r13')
+            # 
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r14')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+
+            #print('r15')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+                          
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r16')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r17')
+            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            print(hmax)
+            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            if valid_mod:
+
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="mod "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+
+            #print('r18')
+            axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('theta [K]')
+
+            label = 'air_ap:q'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+
+            #print('r19')
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            if valid_mod:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            else:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            # 
+            #print('r20')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r21')
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            #print('r23')
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r24')
+            if valid_mod:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="fit ")#+\
+                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #+'LT')
+            #print('r25')
+            #axes[label].legend()
+
+            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            #axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('q [kg/kg]')
+
+            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+
+            # #pl.subplots_adjust(right=0.6)
+
+            # label = 'q_pro'
+            # axes[label].clear()
+
+            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
+            # 
+            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
+            # 
+            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
+
+            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
+            # #pl.subplots_adjust(right=0.6)
+            # axes[label].set_xlabel('specific humidity [kg/kg]')
+ 
+
+            #print('r26')
+            time = self.frames['profiles']['record_yaml_mod'].out.time
+            for ilabel,label in enumerate(['h','theta','q']):
+                axes["out:"+label].clear()
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].set_ylabel(label)
+                if ilabel == 2:
+                    axes["out:"+label].set_xlabel('local sun time [h]')
+                
+            #print('r27')
+            label = 'SEB'
+            axes[label].clear()
+            
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
+            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
+            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
+                
+            #print('r28')
+            
+            axes[label].legend()
+            
+            #         for ax in self.fig_timeseries_axes:
+#             ax.clear()
+#         
+#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
+#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
+#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
+#         #print(self.morning_sounding.c4gl.out.Swin)
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
+#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
+#         self.fig_timeseries_axes[3].legend()
+#         self.fig.canvas.draw()
+            
+
+
+
+
+
+
+        #self.ready()
+        #print('r29')
+        fig.canvas.draw()
+        #fig.show()
+
+        self.axes = axes
+        self.tbox = tbox
+        self.fig = fig
+
+    def on_pick(self,event):
+        """Matplotlib pick-event handler: move the GUI focus to the picked point.
+
+        The label of the picked artist (assigned when the points were plotted)
+        is mapped back to the axes it lives on via ``keys_to_axes``; depending
+        on that axes the handler either switches the focused station (worldmap
+        picks) or switches the focused record (stats picks), reopening the
+        per-station yaml files and rewinding the records iterator accordingly.
+
+        Parameters:
+            event: matplotlib PickEvent; ``event.artist`` carries the label of
+                the picked collection and ``event.ind`` the picked indices.
+        """
+        #print("HELLO")
+        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
+        #self.axes['theta_pro'].clear()
+        #self.axes['q_pro'].clear()
+        
+
+        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
+        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
+        # Build the artist-label -> axes-key lookup: one entry per stats view
+        # key ('d<key>dt' -> 'stats_d<key>dt') plus the two worldmap axes.
+        keys_to_axes = {}
+        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
+
+        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
+        keys_to_axes['worldmap'] = 'worldmap'
+        
+        axes = self.axes
+        #nstatsview = self.nstatsview
+        #statsviewcmap = self.statsviewcmap
+        # NOTE(review): `stations` and `current`/`xmouse`/`ymouse` below are
+        # assigned but not used further in this handler — presumably leftovers.
+        stations = self.frames['worldmap']['stations'].table
+
+
+        #print("p1")
+        current = event
+        artist = event.artist
+        
+        # The artist label identifies which data series was picked.
+        selkey = artist.get_label()
+        
+        #print(keys_to_axes)
+        
+        # Resolve the axes key; raises KeyError for labels not registered above.
+        label = keys_to_axes[selkey]
+        #print("HELLO",selkey,label)
+
+        # # Get to know in which axes we are
+        # label = None
+        # for axeskey in axes.keys():
+        #     if event.inaxes == axes[axeskey]:
+        #         label = axeskey
+        #         
+
+        # cont, pos = None, None
+        
+        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
+        ind = event.ind
+        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
+        # Recover the picked point coordinates from the scatter collection.
+        # NOTE(review): assumes the picked artist is the FIRST collection on
+        # these axes — confirm this holds for every registered axes.
+        d = axes[label].collections[0]
+        #d.set_offset_position('data')
+        xy = d.get_offsets()
+        x, y =  xy[:,0],xy[:,1]
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+
+        #print("p2")
+        # Only act when at least one point was actually picked.
+        if len(ind) > 0:
+            #print("p3")
+            # Position of the first picked point (data coordinates).
+            pos = x[ind[0]], y[ind[0]]
+
+            #if label[:-1] == 'statsview':
+            #    #seltablestatsstdrel = self.seltablestatsstdrel
+            #    #seltablestatspct = self.seltablestatspct
+
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    
+            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
+            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+            #    
+            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
+            #el
+            if (label == 'worldmap') or (label == 'worldmap_stations'):
+                self.hover_active = False
+                # Only switch when the worldmap focus differs from the
+                # profiles focus; the hover handler already selected the
+                # station under the cursor.
+                if (self.frames['worldmap']['STNID'] !=
+                    self.frames['profiles']['STNID']):
+                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
+                # so we just need to perform update_station
+                    self.update_station()
+            elif (label[:5] == 'stats'):
+
+                self.hover_active = False
+                # Skip all work if the stats focus (station + record) already
+                # matches the profiles focus.
+                if (self.frames['stats']['STNID'] !=
+                self.frames['profiles']['STNID']) or \
+                   (self.frames['stats']['current_record_chunk'] != 
+                    self.frames['profiles']['current_record_chunk']) or \
+                   (self.frames['stats']['current_record_index'] != 
+                    self.frames['profiles']['current_record_index']):
+
+
+
+                    # Propagate the picked station from stats to the worldmap
+                    # frame, then mirror the entire stats frame state into the
+                    # profiles frame.
+                    for key in ['STNID','current_station','stations_iterator']: 
+                        self.frames['worldmap'][key] = self.frames['stats'][key] 
+
+                    for key in self.frames['stats'].keys():
+                        self.frames['profiles'][key] = self.frames['stats'][key]
+
+                    STNID = self.frames['profiles']['STNID']
+                    chunk = self.frames['profiles']['current_record_chunk']
+                    # (Re)open the three per-station yaml files (ini/mod from
+                    # the experiment path, afternoon obs from the obs path),
+                    # closing any handle left over from the previous station.
+                    if 'current_station_file_ini' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_ini'].close()
+                    self.frames['profiles']['current_station_file_ini'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+                    if 'current_station_file_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_mod'].close()
+                    self.frames['profiles']['current_station_file_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_afternoon'].close()
+                    self.frames['profiles']['current_station_file_afternoon'] = \
+                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+                    # go to hovered record of current station
+                    # NOTE(review): `records_iterator` is a helper defined
+                    # elsewhere in this file — presumably wraps an iterrows()
+                    # over the records table; verify.
+                    self.frames['profiles']['records_iterator'] = \
+                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # ... and go to the record of the profile window (last one that
+                    # was picked by the user)
+                    # Linear scan of the fresh iterator until the record whose
+                    # (STNID, chunk, index) matches the current profiles focus.
+                    found = False
+                    EOF = False
+                    while (not found) and (not EOF):
+                        try:
+                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
+                            #print("hello*")
+                            #print(self.frames['profiles']['current_record_index'])
+                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
+                               (index == self.frames['profiles']['current_record_index']) and \
+                               (STNID == self.frames['profiles']['STNID']):
+                                #print('found!')
+                                found = True
+                        except StopIteration:
+                            EOF = True
+                    # NOTE(review): the match is written back into the *stats*
+                    # frame (not profiles) — confirm this asymmetry is intended.
+                    if found:
+                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_chunk'] = chunk
+                        self.frames['stats']['current_record_index'] = index
+                    # # for the profiles we make a distinct record iterator, so that the
+                    # # stats iterator can move independently
+                    # self.frames['profiles']['records_iterator'] = \
+                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # (self.frames['profiles']['STNID'] , \
+                    # self.frames['profiles']['current_record_index']) , \
+                    # self.frames['profiles']['current_record_mod'] = \
+                    #                 self.frames['profiles']['records_iterator'].__next__()
+
+
+                    # for the profiles we make a distinct record iterator, so that the
+                    # stats iterator can move independently
+
+                    # Redraw the profile window for the newly focused record.
+                    self.update_record()
+
+
+
+    def on_plot_hover(self,event):
+        axes = self.axes
+        #print('h1')
+
+        # Get to know in which axes we are
+        label = None
+        for axeskey in axes.keys():
+            if event.inaxes == axes[axeskey]:
+                label = axeskey
+                
+        #print('h2')
+
+        cont, pos = None, None
+        #print (label)
+        
+        if label is not None:
+            if  ('data' in axes[label].__dict__.keys()) and \
+                (label in axes[label].data.keys()) and \
+                (axes[label].data[label] is not None):
+                
+                #print('h3')
+                cont, ind =  axes[label].data[label].contains(event)
+                selkey = axes[label].data[label].get_label()
+                if len(ind["ind"]) > 0:
+                    #print('h4')
+                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
+                    #print('pos',pos,selkey)
+
+
+                    #if label[:-1] == 'statsview':
+                    #    seltablestatsstdrel = self.seltablestatsstdrel
+                    #    seltablestatspct = self.seltablestatspct
+
+                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
+                    #    self.hover_active = True
+                    #    
+                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
+                    #    
+                    #el
+                    #print(label[:5])
+                    if (label[:5] == 'stats') or (label == 'times'):
+                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
+                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
+                        
+
+                        if label[:5] == 'stats':
+                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            (self.frames['stats']['STNID'] ,
+                             self.frames['stats']['current_record_chunk'], 
+                             self.frames['stats']['current_record_index']) = \
+                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                        # elif label[:5] == 'stats':
+                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
+                        #     (self.frames['stats']['STNID'] ,
+                        #      self.frames['stats']['current_record_chunk'], 
+                        #      self.frames['stats']['current_record_index']) = \
+                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+
+
+                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+                        
+                        # # TO TEST: should be removed, since it's is also done just below
+                        # self.frames['stats']['stations_iterator'] = \
+                        #     self.frames['worldmap']['stations_iterator'] 
+                
+                
+                        # self.goto_datetime_worldmap(
+                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
+                        #     'after')
+
+
+                        # scrolling to the right station
+                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                        EOF = False
+                        found = False
+                        while (not found and not EOF):
+                            if (STNID == self.frames['stats']['STNID']):
+                                   found = True 
+                            if not found:
+                                try:
+                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                                except (StopIteration):
+                                    EOF = True
+                        if found:
+                        #    self.frames['stats']['STNID'] = STNID
+                            self.frames['stats']['current_station'] =  station
+
+                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
+                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
+
+
+                        # generate index of the current station
+                        self.frames['stats']['records_current_station_index'] = \
+                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                             == self.frames['stats']['STNID'])
+
+
+                        tab_suffixes = \
+                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            self.frames['stats']['records_current_station'+tab_suffix] = \
+                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+
+                        # go to hovered record of current station
+                        self.frames['stats']['records_iterator'] = \
+                                        records_iterator(self.frames['stats']['records_current_station_mod'])
+
+
+                        # ... and go to the record of the profile window (last one that
+                        # was picked by the user)
+                        found = False
+                        EOF = False
+                        while (not found) and (not EOF):
+                            try:
+                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                                #print("hello*")
+                                #print(self.frames['profiles']['current_record_index'])
+                                if (index == self.frames['stats']['current_record_index']) and \
+                                   (chunk == self.frames['stats']['current_record_chunk']) and \
+                                   (STNID == self.frames['stats']['STNID']):
+                                    #print('found!')
+                                    found = True
+                            except StopIteration:
+                                EOF = True
+                        if found:
+                            #print('h5')
+                            self.frames['stats']['current_record_mod'] = record
+                            self.frames['stats']['current_record_chunk'] = chunk
+                            self.frames['stats']['current_record_index'] = index
+
+                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
+                        tab_suffixes = \
+                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            #print(tab_suffix)
+                            #print(self.frames['stats']['records_current_station'+tab_suffix])
+                            self.frames['stats']['current_record'+tab_suffix] =  \
+                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                      (self.frames['stats']['STNID'] , \
+                                       self.frames['stats']['current_record_chunk'] , \
+                                       self.frames['stats']['current_record_index'])]
+
+
+                        self.hover_active = True
+                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                        # print('h13')
+                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
+                        #     self.goto_datetime_worldmap(
+                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                        #         'after')
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap',
+                        #                                           'profiles'])
+                        # else:
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap_stations',
+                        #                                           'profiles'])
+
+
+
+                    elif label in ['worldmap_stations','worldmap']:
+                        #print('h5')
+
+                        if (self.axes['worldmap'].lat is not None) and \
+                           (self.axes['worldmap'].lon is not None):
+
+
+                            #self.loading()
+                            self.fig.canvas.draw()
+                            self.fig.show()
+
+
+                            # get position of 
+                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
+                                                                 self.axes['worldmap'].lat[0]) + \
+                                           self.axes['worldmap'].lat[0],4)
+                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
+                                                                 self.axes['worldmap'].lon[0]) + \
+                                           self.axes['worldmap'].lon[0],4)
+                        
+                            stations = self.frames['worldmap']['stations'].table
+                            #print('h7')
+                        
+                            #reset stations iterator:
+                            # if 'stations_iterator' in self.frames['worldmap'].keys():
+                            #     self.frames['worldmap']['stations_iterator'].close()
+                            #     del(self.frames['worldmap']['stations_iterator'])
+                            # if 'stations_iterator' in self.frames['stats'].keys():
+                            #     self.frames['stats']['stations_iterator'].close()
+                            #     del(self.frames['stats']['stations_iterator'])
+                            self.frames['worldmap']['stations_iterator'] =\
+                               stations_iterator(self.frames['worldmap']['stations'])
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                            EOF = False
+                            found = False
+                            while (not found and not EOF):
+                                #print('h8',station.latitude,latmap)
+                                #print('h8',station.longitude,lonmap)
+                                if (round(station.latitude,3) == round(latmap,3)) and \
+                                    (round(station.longitude,3) == round(lonmap,3)):
+                                       found = True 
+                                if not found:
+                                    try:
+                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                                    except (StopIteration):
+                                        EOF = True
+                            if found:
+                                self.frames['worldmap']['STNID'] = STNID
+                                self.frames['worldmap']['current_station'] = \
+                                        station
+                        
+                            self.frames['stats']['stations_iterator'] = \
+                                self.frames['worldmap']['stations_iterator'] 
+                            #print('h8')
+                            # inherit station position for the stats frame...
+                            for key in self.frames['worldmap'].keys():
+                                self.frames['stats'][key] = self.frames['worldmap'][key]
+                                
+                            ## fetch records of current station...
+                            #self.frames['stats']['records_current_station_mod'] =\
+                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                            # ... and their indices
+                            self.frames['stats']['records_current_station_index'] = \
+                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                                     == \
+                                     self.frames['stats']['current_station'].name)
+
+
+                            tab_suffixes = \
+                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['records_current_station'+tab_suffix] = \
+                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+                            # ... create a record iterator ...
+                            #self.frames['stats']['records_iterator'].close()
+                            del(self.frames['stats']['records_iterator'])
+                            self.frames['stats']['records_iterator'] = \
+                                self.frames['stats']['records_current_station_mod'].iterrows()
+
+
+
+                        
+                            #print('h9')
+                            # ... and go to the first record of the current station
+                            (self.frames['stats']['STNID'] , \
+                             self.frames['stats']['current_record_chunk'] , \
+                             self.frames['stats']['current_record_index']) , \
+                            self.frames['stats']['current_record_mod'] = \
+                                self.frames['stats']['records_iterator'].__next__()
+                        
+
+
+
+                            #print('h10')
+                            # cache the current record
+                            tab_suffixes = \
+                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['current_record'+tab_suffix] =  \
+                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                          (self.frames['stats']['STNID'] , \
+                                           self.frames['stats']['current_record_chunk'] , \
+                                           self.frames['stats']['current_record_index'])]
+
+                            #print('h11')
+                            
+                            self.hover_active = True
+                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                            #print('h13')
+
+                        
+
+            #if (stations is not None):
+            #    for iSTN,STN in stations.iterrows():
+            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
+            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
+
+        # self.fig.show()
+ 
+        # we are hovering on nothing, so we are going back to the position of
+        # the profile sounding
+        if pos is None:
+            if self.hover_active == True:
+                #print('h1*')
+                
+                #self.loading()
+                # to do: reset stations iterators
+
+                # get station and record index from the current profile
+                for key in ['STNID', 'current_station']:
+                    self.frames['stats'][key] = self.frames['profiles'][key]
+
+                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
+                self.frames['stats']['current_station'] = \
+                        self.frames['profiles']['current_station']
+                #print('h3a*')
+                self.frames['stats']['records_current_station_mod'] = \
+                        self.frames['profiles']['records_current_station_mod']
+                #print('h3b*')
+
+                # the next lines recreate the records iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+
+                # reset stations iterator...
+                #self.frames['stats']['records_iterator'].close()
+                del(self.frames['stats']['records_iterator'])
+                self.frames['stats']['records_iterator'] = \
+                    self.frames['stats']['records_current_station_mod'].iterrows()
+                #print('h4*')
+
+                # ... and go to the record of the profile window (last one that
+                # was picked by the user)
+                found = False
+                EOF = False
+                while (not found) and (not EOF):
+                    try:
+                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                        #print("hello*")
+                        #print(self.frames['profiles']['current_record_index'])
+                        #print(self.frames['profiles']['STNID'])
+                        #print(STNID,index)
+                        if (index == self.frames['profiles']['current_record_index']) and \
+                            (chunk == self.frames['profiles']['current_record_chunk']) and \
+                            (STNID == self.frames['profiles']['STNID']):
+                            #print('found!')
+                            found = True
+                    except StopIteration:
+                        EOF = True
+                if found:
+                    #print('h5*')
+                    self.frames['stats']['current_record_mod'] = record
+                    self.frames['stats']['current_record_chunk'] = chunk
+                    self.frames['stats']['current_record_index'] = index
+
+                #print('h6*')
+
+
+
+                # # fetch records of current station...
+                # self.frames['stats']['records_current_station_mod'] =\
+                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                # ... and their indices
+                self.frames['stats']['records_current_station_index'] = \
+                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                         == \
+                         self.frames['stats']['current_station'].name)
+
+
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['records_current_station'+tab_suffix] = \
+                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+                
+
+                # cache the records of the current station
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['current_record'+tab_suffix] =  \
+                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                              (self.frames['stats']['STNID'] , \
+                               self.frames['stats']['current_record_chunk'] , \
+                               self.frames['stats']['current_record_index'])]
+
+
+                # the next lines recreate the stations iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+                #print('h7*')
+
+                # reset the stations iterators
+                for framekey in ['stats','worldmap']:
+                    ##print(framekey)
+                    if 'stations_iterator' in self.frames[framekey]:
+                        #self.frames[framekey]['stations_iterator'].close()
+                        del(self.frames[framekey]['stations_iterator'])
+
+                self.frames['worldmap']['current_station'] = \
+                        self.frames['profiles']['current_station']
+
+                #recreate the stations iterator for the worldmap...
+                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+
+                # ... and go the position of the profile
+                #print('h8*')
+                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                EOF = False
+                found = False
+                while (not found and not EOF):
+                    if STNID == self.frames['profiles']['STNID'] :
+                        found = True 
+                    if not found:
+                        try:
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                        except (StopIteration):
+                            EOF = True
+                if found:
+                    self.frames['worldmap']['current_station'] = station
+                    self.frames['worldmap']['STNID'] = STNID
+                #print('h9*')
+                self.frames['stats']['stations_iterator'] = \
+                    self.frames['worldmap']['stations_iterator'] 
+
+                # the stats window now inherits the current station from the
+                # worldmap
+                for key in ['STNID','current_station','stations_iterator']: 
+                    self.frames['stats'][key] = self.frames['worldmap'][key] 
+                #print('h10*')
+
+                # # we now only need inherit station position and go to first record
+                # for key in self.frames['worldmap'].keys():
+                #     self.frames['stats'][key] = self.frames['worldmap'][key]
+
+                # self.frames['stats']['records_current_station'] =\
+                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
+
+                # #print(self.frames['stats']['records_current_station'])
+                # self.frames['stats']['records_iterator'] = \
+                #                 self.frames['stats']['records_current_station'].iterrows()
+                # (self.frames['stats']['STNID'] , \
+                # self.frames['stats']['current_record_index']) , \
+                # self.frames['stats']['current_record_mod'] = \
+                #                 self.frames['stats']['records_iterator'].__next__()
+                
+
+
+
+
+
+
+                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
+                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
+                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
+                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+                self.hover_active = False
+                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
+    # def loading(self):
+    #     self.tbox['loading'].set_text('Loading...')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+    #     sleep(0.1)
+    # def ready(self):
+    #     self.tbox['loading'].set_text('Ready')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+
+
+
diff --git a/model.py b/model.py
index 077e6c1..8760411 100644
--- a/model.py
+++ b/model.py
@@ -1,4 +1,4 @@
-#_ 
+# 
 # CLASS
 # Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
 # Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
@@ -25,18 +25,154 @@
 import copy as cp
 import numpy as np
 import sys
+import warnings
+import pandas as pd
+from ribtol_hw import zeta_hs2 , funcsche
+import logging
+#from SkewT.thermodynamics import Density
 #import ribtol
 
+grav = 9.81
 def esat(T):
     return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
 
 def qsat(T,p):
     return 0.622 * esat(T) / p
 
+
+def ribtol(Rib, zsl, z0m, z0h): 
+    Rib = np.float64(Rib)
+    zsl = np.float64(zsl)
+    z0m = np.float64(z0m)
+    z0h = np.float64(z0h)
+    #print(Rib,zsl,z0m,z0h)
+    if(Rib > 0.):
+        L    = 1.
+        L0   = 2.
+    else:
+        L  = -1.
+        L0 = -2.
+    #print(Rib,zsl,z0m,z0h)
+    while (abs(L - L0) > 0.001):
+        L0      = L
+        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
+        Lstart  = L - 0.001*L
+        Lend    = L + 0.001*L
+        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
+                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
+                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
+                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+        L       = L - fx / fxdif
+        #print(L,fx/fxdif)
+        if(abs(L) > 1e12):
+            break
+
+    return L
+  
+def psim(zeta):
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+        #psim = 3. * np.log( (1. + 1. / x) / 2.)
+    else:
+        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+    return psim
+  
+def psih(zeta):
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psih  = 2. * np.log( (1. + x*x) / 2.)
+        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+    else:
+        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+    return psih
+ 
 class model:
-    def __init__(self, model_input):
-        # initialize the different components of the model
-        self.input = cp.deepcopy(model_input)
+    def __init__(self, model_input = None,debug_level=None):
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        self.logger = logging.getLogger('model')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        """ initialize the different components of the model """ 
+
+        if model_input is not None:
+            # class4gl style input
+            if 'pars' in model_input.__dict__.keys():
+
+                # we make a reference to the full input first, so we can dump it
+                # afterwards
+                self.input_c4gl = model_input
+
+                # we copy the regular parameters first. We keep the classical input
+                # format as self.input so that we don't have to change the entire
+                # model code.
+                self.input = cp.deepcopy(model_input.pars)
+
+                # we copy other sections we are interested in, such as profile
+                # data, and store it also under input
+
+                # I know we mess up a bit the structure of the class4gl_input, but
+                # we will make it clean again at the time of dumping data
+
+                # So here, we copy the profile data into self.input
+                # 1. Air circulation data 
+                if 'sw_ac' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ac']:
+                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
+                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
+
+                    # correct pressure of levels according to surface pressure
+                    # error (so that interpolation is done in a consistent way)
+
+                    p_e = self.input.Ps - self.input.sp
+                    for irow in self.input.air_ac.index[::-1]:
+                       self.input.air_ac.p.iloc[irow] =\
+                        self.input.air_ac.p.iloc[irow] + p_e
+                       p_e = p_e -\
+                       (self.input.air_ac.p.iloc[irow]+p_e)/\
+                        self.input.air_ac.p.iloc[irow] *\
+                        self.input.air_ac.delpdgrav.iloc[irow]*grav
+
+
+
+                # 2. Air profile data (e.g., from balloon soundings)
+                if 'sw_ap' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ap']:
+                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
+
+            # standard class input
+            else:
+                self.input = cp.deepcopy(model_input)
+
+    def load_yaml_dict(self,yaml_dict):
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                for keydata,value in data.items():
+                    self.__dict__[keydata] = value
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            #elif key == 'sources':
+            #    self.__dict__[key] = data
+            elif key == 'out':
+                # lets convert it to a list of dictionaries
+                dictouttemp = pd.DataFrame(data).to_dict('list')
+            else: 
+                 warnings.warn("Key '"+key+"' is not implemented.")
+            #     self.__dict__[key] = data
+
+
+        self.tsteps = len(dictouttemp['h'])
+        self.out = model_output(self.tsteps)
+        for keydictouttemp in dictouttemp.keys():
+            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+
+
   
     def run(self):
         # initialize model variables
@@ -104,14 +240,19 @@ def init(self):
         self.sw_ls      = self.input.sw_ls      # land surface switch
         self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
         self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
+
+        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
+        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
+        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
   
         # initialize mixed-layer
         self.h          = self.input.h          # initial ABL height [m]
         self.Ps         = self.input.Ps         # surface pressure [Pa]
+        self.sp         = self.input.sp         # This is also surface pressure
+                                                #but derived from the global data [Pa]
         self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
         self.ws         = None                  # large-scale vertical velocity [m s-1]
         self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
-        self.fc         = self.input.fc         # coriolis parameter [s-1]
         self.we         = -1.                   # entrainment velocity [m s-1]
        
          # Temperature 
@@ -181,105 +322,6 @@ def init(self):
         self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
         self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
       
-  # BEGIN -- HW 20170606
-
-        # z-coordinate for vertical profiles of stratification above the mixed-layer height
-
-        self.z_pro      = self.input.z_pro  # initial profile of potential temperature [K]
-
-        self.theta_pro  = self.input.theta_pro  # initial profile of potential temperature [K]
-
-
-        if ((self.theta_pro is not None) and (self.z_pro is not None)):
-
-            
-            indextheta = np.where(self.z_pro == self.h)
-            if len(indextheta) == 0:
-                raise RuntimeError("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
-                
-                
-            if indextheta[0][0] !=1:
-                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
-                stop
-
-            
-            if self.theta_pro[1] != self.theta_pro[0]:
-                print("error input profile consistency: two lowest profile levels should be equal.")
-            
-            # initialize theta from its profile when available
-            theta_old = self.theta
-            theta_new = self.theta_pro[indextheta[0][0]]
-            
-            
-            
-            if ((theta_old is not None) & (theta_old != theta_new)):
-                print("Warning: theta input was provided ("+str(theta_old)+\
-                    "kg kg-1), but it is now overwritten by the first level (index 0) of theta_pro which is different ("\
-                    +str(theta_new)+"K).")
-                                    
-            self.theta = theta_new
-
-            # make a profile of the stratification 
-            # please note that the stratification between z_pro[i] and z_pro[i+1] 
-            # is given by gammatheta_pro[i]
-
-            # self.gammatheta_pro = np.gradient(self.theta_pro) / np.gradient(self.z_pro)
-            with np.errstate(divide='ignore'):
-                self.gammatheta_pro = np.array(self.theta_pro[1:] - self.theta_pro[:-1]) \
-                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
-                           
-            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro)[0][-1]]
-        else:
-            self.gammatheta_pro = None
-
-
-        self.q_pro  = self.input.q_pro  # initial profile of potential temperature [K]
-
-        if ((self.q_pro is not None) and (self.z_pro is not None)):
-            
-            indexq = np.where(self.z_pro == self.h)
-            if len(indexq) == 0:
-                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
-                stop   
-                
-            if indexq[0][0] !=1:
-                print("Error input profile consistency: mixed-layer height needs to be equal to the second level of the vertical profile input!")
-                stop
-
-            
-            if self.q_pro[1] != self.q_pro[0]:
-                print("error inpuy ptogilr consistency: two lowest profile levels should be equal.")
-            
-            # initialize q from its profile when available
-            q_old = self.q
-            q_new = self.q_pro[indexq[0][0]]
-            
-            
-            
-            if ((q_old is not None) & (q_old != q_new)):
-                print("Warning: q input was provided ("+str(q_old)+\
-                    "kg kg-1), but it is now overwritten by the first level (index 0) of q_pro which is different ("\
-                    +str(q_new)+"kg kg-1).")
-                                    
-            self.q = q_new
-
-            # make a profile of the stratification 
-            # please note that the stratification between z_pro[i] and z_pro[i+1] 
-            # is given by gammaq_pro[i]
-
-            # self.gammaq_pro = np.gradient(self.q_pro) / np.gradient(self.z_pro)
-            with np.errstate(divide='ignore'):
-                self.gammaq_pro = np.array(self.q_pro[1:] - self.q_pro[:-1]) \
-                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
-                           
-            self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
-        else:
-            self.gammaq_pro = None
-
-# END -- HW 20170606      
-        
-        
-        
         
         
         # CO2
@@ -304,7 +346,248 @@ def init(self):
         self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
         self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
         self.advv       = self.input.advv       # advection of v-wind [m s-2]
- 
+         
+  # BEGIN -- HW 20170606
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        if self.sw_ac:
+        # this is the data frame with the gridded profile on the L60 grid
+        # (subsidence, and advection) 
+            self.air_ac      = self.input.air_ac  # full level air circulation
+                                                  # forcing
+            # self.air_ach     = self.input.air_ach # half level air circulation
+            #                                       # forcing
+            
+
+        if self.sw_ap:
+        # this is the data frame with the fitted profile (including HAGL,
+        # THTA,WSPD, SNDU,WNDV PRES ...)
+            self.air_ap      = self.input.air_ap  # initial fitted air profiles (z, p, theta, q, u, v, ...)
+
+            # just for legacy reasons...
+            if 'z' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
+            if 'p' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
+
+            indexh = np.where(self.air_ap.z.values == self.h)
+            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
+                raise ValueError("Error input profile consistency: mixed- \
+                                 layer height needs to be equal to the second \
+                                 and third \
+                                 level of the vertical profile input!")
+            # initialize surface pressure from the profile when available
+            p_old = self.Ps
+            p_new = self.air_ap.p[indexh[0][0]]
+            
+            if ((p_old is not None) & (p_old != p_new)):
+                print("Warning: Ps input was provided ("+str(p_old)+\
+                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+                    +str(p_new)+"Pa).")
+                                    
+            self.Ps = p_new
+            # these variables/namings are more convenient to work with in the code
+            # we will update the original variables afterwards
+            #self.air_ap['q'] = self.air_ap.QABS/1000.
+
+            self.air_ap = \
+                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
+            # we require the temperature fields, since we need to consider
+            # advection
+            # if self.sw_ac:
+            #     #self.air_ap['theta'] = self.air_ap['t'] *
+
+            #     # we consider self.sp in case of air-circulation input (for
+            #     # consistence)
+            #     self.air_ap['t'] = \
+            #                 self.air_ap.theta *  \
+            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
+            # else:
+            # we consider self.Ps in case of balloon input only 
+            self.air_ap = self.air_ap.assign(t = lambda x: \
+                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
+
+            #self.air_ap['theta'] = self.air_ap.THTA
+            if 'u' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
+            if 'v' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
+
+            for var in ['theta','q','u','v']:
+
+                
+                if self.air_ap[var][1] != self.air_ap[var][0]:
+                    raise ValueError("Error input profile consistency: two \
+                                     lowest profile levels for "+var+" should \
+                                     be equal.")
+                
+                # initialize the value from its profile when available
+                value_old = self.__dict__[var]
+                value_new = self.air_ap[var][indexh[0][0]]
+                
+                if ((value_old is not None) & (value_old != value_new)):
+                    warnings.warn("Warning: "+var+" input was provided \
+                                     ("+str(value_old)+"), \
+                                     but it is now overwritten by the first \
+                                     level (index 0) of air_ap["+var+"] which is \
+                                     different ("+str(value_new)+").")
+                                        
+                self.__dict__[var] = value_new
+
+                # make a profile of the stratification 
+                # please note that the stratification between z_pro[i] and
+                # z_pro[i+1] is given by air_ap.GTHT[i]
+
+                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
+                # np.gradient(self.z_pro)
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
+
+
+                self.__dict__['gamma'+var] = \
+                    self.air_ap['gamma'+var][np.where(self.h >= \
+                                                     self.air_ap.z)[0][-1]]
+
+
+
+        # the variable p_pro is just for diagnosis of lifted index
+            
+            
+
+            # input Ph is wrong, so we correct it according to hydrostatic equation
+            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+
+            #if self.sw_ac:
+                # note that we use sp as surface pressure, which is determined
+                # from era-interim instead of the observations. This is to
+                # avoid possible failure of the interpolation routine
+                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
+                #                          + \
+                #                          list(self.air_ap.p[3:]))
+
+            # else:
+                # in the other case, it is updated at the time of calculating
+                # the statistics 
+
+# END -- HW 20170606      
+        #print(self.air_ap)
+
+        if self.sw_ac and not self.sw_ap:
+            raise ValueError("air circulation switch only possible when air \
+                             profiles are given")
+        
+        if self.sw_ac:
+
+            # # # we comment this out, because subsidence is calculated
+            # according to advection
+            # #interpolate subsidence towards the air_ap height coordinate
+            # self.air_ap['w'] = np.interp(self.air_ap.p,\
+            #                               self.air_ac.p,\
+            #                               self.air_ac.w) 
+            # #subsidence at the mixed-layer top
+            # self.w = self.air_ap.w[1]
+        
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+                # in case we didn't find any points, we just take the lowest one.
+                # actually, this can happen if ERA-INTERIM pressure levels are
+                # inconsistent with 
+                if in_ml.sum() == 0:
+                    warnings.warn(" no circulation points in the mixed layer \
+                                  found. We just take the bottom one.")
+                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+
+                for var in ['t','q','u','v']:
+    
+                   # calculation of the advection variables for the mixed layer
+                   # we weight by the hydrostatic thickness of each layer and
+                   # divide by the total thickness
+                   self.__dict__['adv'+var] = \
+                            ((self.air_ac['adv'+var+'_x'][in_ml] \
+                             + \
+                             self.air_ac['adv'+var+'_y'][in_ml])* \
+                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                            self.air_ac['delpdgrav'][in_ml].sum()
+
+                   # calculation of the advection variables for the profile above
+                   # (lowest 3 values are not used by class)
+                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
+                   self.air_ap['adv'+var] = \
+                           np.interp(self.air_ap.p,\
+                                     self.air_ac.p,\
+                                     self.air_ac['adv'+var+'_x']) \
+                           + \
+                           np.interp(self.air_ap.p, \
+                                       self.air_ac.p, \
+                                       self.air_ac['adv'+var+'_y'])
+
+                # as an approximation, we consider that advection of theta in the
+                # mixed layer is equal to advection of t. This is a sufficient
+                # approximation since theta and t are very similar at the surface
+                # pressure.
+                self.__dict__['advtheta'] = self.__dict__['advt']
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # self.wrho = np.interp(self.P_h,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) 
+            # self.ws   = self.air_ap.w.iloc[1]
+
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                self.air_ap = self.air_ap.assign(wp = 0.)
+                self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                              self.air_ac.p, \
+                                              self.air_ac['wp'])
+                self.air_ap = self.air_ap.assign(R = 0.)
+                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                     self.Rv*self.air_ap.q)
+                self.air_ap = self.air_ap.assign(rho = 0.)
+                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+                
+                self.air_ap = self.air_ap.assign(w = 0.)
+                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+                #print('hello w ini')
+
+                # Note: in case of sw_ac is False, we update it from prescribed
+                # divergence
+                self.ws   = self.air_ap.w[1]
+
+                # self.ws   = self.wrho/self.rho
+                # self.ws   = self.wrho/(self.P_h/ \
+                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
+                #                         self.theta) # this should be T!!!
+
+                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+                #                         + \
+                #                         self.air_ac['divU_y'][in_ml])* \
+                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                #             self.air_ac['delpdgrav'][in_ml].sum() \
+        
+
         # Tendencies 
         self.htend      = None                  # tendency of CBL [m s-1]
         self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
@@ -333,6 +616,8 @@ def init(self):
   
         # initialize radiation
         self.lat        = self.input.lat        # latitude [deg]
+        #self.fc         = self.input.fc         # coriolis parameter [s-1]
+        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
         self.lon        = self.input.lon        # longitude [deg]
         self.doy        = self.input.doy        # day of the year [-]
         self.tstart     = self.input.tstart     # time of the day [-]
@@ -441,54 +726,79 @@ def init(self):
             self.run_mixed_layer()
 
     def timestep(self):
+
+        self.dtmax = +np.inf
+        self.logger.debug('before stats') 
         self.statistics()
 
         # run radiation model
+        self.logger.debug('before rad') 
         if(self.sw_rad):
             self.run_radiation()
   
         # run surface layer model
         if(self.sw_sl):
+            self.logger.debug('before surface layer') 
             self.run_surface_layer()
         
         # run land surface model
         if(self.sw_ls):
+            self.logger.debug('before land surface') 
             self.run_land_surface()
  
         # run cumulus parameterization
         if(self.sw_cu):
+            self.logger.debug('before cumulus') 
             self.run_cumulus()
    
+        self.logger.debug('before mixed layer') 
         # run mixed-layer model
         if(self.sw_ml):
             self.run_mixed_layer()
+        self.logger.debug('after mixed layer') 
  
         #get first profile data point above mixed layer
-        zidx_first = np.where(self.z_pro > self.h)[0][0]
-        
-        if self.htend != 0.:
-            dtmax = ( self.z_pro[zidx_first] - self.h)/self.htend
-        else:
-            dtmax = +np.inf
-        
+        if self.sw_ap:
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                # here we correct for the fact that the upper profile also
+                # shifts in the vertical.
+
+                diffhtend = self.htend - self.air_ap.w[zidx_first]
+                if diffhtend > 0:
+                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            else:
+                if self.htend > 0:
+                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            #print(self.h,zidx_first,self.ws,self.air_ap.z)
+
         
-        self.substep =  (self.dtcur > dtmax)
+        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
+        self.logger.debug('before store') 
+        self.substep =  (self.dtcur > self.dtmax)
         if self.substep:
-            dtnext = self.dtcur - dtmax
-            self.dtcur = dtmax
+            dtnext = self.dtcur - self.dtmax
+            self.dtcur = self.dtmax
+
+        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
+
         # HW: this will be done multiple times in case of a substep is needed
         # store output before time integration
         if self.firsttime:
             self.store()
   
+        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
         # time integrate land surface model
         if(self.sw_ls):
             self.integrate_land_surface()
-  
+        self.logger.debug('before integrate mixed layer') 
         # time integrate mixed-layer model
         if(self.sw_ml):
-            self.integrate_mixed_layer()
-        
+            self.integrate_mixed_layer() 
+        self.logger.debug('after integrate mixed layer') 
         if self.substep:
             self.dtcur = dtnext
             self.firsttime = False
@@ -498,7 +808,7 @@ def timestep(self):
             self.t += 1 
             self.firsttime = True
             self.substeps = 0
-        
+        self.logger.debug('going to next step')
         
         
   
@@ -509,6 +819,10 @@ def statistics(self):
         self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
         # Mixed-layer top properties
         self.P_h    = self.Ps - self.rho * self.g * self.h
+        # else:
+            # in the other case, it is updated at the time that the profile is
+            # updated (and at the initialization)
+
         self.T_h    = self.theta - self.g/self.cp * self.h
 
         #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
@@ -533,6 +847,7 @@ def statistics(self):
             it          += 1
 
         if(it == itmax):
+
             print("LCL calculation not converged!!")
             print("RHlcl = %f, zlcl=%f, theta=%f, q=%f"%(RHlcl, self.lcl,self.theta,self.q))
 
@@ -565,7 +880,12 @@ def run_mixed_layer(self):
 
 
         # calculate large-scale vertical velocity (subsidence)
-        self.ws = -self.divU * self.h
+        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
+            self.ws = -self.divU * self.h
+        # else:
+        #     in case the air circulation switch is turned on, subsidence is
+        #     calculated from the circulate profile at the initialization and
+        #     in the integrate_mixed_layer routine
               
         # calculate compensation to fix the free troposphere in case of subsidence 
         if(self.sw_fixft):
@@ -603,29 +923,105 @@ def run_mixed_layer(self):
         self.wqe         = -self.we * self.dq
         self.wCO2e       = -self.we * self.dCO2
         
-        self.htend       = self.we + self.ws + self.wf - self.M
+        htend_pre       = self.we + self.ws + self.wf - self.M
         
-        self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        self.qtend       = (self.wq     - self.wqe     - self.wqM  ) / self.h + self.advq
-        self.CO2tend     = (self.wCO2   - self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
         
-        self.dthetatend  = self.gammatheta * (self.we + self.wf - self.M) - self.thetatend + w_th_ft
-        self.dqtend      = self.gammaq     * (self.we + self.wf - self.M) - self.qtend     + w_q_ft
-        self.dCO2tend    = self.gammaCO2   * (self.we + self.wf - self.M) - self.CO2tend   + w_CO2_ft
+ 
+        #print('thetatend_pre',thetatend_pre)
+        
+        # preliminary boundary-layer top change
+        #htend_pre = self.we + self.ws + self.wf - self.M
+        #preliminary change in temperature jump
+        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
+                          thetatend_pre + w_th_ft
+        
+        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
+        l_entrainment = True
+
+        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
+            l_entrainment = False
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! temperature jump is at the lower limit \
+                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+        elif dtheta_pre < 0.1:
+            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
+            l_entrainment = True
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          " Warning! Potential temperature jump at mixed- \
+                          layer height would become too low limiting timestep \
+                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+            self.dtmax = min(self.dtmax,dtmax_new)
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "next subtimestep, entrainment will be disabled")
+            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
+
+
+
+        # when entrainment is disabled, we just use the simplified formulation
+        # as in Wouters et al., 2013 (section 2.2.1)
+
+        self.dthetatend = l_entrainment*dthetatend_pre + \
+                        (1.-l_entrainment)*0.
+        self.thetatend = l_entrainment*thetatend_pre + \
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+        self.htend = l_entrainment*htend_pre + \
+                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
+        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
+        #stop
+
+
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+
+
+        # self.qtend = l_entrainment*qtend_pre + \
+        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
+        # self.CO2tend = l_entrainment*CO2tend_pre + \
+        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+
+
+        #     # part of the timestep for which the temperature mixed-layer jump
+        #     # was changing, and for which entrainment took place. For the other
+        #     # part, we don't assume entrainment anymore, and we use the
+        #     # simplified formulation  of Wouters et al., 2013
+
+        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
+        #   
+        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
+        #                      self.dthetatend + w_th_ft) + \
+        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
+        #     self.htend = fac*self.htend + \
+        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
+        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
+        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+
+        # else:
+        #     #self.htend = htend_pre
+        #     self.dthetatend = dthetatend_pre
+        #     self.thetatend = thetatend_pre
+        
+        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
+        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
      
         # assume u + du = ug, so ug - u = du
         if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + self.we * self.du)  / self.h + self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + self.we * self.dv)  / self.h + self.advv
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
   
-            self.dutend      = self.gammau * (self.we + self.wf - self.M) - self.utend
-            self.dvtend      = self.gammav * (self.we + self.wf - self.M) - self.vtend
+            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
+            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
         
         # tendency of the transition layer thickness
         if(self.ac > 0 or self.lcl - self.h < 300):
             self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
         else:
             self.dztend = 0.
+
    
     def integrate_mixed_layer(self):
         # set values previous time step
@@ -650,16 +1046,18 @@ def integrate_mixed_layer(self):
             
 
 # END -- HW 20170606        
-        
         self.h        = h0      + self.dtcur * self.htend
+        # print(self.h,self.htend)
+        # stop
         self.theta    = theta0  + self.dtcur * self.thetatend
+        #print(dtheta0,self.dtcur,self.dthetatend)
         self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
         self.q        = q0      + self.dtcur * self.qtend
         self.dq       = dq0     + self.dtcur * self.dqtend
         self.CO2      = CO20    + self.dtcur * self.CO2tend
         self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
         self.dz_h     = dz0     + self.dtcur * self.dztend
-
+            
         # Limit dz to minimal value
         dz0 = 50
         if(self.dz_h < dz0):
@@ -671,61 +1069,312 @@ def integrate_mixed_layer(self):
             self.v        = v0      + self.dtcur * self.vtend
             self.dv       = dv0     + self.dtcur * self.dvtend
 
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
 
-        
-        # note that theta and q itself are updatet by class itself
+            for var in ['t','q','u','v']:
+                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
 
-        #Afterwards, update the vertical profiles by removing any data points below the new h
-        if self.theta_pro is not None:
-            self.theta_pro = np.array([self.theta,self.theta,self.theta+self.dtheta]+list(self.theta_pro[self.z_pro > self.h]))
+            # take into account advection for the whole profile
+                
+                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
 
-        if self.q_pro is not None:
-            self.q_pro = np.array([self.q,self.q,self.q+self.dq]+list(self.q_pro[self.z_pro > self.h]))
+            var = 'z'
+            #print(self.air_ap[var])
+                #     print(self.air_ap['adv'+var])
 
 
 
-        if self.z_pro is not None:
-            self.z_pro = np.array([2.,self.h,self.h]+list(self.z_pro[self.z_pro > self.h]))
 
-        
-        if self.gammatheta_pro is not None:
-            
-                        # self.gammatheta_pro = np.gradient(self.theta_pro) / np.gradient(self.z_pro)
-            with np.errstate(divide='ignore'):
-                self.gammatheta_pro = np.array(self.theta_pro[1:] - self.theta_pro[:-1]) \
-                           / np.array(self.z_pro[1:] -  self.z_pro[:-1]    )
-                           
-            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro)[0][-1]]
-            
-            
-            
-            #self.gammatheta_pro = np.array([self.gammatheta,self.gammatheta,self.gammatheta]+list(self.gammatheta_pro[self.z_pro[:-1] > self.h]))
+            #moving the profile vertically according to the vertical wind
+                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
 
-        if self.gammaq_pro is not None:            
-                        
-            #self.gammaq_pro = np.gradient(self.q_pro) / np.gradient(self.z_pro)
-            with np.errstate(divide='ignore'):
-                self.gammaq_pro = (self.q_pro[1:] -self.q_pro[:-1])  / (self.z_pro[1:] -self.z_pro[:-1])
-            
-            self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
+
+            # air_apvarold = pd.Series(np.array(self.air_ap.z))
+            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
+            # stop
+
+
+                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
+                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
+
+            #As t is updated, we also need to recalculate theta (and R)
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+
+            # air_aptheta_old = pd.Series(self.air_ap['theta'])
+            self.air_ap['theta'] = \
+                        self.air_ap.t * \
+                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
+                                         self.dtcur * self.air_ap.w[zidx_first:]
+
+#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
+#            print(self.t, self.dtcur,self.dt,self.htend)
+
+            # # the pressure levels of the profiles are recalculated according to
+            # # there new height (after subsidence)
+            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
+            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
+            #         * self.dtcur *  self.air_ap.w[zidx_first:]
+
+            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
+                    self.dtcur * self.air_ap.wp[zidx_first:]
+
+            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
+        # note that theta and q themselves are updated by the class itself
+
+    
+        if self.sw_ap:
+            # Just for model consistency preservation purposes, we set the
+            # theta variables of the mixed-layer to nan values, since the
+            # mixed-layer values should be overwritten by the mixed-layer
+            # calculations of class.
+            self.air_ap['theta'][0:3] = np.nan 
+            self.air_ap['p'][0:3] = np.nan 
+            self.air_ap['q'][0:3] = np.nan 
+            self.air_ap['u'][0:3] = np.nan 
+            self.air_ap['v'][0:3] = np.nan 
+            self.air_ap['t'][0:3] = np.nan 
+            self.air_ap['z'][0:3] = np.nan 
+
+            # Update the vertical profiles: 
+            #   - new mixed layer properties( h, theta, q ...)
+            #   - any data points below the new mixed-layer height are removed
+
+            # Three data points at the bottom that describe the mixed-layer
+            # properties
+            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
+                                           # columns as air_ap
+            # air_ap_head['z'].iloc[0] = 2.
+            # air_ap_head['z'].iloc[1] = self.__dict__['h']
+            # air_ap_head['z'].iloc[2] = self.__dict__['h']
+            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
+                        [2.,self.__dict__['h'],self.__dict__['h']]
+            for var in ['theta','q','u','v']:
+
+                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
+                        [self.__dict__[var], \
+                         self.__dict__[var], \
+                         self.__dict__[var] + self.__dict__['d'+var]]
+                
+            #print(self.air_ap)
+
+            # This is the remaining profile considering the remaining
+            # datapoints above the mixed layer height
+            air_ap_tail = self.air_ap.iloc[3:]
+            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
+
+            # print('h',self.h)
+            # # only select samples monotonically increasing with height
+            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            # air_ap_tail = pd.DataFrame()
+            # theta_low = self.theta
+            # z_low =     self.h
+            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            # for ibottom in range(1,len(air_ap_tail_orig)):
+            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
+            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+            # make theta increase strong enough to avoid numerical
+            # instability
+            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            air_ap_tail = pd.DataFrame()
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            theta_low = self.theta
+            z_low =     self.h
+            ibottom = 0
+            itop = 0
+            # print(air_ap_tail_orig)
+            # stop
+
+            # HW: this is the lower limit that we use for gammatheta, which is
+            # there to avoid model crashes. Besides on this limit, the upper
+            # air profile is modified in a way that is still conserves total
+            # quantities of moisture and temperature. The limit is set by trial
+            # and error. The numerics behind the crash should be investigated
+            # so that a cleaner solution can be provided.
+            gammatheta_lower_limit = 0.002
+            while ((itop in range(0,1)) or (itop != ibottom)):
+                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+                if (
+                    #(z_mean > (z_low+0.2)) and \
+                    #(theta_mean > (theta_low+0.02) ) and \
+                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
+                  (itop >= (len(air_ap_tail_orig)-1)) \
+                   :
+
+                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                    ibottom = itop+1
+                    theta_low = air_ap_tail.theta.iloc[-1]
+                    z_low =     air_ap_tail.z.iloc[-1]
+    
+
+                itop +=1
+                # elif  (itop > len(air_ap_tail_orig)-10):
+                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+                #print(itop,ibottom)
+
+            if itop > 1:
+                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! Temperature profile was too steep. \
+                                  Modifying profile: "+ \
+                                  str(itop - 1)+ " measurements were dropped \
+                                  and replaced with its average \
+                                  Modifying profile. \
+                                  mean with next profile point(s).") 
+
+
+            self.air_ap = pd.concat((air_ap_head,\
+                                     air_ap_tail,\
+                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
+                                                                      axis=1)
+
+            if  self.sw_ac:
+                qvalues = \
+                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
+
+                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
+                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
+                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+                self.P_h    = self.Ps - self.rho * self.g * self.h
+                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
+                        [self.Ps,  self.P_h, self.P_h-0.1]
+
+                self.air_ap.t = \
+                            self.air_ap.theta * \
+                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
+
+
+        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
+
+
+
+
+        # else:
+            # in the other case, it is updated at the time the statistics are
+            # calculated 
+
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+
+
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if in_ml.sum() == 0:
+                warnings.warn(" no circulation points in the mixed layer \
+                              found. We just take the bottom one.")
+                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+            for var in ['t','q','u','v']:
+
+                # calculation of the advection variables for the mixed-layer
+                # these will be used for the next timestep
+                # Warning: w is excluded for now.
+
+                self.__dict__['adv'+var] = \
+                        ((self.air_ac['adv'+var+'_x'][in_ml] \
+                         + \
+                         self.air_ac['adv'+var+'_y'][in_ml])* \
+                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                        self.air_ac['delpdgrav'][in_ml].sum()
+
+                # calculation of the advection variables for the profile above
+                # the mixed layer (also for the next timestep)
+                self.air_ap['adv'+var] = \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p,\
+                                              self.air_ac['adv'+var+'_x']) \
+                                    + \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p, \
+                                              self.air_ac['adv'+var+'_y'])
+                # if var == 't':
+                #     print(self.air_ap['adv'+var])
+                #     stop
+
+            # as an approximation, we consider that advection of theta in the
+            # mixed layer is equal to advection of t. This is a sufficient
+            # approximation since theta and t are very similar at the surface
+            # pressure.
+
+            self.__dict__['advtheta'] = self.__dict__['advt']
+
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            # update the vertical wind profile
+            self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                          self.air_ac.p, \
+                                          self.air_ac['wp'])
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
             
+            air_apwold = self.air_ap['w']
+            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+            #print('hello w upd')
+
+            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # # self.wrho = np.interp(self.P_h,\
+            # #                      self.air_ach.p,\
+            # #                      self.air_ach['wrho']) \
+
+
+
+            # Also update the vertical wind at the mixed-layer height
+            # (subsidence)
+            self.ws   = self.air_ap.w[1]
+        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
+
+            ## Finally, we would update the horizontal wind divergence (divU) below (left commented out):
+            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+            #                        + \
+            #                        self.air_ac['divU_y'][in_ml])* \
+            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+            #            self.air_ac['delpdgrav'][in_ml].sum() 
             
-            self.gammaq_pro = np.array([self.gammaq,self.gammaq,self.gammaq]+list(self.gammaq_pro[self.z_pro[:-1] > self.h]))
 
+        if self.sw_ap:
+            for var in ['theta','q','u','v']:
 
-#	# BEGIN -- HW 20170606
-#        # get new gammatheta(h) from its vertical profile when available:
-#        if(self.gammatheta_pro != None):
-#            #self.gammatheta = np.interp(self.h, self.z_pro, self.gammatheta_pro) 
-#            self.gammatheta = self.gammatheta_pro[np.where(self.h >= self.z_pro )[0][-1]]
-#
-#        # get gammaq(h) from its vertical profile when available:
- #       if(self.gammaq_pro != None):
- #           #self.gammaq = np.interp(self.h, self.z_pro, self.gammaq_pro) 
- #           self.gammaq = self.gammaq_pro[np.where(self.h >= self.z_pro)[0][-1]]
-#
-        
- 
+                # update of the slope (gamma) for the different variables, for
+                # the next timestep!
+
+                # there is an warning message that tells about dividing through
+                # zero, which we ignore
+
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                    # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap['gamma'+var] = gammavar
+
+                # Based on the above, update the gamma value at the mixed-layer
+                # top
+                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
+                                                                     self.air_ap.z)[0][-1]]
+
+            
     def run_radiation(self):
         sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
         sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
@@ -743,9 +1392,23 @@ def run_radiation(self):
         self.Lwout = self.bolz * self.Ts ** 4.
           
         self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
+        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
   
     def run_surface_layer(self):
-        ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+        # HW: I had to raise the minimum wind speed to make the simulation with
+        # the non-iterative solution stable (this solution was a wild guess, so I don't
+        # know the exact problem of the instability in case of very low wind
+        # speeds yet)
+        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        # version of 20180730 where there are still some runs crashing. Maybe
+        # an upper limit should be set on the monin-obukhov length instead of
+        # a lower limit on the wind speed?
+        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        
         self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
         qsatsurf       = qsat(self.thetasurf, self.Ps)
         cq             = (1. + self.Cs * ueff * self.rs) ** -1.
@@ -755,23 +1418,56 @@ def run_surface_layer(self):
   
         zsl       = 0.1 * self.h
         self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
-        self.Rib  = min(self.Rib, 0.2)
+        
 
-        self.L     = self.ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
-        #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
- 
-        self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - self.psim(zsl / self.L) + self.psim(self.z0m / self.L)) ** 2.
-        self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - self.psim(zsl / self.L) + self.psim(self.z0m / self.L)) / (np.log(zsl / self.z0h) - self.psih(zsl / self.L) + self.psih(self.z0h / self.L))
-  
-        self.ustar = np.sqrt(self.Cm) * ueff
-        self.uw    = - self.Cm * ueff * self.u
-        self.vw    = - self.Cm * ueff * self.v
- 
-        # diagnostic meteorological variables
-        self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - self.psih(2. / self.L) + self.psih(self.z0h / self.L))
-        self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - self.psih(2. / self.L) + self.psih(self.z0h / self.L))
-        self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - self.psim(2. / self.L) + self.psim(self.z0m / self.L))
-        self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - self.psim(2. / self.L) + self.psim(self.z0m / self.L))
+
+        if self.sw_lit:
+            self.Rib  = min(self.Rib, 0.2)
+            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
+            self.zeta  = zsl/self.L
+            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
+            
+        
+            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
+            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
+            
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+        
+     
+            # diagnostic meteorological variables
+            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
+            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
+            
+            # diagnostic meteorological variables
+        else:
+            
+            ## circumventing any iteration with Wouters et al., 2012
+            self.zslz0m = np.max((zsl/self.z0m,10.))
+            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
+            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
+            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
+            self.L = zsl/self.zeta
+            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
+        
+            self.Cm = self.k**2.0/funm/funm
+            self.Cs = self.k**2.0/funm/funh
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+            
+            # extrapolation from mixed layer (instead of from surface) to 2meter
+            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
+            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
+            self.u2m    =                - self.uw     / self.ustar / self.k * funm
+            self.v2m    =                - self.vw     / self.ustar / self.k * funm
+        
+        
         self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
         self.e2m    = self.q2m * self.Ps / 0.622
      
@@ -782,6 +1478,7 @@ def ribtol(self, Rib, zsl, z0m, z0h):
         else:
             L  = -1.
             L0 = -2.
+        #print(Rib,zsl,z0m,z0h)
         
         while (abs(L - L0) > 0.001):
             L0      = L
@@ -793,8 +1490,8 @@ def ribtol(self, Rib, zsl, z0m, z0h):
                       - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
                                           (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
             L       = L - fx / fxdif
-
-            if(abs(L) > 1e15):
+            #print(L)
+            if(abs(L) > 1e12):
                 break
 
         return L
@@ -938,12 +1635,15 @@ def ags(self):
     def run_land_surface(self):
         # compute ra
         ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
+        #print('ueff',self.u,self.v,self.wstar)
 
         if(self.sw_sl):
           self.ra = (self.Cs * ueff)**-1.
         else:
           self.ra = ueff / max(1.e-3, self.ustar)**2.
 
+        #print('ra',self.ra,self.ustar,ueff)
+
         # first calculate essential thermodynamic variables
         self.esat    = esat(self.theta)
         self.qsat    = qsat(self.theta, self.Ps)
@@ -966,6 +1666,7 @@ def run_land_surface(self):
         self.rssoil = self.rssoilmin * f2 
  
         Wlmx = self.LAI * self.Wmax
+        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
         self.cliq = min(1., self.Wl / Wlmx) 
      
         # calculate skin temperature implictly
@@ -976,6 +1677,10 @@ def run_land_surface(self):
             / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
             + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)
 
+        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
+        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
+        #print('Ts',self.rs)
+
         esatsurf      = esat(self.Ts)
         self.qsatsurf = qsat(self.Ts, self.Ps)
 
@@ -987,6 +1692,7 @@ def run_land_surface(self):
   
         self.LE     = self.LEsoil + self.LEveg + self.LEliq
         self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
+        #print('H',self.ra,self.Ts,self.theta)
         self.G      = self.Lambda * (self.Ts - self.Tsoil)
         self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
         self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
@@ -1003,6 +1709,7 @@ def run_land_surface(self):
   
         # calculate kinematic heat fluxes
         self.wtheta   = self.H  / (self.rho * self.cp)
+        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
         self.wq       = self.LE / (self.rho * self.Lv)
  
     def integrate_land_surface(self):
@@ -1018,14 +1725,29 @@ def integrate_land_surface(self):
     # store model output
     def store(self):
         t                      = self.t
-        self.out.t[t]          = t * self.dt / 3600. + self.tstart
+        
+        self.out.time[t]          = t * self.dt / 3600. + self.tstart
+
+        # in case we are at the end of the simulation, we store the vertical
+        # profiles to the output
+        
+        # if t == (len(self.out.time) - 1):
+        #     self.out.air_ac = self.air_ac
+        #     self.out.air_ap = self.air_ap
+
+        
+        # this way, we only need to define the output variables in the output class, so we don't need to specify it again here.
+        #  for key in self.out.__dict__.keys():
+        #      if key in self.__dict__:
+        #          self.out.__dict__[key][t]  = self.__dict__[key]
+        
         self.out.h[t]          = self.h
         
-        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the following loop:
-        #   for key in list(self.out.__dict__.keys()):
-        #       self.out.__dict__[key][t] = self[key]
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
         
         self.out.gammatheta[t] = self.gammatheta
+        self.out.gammau[t]     = self.gammau
+        self.out.gammav[t]     = self.gammav
         self.out.gammaq[t]     = self.gammaq
         self.out.theta[t]      = self.theta
         self.out.thetav[t]     = self.thetav
@@ -1068,6 +1790,12 @@ def store(self):
         self.out.v2m[t]        = self.v2m
         self.out.e2m[t]        = self.e2m
         self.out.esat2m[t]     = self.esat2m
+
+
+        self.out.Tsoil[t]      = self.Tsoil
+        self.out.T2[t]         = self.T2
+        self.out.Ts[t]         = self.Ts
+        self.out.wg[t]         = self.wg
         
         self.out.thetasurf[t]  = self.thetasurf
         self.out.thetavsurf[t] = self.thetavsurf
@@ -1256,7 +1984,7 @@ def exitmodel(self):
 # class for storing mixed-layer model output data
 class model_output:
     def __init__(self, tsteps):
-        self.t          = np.zeros(tsteps)    # time [s]
+        self.time          = np.zeros(tsteps)    # time [s]
 
         # mixed-layer variables
         self.h          = np.zeros(tsteps)    # ABL height [m]
@@ -1264,6 +1992,8 @@ def __init__(self, tsteps):
         self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
         self.gammatheta = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
         self.gammaq     = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
+        self.gammau     = np.zeros(tsteps)
+        self.gammav     = np.zeros(tsteps)
         self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
         self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
         self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
@@ -1306,6 +2036,12 @@ def __init__(self, tsteps):
         self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
         self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
 
+        # ground variables
+        self.Tsoil       = np.zeros(tsteps)
+        self.T2          = np.zeros(tsteps)
+        self.Ts          = np.zeros(tsteps)
+        self.wg          = np.zeros(tsteps)
+
         # surface-layer variables
         self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
         self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
@@ -1346,11 +2082,19 @@ def __init__(self, tsteps):
         self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
         self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
         
+        
         self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
 
 # class for storing mixed-layer model input data
 class model_input:
     def __init__(self):
+
+        # # comment not valid
+        # we comment out the initialization, because there is a problem when
+        # inheriting values from another class4gl_input instance. We also expect
+        # that the user specifies all the required parameters (if not, an error
+        # is raised). 
+
         # general model variables
         self.runtime    = None  # duration of model run [s]
         self.dt         = None  # time step [s]
@@ -1365,9 +2109,9 @@ def __init__(self):
         self.fc         = None  # Coriolis parameter [s-1]
         
         self.theta      = None  # initial mixed-layer potential temperature [K]
-        self.theta_pro  = None  # optional/initial profile of potential temperature [K]
+        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
 
-        self.z_pro      = None  # height coordinate of the optional input profiles [m]
+        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
 
         self.dtheta     = None  # initial temperature jump at h [K]
         self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
@@ -1376,7 +2120,8 @@ def __init__(self):
         self.wtheta     = None  # surface kinematic heat flux [K m s-1]
         
         self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
-        self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
 
         self.dq         = None  # initial specific humidity jump at h [kg kg-1]
         self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
@@ -1464,6 +2209,6 @@ def __init__(self):
         self.dz_h       = None  # Transition layer thickness [m]
         
 # BEGIN -- HW 20171027
-        self.cala       = None      # soil heat conductivity [W/(K*m)]
-        self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+        # self.cala       = None      # soil heat conductivity [W/(K*m)]
+        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
 # END -- HW 20171027
diff --git a/ribtol_hw.py b/ribtol_hw.py
new file mode 100644
index 0000000..1946cc8
--- /dev/null
+++ b/ribtol_hw.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jan 12 10:46:20 2018
+
+@author: vsc42247
+"""
+
+
+
+# purpose of calc_cm_ch: calculate momentum and thermal turbulent diffusion coefficients of the surface layer with a non-iterative procedure (Wouters et al., 2012)
+
+# input:
+
+# zrib = bulk Richardson number = (g/T)* DT * z/(ua^2)
+#   with:
+#     g = 9.81 m/s2 the gravitational acceleration
+#     z = height (in meters) of the surface layer under consideration 
+#     T = (reference) temperature (in Kelvin) at height z 
+#     DT = (T - T_s) = temperature (in Kelvin) gradient between the surface and height z 
+#     u_a^2 = u^2 +  v^2 is the squared horizontal absolute wind speed 
+# zzz0m = ratio z/z0 between the height z and the momentum roughness length z0m
+# zkbm = ln(z0m/z0h), with z0m, z0h the momentum and thermal roughness length, respectively.
+
+# output: diffusion coefficients (CM and CH) which can be used to determine surface-layer turbulent transport
+# u'w' = - CM ua^2.
+# w'T' = - CH ua DT 
+
+
+# Reference:
+# Wouters, H., De Ridder, K., and Lipzig, N. P. M.: Comprehensive
+# Parametrization of Surface-Layer Transfer Coefficients for Use
+# in Atmospheric Numerical Models, Bound.-Lay. Meteorol., 145,
+# 539–550, doi:10.1007/s10546-012-9744-3, 2012.
+
+import numpy as np
+
+def calc_cm_ch (zeta,zzz0m,zkbm):
+    krm = 0.4
+
+    # Bug fix: use the 'zeta' argument here; the original referenced the
+    # undefined name 'ZETA' (its computation was commented out) -> NameError.
+    FUNM,FUNH = funcsche(zeta,zzz0m,zkbm)
+    CM = krm**2.0/FUNM/FUNM
+    CH = krm**2.0/FUNM/FUNH
+
+    # FUNMn,FUNHn = funcsche(0.,zzz0m,zkbm)
+    # CMn = krm**2.0/FUNMn/FUNMn
+    # CHn = krm**2.0/FUNMn/FUNHn
+
+    # print('CMCMN',CM/CMn)
+    # print('CHCHN',CH/CHn)
+
+    return CM,CH
+
+
+def zeta_hs2(RiB,zzz0m,kBmin1):
+    """Non-iterative estimate of the stability parameter zeta = z/L from the bulk Richardson number RiB (Wouters et al., 2012)."""
+    mum=2.59
+    muh=0.95
+    nu=0.5
+    lam=1.5
+
+    betah = 5.0
+
+    zzz0h = zzz0m*np.exp(kBmin1)
+    zzzs = zzz0m*0.06 # to be changed!! TODO: still needs review (original note in Dutch: 'r. 101 nog bekijken')
+
+    L0M = np.log(zzz0m)
+    L0H = np.log(zzz0h)
+    facM = np.log(1.+lam/mum/zzzs)*np.exp(-mum*zzzs)/lam
+    facH = np.log(1.+lam/muh/zzzs)*np.exp(-muh*zzzs)/lam
+    L0Ms = L0M + facM 
+    L0Hs = L0H + facH
+
+    if RiB < 0.:
+        p = np.log(1.-RiB)
+        Q = -0.486 +0.219*p - 0.0331*p**2-4.93*np.exp(-L0H) - 3.65/L0H +\
+            0.38*p/L0H+ 14.8/L0H/L0H-0.946*p/L0H/L0H-10.0/L0H**3+ \
+            0.392*L0M/L0H-0.084*p*L0M/L0H+0.368*L0M/L0H/L0H
+        # print 'p: ',p
+        # print 'Q: ',Q
+        zeta = (1. + p*Q)* L0Ms**2/L0Hs * RiB
+    else:
+        betam = 4.76+7.03/zzz0m +0.24*zzz0m/zzz0h # to be changed
+        # betam = 5.0 + 1.59*10.**(-5.)*(np.exp(13.0-L0M)-1.0) \
+        #         +0.24*(np.exp(-kBmin1)-1.0) # to be changed!!
+        # print('betam',betam)
+        lL0M = np.log(L0M)
+        S0Ms = 1.-1./zzz0m + (1.+nu/mum/zzzs)*facM
+        S0Hs = 1.-1./zzz0h + (1.+nu/muh/zzzs)*facH
+        zetat = -0.316-0.515*np.exp(-L0H) + 25.8 *np.exp(-2.*L0H) + 4.36/L0H \
+                -6.39/L0H/L0H+0.834*lL0M - 0.0267*lL0M**2
+        # print('zetat',zetat)
+        RiBt = zetat *(L0Hs+ S0Hs*betah*zetat)/(L0Ms+S0Ms*betam*zetat)**2 
+        # print('RiBt',RiBt)
+
+        if (RiB > RiBt):
+            D = (L0Ms+S0Ms*betam*zetat)**3/\
+                (L0Ms*L0Hs+zetat*(2.*S0Hs * betah * L0Ms - S0Ms*betam*L0Hs))
+            zeta = zetat + D*(RiB-RiBt)
+        else:
+            r = RiB - S0Hs*betah/(S0Ms*betam)**2
+            B = S0Ms*betam*L0Hs- 2.*S0Hs*betah*L0Ms
+            C = 4.*(S0Ms*betam)**2 * L0Ms *(S0Hs*betah*L0Ms-S0Ms*betam*L0Hs)
+            zeta = - L0Ms / S0Ms/betam - B*C/(4.*(S0Ms*betam)**3 *(B**2+abs(C*r)))
+            if r != 0:
+                zeta = zeta + (B-np.sqrt(B**2+C*r) + B*C*r/(2.*(B**2+abs(C*r))))/(2.*(S0Ms*betam)**3*r)
+    # print('zeta',zeta)
+    return zeta
+
+def funcsche(zeta,zzz0,kBmin1):
+    """Return (funm, funh): integrated surface-layer stability functions for momentum and heat (Wouters et al., 2012)."""
+    # NOTE(review): mum/muh (2.5/0.9) differ slightly from zeta_hs2 (2.59/0.95) -- confirm this is intended.
+    mum=2.5
+    muh=0.9
+    nu=0.5
+    lam=1.5
+    
+    p2=3.141592/2.
+    
+    lnzzz0=np.log(zzz0)
+    zzzs=zzz0*0.06
+    zetamcorr=(1.+nu/(mum*zzzs))*zeta
+    zetam0=zeta/zzz0
+    zetahcorr=(1.+nu/(muh*zzzs))*zeta
+    zetah0=zeta/(zzz0*np.exp(kBmin1))
+    
+    if (zeta <= 0.):
+    
+        gamma=15.2
+        alfam=0.25
+        xx=(1.-gamma*zeta)**alfam
+        psim=2.*np.log((1.+xx)/2.)+np.log((1.+xx**2.)/2.)-2.*np.arctan(xx)+p2
+        xx0=(1.-gamma*zetam0)**alfam
+        psim0=2.*np.log((1.+xx0)/2.)+np.log((1.+xx0**2.)/2.)-2.*np.arctan(xx0)+p2
+        phimcorr=(1.-gamma*zetamcorr)**(-alfam)
+        
+        alfah=0.5
+        yy=(1.-gamma*zeta)**alfah
+        psih=2.*np.log((1.+yy)/2.)
+        yy0=(1.-gamma*zetah0)**alfah
+        psih0=2.*np.log((1.+yy0)/2.)
+        phihcorr=(1.-gamma*zetahcorr)**(-alfah)
+    else: 
+    
+        aa=6.1
+        bb=2.5
+        psim=-aa*np.log(zeta+(1.+zeta**bb)**(1./bb))
+        psim0=-aa*np.log(zetam0+(1.+zetam0**bb)**(1./bb))
+        phimcorr=1.+aa*(zetamcorr+zetamcorr**bb*(1.+zetamcorr**bb)**((1.-bb)/bb))/(zetamcorr+(1.+zetamcorr**bb)**(1./bb))
+        
+        cc=5.3
+        dd=1.1
+        psih=-cc*np.log(zeta+(1.+zeta**dd)**(1./dd))
+        psih0=-cc*np.log(zetah0+(1.+zetah0**dd)**(1./dd))
+        phihcorr=1.+cc*(zetahcorr+zetahcorr**dd*(1.+zetahcorr**dd)**((1.-dd)/dd))/(zetahcorr+(1.+zetahcorr**dd)**(1./dd))
+    
+    psistrm=phimcorr*(1./lam)*np.log(1.+lam/(mum*zzzs))*np.exp(-mum*zzzs)
+    psistrh=phihcorr*(1./lam)*np.log(1.+lam/(muh*zzzs))*np.exp(-muh*zzzs)
+    
+    funm=lnzzz0-psim+psim0 +psistrm
+    funh=lnzzz0+kBmin1-psih+psih0 +psistrh
+    return funm,funh
+

From 7b1c7bd6bf7faf8af52d5ff157518885bdce0579 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 20 Aug 2018 14:05:14 +0200
Subject: [PATCH 005/129] Create README.md

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 README.md

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2d11adc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,4 @@
+# class4gl
+Chemistry Land-surface Atmosphere Soil Slab model (CLASS) | Python version
+
+This is the extension of class to be able to be used with global balloon soundings. 

From 23387b5165b8986531d5fac5026002a2f09991dc Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 20 Aug 2018 14:06:42 +0200
Subject: [PATCH 006/129] Create setup_global.py

---
 examples/setup_global.py | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 examples/setup_global.py

diff --git a/examples/setup_global.py b/examples/setup_global.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/examples/setup_global.py
@@ -0,0 +1 @@
+

From cdebdf0f8c870957e6b5a3b6009cdef2f30023e2 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 21 Aug 2018 21:30:24 +0200
Subject: [PATCH 007/129] initial commit

---
 MANIFEST                                      |   10 +
 bin/__init__.py                               |    7 +
 bin/setup/batch_setup_global.py               |   42 +
 bin/setup/setup_bllast.py                     |  719 ++++++
 bin/setup/setup_global.py                     |  310 +++
 bin/setup/setup_goamazon.py                   |  740 ++++++
 bin/setup/setup_humppa.py                     |  732 ++++++
 bin/setup/trash/setup_global_old.py           |  284 +++
 bin/simulations/batch_simulations.py          |   77 +
 bin/simulations/runmodel.py                   |  130 +
 bin/simulations/simulations.py                |  260 ++
 bin/simulations/simulations_iter.py           |  364 +++
 bin/simulations/simulations_iter_test.py      |  367 +++
 bin/simulations/trash/run_test.py             |  241 ++
 build/lib/bin/__init__.py                     |    7 +
 build/lib/lib/__init__.py                     |    7 +
 build/lib/lib/class4gl.py                     | 1611 ++++++++++++
 build/lib/lib/data_air.py                     |  473 ++++
 build/lib/lib/data_global.py                  |  936 +++++++
 build/lib/lib/interface_functions.py          |  506 ++++
 build/lib/lib/interface_multi.py              | 2061 +++++++++++++++
 build/lib/lib/model.py                        | 2214 +++++++++++++++++
 class4gl/__init__.py                          |    7 +
 class4gl/__pycache__/__init__.cpython-36.pyc  |  Bin 0 -> 376 bytes
 class4gl/__pycache__/class4gl.cpython-36.pyc  |  Bin 0 -> 28681 bytes
 class4gl/__pycache__/model.cpython-36.pyc     |  Bin 0 -> 36319 bytes
 class4gl/class4gl.py                          | 1611 ++++++++++++
 class4gl/data_air.py                          |  473 ++++
 class4gl/data_global.py                       |  936 +++++++
 class4gl/interface_functions.py               |  506 ++++
 class4gl/interface_multi.py                   | 2061 +++++++++++++++
 class4gl/model.py                             | 2214 +++++++++++++++++
 class4gl/ribtol/Makefile                      |    8 +
 class4gl/ribtol/MakefileMac                   |    9 +
 class4gl/ribtol/__init__.py                   |    7 +
 class4gl/ribtol/ribtol.cpp                    |   81 +
 class4gl/ribtol/ribtol.pyx                    |   48 +
 class4gl/ribtol/ribtol_hw.py                  |  165 ++
 class4gl/ribtol/setup.py                      |   12 +
 dist/class4gl-0.1dev.tar.gz                   |  Bin 0 -> 74685 bytes
 dist/class4gl-0.1dev/PKG-INFO                 |   14 +
 dist/class4gl-0.1dev/bin/__init__.py          |    7 +
 dist/class4gl-0.1dev/lib/__init__.py          |    7 +
 dist/class4gl-0.1dev/lib/class4gl.py          | 1611 ++++++++++++
 dist/class4gl-0.1dev/lib/data_air.py          |  473 ++++
 dist/class4gl-0.1dev/lib/data_global.py       |  936 +++++++
 .../lib/interface_functions.py                |  506 ++++
 dist/class4gl-0.1dev/lib/interface_multi.py   | 2061 +++++++++++++++
 dist/class4gl-0.1dev/lib/model.py             | 2214 +++++++++++++++++
 dist/class4gl-0.1dev/setup.py                 |    9 +
 examples/run_soundings/batch_run_soundings.py |   76 +
 examples/run_soundings/run.py                 |  264 ++
 examples/run_soundings/run_iter.py            |  364 +++
 examples/run_soundings/run_iter_test.py       |  367 +++
 examples/run_soundings/trash/run_test.py      |  241 ++
 examples/setup_global.py                      |    1 -
 examples/setup_soundings/setup_bllast.py      |  719 ++++++
 examples/setup_soundings/setup_global.py      |  310 +++
 examples/setup_soundings/setup_goamazon.py    |  740 ++++++
 examples/setup_soundings/setup_humppa.py      |  732 ++++++
 .../setup_soundings/trash/setup_global_old.py |  284 +++
 lib/class4gl.py                               | 1611 ++++++++++++
 lib/data_air.py                               |  473 ++++
 lib/data_global.py                            |  936 +++++++
 lib/interface_functions.py                    |  506 ++++
 lib/interface_multi.py                        | 2061 +++++++++++++++
 lib/model.py                                  | 2214 +++++++++++++++++
 lib/ribtol/Makefile                           |    8 +
 lib/ribtol/MakefileMac                        |    9 +
 lib/ribtol/ribtol.cpp                         |   81 +
 lib/ribtol/ribtol.pyx                         |   48 +
 lib/ribtol/ribtol_hw.py                       |  165 ++
 lib/ribtol/setup.py                           |   12 +
 setup.py                                      |   13 +-
 trash/data_ground.py                          |  393 +++
 75 files changed, 40693 insertions(+), 9 deletions(-)
 create mode 100644 MANIFEST
 create mode 100644 bin/__init__.py
 create mode 100644 bin/setup/batch_setup_global.py
 create mode 100644 bin/setup/setup_bllast.py
 create mode 100644 bin/setup/setup_global.py
 create mode 100644 bin/setup/setup_goamazon.py
 create mode 100644 bin/setup/setup_humppa.py
 create mode 100644 bin/setup/trash/setup_global_old.py
 create mode 100644 bin/simulations/batch_simulations.py
 create mode 100644 bin/simulations/runmodel.py
 create mode 100644 bin/simulations/simulations.py
 create mode 100644 bin/simulations/simulations_iter.py
 create mode 100644 bin/simulations/simulations_iter_test.py
 create mode 100644 bin/simulations/trash/run_test.py
 create mode 100644 build/lib/bin/__init__.py
 create mode 100644 build/lib/lib/__init__.py
 create mode 100644 build/lib/lib/class4gl.py
 create mode 100644 build/lib/lib/data_air.py
 create mode 100644 build/lib/lib/data_global.py
 create mode 100644 build/lib/lib/interface_functions.py
 create mode 100644 build/lib/lib/interface_multi.py
 create mode 100644 build/lib/lib/model.py
 create mode 100644 class4gl/__init__.py
 create mode 100644 class4gl/__pycache__/__init__.cpython-36.pyc
 create mode 100644 class4gl/__pycache__/class4gl.cpython-36.pyc
 create mode 100644 class4gl/__pycache__/model.cpython-36.pyc
 create mode 100644 class4gl/class4gl.py
 create mode 100644 class4gl/data_air.py
 create mode 100644 class4gl/data_global.py
 create mode 100644 class4gl/interface_functions.py
 create mode 100644 class4gl/interface_multi.py
 create mode 100644 class4gl/model.py
 create mode 100644 class4gl/ribtol/Makefile
 create mode 100644 class4gl/ribtol/MakefileMac
 create mode 100644 class4gl/ribtol/__init__.py
 create mode 100644 class4gl/ribtol/ribtol.cpp
 create mode 100644 class4gl/ribtol/ribtol.pyx
 create mode 100644 class4gl/ribtol/ribtol_hw.py
 create mode 100644 class4gl/ribtol/setup.py
 create mode 100644 dist/class4gl-0.1dev.tar.gz
 create mode 100644 dist/class4gl-0.1dev/PKG-INFO
 create mode 100644 dist/class4gl-0.1dev/bin/__init__.py
 create mode 100644 dist/class4gl-0.1dev/lib/__init__.py
 create mode 100644 dist/class4gl-0.1dev/lib/class4gl.py
 create mode 100644 dist/class4gl-0.1dev/lib/data_air.py
 create mode 100644 dist/class4gl-0.1dev/lib/data_global.py
 create mode 100644 dist/class4gl-0.1dev/lib/interface_functions.py
 create mode 100644 dist/class4gl-0.1dev/lib/interface_multi.py
 create mode 100644 dist/class4gl-0.1dev/lib/model.py
 create mode 100644 dist/class4gl-0.1dev/setup.py
 create mode 100644 examples/run_soundings/batch_run_soundings.py
 create mode 100644 examples/run_soundings/run.py
 create mode 100644 examples/run_soundings/run_iter.py
 create mode 100644 examples/run_soundings/run_iter_test.py
 create mode 100644 examples/run_soundings/trash/run_test.py
 delete mode 100644 examples/setup_global.py
 create mode 100644 examples/setup_soundings/setup_bllast.py
 create mode 100644 examples/setup_soundings/setup_global.py
 create mode 100644 examples/setup_soundings/setup_goamazon.py
 create mode 100644 examples/setup_soundings/setup_humppa.py
 create mode 100644 examples/setup_soundings/trash/setup_global_old.py
 create mode 100644 lib/class4gl.py
 create mode 100644 lib/data_air.py
 create mode 100644 lib/data_global.py
 create mode 100644 lib/interface_functions.py
 create mode 100644 lib/interface_multi.py
 create mode 100644 lib/model.py
 create mode 100644 lib/ribtol/Makefile
 create mode 100644 lib/ribtol/MakefileMac
 create mode 100644 lib/ribtol/ribtol.cpp
 create mode 100644 lib/ribtol/ribtol.pyx
 create mode 100644 lib/ribtol/ribtol_hw.py
 create mode 100644 lib/ribtol/setup.py
 create mode 100644 trash/data_ground.py

diff --git a/MANIFEST b/MANIFEST
new file mode 100644
index 0000000..534f2f4
--- /dev/null
+++ b/MANIFEST
@@ -0,0 +1,10 @@
+# file GENERATED by distutils, do NOT edit
+setup.py
+bin/__init__.py
+lib/__init__.py
+lib/class4gl.py
+lib/data_air.py
+lib/data_global.py
+lib/interface_functions.py
+lib/interface_multi.py
+lib/model.py
diff --git a/bin/__init__.py b/bin/__init__.py
new file mode 100644
index 0000000..a21583b
--- /dev/null
+++ b/bin/__init__.py
@@ -0,0 +1,7 @@
+from . import model,class4gl,interface_multi,data_air,data_global
+
+__version__ = '0.1.0'
+
+__author__ = 'Hendrik Wouters '
+
+__all__ = []
diff --git a/bin/setup/batch_setup_global.py b/bin/setup/batch_setup_global.py
new file mode 100644
index 0000000..4a3f623
--- /dev/null
+++ b/bin/setup/batch_setup_global.py
@@ -0,0 +1,42 @@
+
+
+''' 
+Purpose: 
+    launch array job to get sounding and other global forcing data in class4gl input format
+Usage:
+    python start_setup_global.py
+
+Author:
+    Hendrik Wouters 
+
+'''
+
+import pandas as pd
+import os
+import math
+import numpy as np
+import sys
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+df_stations = pd.read_csv(fn_stations)
+
+# if sys.argv[1] == 'qsub':
+# with qsub
+STNlist = list(df_stations.iterrows())
+NUMSTNS = len(STNlist)
+PROCS = len(STNlist) 
+print(PROCS)
+BATCHSIZE = math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))
+# elif sys.argv[1] == 'wsub':
+#     
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))
+
diff --git a/bin/setup/setup_bllast.py b/bin/setup/setup_bllast.py
new file mode 100644
index 0000000..af8c8bb
--- /dev/null
+++ b/bin/setup/setup_bllast.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+# Read data from the BLLAST campaign and convert it to class4gl input
+
+# WARNING: the raw files have inconsistent tab-versus-space formatting; the following command needs to be executed first:
+#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+
+def replace_iter(iterable, search, replace):
+    for value in iterable:
+        value.replace(search, replace)
+        yield value
+
+from class4gl import blh,class4gl_input
+
+# definition of the humpa station
+current_station = pd.Series({ "latitude"  : 42.971834,
+                  "longitude" : 0.3671169,
+                  "name" : "the BLLAST experiment"
+                })
+current_station.name = 90001
+
+
+
+
+
+# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
+# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
+
+HOUR_FILES = \
+{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110619_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110620_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110625_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110702_1655_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
+}
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+def esat(T):
+    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
+def efrom_rh100_T(rh100,T):
+    return esat(T)*rh100/100.
+def qfrom_e_p(e,p):
+    return epsilon * e/(p - (1.-epsilon)*e)
+
+def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
+                                     #widths=[14]*19,
+                                     #skiprows=9,
+                                     #skipfooter=15,
+                                     #decimal='.',
+                                     #header=None,
+                                     #names = columns,
+                                     #na_values='-----')
+        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
+        print(air_balloon_in.columns)
+        rowmatches = {
+            't':      lambda x: x['TaRad']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['Press']*100.,
+            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
+            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
+            'z':      lambda x: x['Altitude'] -582.,
+            # from virtual temperature to absolute humidity
+            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #lower limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+        # filter data so that potential temperature always increases with
+        # height 
+        cols = []
+        for column in air_ap_tail.columns:
+            #if column != 'z':
+                cols.append(column)
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        # 
+        # # we copy the pressure at ground level from balloon sounding. The
+        # # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # WARNING: workaround. Even though we actually
+        # write local solar time, we need to assign the timezone to UTC (which
+        # is wrong). Otherwise ruby cannot understand it (it always converts
+        # to local computer time).
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise']+dt.timedelta(hours=2))\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        print('ldatetime_daylight',dpars['ldatetime_daylight'])
+        print('ldatetime',dpars['ldatetime'])
+        print('lSunrise',dpars['lSunrise'])
+        dpars['day'] = dpars['ldatetime'].day
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        print('tstart',dpars['tstart'])
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='bllast',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
+    
+    print(humpafn)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+
+    
+    c4gli_morning.pars.sw_ac = []
+    c4gli_morning.pars.sw_ap = True
+    c4gli_morning.pars.sw_lit = False
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = records_morning.index
+# 
+# 
+# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/bin/setup/setup_global.py b/bin/setup/setup_global.py
new file mode 100644
index 0000000..79224d9
--- /dev/null
+++ b/bin/setup/setup_global.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <iPROC>
+    where <iPROC> is an integer indicating the row index of the station list
+    under odir+'/'+fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
+#calculate the root mean square error
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    
+    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# iniitialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_csv(fn_stations)
+
+
+STNlist = list(df_stations.iterrows())
+NUMSTNS = len(STNlist)
+PROCS = 100
+BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+
+
+iPROC = int(sys.argv[1])
+
+
+for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
+    one_run = False
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
+    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout, \
+         open(fnout_afternoon,'w') as fileout_afternoon:
+        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
+        wy_strm.set_STNM(int(STN['ID']))
+
+        # we consider all soundings after 1981
+        wy_strm.find_first(year=1981)
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+        # so we continue as long as we can find a new sounding
+                
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            try:
+                c4gli.get_profile_wyoming(wy_strm)
+                #print(STN['ID'],c4gli.pars.datetime)
+                #c4gli.get_global_input(globaldata)
+
+                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+                logic = dict()
+                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+                logic['daylight'] = \
+                    ((c4gli.pars.ldatetime_daylight - 
+                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
+                
+                logic['springsummer'] = (c4gli.pars.theta > 278.)
+                
+                # we take 3000 because previous analysis (ie., HUMPPA) has
+                # focussed towards such altitude
+                le3000 = (c4gli.air_balloon.z <= 3000.)
+                logic['10measurements'] = (np.sum(le3000) >= 10) 
+
+                leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+                logic['mlerrlow'] = (\
+                        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                        (not np.isnan(c4gli.pars.theta)) and \
+                        (rmse(c4gli.air_balloon.theta[leh] , \
+                              c4gli.pars.theta,filternan_actual=True) < 1.)\
+                              )
+    
+
+                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+                
+                print('logic:', logic)
+                # the result
+                morning_ok = np.mean(list(logic.values()))
+                print(morning_ok,c4gli.pars.ldatetime)
+
+            except:
+                morning_ok =False
+                print('obtain morning not good')
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            wy_strm.find_next()
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+            if morning_ok == 1.:
+                print('MORNING OK!')
+                # we get the current date
+                current_date = dt.date(c4gli.pars.ldatetime.year, \
+                                       c4gli.pars.ldatetime.month, \
+                                       c4gli.pars.ldatetime.day)
+                c4gli_afternoon.clear()
+                print('AFTERNOON PROFILE CLEARED')
+                try:
+                    c4gli_afternoon.get_profile_wyoming(wy_strm)
+                    print('AFTERNOON PROFILE OK')
+
+                    if wy_strm.current is not None:
+                        current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                    else:
+                        # a dummy date: this will be ignored anyway
+                        current_date_afternoon = dt.date(1900,1,1)
+
+                    # we will dump the latest afternoon sounding that fits the
+                    # minimum criteria specified by logic_afternoon
+                    print(current_date,current_date_afternoon)
+                    c4gli_afternoon_for_dump = None
+                    while ((current_date_afternoon == current_date) and \
+                           (wy_strm.current is not None)):
+                        logic_afternoon =dict()
+
+                        logic_afternoon['afternoon'] = \
+                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                        logic_afternoon['daylight'] = \
+                          ((c4gli_afternoon.pars.ldatetime - \
+                            c4gli_afternoon.pars.ldatetime_daylight \
+                           ).total_seconds()/3600. <= 0.)
+
+
+                        le3000_afternoon = \
+                            (c4gli_afternoon.air_balloon.z <= 3000.)
+                        logic_afternoon['5measurements'] = \
+                            (np.sum(le3000_afternoon) >= 5) 
+
+                        # we only store the last afternoon sounding that fits these
+                        # minimum criteria
+
+                        afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+                        print('logic_afternoon: ',logic_afternoon)
+                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+                        if afternoon_ok == 1.:
+                            # # doesn't work :(
+                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+                            
+                            # so we just create a new one from the same wyoming profile
+                            c4gli_afternoon_for_dump = class4gl_input()
+                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+                        wy_strm.find_next()
+                        c4gli_afternoon.clear()
+                        c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                        if wy_strm.current is not None:
+                            current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                        else:
+                            # a dummy date: this will be ignored anyway
+                            current_date_afternoon = dt.date(1900,1,1)
+
+                        # Only in the case we have a good pair of soundings, we
+                        # dump them to disk
+                    if c4gli_afternoon_for_dump is not None:
+                        c4gli.update(source='pairs',pars={'runtime' : \
+                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+                                 c4gli.pars.datetime_daylight).total_seconds())})
+    
+    
+                        print('ALMOST...')
+                        if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
+                                
+        
+                            c4gli.get_global_input(globaldata)
+                            print('VERY CLOSE...')
+                            if c4gli.check_source_globaldata() and \
+                                (c4gli.check_source(source='wyoming',\
+                                                   check_only_sections='pars')):
+                                c4gli.dump(fileout)
+                                
+                                c4gli_afternoon_for_dump.dump(fileout_afternoon)
+                                
+                                
+                                # for keyEXP,dictEXP in experiments.items():
+                                #     
+                                #     c4gli.update(source=keyEXP,pars = dictEXP)
+                                #     c4gl = class4gl(c4gli)
+                                #     # c4gl.run()
+                                #     
+                                #     c4gl.dump(c4glfiles[key])
+                                
+                                print('HIT!!!')
+                                one_run = True
+                except:
+                    print('get profile failed')
+                
+    if one_run:
+        STN.name = STN['ID']
+        all_records_morning = get_records(pd.DataFrame([STN]),\
+                                      odir,\
+                                      subset='morning',
+                                      refetch_records=True,
+                                      )
+        all_records_afternoon = get_records(pd.DataFrame([STN]),\
+                                      odir,\
+                                      subset='afternoon',
+                                      refetch_records=True,
+                                      )
+    else:
+        os.system('rm '+fnout)
+        os.system('rm '+fnout_afternoon)
+
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
diff --git a/bin/setup/setup_goamazon.py b/bin/setup/setup_goamazon.py
new file mode 100644
index 0000000..f9efe2c
--- /dev/null
+++ b/bin/setup/setup_goamazon.py
@@ -0,0 +1,740 @@
+# -*- coding: utf-8 -*-
+
+import xarray as xr
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+import glob
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
+
+def replace_iter(iterable, search, replace):
+    for value in iterable:
+        value.replace(search, replace)
+        yield value
+
+from class4gl import blh,class4gl_input
+
+# definition of the humpa station
+current_station = pd.Series({ "latitude"  : -3.21,
+                  "longitude" : -60.6,
+                  "name" : "the GOAMAZON experiment"
+                })
+current_station.name = 90002
+
+# we define the columns ourselves because it is a mess in the file itself.
+columns =\
+['Time[min:sec]',
+ 'P[hPa]',
+ 'T[C]',
+ 'U[%]',
+ 'Wsp[m/s]',
+ 'Wdir[Grd]',
+ 'Lon[°]',
+ 'Lat[°]',
+ 'Altitude[m]',
+ 'GeoPot[m]',
+ 'MRI',
+ 'RI',    
+ 'DewPoint[C]',
+ 'Virt. Temp[C]',
+ 'Rs[m/min]',
+ 'D[kg/m3]',
+ 'Azimut[°]',
+ 'Elevation[°]',
+ 'Range[m]',
+]
+
+DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
+DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)
+
+
+DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))]
+HOUR_FILES = {}
+for iDT, DT in enumerate(DTS):
+    morning_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf')
+    if len(possible_files)>0:
+        morning_file= possible_files[0]
+    afternoon_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*.cdf')
+    if len(possible_files)>0:
+        afternoon_file= possible_files[0]
+
+    if (morning_file is not None) and (afternoon_file is not None):
+        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
+                          'afternoon':[17.5,afternoon_file]}
+
+print(HOUR_FILES)
+
+# HOUR_FILES = \
+# {
+#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
+#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+# }
+
+
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+def esat(T):
+    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
+def efrom_rh100_T(rh100,T):
+    return esat(T)*rh100/100.
+def qfrom_e_p(e,p):
+    return epsilon * e/(p - (1.-epsilon)*e)
+
+
+
+def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
+        print(balloon_file)
+        
+        xrin = balloon_file
+        air_balloon = pd.DataFrame()
+
+        air_balloon['t'] = xrin.tdry.values+273.15
+        air_balloon['p'] = xrin.pres.values*100.
+        
+        air_balloon['u'] = xrin.u_wind.values
+        air_balloon['v'] = xrin.v_wind.values
+        air_balloon['WSPD'] = xrin['wspd'].values
+        
+        print(xrin.rh.values.shape)
+        air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)
+        
+
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        # air_balloon_in = pd.read_fwf(balloon_file,
+        #                              widths=[14]*19,
+        #                              skiprows=9,
+        #                              skipfooter=15,
+        #                              decimal=',',
+        #                              header=None,
+        #                              names = columns,
+        #                              na_values='-----')
+    
+
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
+            'rho': lambda x: x.p /x.t / x.R ,
+        }
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        print('alt in xrin?:','alt' in xrin)
+        if 'alt' in xrin:
+            air_balloon['z'] = xrin.alt.values
+        else:
+            air_balloon['z'] = 0.
+            for irow,row in air_balloon.iloc[1:].iterrows():
+                air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
+                        2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \
+                        (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])
+                        
+             
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+
+        # there are issues with the lower measurements in the HUMPPA campaign,
+        # for which a steady decrease of potential temperature is found, which
+        # is unrealistic.  Here I filter them away
+        ifirst = 0
+        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
+            ifirst = ifirst+1
+        print ('ifirst:',ifirst)
+        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            print(air_balloon.z.shape,air_balloon.thetav.shape,)
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
+        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='humppa',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn =pair['morning'][1]
+    print(humpafn)
+    balloon_file = xr.open_dataset(humpafn)
+
+    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn = pair['afternoon'][1]
+    balloon_file = xr.open_dataset(humpafn)
+
+    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+    c4gli_morning.update(source='manual',
+                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+
+"""
+stations_for_iter = stations(path_exp)
+for STNID,station in stations_iterator(stations_for_iter):
+    records_current_station_index = \
+            (records_ini.index.get_level_values('STNID') == STNID)
+    file_current_station_mod = STNID
+
+    with \
+    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+        for (STNID,index),record_ini in records_iterator(records_ini):
+            c4gli_ini = get_record_yaml(file_station_ini, 
+                                        record_ini.index_start, 
+                                        record_ini.index_end,
+                                        mode='ini')
+            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+
+            record_mod = records_mod.loc[(STNID,index)]
+            c4gl_mod = get_record_yaml(file_station_mod, 
+                                        record_mod.index_start, 
+                                        record_mod.index_end,
+                                        mode='mod')
+            record_afternoon = records_afternoon.loc[(STNID,index)]
+            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+                                        record_afternoon.index_start, 
+                                        record_afternoon.index_end,
+                                        mode='ini')
+"""
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersectino index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/bin/setup/setup_humppa.py b/bin/setup/setup_humppa.py
new file mode 100644
index 0000000..ff37628
--- /dev/null
+++ b/bin/setup/setup_humppa.py
@@ -0,0 +1,732 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+
+def replace_iter(iterable, search, replace):
+    for value in iterable:
+        value.replace(search, replace)
+        yield value
+
+from class4gl import blh,class4gl_input
+
+# definition of the humpa station
+current_station = pd.Series({ "latitude"  : 61.8448,
+                  "longitude" : 24.2882,
+                  "name" : "the HUMMPA experiment"
+                })
+current_station.name = 90000
+
+# we define the columns ourselves because it is a mess in the file itself.
+columns =\
+['Time[min:sec]',
+ 'P[hPa]',
+ 'T[C]',
+ 'U[%]',
+ 'Wsp[m/s]',
+ 'Wdir[Grd]',
+ 'Lon[°]',
+ 'Lat[°]',
+ 'Altitude[m]',
+ 'GeoPot[m]',
+ 'MRI',
+ 'RI',    
+ 'DewPoint[C]',
+ 'Virt. Temp[C]',
+ 'Rs[m/min]',
+ 'D[kg/m3]',
+ 'Azimut[°]',
+ 'Elevation[°]',
+ 'Range[m]',
+]
+
+
+HOUR_FILES = \
+{ dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
+  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[18,'humppa_071310_1800.txt']},
+  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
+  dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
+  dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
+  dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
+  dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
+  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[21,'humppa_071910_2100.txt']},
+#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
+  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
+  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[18,'humppa_072210_1800.txt']},
+ # something is wrong with ths profile
+ # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
+  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
+  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
+  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
+  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
+  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
+  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[18,'humppa_072910_1800.txt']},
+  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
+  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[15,'humppa_073110_1500.txt']},
+  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
+  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[18,'humppa_080210_1800.txt']},
+  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
+  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
+  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
+  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
+  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
+  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
+  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[18,'humppa_081010_1800.txt']},
+}
+
+
+
+
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_fwf(balloon_file,
+                                     widths=[14]*19,
+                                     skiprows=9,
+                                     skipfooter=15,
+                                     decimal=',',
+                                     header=None,
+                                     names = columns,
+                                     na_values='-----')
+    
+        rowmatches = {
+            't':      lambda x: x['T[C]']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['P[hPa]']*100.,
+            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
+            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
+            'z':      lambda x: x['Altitude[m]'],
+            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+
+        # there are issues with the lower measurements in the HUMPPA campaign,
+        # for which a steady decrease of potential temperature is found, which
+        # is unrealistic.  Here I filter them away
+        ifirst = 0
+        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
+            ifirst = ifirst+1
+        print ('ifirst:',ifirst)
+        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # 
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='humppa',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+# Output directory for the processed IOPS sounding archives (yaml).
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+# Parse every morning HUMPPA balloon sounding and append it to one yaml
+# archive per station. HOUR_FILES (defined earlier in this script) maps a
+# date to its morning/afternoon (hour, filename) pairs.
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    # latin-1: the raw HUMPPA files contain non-UTF-8 symbols (e.g. degree signs)
+    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
+    print(humpafn)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    # humppa_parser dumps the profile into file_morning and returns the
+    # class4gl_input object it built
+    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+# Same procedure for the afternoon soundings.
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+# Build an index table of the morning records just written to the yaml
+# archive (refetch_records=True forces a rescan of the yaml file).
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+# Output directory of the model experiment (initial conditions + model runs).
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+# Re-open the sounding archives for reading; open the experiment archives
+# ('ini' = model input, 'mod' = model output) for writing.
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+# Run the class4gl model once for every morning sounding, paired with the
+# afternoon sounding of the same day.
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    # read the full morning profile back from the yaml archive using the
+    # record's byte offsets (index_start/index_end)
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    # the simulation runtime [s] spans from the morning to the afternoon
+    # sounding, using the daylight-corrected timestamps
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+    c4gli_morning.update(source='manual',
+                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    # include_input=False: the input was already dumped to file_ini above
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+# Rescan the freshly written experiment archives so that the model output
+# can later be compared against the afternoon observations.
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+# model records correspond one-to-one to their initial conditions
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersectino index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/bin/setup/trash/setup_global_old.py b/bin/setup/trash/setup_global_old.py
new file mode 100644
index 0000000..d812684
--- /dev/null
+++ b/bin/setup/trash/setup_global_old.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <iPROC>
+    where <iPROC> is an integer indicating the row index of the station list
+    under odir+'/'+fn_stations (see below)
+
+this script should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
+#calculate the root mean square error
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    
+    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+
+
+# class4gl is not on the default python path of the HPC cluster
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# initialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_csv(fn_stations)
+
+
+STNlist = list(df_stations.iterrows())
+NUMSTNS = len(STNlist)
+PROCS = 100
+# each array job handles BATCHSIZE consecutive stations of the list
+BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+
+
+# index of this batch job (selects the station slice), from the command line
+iPROC = int(sys.argv[1])
+
+
+# Main loop over this job's slice of the station list: stream through the
+# whole Wyoming sounding archive of each station and write out every valid
+# pair of a morning + same-day afternoon sounding.
+for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
+    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout, \
+         open(fnout_afternoon,'w') as fileout_afternoon:
+        # NOTE(review): the wyoming class in data_soundings takes no
+        # constructor arguments elsewhere in this commit -- confirm that a
+        # PATH/STNM signature exists for this (trash/) script's version.
+        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
+        wy_strm.set_STNM(int(STN['ID']))
+
+        # we consider all soundings after 1981
+        wy_strm.find_first(year=1981)
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+        # so we continue as long as we can find a new sounding
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            c4gli.get_profile_wyoming(wy_strm)
+            #print(STN['ID'],c4gli.pars.datetime)
+            #c4gli.get_global_input(globaldata)
+
+            print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+            # quality criteria for a usable morning sounding; every entry
+            # must be True (checked below via np.mean(...) == 1.)
+            logic = dict()
+            logic['morning'] =  (c4gli.pars.ldatetime.hour < 12.)
+            logic['daylight'] = \
+                ((c4gli.pars.ldatetime_daylight - 
+                  c4gli.pars.ldatetime).total_seconds()/3600. <= 5.)
+            
+            logic['springsummer'] = (c4gli.pars.theta > 278.)
+            
+            # we take 3000 because previous analysis (ie., HUMPPA) has
+            # focussed towards such altitude
+            le3000 = (c4gli.air_balloon.z <= 3000.)
+            logic['10measurements'] = (np.sum(le3000) >= 10) 
+
+            # levels inside the diagnosed mixed layer (below h)
+            leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+            try:
+                logic['mlerrlow'] = (\
+                        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                        (not np.isnan(c4gli.pars.theta)) and \
+                        (rmse(c4gli.air_balloon.theta[leh] , \
+                              c4gli.pars.theta,filternan_actual=True) < 1.)\
+                              )
+    
+            except:
+                logic['mlerrlow'] = False
+                print('rmse probably failed')
+
+            # mixed-layer height estimate must be reasonably certain
+            logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+            
+            print('logic:', logic)
+            # the result
+            morning_ok = np.mean(list(logic.values()))
+            print(morning_ok,c4gli.pars.ldatetime)
+            
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            wy_strm.find_next()
+
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+            if morning_ok == 1.:
+                # we get the current date
+                current_date = dt.date(c4gli.pars.ldatetime.year, \
+                                       c4gli.pars.ldatetime.month, \
+                                       c4gli.pars.ldatetime.day)
+                c4gli_afternoon.clear()
+                c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                if wy_strm.current is not None:
+                    current_date_afternoon = \
+                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                       c4gli_afternoon.pars.ldatetime.month, \
+                                       c4gli_afternoon.pars.ldatetime.day)
+                else:
+                    # a dummy date: this will be ignored anyway
+                    current_date_afternoon = dt.date(1900,1,1)
+
+                # we will dump the latest afternoon sounding that fits the
+                # minimum criteria specified by logic_afternoon
+                c4gli_afternoon_for_dump = None
+                while ((current_date_afternoon == current_date) and \
+                       (wy_strm.current is not None)):
+                    logic_afternoon =dict()
+
+                    logic_afternoon['afternoon'] = \
+                        (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                    logic_afternoon['daylight'] = \
+                      ((c4gli_afternoon.pars.ldatetime - \
+                        c4gli_afternoon.pars.ldatetime_daylight \
+                       ).total_seconds()/3600. <= 2.)
+
+
+                    le3000_afternoon = \
+                        (c4gli_afternoon.air_balloon.z <= 3000.)
+                    logic_afternoon['5measurements'] = \
+                        (np.sum(le3000_afternoon) >= 5) 
+
+                    # we only store the last afternoon sounding that fits these
+                    # minimum criteria
+
+                    afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+                    print('logic_afternoon: ',logic_afternoon)
+                    print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+                    if afternoon_ok == 1.:
+                        # # doesn't work :(
+                        # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+                        
+                        # so we just create a new one from the same wyoming profile
+                        c4gli_afternoon_for_dump = class4gl_input()
+                        c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+                    wy_strm.find_next()
+                    c4gli_afternoon.clear()
+                    c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                    if wy_strm.current is not None:
+                        current_date_afternoon = \
+                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                       c4gli_afternoon.pars.ldatetime.month, \
+                                       c4gli_afternoon.pars.ldatetime.day)
+                    else:
+                        # a dummy date: this will be ignored anyway
+                        current_date_afternoon = dt.date(1900,1,1)
+
+                    # Only in the case we have a good pair of soundings, we
+                    # dump them to disk
+                if c4gli_afternoon_for_dump is not None:
+                    # runtime [s]: morning sounding up to the last valid
+                    # afternoon sounding (daylight-corrected timestamps)
+                    c4gli.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+                             c4gli.pars.datetime_daylight).total_seconds())})
+    
+    
+                    print('ALMOST...')
+                    if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
+                            
+        
+                        c4gli.get_global_input(globaldata)
+                        print('VERY CLOSE...')
+                        if c4gli.check_source_globaldata() and \
+                            (c4gli.check_source(source='wyoming',\
+                                               check_only_sections='pars')):
+                            c4gli.dump(fileout)
+                            
+                            c4gli_afternoon_for_dump.dump(fileout_afternoon)
+                            
+                            
+                            # for keyEXP,dictEXP in experiments.items():
+                            #     
+                            #     c4gli.update(source=keyEXP,pars = dictEXP)
+                            #     c4gl = class4gl(c4gli)
+                            #     # c4gl.run()
+                            #     
+                            #     c4gl.dump(c4glfiles[key])
+                            
+                            print('HIT!!!')
+                
+                
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
+
diff --git a/bin/simulations/batch_simulations.py b/bin/simulations/batch_simulations.py
new file mode 100644
index 0000000..b5d4cc3
--- /dev/null
+++ b/bin/simulations/batch_simulations.py
@@ -0,0 +1,77 @@
+
+import argparse
+
+import pandas as pd
+import os
+import math
+import numpy as np
+import sys
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+# Station metadata of the IGRA selection; note that df_stations is only
+# referenced by the commented-out wsub branch at the bottom of this script.
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+df_stations = pd.read_csv(fn_stations)
+
+# if 'path-soundings' in args.__dict__.keys():
+#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+# else:
+
+
+
+# Command-line options of the batch launcher.
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
+    parser.add_argument('--exec')
+    parser.add_argument('--experiments')#should be ';'-seperated list
+    parser.add_argument('--split-by',default=-1)
+    args = parser.parse_args()
+
+# NOTE(review): everything below runs at import time, but 'args' is only
+# defined inside the __main__ guard above, so importing this module raises
+# NameError -- consider moving the code below inside the guard.
+experiments = args.experiments.split(';')
+#SET = 'GLOBAL'
+SET = args.dataset
+print(args.experiments)
+
# argparse stores the '--path-soundings' option as the attribute
# 'path_soundings' (dashes are converted to underscores), so the original
# lookup of the literal key 'path-soundings' in args.__dict__ could never
# succeed and the option was silently ignored. Test the real attribute
# instead; it defaults to None when the option was not supplied.
if getattr(args, 'path_soundings', None) is not None:
    path_soundingsSET = args.path_soundings+'/'+SET+'/'
else:
    # default sounding archive location on the cluster
    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+# Collect the station table and the morning record index of the dataset.
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+# Wipe any previous output of the requested experiments.
+# NOTE(review): destructive 'rm -R' on a path built from user input.
+for expname in experiments:
+    #exp = EXP_DEFS[expname]
+    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+    os.system('rm -R '+path_exp)
+
+# Each station is split in chunks of at most 'split_by' morning records;
+# one array-job task is submitted per chunk.
+totalchunks = 0
+for istation,current_station in all_stations.iterrows():
+    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
+    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
+    totalchunks +=chunks_current_station
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
+                                       ',split_by='+str(args.split_by)+\
+                                       ',exec='+str(args.exec)+\
+                                       ',experiments='+str(args.experiments))
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
+
diff --git a/bin/simulations/runmodel.py b/bin/simulations/runmodel.py
new file mode 100644
index 0000000..fc4fd19
--- /dev/null
+++ b/bin/simulations/runmodel.py
@@ -0,0 +1,130 @@
+#
+# Example of how to run the Python code, and access the output
+# This case is identical to the default setup of CLASS (the version with interface) 
+#
+
+from pylab import *
+from model import *
+
+""" 
+Create empty model_input and set up case
+"""
+run1input = model_input()
+
+run1input.dt         = 60.       # time step [s]
+run1input.runtime    = 12*3600    # total run time [s]
+
+# mixed-layer input
+run1input.sw_ml      = True      # mixed-layer model switch
+run1input.sw_shearwe = False     # shear growth mixed-layer switch
+run1input.sw_fixft   = False     # Fix the free-troposphere switch
+run1input.h          = 200.      # initial ABL height [m]
+run1input.Ps         = 101300.   # surface pressure [Pa]
+run1input.divU       = 0.        # horizontal large-scale divergence of wind [s-1]
+run1input.fc         = 1.e-4     # Coriolis parameter [m s-1]
+
+run1input.theta      = 288.      # initial mixed-layer potential temperature [K]
+run1input.dtheta     = 1.        # initial temperature jump at h [K]
+run1input.gammatheta = 0.006     # free atmosphere potential temperature lapse rate [K m-1]
+run1input.advtheta   = 0.        # advection of heat [K s-1]
+run1input.beta       = 0.2       # entrainment ratio for virtual heat [-]
+run1input.wtheta     = 0.1       # surface kinematic heat flux [K m s-1]
+
+run1input.q          = 0.008     # initial mixed-layer specific humidity [kg kg-1]
+run1input.dq         = -0.001    # initial specific humidity jump at h [kg kg-1]
+run1input.gammaq     = 0.        # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+run1input.advq       = 0.        # advection of moisture [kg kg-1 s-1]
+run1input.wq         = 0.1e-3    # surface kinematic moisture flux [kg kg-1 m s-1]
+
+run1input.CO2        = 422.      # initial mixed-layer CO2 [ppm]
+run1input.dCO2       = -44.      # initial CO2 jump at h [ppm]
+run1input.gammaCO2   = 0.        # free atmosphere CO2 lapse rate [ppm m-1]
+run1input.advCO2     = 0.        # advection of CO2 [ppm s-1]
+run1input.wCO2       = 0.        # surface kinematic CO2 flux [ppm m s-1]
+
+run1input.sw_wind    = False     # prognostic wind switch
+run1input.u          = 6.        # initial mixed-layer u-wind speed [m s-1]
+run1input.du         = 4.        # initial u-wind jump at h [m s-1]
+run1input.gammau     = 0.        # free atmosphere u-wind speed lapse rate [s-1]
+run1input.advu       = 0.        # advection of u-wind [m s-2]
+
+run1input.v          = -4.0      # initial mixed-layer u-wind speed [m s-1]
+run1input.dv         = 4.0       # initial u-wind jump at h [m s-1]
+run1input.gammav     = 0.        # free atmosphere v-wind speed lapse rate [s-1]
+run1input.advv       = 0.        # advection of v-wind [m s-2]
+
+run1input.sw_sl      = False     # surface layer switch
+run1input.ustar      = 0.3       # surface friction velocity [m s-1]
+run1input.z0m        = 0.02      # roughness length for momentum [m]
+run1input.z0h        = 0.002     # roughness length for scalars [m]
+
+run1input.sw_rad     = False     # radiation switch
+run1input.lat        = 51.97     # latitude [deg]
+run1input.lon        = -4.93     # longitude [deg]
+run1input.doy        = 268.      # day of the year [-]
+run1input.tstart     = 6.8       # time of the day [h UTC]
+run1input.cc         = 0.0       # cloud cover fraction [-]
+run1input.Q          = 400.      # net radiation [W m-2] 
+run1input.dFz        = 0.        # cloud top radiative divergence [W m-2] 
+
+run1input.sw_ls      = False     # land surface switch
+run1input.ls_type    = 'js'      # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+run1input.wg         = 0.21      # volumetric water content top soil layer [m3 m-3]
+run1input.w2         = 0.21      # volumetric water content deeper soil layer [m3 m-3]
+run1input.cveg       = 0.85      # vegetation fraction [-]
+run1input.Tsoil      = 285.      # temperature top soil layer [K]
+run1input.T2         = 286.      # temperature deeper soil layer [K]
+run1input.a          = 0.219     # Clapp and Hornberger retention curve parameter a
+run1input.b          = 4.90      # Clapp and Hornberger retention curve parameter b
+run1input.p          = 4.        # Clapp and Hornberger retention curve parameter c
+run1input.CGsat      = 3.56e-6   # saturated soil conductivity for heat
+
+run1input.wsat       = 0.472     # saturated volumetric water content ECMWF config [-]
+run1input.wfc        = 0.323     # volumetric water content field capacity [-]
+run1input.wwilt      = 0.171     # volumetric water content wilting point [-]
+
+run1input.C1sat      = 0.132     
+run1input.C2ref      = 1.8
+
+run1input.LAI        = 2.        # leaf area index [-]
+run1input.gD         = 0.0       # correction factor transpiration for VPD [-]
+run1input.rsmin      = 110.      # minimum resistance transpiration [s m-1]
+run1input.rssoilmin  = 50.       # minimun resistance soil evaporation [s m-1]
+run1input.alpha      = 0.25      # surface albedo [-]
+
+run1input.Ts         = 290.      # initial surface temperature [K]
+
+run1input.Wmax       = 0.0002    # thickness of water layer on wet vegetation [m]
+run1input.Wl         = 0.0000    # equivalent water layer depth for wet vegetation [m]
+
+run1input.Lambda     = 5.9       # thermal diffusivity skin layer [-]
+
+run1input.c3c4       = 'c3'      # Plant type ('c3' or 'c4')
+
+run1input.sw_cu      = False     # Cumulus parameterization switch
+run1input.dz_h       = 150.      # Transition layer thickness [m]
+
+"""
+Init and run the model
+"""
+r1 = model(run1input)
+r1.run()
+
+"""
+Plot output
+"""
+figure()
+subplot(131)
+plot(r1.out.t, r1.out.h)
+xlabel('time [h]')
+ylabel('h [m]')
+
+subplot(132)
+plot(r1.out.t, r1.out.theta)
+xlabel('time [h]')
+ylabel('theta [K]')
+
+subplot(133)
+plot(r1.out.t, r1.out.q*1000.)
+xlabel('time [h]')
+ylabel('q [g kg-1]')
diff --git a/bin/simulations/simulations.py b/bin/simulations/simulations.py
new file mode 100644
index 0000000..719f9a5
--- /dev/null
+++ b/bin/simulations/simulations.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--first-station-row')
+parser.add_argument('--last-station-row')
+parser.add_argument('--station-id') # run a specific station id
+parser.add_argument('--dataset')
+parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--error-handling',default='dump_on_success')
+parser.add_argument('--experiments')
+parser.add_argument('--split-by',default=-1)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+path_soundingsSET = args.path_soundings+'/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iter = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iter.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    if args.station_id is not None:
+        stations_iter = stations_iterator(all_stations)
+        STNID,run_station = stations_iter.set_STNID(int(args.station_id))
+        run_stations = pd.DataFrame(run_station)
+    else:
+        run_stations = pd.DataFrame(all_stations)
+        if args.last_station_row is not None:
+            run_stations = run_stations.iloc[:(int(args.last_station_row)+1)]
+        if args.first_station_row is not None:
+            run_stations = run_stations.iloc[int(args.first_station_row):]
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        try:
+                            c4gl.run()
+                        except Exception:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_success':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                            onerun = True
+
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except Exception:
+                            print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/bin/simulations/simulations_iter.py b/bin/simulations/simulations_iter.py
new file mode 100644
index 0000000..5dfbaff
--- /dev/null
+++ b/bin/simulations/simulations_iter.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk',default=0)
+    args = parser.parse_args()
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if args.path_soundings is not None:
+    path_soundingsSET = args.path_soundings+'/'+SET+'/'
+else:
+    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.last_station)+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.first_station):]
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                #if iexp == 11:
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+
+                        c4gli_morning.pars.itersteps = i
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                   #   timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    except:
+                        print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/bin/simulations/simulations_iter_test.py b/bin/simulations/simulations_iter_test.py
new file mode 100644
index 0000000..eefd475
--- /dev/null
+++ b/bin/simulations/simulations_iter_test.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk')
+    args = parser.parse_args()
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    run_station_chunk = 0
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the morning records, and set the same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+                #if iexp == 11:
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the estimate closest to the border opposite the previous one, which increases the chance that this border is eliminated
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few consecutive times so that the opposite boundary is still updated occasionally
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+                        onerun = True
+
+                        c4gli_morning.pars.itersteps = i
+                    except:
+                        print('run not succesfull')
+                    c4gli_morning.dump(file_ini)
+                    
+                    
+                    c4gl.dump(file_mod,\
+                                  include_input=False,\
+                               #   timeseries_only=timeseries_only,\
+                             )
+                    onerun = True
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/bin/simulations/trash/run_test.py b/bin/simulations/trash/run_test.py
new file mode 100644
index 0000000..767d960
--- /dev/null
+++ b/bin/simulations/trash/run_test.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk')
+    parser.add_argument('--c4gl-path',default='')
+    args = parser.parse_args()
+
+if args.c4gl_path == '': 
+    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+else:
+    sys.path.insert(0, args.c4gl_path)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    run_station_chunk = 0
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the morning records, and set the same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    try:
+                        c4gl.run()
+                    except:
+                        print('run not succesfull')
+                    onerun = True
+
+                    c4gli_morning.dump(file_ini)
+                    
+                    
+                    c4gl.dump(file_mod,\
+                              include_input=False,\
+                              #timeseries_only=timeseries_only,\
+                             )
+                    onerun = True
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/build/lib/bin/__init__.py b/build/lib/bin/__init__.py
new file mode 100644
index 0000000..a21583b
--- /dev/null
+++ b/build/lib/bin/__init__.py
@@ -0,0 +1,7 @@
+# package stub: re-export the class4gl submodules at the package top level
+from . import model,class4gl,interface_multi,data_air,data_global
+
+__version__ = '0.1.0'
+
+__author__ = 'Hendrik Wouters '
+
+__all__ = []
diff --git a/build/lib/lib/__init__.py b/build/lib/lib/__init__.py
new file mode 100644
index 0000000..a21583b
--- /dev/null
+++ b/build/lib/lib/__init__.py
@@ -0,0 +1,7 @@
+# package stub: re-export the class4gl submodules at the package top level
+from . import model,class4gl,interface_multi,data_air,data_global
+
+__version__ = '0.1.0'
+
+__author__ = 'Hendrik Wouters '
+
+__all__ = []
diff --git a/build/lib/lib/class4gl.py b/build/lib/lib/class4gl.py
new file mode 100644
index 0000000..7baaa51
--- /dev/null
+++ b/build/lib/lib/class4gl.py
@@ -0,0 +1,1611 @@
+# -*- coding: utf-8 -*-
+
+"""
+
+Created on Mon Jan 29 12:33:51 2018
+
+Module file for class4gl, which extends the class model to be able to take
+global air profiles as input. It consists of:
+
+CLASSES:
+    - an input object, namely class4gl_input. It includes:
+        - a function to read Wyoming sounding data from a Wyoming stream object
+        - a function to read global data from a globaldata library object 
+    - the model object: class4gl
+    - ....    
+
+DEPENDENCIES:
+    - xarray
+    - numpy
+    - data_global
+    - Pysolar
+    - yaml
+
+@author: Hendrik Wouters
+
+"""
+
+
+
+""" Setup of envirnoment """
+
+# Standard modules of the standard CLASS boundary-layer model
+from model import model
+from model import model_output as class4gl_output
+from model import model_input
+from model import qsat
+#from data_soundings import wyoming 
+import Pysolar
+import yaml
+import logging
+import warnings
+import pytz
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
+
+# Generic Python Packages
+import numpy as np
+import datetime as dt
+import pandas as pd
+import xarray as xr
+import io
+#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
+from data_global import data_global
+grav = 9.81
+
+# this is just a generic input object
+class generic_input(object):
+    def __init__(self):
+        self.init = True
+
+
+# all units from all variables in CLASS(4GL) should be defined here!
+# (presumably consulted for labelling output/plots -- verify at call sites)
+units = {
+         'h':'m',
+         'theta':'K', 
+         'q':'kg/kg',
+         'cc': '-',
+         'cveg': '-',
+         'wg': 'm3 m-3',
+         'w2': 'm3 m-3',
+         #'wg': 'kg/kg',
+         'Tsoil': 'K',
+         'T2': 'K',
+         'z0m': 'm',
+         'alpha': '-',
+         'LAI': '-',
+         'dhdt':'m/h',
+         'dthetadt':'K/h',
+         'dqdt':'kg/kg/h',
+         'BR': '-',
+         'EF': '-',
+}
+
+class class4gl_input(object):
+# this was the way it was defined previously.
+#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+
+    def __init__(self,set_pars_defaults=True,debug_level=None):
+
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        print('hello')
+        self.logger = logging.getLogger('class4gl_input')
+        print(self.logger)
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # # create logger
+        # self.logger = logging.getLogger('class4gl_input')
+        # self.logger.setLevel(debug_level)
+
+        # # create console handler and set level to debug
+        # ch = logging.StreamHandler()
+        # ch.setLevel(debug_level)
+
+        # # create formatter
+        # formatter = logging.Formatter('%(asctime)s - \
+        #                                %(name)s - \
+        #                                %(levelname)s - \
+        #                                %(message)s')
+        # add formatter to ch
+        # ch.setFormatter(formatter)
+     
+        # # add ch to logger
+        # self.logger.addHandler(ch)
+
+        # """ end set up logger """
+
+
+
+        # these are the standard model input single-value parameters for class
+        self.pars = model_input()
+
+        # diagnostic parameters of the initial profile
+        self.diag = dict()
+
+        # In this variable, we keep track of the different parameters from where it originates from. 
+        self.sources = {}
+
+        if set_pars_defaults:
+            self.set_pars_defaults()
+
+    def set_pars_defaults(self):
+
+        """ 
+        Populate self.pars with the default CLASS single-value input
+        parameters, registered under the source name 'defaults'.
+        """
+        defaults = dict( 
+        dt         = 60.    , # time step [s] 
+        runtime    = 6*3600 ,  # total run time [s]
+        
+        # mixed-layer input
+        sw_ml      = True   ,  # mixed-layer model switch
+        sw_shearwe = False  ,  # shear growth mixed-layer switch
+        sw_fixft   = False  ,  # Fix the free-troposphere switch
+        h          = 200.   ,  # initial ABL height [m]
+        Ps         = 101300.,  # surface pressure [Pa]
+        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
+        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
+        
+        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
+        dtheta     = 1.     ,  # initial temperature jump at h [K]
+        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
+        advtheta   = 0.     ,  # advection of heat [K s-1]
+        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
+        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
+        
+        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
+        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
+        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
+        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
+        
+        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
+        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
+        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
+        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
+        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
+        sw_wind    = True  ,  # prognostic wind switch
+        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
+        du         = 0.     ,  # initial u-wind jump at h [m s-1]
+        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
+        advu       = 0.     ,  # advection of u-wind [m s-2]
+        v          = 0.0    , # initial mixed-layer v-wind speed [m s-1]
+        dv         = 0.0    ,  # initial v-wind jump at h [m s-1]
+        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
+        advv       = 0.     ,  # advection of v-wind [m s-2]
+        sw_sl      = True   , # surface layer switch
+        ustar      = 0.3    ,  # surface friction velocity [m s-1]
+        z0m        = 0.02   ,  # roughness length for momentum [m]
+        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
+        sw_rad     = True   , # radiation switch
+        lat        = 51.97  ,  # latitude [deg]
+        lon        = -4.93  ,  # longitude [deg]
+        doy        = 268.   ,  # day of the year [-]
+        tstart     = 6.8    ,  # time of the day [h UTC]
+        cc         = 0.0    ,  # cloud cover fraction [-]
+        Q          = 400.   ,  # net radiation [W m-2] 
+        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
+        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
+        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
+        cveg       = 0.85   ,  # vegetation fraction [-]
+        Tsoil      = 295.   ,  # temperature top soil layer [K]
+        Ts         = 295.   ,    # initial surface temperature [K]
+        T2         = 296.   ,  # temperature deeper soil layer [K]
+        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
+        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
+        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
+        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
+        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
+        wfc        = 0.323  ,  # volumetric water content field capacity [-]
+        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
+        C1sat      = 0.132  ,  
+        C2ref      = 1.8    ,
+        LAI        = 2.     ,  # leaf area index [-]
+        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
+        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
+        rssoilmin  = 50.    ,  # minimum resistance soil evaporation [s m-1]
+        alpha      = 0.25   ,  # surface albedo [-]
+        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
+        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
+        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
+        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
+        sw_cu      = False  ,  # Cumulus parameterization switch
+        dz_h       = 150.   ,  # Transition layer thickness [m]
+        cala       = None   ,  # soil heat conductivity [W/(K*m)]
+        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
+        sw_ls      = True   ,
+        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsidence) fields  as input from eg., ERA-INTERIM
+        sw_lit     = False,
+        )
+        pars = model_input()
+        for key in defaults:
+            pars.__dict__[key] = defaults[key]
+        
+        self.update(source='defaults',pars=pars)
+        
+    def clear(self):
+        """ this procudure clears the class4gl_input """
+
+        for key in list(self.__dict__.keys()):
+            del(self.__dict__[key])
+        self.__init__()
+
+    def dump(self,file):
+        """ this procedure dumps the class4gl_input object into a yaml file
+            
+            Input: 
+                - self.__dict__ (internal): the dictionary from which we read 
+            Output:
+                - file: All the parameters in self.__init__() are written to
+                the yaml file, including pars, air_ap, sources etc.
+        """
+        file.write('---\n')
+        index = file.tell()
+        file.write('# CLASS4GL input; format version: 0.1\n')
+
+        # write out the position of the current record
+        yaml.dump({'index':index}, file, default_flow_style=False)
+
+        # we do not include the none values
+        for key,data in self.__dict__.items():
+            #if ((type(data) == model_input) or (type(class4gl_input):
+            if key == 'pars':
+
+                pars = {'pars' : self.__dict__['pars'].__dict__}
+                parsout = {}
+                for key in pars.keys():
+                    if pars[key] is not None:
+                        parsout[key] = pars[key]
+
+                yaml.dump(parsout, file, default_flow_style=False)
+            elif type(data) == dict:
+                if key == 'sources':
+                    # in case of sources, we want to have a
+                    # condensed list format as well, so we leave out
+                    # 'default_flow_style=False'
+                    yaml.dump({key : data}, file)
+                else: 
+                    yaml.dump({key : data}, file,
+                              default_flow_style=False)
+            elif type(data) == pd.DataFrame:
+                # in case of dataframes (for profiles), we want to have a
+                # condensed list format as well, so we leave out
+                # 'default_flow_style=False'
+                yaml.dump({key: data.to_dict(orient='list')},file)
+
+                # # these are trials to get it into a more human-readable
+                # fixed-width format, but it is too complex
+                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
+                #file.write(stream)
+                
+                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
+                #file.write(key+': !!str |\n')
+                #file.write(str(data)+'\n')
+       
+    def load_yaml_dict(self,yaml_dict,reset=True):
+        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
+            
+            Input: 
+                - yaml_dict: the dictionary from which we read 
+                - reset: reset data before reading        
+            Output:
+                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
+        """
+        
+        if reset:
+            for key in list(self.__dict__.keys()):
+                del(self.__dict__[key])
+            self.__init__()
+
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                self.__dict__[key] = model_input()
+                self.__dict__[key].__dict__ = data
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            elif key == 'sources':
+                self.__dict__[key] = data
+            elif key == 'diag':
+                self.__dict__[key] = data
+            else: 
+                warnings.warn("Key '"+key+"' may not be implemented.")
+                self.__dict__[key] = data
+
+    def update(self,source,**kwargs):
+        """ this procedure is to make updates of input parameters and tracking
+        of their source more convenient. It implements the assignment of
+        parameter source/sensitivity experiment IDs ('eg.,
+        'defaults', 'sounding balloon', any satellite information, climate
+        models, sensitivity tests etc.). These are all stored in a convenient
+        way with as class4gl_input.sources.  This way, the user can always consult with
+        from where parameters data originates from.  
+        
+        Input:
+            - source:    name of the underlying dataset
+            - **kwargs: a dictionary of data input, for which the key values
+            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
+            the values is a again a dictionary/dataframe of datakeys/columns
+            ('wg','PRES','datetime', ...) and datavalues (either single values,
+            profiles ...), eg., 
+
+                pars = {'wg': 0.007  , 'w2', 0.005}
+                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
+                                     300.,...]}
+            
+        Output:
+            - self.__dict__[datatype] : object to which the parameters are
+                                        assigned. They can be consulted with
+                                        self.pars, self.profiles, etc.
+                                        
+            - self.sources[source] : It supplements the overview overview of
+                                     data sources can be consulted with
+                                     self.sources. The structure is as follows:
+                                     as:
+                self.sources = { 
+                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
+                'GLEAM' :  ['pars:wg','pars:w2', ...],
+                 ...
+                }
+        
+        """
+
+        #print(source,kwargs)
+
+        for key,data in kwargs.items():
+
+            #print(key)
+            # if the key is not in class4gl_input object, then just add it. In
+            # that case, the update procedures below will just overwrite it 
+            if key not in self.__dict__:
+                self.__dict__[key] = data
+
+
+            
+
+            #... we do an additional check to see whether there is a type
+            # match. I not then raise a key error
+            if (type(data) != type(self.__dict__[key]) \
+                # we allow dict input for model_input pars
+                and not ((key == 'pars') and (type(data) == dict) and \
+                (type(self.__dict__[key]) == model_input))):
+
+                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
+
+
+            # This variable keeps track of the added data that is supplemented
+            # by the current source. We add this to class4gl_input.sources
+            datakeys = []
+
+            #... and we update the class4gl_input data, and this depends on the
+            # data type
+
+            if type(self.__dict__[key]) == pd.DataFrame:
+                # If the data type is a dataframe, then we update the columns
+                for column in list(data.columns):
+                    #print(column)
+                    self.__dict__[key][column] = data[column]
+                    datakeys.append(column)
+                    
+
+            elif type(self.__dict__[key]) == model_input:
+                # if the data type is a model_input, then we update its internal
+                # dictionary of parameters
+                if type(data) == model_input:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data.__dict__}
+                    datakeys = list(data.__dict__.keys())
+                elif type(data) == dict:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data}
+                    datakeys = list(data.keys())
+                else:
+                    raise TypeError('input key '+key+' is not of the same type\
+                                    as the one in the class4gl_object')
+
+            elif type(self.__dict__[key]) == dict:
+                # if the data type is a dictionary, we update the
+                # dictionary 
+                self.__dict__[key] = {self.__dict__[key] , data}
+                datakeys = list(data.keys())
+
+
+            # if source entry is not existing yet, we add it
+            if source not in self.sources.keys():
+                self.sources[source] = []
+
+
+            # self.logger.debug('updating section "'+\
+            #                  key+' ('+' '.join(datakeys)+')'\
+            #                  '" from source \
+            #                  "'+source+'"')
+
+            # Update the source dictionary: add the provided data keys to the
+            # specified source list
+            for datakey in datakeys:
+                # At first, remove the occurences of the keys in the other
+                # source lists
+                for sourcekey,sourcelist in self.sources.items():
+                    if key+':'+datakey in sourcelist:
+                        self.sources[sourcekey].remove(key+':'+datakey)
+                # Afterwards, add it to the current source list
+                self.sources[source].append(key+':'+datakey)
+
+
+        # # in case the datatype is a class4gl_input_pars, we update its keys
+        # # according to **kwargs dictionary
+        # if type(self.__dict__[datatype]) == class4gl_input_pars:
+        #     # add the data parameters to the datatype object dictionary of the
+        #     # datatype
+        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
+        #                                        **kwargs}
+        # # in case, the datatype reflects a dataframe, we update the columns according
+        # # to the *args list
+        # elif type(self.__dict__[datatype]) == pd.DataFrame:
+        #     for dataframe in args:
+        #         for column in list(dataframe.columns):
+        #             self.__dict__[datatype][column] = dataframe[column]
+        
+
+    def get_profile(self,IOBJ, *args, **argv):
+        # if type(IOBJ) == wyoming:
+        self.get_profile_wyoming(IOBJ,*args,**argv)
+        # else:
+        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
+        
+    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
+        """ 
+            Purpose: 
+                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
+
+            Input:
+                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
+                function will take the profile at the stream's current
+                position. 
+                2. air_ap_mode: which air profile do we take? 
+                    - b : best
+                    - l : according to lower limit for the mixed-layer height
+                            estimate
+                    - u : according to upper limit for the mixed-layer height
+                            estimate
+
+
+            Output:
+                1. all single-value parameters are stored in the
+                   class4gl_input.pars object
+                2. the souding profiles are stored in the in the
+                   class4gl_input.air_balloon dataframe
+                3. modified sounding profiles for which the mixed layer height
+                   is fitted
+                4. ...
+
+        """
+
+
+        # Raise an error in case the input stream is not the correct object
+        # if type(wy_strm) is not wyoming:
+        #    raise TypeError('Not a wyoming type input stream')
+
+        # Let's tell the class_input object that it is a Wyoming fit type
+        self.air_ap_type = 'wyoming'
+        # ... and which mode of fitting we apply
+        self.air_ap_mode = air_ap_mode
+
+        """ Temporary variables used for output """
+        # single value parameters derived from the sounding profile
+        dpars = dict()
+        # profile values
+        air_balloon = pd.DataFrame()
+        # fitted profile values
+        air_ap = pd.DataFrame()
+        
+        string = wy_strm.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = wy_strm.current.find_next('pre').find_next('pre').text
+        
+        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
+        dpars = {**dpars,
+                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
+               }
+        
+        # we get weird output when it's a numpy Timestamp, so we convert it to
+        # pd.datetime type
+
+        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
+        dpars['STNID'] = dpars['Station number']
+
+        # altitude above ground level
+        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
+        # absolute humidity in g/kg
+        air_balloon['q']= (air_balloon.MIXR/1000.) \
+                              / \
+                             (air_balloon.MIXR/1000.+1.)
+        # convert wind speed from knots to m/s
+        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
+        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+        
+        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
+        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
+
+        
+
+        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+
+        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
+        air_balloon['p'] = air_balloon.PRES*100.
+
+
+        # Therefore, determine the sounding that are valid for 'any' column 
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        #is_valid = (air_balloon.z >= 0)
+        # # this is an alternative pipe/numpy method
+        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
+        valid_indices = air_balloon.index[is_valid].values
+        print(valid_indices)
+
+        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+
+        air_balloon['t'] = air_balloon['TEMP']+273.15
+        air_balloon['theta'] = (air_balloon.t) * \
+                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
+        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
+
+        if len(valid_indices) > 0:
+            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
+            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            
+            # the final mixed-layer height that will be used by class. We round it
+            # to 1 decimal so that we get a clean yaml output format
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+
+
+        if np.isnan(dpars['h']):
+            dpars['Ps'] = np.nan
+
+
+
+
+        if ~np.isnan(dpars['h']):
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u 
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+            
+
+
+
+        # First 3 data points of the mixed-layer fit. We create a empty head
+        # first
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+        
+        #calculate mixed-layer jump ( this should be larger than 0.1)
+        
+        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        air_ap_head['HGHT'] = air_ap_head['z'] \
+                                + \
+                                np.round(dpars[ 'Station elevation'],1)
+        
+        # make a row object for defining the jump
+        jump = air_ap_head.iloc[0] * np.nan
+            
+        if air_ap_tail.shape[0] > 1:
+
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = dpars['theta']
+        z_low =     dpars['h']
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                (z_mean > (z_low+10.)) and \
+                (theta_mean > (theta_low+0.2) ) and \
+                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+
+
+
+
+
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        #print(air_ap['PRES'].iloc[0])
+
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+
+        
+        dpars['lat'] = dpars['Station latitude']
+        dpars['latitude'] = dpars['lat']
+        
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        dpars['longitude'] = dpars['Station longitude']
+        
+        dpars['ldatetime'] = dpars['datetime'] \
+                            + \
+                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+
+        # # we make a pars object that is similar to the destination object
+        # pars = model_input()
+        # for key,value in dpars.items():
+        #     pars.__dict__[key] = value
+
+
+        # we round the columns to a specified decimal, so that we get a clean
+        # output format for yaml
+        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
+                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
+                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
+# 
+        for column,decimal in decimals.items():
+            air_balloon[column] = air_balloon[column].round(decimal)
+            air_ap[column] = air_ap[column].round(decimal)
+
+        self.update(source='wyoming',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+
+        
+    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
+    
+        """
+        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
+                 according to the position (lat lon) and the class datetime and timespan
+                 globaldata should be a globaldata multifile object
+        
+        Input: 
+            - globaldata: this is the library object
+            - only_keys: only extract specified keys
+            - exclude_keys: do not inherit specified keys
+        """
+        classdatetime      = np.datetime64(self.pars.datetime_daylight)
+        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
+                                           + \
+                                           dt.timedelta(seconds=self.pars.runtime)\
+                                          )
+
+
+        # # list of variables that we get from global ground data
+        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
+        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
+        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
+        #                 'texture', 'itex', 'isoil', 'BR',
+        #                 'b', 'cveg',
+        #                 'C1sat', 
+        #                 'C2ref', 'p', 'a',
+        #                 ] #globaldata.datasets.keys():
+
+        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
+        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
+
+
+        if type(globaldata) is not data_global:
+            raise TypeError("Wrong type of input library") 
+
+        # by default, we get all dataset keys
+        keys = list(globaldata.datasets.keys())
+
+        # We add LAI manually, because it is not listed in the datasets and
+        #they its retreival is hard coded below based on LAIpixel and cveg
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            keys.append('LAI')
+
+        # # In case there is surface pressure, we also calculate the half-level
+        # # and full-level pressure fields
+        # if ('sp' in keys):
+        #     keys.append('pfull')
+        #     keys.append('phalf')
+
+        # If specified, we only take the keys that are in only_keys
+        if only_keys is not None:
+            for key in keys:
+                if key not in only_keys:
+                    keys.remove(key)
+                
+        # If specified, we take out keys that are in exclude keys
+        if exclude_keys is not None:
+            for key in keys:
+                if key in exclude_keys:
+                    keys.remove(key)
+
+        # we set everything to nan first in the pars section (non-profile parameters
+        # without lev argument), so that we can check afterwards whether the
+        # data is well-fetched or not.
+        for key in keys:
+            if not ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None) and \
+                ('lev' in globaldata.datasets[key].page[key].dims)):
+                self.update(source='globaldata',pars={key:np.nan})
+            # # we do not check profile input for now. We assume it is
+            # # available
+            #else:
+            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
+
+        self.logger.debug('getting keys "'+', '.join(keys)+'\
+                          from global data')
+
+        for key in keys:
+            # If we find it, then we obtain the variables
+            if ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None)):
+
+                # check first whether the dataset has a height coordinate (3d space)
+                if 'lev' in globaldata.datasets[key].page[key].dims:
+
+                    # first, we browse to the correct file that has the current time
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+                        globaldata.datasets[key].browse_page(time=classdatetime)
+
+                    
+                    if (globaldata.datasets[key].page is not None):
+                        # find longitude and latitude coordinates
+                        ilats = (np.abs(globaldata.datasets[key].page.lat -
+                                        self.pars.latitude) < 0.5)
+                        ilons = (np.abs(globaldata.datasets[key].page.lon -
+                                        self.pars.longitude) < 0.5)
+                        
+                        # if we have a time dimension, then we look up the required timesteps during the class simulation
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            itimes = ((globaldata.datasets[key].page.time >= \
+                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
+
+                            # In case we didn't find any correct time, we take the
+                            # closest one.
+                            if np.sum(itimes) == 0.:
+
+
+                                classdatetimemean = \
+                                    np.datetime64(self.pars.datetime_daylight + \
+                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
+                                                ))
+
+                                dstimes = globaldata.datasets[key].page.time
+                                time = dstimes.sel(time=classdatetimemean,method='nearest')
+                                itimes = (globaldata.datasets[key].page.time ==
+                                          time)
+                                
+                        else:
+                            # we don't have a time coordinate so it doesn't matter
+                            # what itimes is
+                            itimes = 0
+
+                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
+
+                        # over which dimensions we take a mean:
+                        dims = globaldata.datasets[key].page[key].dims
+                        namesmean = list(dims)
+                        namesmean.remove('lev')
+                        idxmean = [dims.index(namemean) for namemean in namesmean]
+                        
+                        value = \
+                        globaldata.datasets[key].page[key].isel(time=itimes,
+                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
+
+                        # Ideally, source should be equal to the datakey of globaldata.library 
+                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
+                        #  but therefore the globaldata class requires a revision to make this work
+                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
+
+                else:
+                    # this procedure is for reading the ground fields (2d space). 
+                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
+
+    
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+    
+                       # first, we browse to the correct file
+                       #print(key)
+                       globaldata.datasets[key].browse_page(time=classdatetime)
+    
+                    if globaldata.datasets[key].page is not None:
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - self.pars.latitude))
+                        ilat = np.where((DIST) == np.min(DIST))[0][0]
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - self.pars.longitude))
+                        ilon = np.where((DIST) == np.min(DIST))[0][0]
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - (self.pars.latitude + 0.5)))
+                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmax = ilat
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - (self.pars.longitude  + 0.5)))
+                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmax = ilon
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lat.values\
+                                - (self.pars.latitude - 0.5)))
+                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmin = ilat
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lon.values\
+                                - (self.pars.longitude  - 0.5)))
+                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmin = ilon        
+                        
+                        if ilatmin < ilatmax:
+                            ilatrange = range(ilatmin,ilatmax+1)
+                        else:
+                            ilatrange = range(ilatmax,ilatmin+1)
+                            
+                        if ilonmin < ilonmax:
+                            ilonrange = range(ilonmin,ilonmax+1)
+                        else:
+                            ilonrange = range(ilonmax,ilonmin+1)     
+                            
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                            
+                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                idatetime += 1
+                            
+                            classdatetimeend = np.datetime64(\
+                                                             self.pars.datetime +\
+                                                             dt.timedelta(seconds=self.pars.runtime)\
+                                                            ) 
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
+                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
+                                idatetimeend -= 1
+                            idatetime = np.min((idatetime,idatetimeend))
+                            #for gleam, we take the previous day values
+                            if key in ['wg', 'w2']:
+                                idatetime = idatetime - 1
+                                idatetimeend = idatetimeend - 1
+
+                            # in case of soil temperature, we take the exact
+                            # timing (which is the morning)
+                            if key in ['Tsoil','T2']:
+                                idatetimeend = idatetime
+                            
+                            idts = range(idatetime,idatetimeend+1)
+                            
+                            count = 0
+                            self.__dict__[key] = 0.
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    for iidts in idts:
+                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
+                                        count += 1
+                            value = value/count
+                            self.update(source='globaldata',pars={key:value.item()})
+                                
+                        else:
+                                
+                            count = 0
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
+                                    count += 1
+                            value = value/count                        
+
+                            self.update(source='globaldata',pars={key:value.item()})
+
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            self.logger.debug('also update LAI based on LAIpixel and cveg') 
+            # I suppose LAI pixel is already determined in the previous
+            # procedure. Anyway...
+            key = 'LAIpixel'
+
+            if globaldata.datasets[key].page is not None:
+                # first, we browse to the correct file that has the current time
+                if 'time' in list(globaldata.datasets[key].page[key].dims):
+                    globaldata.datasets[key].browse_page(time=classdatetime)
+            
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - self.pars.latitude))
+                ilat = np.where((DIST) == np.min(DIST))[0][0]
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - self.pars.longitude))
+                ilon = np.where((DIST) == np.min(DIST))[0][0]
+                 
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude + 0.5)))
+                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmax = ilat
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values \
+                        - (self.pars.longitude  + 0.5)))
+                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmax = ilon
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude - 0.5)))
+                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmin = ilat
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - (self.pars.longitude  - 0.5)))
+                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmin = ilon        
+                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                
+                
+                if ilatmin < ilatmax:
+                    ilatrange = range(ilatmin,ilatmax+1)
+                else:
+                    ilatrange = range(ilatmax,ilatmin+1)
+                    
+                if ilonmin < ilonmax:
+                    ilonrange = range(ilonmin,ilonmax+1)
+                else:
+                    ilonrange = range(ilonmax,ilonmin+1)           
+                
+                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
+                LAIpixel = 0.
+                count = 0
+                for iilat in [ilat]: #ilatrange
+                    for iilon in [ilon]: #ilonrange
+                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
+                        
+                                        
+                        # if np.isnan(tarray[idatetime]):
+                        #     print("interpolating GIMMS LAIpixel nan value")
+                        #     
+                        #     mask = np.isnan(tarray)
+                        #     
+                        #     #replace each nan value with a interpolated value
+                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+                        #         
+                        #     else:
+                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
+                    
+                        #         tarray *= np.nan 
+                        
+                        count += 1
+                        #tarray_res += tarray
+                LAIpixel = LAIpixel/count
+                
+                count = 0
+                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
+  
+                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
+                #print('LAIpixel:',self.__dict__['LAIpixel'])
+                #print('cveg:',self.__dict__['cveg'])
+                
+                # finally, we rescale the LAI according to the vegetation
+                # fraction
+                value = 0. 
+                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
+                   value =self.pars.LAIpixel/self.pars.cveg
+                else:
+                    # in case of small vegetation fraction, we take just a standard 
+                    # LAI value. It doesn't have a big influence anyway for
+                    # small vegetation
+                    value = 2.
+                #print('LAI:',self.__dict__['LAI'])
+                self.update(source='globaldata',pars={'LAI':value}) 
+
+
+        # in case we have 'sp', we also calculate the 3d pressure fields at
+        # full level and half level
+        if ('sp' in keys) and ('sp' in self.pars.__dict__):
+            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
+
+            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # hydrostatic thickness of each model layer
+            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
+            # # dz = rhodz/(R * T / pfull)
+
+
+            # # subsidence multiplied by density. We calculate the subsidence of
+            # # the in class itself
+            # wrho = np.zeros_like(phalf)
+            # wrho[-1] = 0. 
+
+            # for ihlev in range(0,wrho.shape[0]-1):
+            #     # subsidence multiplied by density is the integral of
+            #     # divergences multiplied by the layer thicknessies
+            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
+            #                     self.air_ac['divU_y'][ihlev:]) * \
+            #                    delpdgrav[ihlev:]).sum()
+
+
+            
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'p':list(pfull)}))
+            self.update(source='globaldata',\
+                        air_ach=pd.DataFrame({'p':list(phalf)}))
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
+            # self.update(source='globaldata',\
+            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
+
+    def check_source(self,source,check_only_sections=None):
+        """ this procedure checks whether data of a specified source is valid.
+
+        INPUT:
+            source: the data source we want to check
+            check_only_sections: a string or list with sections to be checked
+        OUTPUT:
+            returns True or False
+        """
+
+        # we set source ok to false as soon as we find a invalid input
+        source_ok = True
+
+        # convert to a single-item list in case of a string
+        check_only_sections_def = (([check_only_sections]) if \
+                                   type(check_only_sections) is str else \
+                                    check_only_sections)
+                                  
+        if source not in self.sources.keys():
+            self.logger.info('Source '+source+' does not exist')
+            source_ok = False
+
+        for sectiondatakey in self.sources[source]:                             
+            section,datakey = sectiondatakey.split(':')                         
+            if ((check_only_sections_def is None) or \
+                (section in check_only_sections_def)):                          
+                checkdatakeys = []
+                if type(self.__dict__[section]) is pd.DataFrame:
+                    checkdata = self.__dict__[section]
+                elif type(self.__dict__[section]) is model_input:
+                    checkdata = self.__dict__[section].__dict__
+
+                if (datakey not in checkdata):                              
+                    # self.logger.info('Expected key '+datakey+\
+                    #                  ' is not in parameter input')                        
+                    source_ok = False                                           
+                elif (checkdata[datakey] is None) or \
+                     (pd.isnull(checkdata[datakey]) is True):                    
+        
+                    # self.logger.info('Key value of "'+datakey+\
+                    #                  '" is invalid: ('+ \
+                    # str(self.__dict__[section].__dict__[datakey])+')')         
+                    source_ok = False
+
+        return source_ok
+
+    def check_source_globaldata(self):
+        """ this procedure checks whether all global parameter data is
+        available, according to the keys in the self.sources"""
+
+        source_globaldata_ok = True
+
+        #self.get_values_air_input()
+
+        # and now we can get the surface values
+        #class_settings = class4gl_input()
+        #class_settings.set_air_input(input_atm)
+        
+        # we only allow non-polar stations
+        if not (self.pars.lat <= 60.):
+            source_globaldata_ok = False
+            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+        
+        # check lat and lon
+        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
+            source_globaldata_ok = False
+            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
+            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
+        else:
+            # we only check the ground parameter data (pars section). The 
+            # profile data (air_ap section) are supposed to be valid in any 
+            # case.
+            source_ok = self.check_source(source='globaldata',\
+                                          check_only_sections=['air_ac',\
+                                                               'air_ap',\
+                                                               'pars'])
+            if not source_ok:
+                source_globaldata_ok = False
+        
+            # Additional check: we exclude desert-like
+            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
+                source_globaldata_ok = False
+                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
+                source_globaldata_ok = False
+                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
+            elif self.pars.cveg < 0.02:
+                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
+                source_globaldata_ok = False
+
+        return source_globaldata_ok
+
+
class c4gli_iterator():
    """Sequential reader for a multi-record CLASS4GL yaml file.

    Wraps yaml.load_all and yields class4gl_input objects one record at a
    time.  For background on writing iterator classes, see:
    https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
    """
    def __init__(self,file):
        # keep a handle on the IO stream and set up the lazy yaml parser
        self.file = file
        self.yaml_generator = yaml.load_all(file)
        self.current_dict = {}
        self.current_class4gl_input = class4gl_input()
        # the first line is only a dummy record separator; skip it
        self.file.readline()
        # the second line must announce a supported format version
        self.header = file.readline()
        if self.header != '# CLASS4GL record; format version: 0.1\n':
            raise NotImplementedError("Wrong format version: '"+self.header+"'")

    def __iter__(self):
        return self

    def __next__(self):
        # advance the yaml stream; StopIteration propagates naturally
        self.current_dict = next(self.yaml_generator)
        self.current_class4gl_input.load_yaml_dict(self.current_dict)
        return self.current_class4gl_input
+
+
+
+#get_cape and lift_parcel are adapted from the SkewT package
+    
class gl_dia(object):
    """Container for boundary-layer diagnostics derived from a model run."""
    def get_lifted_index(self,timestep=-1):
        """Store the lifted index for the given output timestep in self.LI.

        NOTE(review): delegates to the module-level get_lifted_index helper
        and assumes self.input, self.out, self.p_pro and self.theta_pro are
        already populated — confirm against the rest of the module.
        """
        surf_pressure = self.input.Ps
        temp_2m = self.out.T2m[timestep]
        qv_2m = self.out.q[timestep]
        self.LI = get_lifted_index(surf_pressure, temp_2m, qv_2m,
                                   self.p_pro, self.theta_pro, endp=50000.)
+    
+#from SkewT
+#def get_lcl(startp,startt,startdp,nsteps=101):
+#    from numpy import interp
+#    #--------------------------------------------------------------------
+#    # Lift a parcel dry adiabatically from startp to LCL.
+#    # Init temp is startt in K, Init dew point is stwrtdp,
+#    # pressure levels are in Pa    
+#    #--------------------------------------------------------------------
+#
+#    assert startdp<=startt
+#
+#    if startdp==startt:
+#        return np.array([startp]),np.array([startt]),np.array([startdp]),
+#
+#    # Pres=linspace(startp,60000.,nsteps)
+#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
+#
+#    # Lift the dry parcel
+#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
+#    # Mixing ratio isopleth
+#    starte=VaporPressure(startdp)
+#    startw=MixRatio(starte,startp)
+#    e=Pres*startw/(.622+startw)
+#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
+#
+#    # Solve for the intersection of these lines (LCL).
+#    # interp requires the x argument (argument 2)
+#    # to be ascending in order!
+#    P_lcl=interp(0.,T_iso-T_dry,Pres)
+#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
+#
+#    # # presdry=linspace(startp,P_lcl)
+#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
+#
+#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
+#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
+#
+#    return P_lcl,T_lcl
+
+
+
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """ Calculate mixed-layer height from temperature and wind speed profile
        with the bulk-Richardson-number method.

        Input:
            HAGL: height coordinates above ground level [m]
            THTV: virtual potential temperature profile [K]
            WSPD: wind speed profile [m/s]
            RiBc: critical bulk Richardson number for the best guess
            RiBce: alternative critical value used for the uncertainty range

        Output:
            BLH: best-guess mixed-layer height
            BLHu: upper limit of mixed-layer height
            BLHl: lower limit of mixed-layer height

        All three are nan when no level exceeds the critical number or the
        profile contains no valid temperature.
    """

    eps = 2.#security limit [m] around the interpolation levels

    # surface virtual potential temperature: first non-nan level
    iTHTV_0 = np.where(~np.isnan(THTV))[0]
    if len(iTHTV_0) > 0:
        THTV_0 = THTV[iTHTV_0[0]]
    else:
        THTV_0 = np.nan

    # bulk Richardson number; wind speed is clipped to avoid division by zero
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.

    BLHi = np.where(RiB > RiBc)[0]
    if len(BLHi) > 0:
        BLHi = BLHi[0]
        # best guess: linear interpolation to RiB == RiBc between the
        # bracketing levels
        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]

        # possible error is calculated as the difference of the height levels
        # used for the interpolation
        BLHu = np.max([BLH,HAGL[BLHi]-eps])
        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])

        # calculate an alternative BLH based on another critical Richardson
        # number (RiBce) and widen the bounds accordingly
        BLHi = np.where(RiB > RiBce)[0]
        if len(BLHi) > 0:
            BLHi = BLHi[0]

            # BUGFIX: interpolate towards RiBce here (the original used RiBc,
            # which made BLHa identical to BLH whenever both thresholds are
            # crossed in the same layer, zeroing the uncertainty estimate)
            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBce - RiB[BLHi-1]) + HAGL[BLHi-1]
            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])

            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])

        else:
            BLH,BLHu,BLHl = np.nan, np.nan,np.nan

    else:
        BLH,BLHu,BLHl = np.nan, np.nan,np.nan

    return BLH,BLHu,BLHl
+
+
+
+#from class
def get_lcl(startp,startt,startqv):
    # NOTE(review): this function body appears corrupted/truncated in this
    # revision: the iteration condition below is cut off mid-expression
    # ("... and it 0:") and the remainder of the body is a duplicate of the
    # blh() routine above (it even returns BLH,BLHu,BLHd instead of an LCL).
    # It needs to be restored from the original source before use.
        # Find lifting condensation level iteratively
    lcl = 20.
    RHlcl = 0.5
    
    itmax = 30
    it = 0
    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0:
        iTHTV_0 = iTHTV_0[0]
        THTV_0 = THTV[iTHTV_0]
    else:
        THTV_0 = np.nan
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
    
    
    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
    #RiB - RiBc = 0
    
    #best guess of BLH
    
    #print("RiB: ",RiB)
    #print("RiBc: ",RiBc)
    
    
    
    BLHi = np.where(RiB > RiBc)[0]
    if len(BLHi ) > 0:
        BLHi = BLHi[0]
        #print("BLHi: ",BLHi)
        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
        
        # possible error is calculated as the difference height levels used for the interpolation
        BLHu = np.max([BLH,HAGL[BLHi]-eps])
        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
        # calculate an alternative BLH based on another critical Richardson number (RiBce):
        BLHi =np.where(RiB > RiBce)[0]
        if len(BLHi ) > 0:    
            BLHi = BLHi[0]
                
            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
            
            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
        
        else:
            BLH,BLHu,BLHd = np.nan, np.nan,np.nan

    else:
        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
        
    return BLH,BLHu,BLHd
+
def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
    """Return the datetimes from STARTTIME (inclusive) up to ENDTIME
    (exclusive), spaced by TIMEJUMP (default: one day)."""
    nsteps = int((ENDTIME - STARTTIME).total_seconds() / TIMEJUMP.total_seconds())
    result = []
    for istep in range(nsteps):
        result.append(STARTTIME + istep * TIMEJUMP)
    return result
+
+
+#from os import listdir
+#from os.path import isfile #,join
+import glob
+
+
class wyoming(object):
    """Reader for University-of-Wyoming sounding archives stored as local
    html files (one file per station per year under self.PATH).

    Typical use: set_STNM() to select a station, then find_first()/find()/
    find_next() to position on a sounding, and get_values_air_input() to
    parse it into self.PARAMS (scalar parameters) and self.ONE_COLUMN
    (profile table).
    """
    def __init__(self):
       # reset reader state: no station selected, no sounding loaded
       self.status = 'init'
       self.found = False
       self.DT = None
       self.current = None
       #self.mode = 'b'
       # NOTE(review): self.mode is left unset here (line above is commented
       # out), but get_values_air_input() reads self.mode — that raises
       # AttributeError unless callers set it first; the sibling copy of this
       # class sets mode = 'b'. Confirm the intended default.
       self.profile_type = 'wyoming'
       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"

    def set_STNM(self,STNM):
        """Select station STNM (WMO number): reset state and collect the
        sorted list of its per-year sounding files under self.PATH."""
        self.__init__()
        self.STNM = STNM
        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
        self.current = None
        self.found = False
        self.FILES.sort()

    def find_first(self,year=None,get_atm=False):
        """Position on the first sounding of the archive (or of the given
        year's file). Sets self.found, self.current (html <h2> node) and
        self.DT; optionally parses the profile when get_atm is True."""
        self.found = False

        # check first file/year or specified year
        if year == None:
            self.iFN = 0
            self.FN = self.FILES[self.iFN]
        else:
            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
        self.current = self.sounding_series.find('h2')
        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"

        # go through other files and find first sounding when year is not specified
        self.iFN=self.iFN+1
        while keepsearching:
            self.FN = self.FILES[self.iFN]
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')
            self.iFN=self.iFN+1
            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
        self.found = (self.current is not None)

        self.status = 'fetch'
        if self.found:
            # the <h2> title ends with e.g. "... 00Z 01 Jan 2005": parse the
            # observation time from fixed positions at the end of the string
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))

        if self.found and get_atm:
            self.get_values_air_input()


    def find(self,DT,get_atm=False):
        """Position on the sounding observed exactly at datetime DT.
        Scrolls forward inside the already-open year file when possible,
        otherwise opens the file for DT's year. Sets self.found/self.current;
        optionally parses the profile when get_atm is True."""

        self.found = False
        keepsearching = True
        #print(DT)
        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.
        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
            self.DT = DT
            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')

        keepsearching = (self.current is not None)
        while keepsearching:
            # datetime of the sounding under the cursor (see find_first)
            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            if DTcurrent == DT:
                self.found = True
                keepsearching = False
                if get_atm:
                    self.get_values_air_input()
                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            elif DTcurrent > DT:
                # we scrolled past DT: no sounding at that exact time
                keepsearching = False
                self.current = None
            else:
                self.current = self.current.find_next('h2')
                if self.current is None:
                    keepsearching = False
        self.found = (self.current is not None)
        self.status = 'fetch'

    def find_next(self,get_atm=False):
        """Advance to the next sounding, rolling over to the next year file
        when the current one is exhausted. Falls back to find_first() when
        no sounding is loaded yet."""
        self.found = False
        self.DT = None
        if self.current is None:
            self.find_first()
        else:
            self.current = self.current.find_next('h2')
            self.found = (self.current is not None)
            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
            while keepsearching:
                self.iFN=self.iFN+1
                self.FN = self.FILES[self.iFN]
                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
                self.current = self.sounding_series.find('h2')

                self.found = (self.current is not None)
                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
        if self.found:
            # parse the observation time from the <h2> title (see find_first)
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
        if self.found and get_atm:
            self.get_values_air_input()



    def get_values_air_input(self,latitude=None,longitude=None):
        """Parse the sounding under the cursor (self.current) into:
            self.PARAMS      — scalar parameters (transposed DataFrame),
            self.ONE_COLUMN  — the profile selected according to self.mode.

        latitude/longitude override the station coordinates from the file.
        The profile table is re-fitted into mixed-layer form for the
        best/upper/lower boundary-layer-height estimates from blh(), and a
        quality flag 'OK' is derived from profile completeness.
        NOTE(review): uses deprecated numpy/pandas APIs (np.float,
        DataFrame.drop(...,1)) — works only on old library versions.
        """

        # for iDT,DT in enumerate(DTS):

            #websource = urllib.request.urlopen(webpage)
        #soup = BeautifulSoup(open(webpage), "html.parser")


        #workaround for ...last line has a stray carriage return (
) which results in stringlike first column
        string = self.current.find_next('pre').text
        string = string.split('\n')[:-1]
        string =  '\n'.join(string)
        # fixed-width table of the sounding levels
        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']
        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])

        #string =  soup.pre.next_sibling.next_sibling

        # second <pre> block: the station/sounding parameter list
        string = self.current.find_next('pre').find_next('pre').text

        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
        #PARAMS.insert(0,'date',DT)

        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))

        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
        # height above ground level
        HAGL = HGHT - np.float(PARAMS['Station elevation'])
        ONE_COLUMN.insert(0,'HAGL',HAGL)




        # specific humidity from the mixing ratio [g/kg]
        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
        ONE_COLUMN.insert(0,'QABS',QABS)

        # wind speed: knots -> m/s
        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')

        #mixed layer potential temperature
        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])

        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2

        # boundary-layer height estimate (best/upper/lower), floored at 10 m
        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
        BLHV = np.max((BLHV,10.))
        BLHVu = np.max((BLHVu,10.))
        BLHVd = np.max((BLHVd,10.))
        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)

        #security values for mixed-layer jump values dthetav, dtheta and dq

        # fit new profiles taking the above-estimated mixed-layer height
        ONE_COLUMNNEW = []
        for BLH in [BLHV,BLHVu,BLHVd]:
            ONE_COLUMNNEW.append(pd.DataFrame())

            # new coordinates: screen level (2 m), twice the mixed-layer top
            # (to represent the jump), then the original levels above it
            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)

            listHAGLNEW = list(HAGLNEW)
            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):

                # get index of lowest valid observation. This seems to vary
                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
                if len(idxvalid) > 0:
                    #print('idxvalid',idxvalid)
                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
                        # enough levels below the top: mixed-layer mean
                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
                    else:
                        # too few levels: fall back to the lowest valid value
                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)
                else:
                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
                    #print(col,meanabl)


                # if col == 'PRES':
                #     meanabl =

                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
                #THTVM = np.nanmean(THTV[HAGL <= BLH])
                #print("new_pro_h",new_pro_h)
                # calculate jump ath the top of the mixed layer
                if col in ['THTA','THTV',]:
                    #for moisture
                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
                    if len(listHAGLNEW) > 4:
                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
                        # temperature jump: extrapolate the free troposphere
                        # down to the mixed-layer top; keep it >= 0.1 K
                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl
                        dtheta = np.max((0.1,dtheta_pre))
                        #meanabl = meanabl - (dtheta - dtheta_pre)
                        #print('dtheta_pre',dtheta_pre)
                        #print('dtheta',dtheta)
                        #print('meanabl',meanabl)
                        #stop

                    else:
                        dtheta = np.nan
                else:
                    if len(listHAGLNEW) > 4:
                        #for moisture (it can have both negative and positive slope)
                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl )
                    else:
                        dtheta = np.nan
                #print('dtheta',dtheta)

                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)


                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)

            #QABSM = np.nanmean(QABS[HAGL <= BLH])
            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))

        # we just make a copy of the fields, so that it can be read correctly by CLASS
        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))

            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)

            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))


        # assign fields adopted by CLASS
        # NOTE(review): self.mode is never set in __init__ in this revision
        # (see note there), so this branch raises AttributeError by default.
        if self.mode == 'o': #original
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'b':
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'u':
            PARAMS.insert(0,'h',   BLHVu)
        elif self.mode == 'd':
            PARAMS.insert(0,'h',   BLHVd)
        else:
            PARAMS.insert(0,'h',   BLHV)


        try:
            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
        except:
            print("could not convert latitude coordinate")
            PARAMS.insert(0,'latitude', np.nan)
            PARAMS.insert(0,'lat', np.nan)
        try:
            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwich)
            PARAMS.insert(0,'lon', 0.)
        except:
            print("could not convert longitude coordinate")
            PARAMS.insert(0,'longitude', np.nan)
            PARAMS.insert(0,'lon', 0.)

        if latitude is not None:
            print('overwriting latitude with specified value')
            PARAMS['latitude'] = np.float(latitude)
            PARAMS['lat'] = np.float(latitude)
        if longitude is not None:
            print('overwriting longitude with specified value')
            PARAMS['longitude'] = np.float(longitude)
        try:
            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in Greenwich hence taking lon=0)
            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.)
            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
            # This is the nearest datetime when sun is up (for class)
            # NOTE(review): np.min/np.max are called with two positional
            # arguments here (second is interpreted as an axis), which looks
            # wrong for clamping datetimes — confirm; any failure is silently
            # absorbed by the except-branch below.
            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value)
            # apply the same time shift for UTC datetime
            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)

        except:
            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
            PARAMS['datetime_daylight'] =PARAMS['datetime'].value



        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
        # as we are forcing lon equal to zero this is is expressed in local suntime
        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)


        ONE_COLUMNb = ONE_COLUMNNEW[0]
        ONE_COLUMNu = ONE_COLUMNNEW[1]
        ONE_COLUMNd = ONE_COLUMNNEW[2]


        # mixed-layer mean virtual potential temperature and humidity
        THTVM = np.nanmean(THTV[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)

        QABSM = np.nanmean(QABS[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)

        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)

        # largest deviation of the BLH bounds from the best guess
        BLHVe = abs(BLHV - BLHVu)
        BLHVe = max(BLHVe,abs(BLHV - BLHVd))

        #PARAMS.insert(0,'dq',0.)

        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)
        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)

        if self.mode == 'o': #original
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
        elif self.mode == 'b': # best BLH
            USE_ONECOLUMN = ONE_COLUMNb
            BLCOLUMN = ONE_COLUMNb
        elif self.mode == 'u': # best BLH
            USE_ONECOLUMN = ONE_COLUMNu
            BLCOLUMN = ONE_COLUMNu
        elif self.mode == 'd': # best BLH
            USE_ONECOLUMN = ONE_COLUMNd
            BLCOLUMN = ONE_COLUMNd
        else:
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb

        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
        # print(BLCOLUMN['HAGL'][lt6000])
        # print(BLCOLUMN['HAGL'][lt2500])
        #
        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters

        #print(BLCOLUMN['HAGL'][lt2500])
        # quality flag: sound BLH estimate, dense and complete profile, and
        # absolutely increasing potential temperature below 6000 m
        PARAMS.insert(0,'OK',
                      ((BLHVe < 200.) and
                       ( len(np.where(lt6000)[0]) > 5) and
                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
                      )
                     )

        # mixed-layer values (index 1) and jumps at the top (index 2 - 1)
        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))


        PARAMS = PARAMS.T


        self.PARAMS = PARAMS
        self.ONE_COLUMN = USE_ONECOLUMN
        # if self.mode == 'o': #original
        #     self.ONE_COLUMN = ONE_COLUMN
        # elif self.mode == 'b': # best BLH
        #     self.ONE_COLUMN = ONE_COLUMNb
        # elif self.mode == 'u':# upper BLH
        #     self.ONE_COLUMN = ONE_COLUMNu
        # elif self.mode == 'd': # lower BLH
        #     self.ONE_COLUMN=ONE_COLUMNd
        # else:
        #     self.ONE_COLUMN = ONE_COLUMN
+
diff --git a/build/lib/lib/data_global.py b/build/lib/lib/data_global.py
new file mode 100644
index 0000000..9c3d9b5
--- /dev/null
+++ b/build/lib/lib/data_global.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: Hendrik Wouters
+
+Purpose: provides class routines for ground and atmosphere conditions used for
the CLASS mixed-layer model
+
+Usage:
+    from data_global import data_global
+    from class4gl import class4gl_input
+    from data_soundings import wyoming
+
+    # create a data_global object and load initial data pages
+    globaldata = data_global()
+    globaldata.load_datasets()
+    # create a class4gl_input object
+    c4gli = class4gl_input()
+    # Initialize it with profile data. We need to do this first. Actually this
+    # will set the coordinate parameters (datetime, latitude, longitude) in
+    # class4gl_input.pars.__dict__, which is required to read point data from
+    # the data_global object.
+
+    # open a Wyoming stream for a specific station
+    wy_strm = wyoming(STNM=91376)
+    # load the first profile
+    wy_strm.find_first()
+    # load the profile data into the class4gl_input object
+    c4gli.get_profile_wyoming(wy_strm)
+    
+    # and finally, read the global input data for this profile
+    c4gli.get_global_input(globaldata)
+
+
+"""
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+#import pynacolada as pcd
+import pandas as pd
+import xarray as xr
+import os
+import glob
+import sys
+import errno
+import warnings
+import logging
+
+
#formatter = logging.Formatter()
# NOTE(review): the backslash continuations keep the leading spaces of the
# following lines inside the format string, so emitted log lines contain
# long runs of blanks between the fields — probably unintended; confirm
# before reformatting.
logging.basicConfig(format='%(asctime)s - \
                               %(name)s - \
                               %(levelname)s - \
                               %(message)s')
+
+class book(object):
+    """ this is a class for a dataset spread over multiple files. It has a
+    similar purpose  open_mfdataset, but only 1 file (called current 'page')
+    one is loaded at a time. This saves precious memory.  """
+    def __init__(self,fn,concat_dim = None,debug_level=None):
+        self.logger = logging.getLogger('book')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # filenames are expanded as a list and sorted by filename
+        self.pages = glob.glob(fn); self.pages.sort()
+        # In case length of the resulting list is zero, this means no file was found that matches fn. In that case we raise an error.
+        if len(self.pages) == 0:
+            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
+        self.ipage = -1; self.page = None
+        self.renames = {} # each time when opening a file, a renaming should be done.
+        self.set_page(0)
+
+        # we consider that the outer dimension is the one we concatenate
+        self.concat_dim = concat_dim
+        if self.concat_dim is None:
+            self.concat_dim = self.concat_dim=list(self.page.dims.keys())[0]
+
+    # this wraps the xarray sel-commmand
+    def sel(*args, **kwargs):
+        for dim in kwargs.keys():
+            if dim == self.concat_dim:
+                self.browse_page(**{dim: kwargs[dim]})
+        return page.sel(*args,**kwargs)
+
+
+    ## this wraps the xarray class -> some issues with that, so I just copy the sel command (which I do not use yet)
+    #def __getattr__(self,attr):
+    #    orig_attr = self.page.__getattribute__(attr)
+    #    if callable(orig_attr):
+    #        def hooked(*args, **kwargs):
+    #            for dim in kwargs.keys():
+    #                if dim == self.concat_dim:
+    #                    self.browse_page(**{dim: kwargs[dim]})
+    #
+    #            result = orig_attr(*args, **kwargs)
+    #            # prevent wrapped_class from becoming unwrapped
+    #            if result == self.page:
+    #                return self
+    #            self.post()
+    #            return result
+    #        return hooked
+    #    else:
+    #        return orig_attr
+
+    def set_renames(self,renames):
+        #first, we convert back to original names, and afterwards, we apply the update of the renames.
+        reverse_renames = dict((v,k) for k,v in self.renames.items())
+        self.renames = renames
+        if self.page is not None:
+            self.page = self.page.rename(reverse_renames)
+            self.page = self.page.rename(self.renames)
+
+    def set_page(self,ipage,page=None):
+        """ this sets the right page according to ipage:
+                - We do not switch the page if we are already at the right one
+                - we set the correct renamings (level -> lev, latitude -> lat,
+                etc.)
+                - The dataset is also squeezed.
+        """
+
+        if ((ipage != self.ipage) or (page is not None)):
+
+            if self.page is not None:
+                self.page.close()
+
+            self.ipage = ipage
+            if page is not None:
+                self.page = page
+            else:
+                if self.ipage == -1:
+                   self.page = None
+                else:
+                    #try:
+
+                    self.logger.info("Switching to page "+str(self.ipage)+': '\
+                                     +self.pages[self.ipage])
+                    self.page = xr.open_dataset(self.pages[self.ipage])
+
+
+            # do some final corrections to the dataset to make them uniform
+            if self.page is not None:
+               if 'latitude' in self.page.dims:
+#    sel       f.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
+               if 'level' in self.page.dims:
+                   self.page = self.page.rename({'level':'lev'})
+
+               self.page = self.page.rename(self.renames)
+               self.page = self.page.squeeze(drop=True)
+
+    def browse_page(self,rewind=2,**args):
+
+        # at the moment, this is only tested with files that are stacked according to the time dimension.
+        dims = args.keys()
+
+
+        if self.ipage == -1:
+            self.set_page(0)
+
+        found = False
+        iipage = 0
+        startipage = self.ipage - rewind
+        while (iipage < len(self.pages)) and not found:
+            ipage = (iipage+startipage) % len(self.pages)
+            for dim in args.keys():
+                this_file = True
+
+                # here we store the datetimes in a directly-readable dictionary, so that we don't need to load it every time again
+                if 'dims' not in self.__dict__:
+                    self.dims = {}
+                if dim not in self.dims.keys():
+                    self.dims[dim] = [None]*len(self.pages)
+
+                if self.dims[dim][ipage] is None:
+                    self.logger.info('Loading coordinates of dimension "'+dim+\
+                                     '" of page "' +str(ipage)+'".')
+                    self.set_page(ipage)
+                    # print(ipage)
+                    # print(dim)
+                    # print(dim,self.page[dim].values)
+                    self.dims[dim][ipage] = self.page[dim].values
+
+                # determine current time range of the current page
+                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
+                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.
+
+                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
+                    this_file = False
+
+            if this_file:
+                found = True
+                self.set_page(ipage)
+            else:
+
+                #if ((args[dim] >= self.page[dim].min().values) and (args[dim] < self.page[dim].max().values)):
+                #    iipage = len(self.pages) # we stop searching
+
+                iipage += 1
+
+        if not found:
+            self.logger.info("Page not found. Setting to page -1")
+            #iipage = len(self.pages) # we stop searching further
+            self.set_page(-1)
+
+        if self.ipage != -1:
+            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
+        else:
+            self.logger.debug("I'm now at page "+ str(self.ipage))
+
+
+class data_global(object):
+    def __init__(self,sources= {
+        # # old gleam
+        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
+        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
+        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
+        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
+        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
+        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
+        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
+        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
+        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
+        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
+        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
+        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
+        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
+        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
+        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
+        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
+        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
+        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
+        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
+        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
+        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
+        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
+        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
+        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
+        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
+        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
+        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
+        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
+        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
+        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
+        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
+        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
+        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
+        },debug_level=None):
+        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
+        self.sources = sources
+        self.datarefs = {}
+        self.datasets = {}
+        self.datetime = dt.datetime(1981,1,1)
+
+        self.logger = logging.getLogger('data_global')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+        self.debug_level = debug_level
+
+        warnings.warn('omitting pressure field p and advection')
+
+    def in_library(self,fn):
+        if fn not in self.library.keys():
+            return False
+        else:
+            print("Warning: "+fn+" is already in the library.")
+            return True
+
+    def add_to_library(self,fn):
+        if not self.in_library(fn):
+            print("opening: "+fn)
+            self.library[fn] = \
+                book(fn,concat_dim='time',debug_level=self.debug_level)
+
+            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
+            #if 'latitude' in self.library[fn].variables:
+            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+
    # default procedure for loading datasets into the globaldata library
    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
        """Register variable(s) from *input_fn* in the library and expose
        them through self.datasets / self.datarefs.

        Parameters
        ----------
        input_fn : str
            filename (or glob pattern) that add_to_library opens as a book.
        varssource : str, list of str, or None
            variable name(s) in the source file; a bare string is wrapped
            into a one-element list.  When None, names are auto-discovered
            (see NOTE below).
        varsdest : str, list of str, or None
            destination name(s); defaults to *varssource* and is matched
            positionally, so both lists must have equal length.
        """
        if type(varssource) is str:
            varssource = [varssource]
        if type(varsdest) is str:
            varsdest = [varsdest]

        self.add_to_library(input_fn)

        if varssource is None:
            varssource = []
            # NOTE(review): self.sources[input_fn] is a plain string (the
            # value of the sources dict) and has no .variables attribute,
            # so this auto-discovery branch would raise AttributeError if
            # reached -- presumably self.library[input_fn] was intended;
            # confirm against the `book` API.
            for var in self.sources[input_fn].variables:
                avoid = \
                ['lat','lon','latitude','longitude','time','lev','level']
                # NOTE(review): `var` is used both as an object (.shape)
                # and as a name compared against `avoid`; these usages look
                # inconsistent -- verify what `.variables` iterates over.
                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
                    varssource.append(var)

        if varsdest is None:
            varsdest = varssource

        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
        for ivar,vardest in enumerate(varsdest):
            varsource = varssource[ivar]
            print('setting '+vardest+' as '+varsource+' from '+input_fn)

            if vardest in self.datarefs.keys():
                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
            #self.add_to_library(fn,varsource,vardest)
            # a renamed view gets its own library entry keyed as
            # "<file>.<source>.<dest>" so the same file can be exposed under
            # several destination names without clashing
            if vardest != varsource:
                libkey = input_fn+'.'+varsource+'.'+vardest
                if libkey not in self.library.keys():
                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
                    self.library[libkey] = book(input_fn,\
                                                debug_level=self.debug_level)
                    self.library[libkey].set_renames({varsource: vardest})

                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
            else:
                self.datarefs[vardest] = input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]

            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
            #     print('Warning: '+ vardest "not in " + input_fn)
+
+
+
+    def load_datasets(self,sources = None,recalc=0):
+
+        if sources is None:
+            sources = self.sources
+        for key in sources.keys():
+            #datakey,vardest,*args = key.split(':')
+            datakey,vardest = key.split(':')
+            #print(datakey)
+
+            fnvarsource = sources[key].split(':')
+            if len(fnvarsource) > 2:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource,fnargs = fnvarsource
+                fnargs = [fnargs]
+            elif len(fnvarsource) > 1:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource = fnvarsource
+                fnargs = []
+            else:
+                fn = sources[key]
+                varsource = vardest
+            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
+
+    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
+            # the default way of loading a 2d dataset
+            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
+                self.load_dataset_default(fn,varsource,vardest)
+            elif datakey == 'IGBPDIS':
+                if vardest == 'alpha':
+                    ltypes = ['W','B','H','TC']
+                    for ltype in ltypes:
+                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
+                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
+
+
+                    # landfr = {}
+                    # for ltype in ['W','B','H','TC']:
+                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
+
+
+
+                    keytemp = 'alpha'
+                    fnkeytemp = fn+':IGBPDIS:alpha'
+                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
+                        self.library[fnkeytemp]  = book(fnkeytemp,
+                                                        debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+                    else:
+                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
+                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
+                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
+                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
+                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
+                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
+                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
+
+                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+
+                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
+                        for ltype in ltypes:
+                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
+
+                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
+                        print('writing file to: '+fnkeytemp)
+                        os.system('rm '+fnkeytemp)
+                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
+                        self.library[fnkeytemp].close()
+
+
+                        self.library[fnkeytemp]  = \
+                            book(fnkeytemp,debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+
+
+                else:
+                    self.load_dataset_default(fn,varsource,vardest)
+
+
+            elif datakey == 'GLAS':
+                self.load_dataset_default(fn,varsource,vardest)
+                if vardest == 'z0m':
+                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
+                elif vardest == 'z0h':
+                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
+            elif datakey == 'DSMW':
+
+
+                # Procedure of the thermal properties:
+                # 1. determine soil texture from DSMW/10.
+                # 2. soil type with look-up table (according to DWD/EXTPAR)
+                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
+                #    with parameter look-up table from Noilhan and Planton (1989).
+                #    Note: The look-up table is inspired on DWD/COSMO
+
+                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first
+
+
+
+                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
+                self.load_dataset_default(fn,'DSMW')
+                print('calculating texture')
+                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
+                TEMP  = {}
+                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
+                TEMP3 = {}
+                for SPKEY in SPKEYS:
+
+
+                    keytemp = SPKEY+'_values'
+                    fnoutkeytemp = fnout+':DSMW:'+keytemp
+                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                    else:
+                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
+                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
+                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+                        # for faster computation, we need to get it to memory out of Dask.
+                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
+                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
+
+                # yes, I know I only check the last file.
+                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
+                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
+                        print('idx',idx,SPKEY)
+                        SEL = (TEMP2 == idx)
+                    #     print(idx,len(TEMP3))
+                        for SPKEY in SPKEYS:
+                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
+
+                    for SPKEY in SPKEYS:
+                        keytemp = SPKEY+'_values'
+                        fnoutkeytemp = fnout+':DSMW:'+keytemp
+                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
+                        os.system('rm '+fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].close()
+
+
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                keytemp = 'texture'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+                else:
+                    self.library[fn+':DSMW:texture'] = xr.Dataset()
+                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
+                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
+                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
+                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+
+                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
+
+                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
+                    zundef[zundef < 0] = np.nan
+                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
+                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
+
+                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+
+
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+                print('calculating texture type')
+
+
+
+                keytemp = 'itex'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+                else:
+                    self.library[fnoutkeytemp] = xr.Dataset()
+                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+                    X = self.datasets['texture'].page['texture'].values*100
+                    X[pd.isnull(X)] = -9
+
+
+                    self.datasets[keytemp][keytemp].values = X
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
+                    self.datasets['itex'].close()
+
+
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+
+                keytemp = 'isoil'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                isoil_reprocessed = False
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+                else:
+                    isoil_reprocessed = True
+                    print('calculating soil type')
+                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    ITEX = self.datasets['itex'].page['itex'].values
+                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
+                    LOOKUP = [
+                              [-10 ,9],# ocean
+                              [0 ,7],# fine textured, clay (soil type 7)
+                              [20,6],# medium to fine textured, loamy clay (soil type 6)
+                              [40,5],# medium textured, loam (soil type 5)
+                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                              [80,3],# coarse textured, sand (soil type 3)
+                              [100,9],# coarse textured, sand (soil type 3)
+                            ]
+                    for iitex,iisoil in LOOKUP:
+                        ISOIL[ITEX > iitex] = iisoil
+                        print('iitex,iisoil',iitex,iisoil)
+
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    LOOKUP = [
+                              [9001, 1 ], # ice, glacier (soil type 1)
+                              [9002, 2 ], # rock, lithosols (soil type 2)
+                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                              [9,    9 ], # undefined (ocean)
+                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                              [9000, 9 ], # undefined (inland lake)
+                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                            ]
+                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
+
+                    CODE_VALUES[ITEX == 901200] = 9012
+                    for icode,iisoil in LOOKUP:
+                        ISOIL[CODE_VALUES == icode] = iisoil
+
+                    self.datasets['isoil']['isoil'].values = ISOIL
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+                    print('saved inbetween file to: '+fnoutkeytemp)
+
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                #adopted from data_soil.f90 (COSMO5.0)
+                SP_LOOKUP = {
+                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
+                  # (by index)                                           loam                    loam                                water      ice
+                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+                  # Important note: For peat, the unknown values below are set equal to that of loam
+                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
+                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
+                  #error in table 2 of NP89: values need to be multiplied by e-6
+                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
+
+                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
+                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
+                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
+                }
+
+
+                # isoil_reprocessed = False
+                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+
+                #     self.library[fn+':DSMW:isoil'] = \
+                #             book(fnoutkeytemp,debug_level=self.debug_level)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+                # else:
+                #     isoil_reprocessed = True
+                #     print('calculating soil type')
+                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+
+
+
+                # this should become cleaner in future but let's hard code it for now.
+                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
+                print('calculating soil parameter')
+                DATATEMPSPKEY = {}
+                if (recalc < 1) and (isoil_reprocessed == False): 
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        keytemp = SPKEY
+                        fnoutkeytemp=fnout+':DSMW:'+keytemp
+                        self.library[fn+':DSMW:'+SPKEY] =\
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
+                        self.datarefs[SPKEY] =fnoutkeytemp
+                else:
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+
+                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
+                    ISOIL = self.datasets['isoil'].page['isoil'].values
+                    print(np.where(ISOIL>0.))
+                    for i in range(11):
+                        SELECT = (ISOIL == i)
+                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
+
+                        os.system('rm '+fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].close()
+                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
+
+                        self.library[fn+':DSMW:'+SPKEY] = \
+                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+
+
+            else:
+                self.load_dataset_default(fn,varsource,vardest)
+
+
+
+
+
+
+#
+#                 # only print the last parameter value in the plot
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'cala'
+#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'crhoc'
+#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#     key = "CERES"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         CERES_start_date = dt.datetime(2000,3,1)
+#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+#
+#         var = 'cc'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#         print(class_settings.lat,class_settings.lon)
+#
+#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
+#
+#         input_nc.close()
+#
+
+
+#     key = "GIMMS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
+#         print("Reading Leaf Area Index from "+input_fn)
+#         var = 'LAI'
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+#
+#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+#
+#         if np.isnan(tarray[idatetime]):
+#             print("interpolating GIMMS cveg nan value")
+#
+#             mask = np.isnan(tarray)
+#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+#             else:
+#                 print("Warning. Could not interpolate GIMMS cveg nan value")
+#
+#         class_settings.__dict__[var] = tarray[idatetime]
+#
+#         input_nc.close()
+#
+#     key = "IGBPDIS_ALPHA"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         var = 'alpha'
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+#         print("Reading albedo from "+input_fn)
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#
+#         landfr = {}
+#         for ltype in ['W','B','H','TC']:
+#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+#
+#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+#
+#         alpha=0.
+#         for ltype in landfr.keys():
+#             alpha += landfr[ltype]*aweights[ltype]
+#
+#
+#         class_settings.__dict__[var] = alpha
+#         input_nc.close()
+#
+#
+#     key = "ERAINT_ST"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         print("Reading soil temperature from "+input_fn)
+#
+#         var = 'Tsoil'
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         var = 'T2'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+#
+#
+#         input_nc.close()
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #var = 'T2'
+#     #valold = class_settings.__dict__[var]
+#     #
+#     #class_settings.__dict__[var] = 305.
+#     #class_settings.__dict__['Tsoil'] = 302.
+#     #valnew = class_settings.__dict__[var]
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #
+#     #var = 'Lambda'
+#     #valold = class_settings.__dict__[var]
+#
+#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
+#     ## I need to ask Chiel.
+#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+#     #
+#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
+#     #class_settings.__dict__[var] = valnew
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     key = "GLAS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+#         print("Reading canopy height for determining roughness length from "+input_fn)
+#         var = 'z0m'
+#
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+#
+#         lowerlimit = 0.01
+#         if testval < lowerlimit:
+#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+#             class_settings.__dict__[var] = lowerlimit
+#         else:
+#             class_settings.__dict__[var] = testval
+#
+#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+#
+#
+#         input_nc.close()
+
+
+
+
+
diff --git a/build/lib/lib/interface_functions.py b/build/lib/lib/interface_functions.py
new file mode 100644
index 0000000..3e483f3
--- /dev/null
+++ b/build/lib/lib/interface_functions.py
@@ -0,0 +1,506 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+#from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+
class records_iterator(object):
    """Bidirectional iterator over the rows of a records DataFrame.

    Yields (index, row) pairs like DataFrame.iterrows(); additionally
    supports stepping backwards via __prev__.

    NOTE: a second class with the same name appears further down in this
    module and shadows this one at import time.
    """

    def __init__(self, records):
        # records: pandas DataFrame of sounding records
        self.records = records
        self.ix = -1  # position before the first row

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance by *jump* rows and return (index, row).

        Raises StopIteration when the position passes the last row.
        """
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration

        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bug fix: the original called self.__next__(self, jump=-1), which
        # passes self twice (bound method) and raises
        # "TypeError: got multiple values for argument 'jump'"
        return self.__next__(jump=-1)
+
+
+#'_afternoon.yaml'
def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
    """Load one record from an already-open multi-record yaml archive.

    The record occupies the byte range [index_start, index_end) of
    *yaml_file*.  The snippet is written to a temporary buffer file,
    converted to json by an external Ruby one-liner, and parsed with
    json.load (presumably faster than PyYAML for these records -- TODO
    confirm).

    Parameters
    ----------
    yaml_file : open file object of the yaml archive
    index_start, index_end : byte offsets delimiting the record
    mode : 'mod' -> return a class4gl model-output object;
           'ini' -> return a class4gl_input object, with datetime strings
           re-parsed and missing-value sentinels restored to nan

    Returns a class4gl or class4gl_input instance (None is implicitly
    returned for any other *mode*).
    """
    filename = yaml_file.name
    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
    #yaml_file = open(filename)

    #print('going to next observation',filename)
    yaml_file.seek(index_start)

    # json cannot represent inf/nan, so substitute a large sentinel value;
    # also strip the yaml document separator '---'
    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')

    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
    filebuffer.write(buf)
    filebuffer.close()
    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))

    # yaml -> json conversion via a hard-coded Ruby interpreter path; the
    # json output is written next to the buffer file
    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '

    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
    print(command)
    os.system(command)
    jsonstream = open(filename+'.buffer.json.'+str(index_start))
    record_dict = json.load(jsonstream)
    jsonstream.close()
    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))


    if mode =='mod':
        modelout = class4gl()
        modelout.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))

        return modelout
    elif mode == 'ini':

 
        # datetimes are incorrectly converted to strings. We need to convert them
        # again to datetimes
        for key,value in record_dict['pars'].items():
            # we don't want the key with columns that have none values
            if value is not None: 
                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
               # elif (type(value) == str):
                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")

            # NOTE(review): the substitution above produces 9e19 (not
            # 0.9e19), and '.9e19' only arises from yaml '.nan' tokens --
            # verify this comparison actually catches the sentinel values.
            if (value == 0.9e19) or (value == '.9e19'):
                record_dict['pars'][key] = np.nan
        # restore nan sentinels inside the profile tables
        for key in record_dict.keys():
            #print(key)
            if key in ['air_ap','air_balloon',]:
                #NNprint('check')
                for datakey,datavalue in record_dict[key].items():
                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]

        #os.system('rm '+filename+'.buffer.json.'+str(index_start))

        c4gli = class4gl_input()
        print(c4gli.logger,'hello')
        c4gli.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))
        return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
+
class stations(object):
    """Table of sounding stations available under a class4gl output path.

    The station list (STNID, latitude, longitude, filename) is cached to
    <path>/stations_list.csv; it is rebuilt from the per-station yaml
    files when the cache is missing or refetch_stations is True.  The
    resulting DataFrame is exposed as self.table, indexed by STNID.
    """

    def __init__(self,path,suffix='ini',refetch_stations=False):

        self.path = path

        self.file = self.path+'/stations_list.csv'
        if (os.path.isfile(self.file)) and (not refetch_stations):
            self.table = pd.read_csv(self.file)
        else:
            self.table = self.get_stations(suffix=suffix)
            self.table.to_csv(self.file)

        self.table = self.table.set_index('STNID')
        #print(self.table)

    def get_stations(self,suffix):
        """Scan the yaml files under self.path and build the station table.

        Only the first yaml record of each file is read (it carries the
        station coordinates).  Returns a DataFrame with columns STNID,
        latitude, longitude and filename.

        Raises ValueError when no matching yaml files are found.
        """
        # multi-chunk naming (<STNID>_0_<suffix>.yaml) takes precedence...
        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
        if len(stations_list_files) == 0:
            # ...with a fallback to the single-chunk naming
            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
        stations_list_files.sort()
        print(stations_list_files)
        if len(stations_list_files) == 0:
            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
        stations_list = []
        for stations_list_file in stations_list_files:
            # context manager guarantees the file handle is closed even on error
            with open(stations_list_file,'r') as thisfile:
                yamlgen = yaml.load_all(thisfile)
                try:
                    first_record = next(yamlgen)
                # bug fix: the original bare "except:" swallowed every
                # exception; only treat empty/corrupt yaml as "no record"
                except (StopIteration, yaml.YAMLError):
                    first_record = None
                if first_record is not None:
                    stations_list.append({})
                    for column in ['STNID','latitude','longitude']:
                        #print(first_record['pars'].keys())
                        stations_list[-1][column] = first_record['pars'][column]
                    stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
                yamlgen.close()

        print(stations_list)
        return pd.DataFrame(stations_list)
+
class stations_iterator(object):
    """Iterator over the rows of a stations table, yielding (STNID, row).

    Supports sequential iteration, backward stepping (__prev__) and random
    access by row number (set_row) or station id (set_STNID).
    """

    def __init__(self,stations):
        self.stations = stations
        self.ix = -1  # position before the first row

    def __iter__(self):
        return self

    def __next__(self,jump=1):
        """Advance by *jump* rows and return (STNID, row)."""
        self.ix = self.ix + jump
        # leaving the table range in either direction ends the iteration
        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
            raise StopIteration
        # NOTE: given the range check above, this mod is effectively a
        # no-op; kept for behavioural compatibility with the original
        self.ix = np.mod(self.ix,len(self.stations.table))
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_row(self,row):
        """Jump to row number *row* and return (STNID, row)."""
        self.ix = row
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_STNID(self,STNID):
        """Jump to the row of station *STNID* and return (STNID, row)."""
        self.ix = np.where((self.stations.table.index == STNID))[0][0]
        print(self.ix)
        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def __prev__(self):
        # bug fix: the original called self.__next__(self, jump=-1), which
        # passes self twice on a bound method and raises TypeError
        return self.__next__(jump=-1)

    def close(self):
        # bug fix: the original signature "def close():" lacked self,
        # making the method uncallable on instances
        del self.ix
+
class records_iterator(object):
    """Iterator over the rows of a records DataFrame, yielding (index, row).

    NOTE: this re-declares the records_iterator defined earlier in this
    module; this version additionally wraps negative positions with np.mod
    (relevant for backward jumps via __prev__).
    """

    def __init__(self,records):
        # records: pandas DataFrame of sounding records
        self.records = records
        self.ix = -1  # position before the first row

    def __iter__(self):
        return self

    def __next__(self,jump=1):
        """Advance by *jump* rows and return (index, row)."""
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration
        # wrap negative positions (only reachable via backward jumps)
        self.ix = np.mod(self.ix,len(self.records))
        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bug fix: the original called self.__next__(self, jump=-1), which
        # passes self twice on a bound method and raises TypeError
        return self.__next__(jump=-1)
+
+
+# #'_afternoon.yaml'
+# def get_record_yaml(yaml_file,index_start,index_end):
+#     filename = yaml_file.name
+#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
+#     #yaml_file = open(filename)
+# 
+#     #print('going to next observation',filename)
+#     yaml_file.seek(index_start)
+# 
+#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
+# 
+#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+#     filebuffer.write(buf)
+#     filebuffer.close()
+#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
+#     
+#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+# 
+#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
+#     print(command)
+#     os.system(command)
+#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
+#     record_dict = json.load(jsonstream)
+#     jsonstream.close()
+#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+#  
+#     # datetimes are incorrectly converted to strings. We need to convert them
+#     # again to datetimes
+#     for key,value in record_dict['pars'].items():
+#         # we don't want the key with columns that have none values
+#         if value is not None: 
+#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
+#            # elif (type(value) == str):
+#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+#                 
+#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
+# 
+#         if (value == 0.9e19) or (value == '.9e19'):
+#             record_dict['pars'][key] = np.nan
+#     for key in record_dict.keys():
+#         print(key)
+#         if key in ['air_ap','air_balloon',]:
+#             print('check')
+#             for datakey,datavalue in record_dict[key].items():
+#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
+# 
+#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
+# 
+#     c4gli = class4gl_input()
+#     c4gli.load_yaml_dict(record_dict)
+#     return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
    """Collect the per-record parameter table for a set of stations.

    Scans the yaml archives <STNID>[_<chunk>]_<subset>.yaml under
    *path_yaml* and builds one DataFrame with a row per yaml record,
    indexed by (STNID, chunk, index).  Each row holds the record's scalar
    parameters plus the byte offsets (index_start/index_end) of the record
    inside its yaml file, so the full record can later be re-read with
    get_record_yaml.  Per-file results are cached as .pkl files next to
    the yaml files; a cache is reused unless it is older than its yaml
    file or refetch_records is True.

    Parameters
    ----------
    stations : pandas DataFrame of stations, indexed by STNID
    path_yaml : directory containing the yaml archives
    getchunk : 'all' to scan every chunk file, or a single chunk number
    subset : filename tag, e.g. 'morning', 'afternoon', 'ini', 'mod'
    refetch_records : when True, force regeneration of the .pkl caches

    Returns: pandas DataFrame (possibly empty) with all records found.
    """

    records = pd.DataFrame()
    # loop invariants hoisted out of the per-record loop:
    # only these builtin scalar types are copied verbatim into the table
    regular_numeric_types = [int, bool, float]
    # keys holding datetimes serialized as strings by the yaml dump
    datetime_keys = ['lSunrise','lSunset','datetime','ldatetime',
                     'datetime_daylight','ldatetime_daylight']
    for STNID,station in stations.iterrows():
        dictfnchunks = []
        # bug fix: the original tested "getchunk is 'all'", an identity
        # comparison that only works because CPython interns short string
        # literals; use equality instead
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually for
            # original profile pairs)
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn,chunk=chunk))

            # otherwise, we use the new multi-chunk filename format
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1
        else:
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
            dictfnchunks.append(dict(fn=fn,chunk=getchunk))

        # (iterating an empty list is a no-op, so no explicit length guard)
        for dictfnchunk in dictfnchunks:
            yamlfilename = dictfnchunk['fn']
            chunk = dictfnchunk['chunk']
            print(chunk)

            pklfilename = yamlfilename.replace('.yaml','.pkl')

            # decide whether the cached table needs to be (re)built
            generate_pkl = False
            if not os.path.isfile(pklfilename):
                print('pkl file does not exist. I generate "'+\
                      pklfilename+'" from "'+yamlfilename+'"...')
                generate_pkl = True
            elif not (os.path.getmtime(yamlfilename) < \
                os.path.getmtime(pklfilename)):
                print('pkl file older than yaml file, so I regenerate "'+\
                      pklfilename+'" from "'+yamlfilename+'"...')
                generate_pkl = True

            if refetch_records:
                print('refetch_records flag is True. I regenerate "'+\
                      pklfilename+'" from "'+yamlfilename+'"...')
                generate_pkl = True
            if not generate_pkl:
                records = pd.concat([records,pd.read_pickle(pklfilename)])
            else:
                with open(yamlfilename) as yaml_file:

                    dictout = {}

                    # skip ahead to the first yaml document separator
                    next_record_found = False
                    end_of_file = False
                    while (not next_record_found) and (not end_of_file):
                        linebuffer = yaml_file.readline()
                        next_record_found = (linebuffer == '---\n')
                        end_of_file = (linebuffer == '')
                    next_tell = yaml_file.tell()

                    while not end_of_file:

                        print(' next record:',next_tell)
                        current_tell = next_tell
                        next_record_found = False
                        yaml_file.seek(current_tell)
                        # copy this record to a buffer file, neutralizing
                        # inf/nan which json cannot represent
                        filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                        linebuffer = ''
                        while ( (not next_record_found) and (not end_of_file)):
                            filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        filebuffer.close()

                        next_tell = yaml_file.tell()
                        index_start = current_tell
                        index_end = next_tell

                        # yaml -> json conversion with an external Ruby
                        # one-liner (hard-coded interpreter path)
                        command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' '
                        print(command)

                        os.system(command)
                        #jsonoutput = subprocess.check_output(command,shell=True)
                        jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
                        record = json.load(jsonstream)
                        dictouttemp = {}
                        for key,value in record['pars'].items():
                            # we don't want the keys with None values
                            if value is not None:
                                if (type(value) in regular_numeric_types):
                                    dictouttemp[key] = value
                                elif key in datetime_keys:
                                    dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
                                    # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                    dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                        recordindex = record['index']
                        dictouttemp['chunk'] = chunk
                        dictouttemp['index_start'] = index_start
                        dictouttemp['index_end'] = index_end
                        os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
                        for key,value in dictouttemp.items():
                            if key not in dictout.keys():
                                dictout[key] = {}
                            dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                        print(' obs record registered')
                        jsonstream.close()
                        os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
                records_station = pd.DataFrame.from_dict(dictout)
                records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                print('writing table file ('+pklfilename+') for station '\
                      +str(STNID))
                records_station.to_pickle(pklfilename)
                records = pd.concat([records,records_station])
    return records
+
def stdrel(mod,obs,columns):
    """Per-station standardized deviation of model values vs observations.

    For each column and each STNID group, computes
        (mean_mod - mean_obs)/std_obs + (mod - mean_mod)/std_obs,
    i.e. the model anomaly expressed in units of the observed standard
    deviation, shifted by the standardized model-observation bias.
    Returns a DataFrame with the same columns, aligned to *mod*'s index.
    """
    out = pd.DataFrame(columns=columns)
    for col in columns:
        grp_mod = mod.groupby('STNID')[col]
        grp_obs = obs.groupby('STNID')[col]
        obs_std = grp_obs.transform('std')
        mod_mean = grp_mod.transform('mean')
        bias = (mod_mean - grp_obs.transform('mean')) / obs_std
        anomaly = (mod[col] - mod_mean) / obs_std
        out[col] = bias + anomaly
    return out
+
def pct(obs,columns):
    """Fractional (percentile) rank in (0, 1] of each value per column.

    Ranks every value of *obs* within its own column using
    Series.rank(pct=True); ties receive the average rank.  Returns a
    DataFrame holding one ranked column per entry of *columns*.
    """
    ranks = {column: obs[column].rank(pct=True) for column in columns}
    return pd.DataFrame(ranks, columns=columns)
+
def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
    """Hourly tendencies of the model state between morning and afternoon.

    For each key, computes (mod_afternoon[key] - obs_morning[key]) divided
    by the elapsed time between the morning and afternoon soundings,
    scaled to units per hour.  The result column is named 'd<key>dt'.

    Parameters
    ----------
    mod_afternoon : DataFrame with the modelled afternoon state
    obs_afternoon, obs_morning : DataFrames with an 'ldatetime' column
        holding the (local) sounding datetimes
    keys : iterable of column names to differentiate

    Returns: DataFrame with one 'd<key>dt' column per key.
    """
    stats = pd.DataFrame()
    # bug fix: the original used .dt.seconds, which returns only the
    # seconds *component* of the timedelta (0..86399) -- it silently drops
    # whole days and misbehaves for negative intervals; total_seconds()
    # gives the true elapsed time.  Hoisted out of the loop (invariant).
    elapsed = (obs_afternoon.ldatetime - obs_morning.ldatetime).dt.total_seconds()
    for key in keys:
        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/elapsed*3600.
    return stats
+
diff --git a/build/lib/lib/interface_multi.py b/build/lib/lib/interface_multi.py
new file mode 100644
index 0000000..83148e5
--- /dev/null
+++ b/build/lib/lib/interface_multi.py
@@ -0,0 +1,2061 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+# from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
# Diverging red -> grey -> blue colormap control points for the stats view.
# Per matplotlib's LinearSegmentedColormap convention, each channel lists
# (position, value-below, value-above) tuples over [0, 1].
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


# NOTE(review): 'module load' runs in a throw-away subshell here, so it cannot
# change the environment of this Python process -- confirm whether this line
# has any effect and can be removed.
os.system('module load Ruby')
+
+class c4gl_interface_soundings(object):
    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
        """ creates an interactive interface for analysing class4gl experiments

        INPUT:
            path_exp : path of the experiment output
            path_obs : path of the observations 
            globaldata: global data that is being shown on the map
            refetch_records: rebuild the per-station record tables from the
                             yaml output instead of reusing cached tables?
            refetch_stations: do we need to build the list of the stations again?
            inputkeys: input variables that can be browsed on the worldmap and
                       that are percentile-ranked in the stats frame
                       (NOTE(review): mutable default list -- shared between
                       calls; confirm it is never mutated)
        OUTPUT:
            the procedure returns an interface object with interactive plots

        """
        
        # set the ground
        self.globaldata = globaldata

 
        self.path_exp = path_exp
        self.path_obs = path_obs
        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')

        # # get the list of stations
        # stationsfile = self.path_exp+'/stations_list.csv'
        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
        #     stations = pd.read_csv(stationsfile)
        # else:
        #     stations = get_stations(self.path_exp)
        #     stations.to_csv(stationsfile)

        # stations = stations.set_index('STNID')

        # self.frames holds the state of the three interactive panels:
        # 'stats' (scatter panels), 'worldmap' (map) and 'profiles' (soundings)
        self.frames = {}

        self.frames['stats'] = {}
        self.frames['worldmap'] = {}
                
        self.frames['profiles'] = {}
        self.frames['profiles'] = {}
        self.frames['profiles']['DT'] = None
        self.frames['profiles']['STNID'] = None

        #self.frames['worldmap']['stationsfile'] = stationsfile
        self.frames['worldmap']['stations'] = stations(self.path_exp, \
                                                       suffix='ini',\
                                                       refetch_stations=refetch_stations)

        # Initially, the stats frame inherets the values/iterators of
        # worldmap
        for key in self.frames['worldmap'].keys():
            self.frames['stats'][key] = self.frames['worldmap'][key]

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_ini'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='ini',\
                                           refetch_records=refetch_records
                                           )
        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_mod'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='mod',\
                                           refetch_records=refetch_records
                                           )

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_obs_afternoon'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_obs,\
                                           subset='afternoon',\
                                           refetch_records=refetch_records
                                           )

        # model records are assumed to come in the same order as the ini
        # records, so they can share the (STNID, chunk, index) index directly
        self.frames['stats']['records_all_stations_mod'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        self.frames['stats']['records_all_stations_ini']['dates'] = \
            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)

        # align the afternoon observations to the morning (ini) records by
        # matching on (STNID, local date), then adopt the ini index
        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index

        self.frames['stats']['records_all_stations_obs_afternoon'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]

        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        # variables for which daytime tendencies are computed and plotted
        self.frames['stats']['viewkeys'] = ['h','theta','q']
        print('Calculating table statistics')
        self.frames['stats']['records_all_stations_mod_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_mod'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )
        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )

        self.frames['stats']['inputkeys'] = inputkeys
        
        # self.frames['stats']['inputkeys'] = \
        #     [ key for key in \
        #       self.globaldata.datasets.keys() \
        #       if key in \
        #       list(self.frames['stats']['records_all_stations_obs'].columns)]


        # get units from the class4gl units database
        self.units = dict(units)
        # for those that don't have a definition yet, we just ask a question
        # mark
        for var in self.frames['stats']['inputkeys']:
            self.units[var] = '?'

        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
        self.frames['stats']['records_all_stations_ini_pct'] = \
                  pct(self.frames['stats']['records_all_stations_ini'], \
                      columns = self.frames['stats']['inputkeys'])

        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
        #     mod['

        # 
        # 
        # \
        #        self.frames['stats']['records_all_stations_mod'], \



        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
        #               columns = [ 'd'+key+'dt' for key in \
        #                           self.frames['stats']['viewkeys']], \
        #              )

        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
        #               obs = self.frames['stats']['records_all_stations_ini'], \
        #               columns = self.frames['stats']['viewkeys'], \
        #              )
        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
        
        print('filtering pathological data')
        # some observational sounding still seem problematic, which needs to be
        # investigated. In the meantime, we filter them
        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))

        # we filter ALL data frames!!!
        for key in self.frames['stats'].keys():
            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
               (self.frames['stats'][key].index.names == indextype):
                self.frames['stats'][key] = self.frames['stats'][key][valid]
        print(str(len(valid) - np.sum(valid))+' soundings are filtered')

        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index


        print("filtering stations from interface that have no records")
        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                    == STNID).sum() == 0):
                print("dropping", STNID)
                self.frames['worldmap']['stations'].table = \
                        self.frames['worldmap']['stations'].table.drop(STNID)
                    
        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
        
        # TO TEST: should be removed, since it's is also done just below
        self.frames['stats']['stations_iterator'] = \
            self.frames['worldmap']['stations_iterator'] 

        # start on the first input key and the first station
        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
        # NOTE(review): the next line reassigns the same value to itself and
        # appears to be a no-op -- confirm and remove
        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
        self.next_station()

        # self.goto_datetime_worldmap(
        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
        #     'after')
+    def sel_station(self,STNID=None,rownumber=None):
+
+        if (STNID is not None) and (rownumber is not None):
+            raise ValueError('Please provide either STNID or rownumber, not both.')
+
+        if (STNID is None) and (rownumber is None):
+            raise ValueError('Please provide either STNID or rownumber.')
+            
+        if STNID is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
+            print(
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+            )
+            self.update_station()
+        elif rownumber is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
+            self.update_station()
+
+
+
+    def next_station(self,event=None,jump=1):
+        with suppress(StopIteration):
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+                = self.frames['worldmap']['stations_iterator'].__next__(jump)
+            # self.frames['worldmap']['stations_iterator'].close()
+            # del(self.frames['worldmap']['stations_iterator'])
+            # self.frames['worldmap']['stations_iterator'] = \
+            #                 selfself.frames['worldmap']['stations'].iterrows()
+            # self.frames['worldmap']['STNID'],\
+            # self.frames['worldmap']['current_station'] \
+            #     = self.frames['worldmap']['stations_iterator'].__next__()
+
+        self.update_station()
+
+    def prev_station(self,event=None):
+        self.next_station(jump = -1,event=event)
    def update_station(self):
        """Synchronise all panels with the newly selected station.

        Copies the station selection from the worldmap frame into the stats
        frame, builds the per-station record tables, (re)opens the station's
        yaml files, and positions both record iterators on the station's
        first record before refreshing the plots via update_record().
        """
        for key in ['STNID','current_station','stations_iterator']: 
            self.frames['stats'][key] = self.frames['worldmap'][key] 



        # generate index of the current station
        # (boolean mask over the full multi-station record index)
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts out as a copy of the stats frame
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        # close any yaml file handles left over from the previous station
        # before opening the ones for the newly selected station
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently

        self.update_record()
+
    def next_record(self,event=None,jump=1):
        """Advance the profiles record iterator by *jump* records.

        On StopIteration the current record is left unchanged; afterwards the
        stats frame is synchronised with the profiles frame and the plots are
        refreshed via update_record().
        """
        with suppress(StopIteration):
            (self.frames['profiles']['STNID'] , \
            self.frames['profiles']['current_record_chunk'] , \
            self.frames['profiles']['current_record_index']) , \
            self.frames['profiles']['current_record_mod'] = \
                      self.frames['profiles']['records_iterator'].__next__(jump)
        # except (StopIteration):
        #     self.frames['profiles']['records_iterator'].close()
        #     del( self.frames['profiles']['records_iterator'])
        #     self.frames['profiles']['records_iterator'] = \
        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
        #     (self.frames['profiles']['STNID'] , \
        #     self.frames['profiles']['current_record_index']) , \
        #     self.frames['profiles']['current_record_mod'] = \
        #                     self.frames['profiles']['records_iterator'].__next__()

        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        self.update_record()
+
+    def prev_record(self,event=None):
+        self.next_record(jump=-1,event=event)
+
    def update_record(self):
        """Load all tables and yaml soundings for the currently selected record.

        Looks up the record's rows in the per-station tables by the
        (STNID, chunk, index) key, reads the ini/mod/afternoon yaml records
        from the already-open station files, repositions the worldmap in time
        when the current map dataset is time-dependent, and redraws the
        figure if it exists.
        """
        self.frames['profiles']['current_record_ini'] =  \
            self.frames['profiles']['records_current_station_ini'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'],\
                  self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon'] =  \
            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'] , \
                  self.frames['profiles']['current_record_index'])]

        self.frames['profiles']['current_record_mod_stats'] = \
                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
                    self.frames['profiles']['STNID'], \
                    self.frames['profiles']['current_record_chunk'], \
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_ini_pct'] = \
                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]

        # keep the stats frame in sync with the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        # frame
        # note that the current station, record is the same as the stats frame for initialization

        # select first 
        #self.frames['profiles']['current_record_index'], \
        #self.frames['profiles']['record_yaml_mod'] = \
        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
        #                   self.frames['stats']['current_record_index'])
        # read the model sounding from the open yaml file using the record's
        # byte offsets (index_start/index_end)
        self.frames['profiles']['record_yaml_mod'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_mod'], \
               self.frames['profiles']['current_record_mod'].index_start,
               self.frames['profiles']['current_record_mod'].index_end,
               mode='mod')
                                
        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_ini'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_ini'], \
               record_ini.index_start,
               record_ini.index_end,
                mode='ini')

        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_obs_afternoon'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_afternoon'], \
               record_afternoon.index_start,
               record_afternoon.index_end,
                mode='ini')


        key = self.frames['worldmap']['inputkey']
        # only redraw the map if the current world map has a time
        # dimension
        if 'time' in self.globaldata.datasets[key].page[key].dims:
            self.goto_datetime_worldmap(
                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                'after')
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap',
                                                  'profiles'])
        else:
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap_stations',
                                                  'profiles'])
+
+    def abline(self,slope, intercept,axis):
+        """Plot a line from slope and intercept"""
+        #axis = plt.gca()
+        x_vals = np.array(axis.get_xlim())
+        y_vals = intercept + slope * x_vals
+        axis.plot(x_vals, y_vals, 'k--')
+
    def plot(self):
        """Build the full interactive figure: stats scatter panels, worldmap,
        profile axes, colorbars, navigation buttons and event hooks.

        Creates self.fig/self.axes/self.btns/self.tbox and triggers the first
        refresh_plot_interface().
        """
        import pylab as pl
        from matplotlib.widgets import Button
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        '''
        Definition of the axes for the sounding table stats
        '''
        
        fig = pl.figure(figsize=(14,9))
        axes = {} #axes
        btns = {} #buttons

        # frames, which sets attributes for a group of axes, buttens, 
        # one scatter panel per tendency column, stacked in the left column
        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
            label = 'stats_'+str(key)
            axes[label] = fig.add_subplot(\
                            len(self.frames['stats']['viewkeys']),\
                            5,\
                            5*ikey+1,label=label)
            # Actually, the axes should be a part of the frame!
            #self.frames['stats']['axes'] = axes[

            # pointer to the axes' point data
            axes[label].data = {}

            # pointer to the axes' color fields
            axes[label].fields = {}


        fig.tight_layout()
        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)

        label ='stats_colorbar'
        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
        axes[label].fields = {}

        # NOTE(review): this colormap duplicates the module-level cdictpres /
        # statsviewcmap definitions -- confirm whether one can be removed
        from matplotlib.colors import LinearSegmentedColormap
        cdictpres = {'blue': (\
                           (0.,    0.,  0.),
                           (0.25,  0.25, 0.25),
                           (0.5,  .70, 0.70),
                           (0.75, 1.0, 1.0),
                           (1,     1.,  1.),
                           ),
               'green': (\
                           (0. ,   0., 0.0),
                           (0.25,  0.50, 0.50),
                           (0.5,  .70, 0.70),
                           (0.75,  0.50, 0.50),
                           (1  ,    0,  0.),
                           ),
               'red':  (\
                          (0 ,  1.0, 1.0),
                          (0.25 ,  1.0, 1.0),
                           (0.5,  .70, 0.70),
                          (0.75 , 0.25, 0.25),
                          (1,    0., 0.),
                          )}
        
        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


        label = 'times'
               
        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}


        label = 'worldmap'
               
        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}
        axes[label].lat = None
        axes[label].lon = None

        label = 'worldmap_colorbar'
        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
        axes[label].fields = {}

        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
        label = 'worldmap_stations'
        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
        axes[label].data = {}

        # hook up mouse interaction (station picking and hover highlighting)
        fig.canvas.mpl_connect('pick_event', self.on_pick)
        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)


        """ buttons definitions """
        
        label = 'bprev_dataset'
        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous dataset')
        btns[label].on_clicked(self.prev_dataset)

        label = 'bnext_dataset'
        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Next dataset')
        btns[label].on_clicked(self.next_dataset)

        label = 'bprev_datetime'
        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous datetime')
        btns[label].on_clicked(self.prev_datetime)

        label = 'bnext_datetime'
        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Next datetime')
        btns[label].on_clicked(self.next_datetime)


        label = 'bprev_station'
        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous station')
        btns[label].on_clicked(self.prev_station)

        label = 'bnext_station'
        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Next station')
        btns[label].on_clicked(self.next_station)

        label = 'bprev_record'
        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous record')
        btns[label].on_clicked(self.prev_record)

        label = 'bnext_record'
        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Next record')
        btns[label].on_clicked(self.next_record)


        # self.nstatsview = nstatsview
        # self.statsviewcmap = statsviewcmap
        self.fig = fig
        self.axes = axes
        self.btns = btns
        self.tbox = {}
        # self.hover_active = False

        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
        #                                transform=plt.gcf().transFigure)

        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
                                          transform=plt.gcf().transFigure)

        # profile axes (right side) and model-output time series (centre-bottom)
        label = 'air_ap:theta'
        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)

        label = 'air_ap:q'
        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)

        label = 'out:h'
        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)

        label = 'out:theta'
        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)

        label = 'out:q'
        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)

        label = 'SEB'
        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)


        self.hover_active = False
        self.fig = fig
        self.fig.show()
        self.fig.canvas.draw()
        self.refresh_plot_interface()
+
+
+    # def scan_stations(self):
+    #     blabla
+        
+
+
+    # def get_records(current_file):
+    #     records = pd.DataFrame()
+
+    #     # initial position
+    #     next_record_found = False
+    #     while(not next_record_found):
+    #         next_record_found = (current_file.readline() == '---\n')
+    #     next_tell = current_file.tell() 
+    #     end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #     while not end_of_file:
+    #         current_tell = next_tell
+    #         next_record_found = False
+    #         current_file.seek(current_tell)
+    #         while ( (not next_record_found) and (not end_of_file)):
+    #             current_line = current_file.readline()
+    #             next_record_found = (currentline == '---\n')
+    #             end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #         # we store the position of the next record
+    #         next_tell = current_file.tell() 
+    #         
+    #         # we get the current record. Unfortunately we need to reset the
+    #         # yaml record generator first.
+    #         current_yamlgen.close()
+    #         current_yamlgen = yaml.load_all(current_file)
+    #         current_file.seek(current_tell)
+    #         current_record_mod = current_yamlgen.__next__()
+    #     current_yamlgen.close()
+
+    #     return records
+
+       #      next_record_found = False
+       #      while(not record):
+       #          next_record_found = (self.current_file.readline() == '---\n')
+       #      self.current_tell0 = self.current_file.tell() 
+
+       #  
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell0 = self.current_file.tell() 
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell1 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell0)
+       #  self.r0 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell1)
+       #  next_record_found = False
+       #  while ( (not next_record_found) and (not end_of_file):
+       #      current_line = self.current_file.readline()
+       #      next_record_found = (currentline == '---\n')
+       #      end_of_file = (currentline == '') # an empty line means we are at the end
+
+       #  self.current_tell2 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell1)
+       #  self.r1 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell2)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell3 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell2)
+       #  self.r2 = self.current_yamlgen.__next__()
+
+       #  # go to position of next record in file
+       #  self.current_file.seek(self.current_tell3)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell4 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell3)
+       #  self.r3 = self.current_yamlgen.__next__()
+ 
+       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
+
+    def goto_datetime_worldmap(self,DT,shift=None):
+        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                self.frames['worldmap']['iDT'] += 1
+            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                self.frames['worldmap']['iDT'] -= 1 
+            # for gleam, we take the values of the previous day
+            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                self.frames['worldmap']['iDT'] -= 2 
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+        #else:
+        #    self.frames['worldmap'].pop('DT')
+
+    def next_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def prev_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def next_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+    def prev_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+
+
+    def sel_dataset(self,inputkey):
+        self.frames['worldmap']['inputkey'] = inputkey
+        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
+        self.goto_datetime_worldmap(
+            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+            'after')# get nearest datetime of the current dataset to the profile
+        if "fig" in self.__dict__.keys():
+            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+       
+    # def prev_station(self,event=None):
+    #     self.istation = (self.istation - 1) % self.stations.shape[0]
+    #     self.update_station()
+
+
+
+
+    #def update_datetime(self):
+    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
+    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
+    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
+    #        print(self.worldmapfocus['DT'])
+    #        self.refresh_plot_interface(only='worldmap')
+
+    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
+
+        #print('r1')
+        for argkey in args.keys():
+            self.__dict__[arg] = args[argkey]
+
+        axes = self.axes
+        tbox = self.tbox
+        frames = self.frames
+        fig = self.fig
+ 
+        if (only is None) or ('worldmap' in only):
+            globaldata = self.globaldata
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+            else:
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+            keystotranspose = ['lat','lon']
+            for key in dict(datasetxr.dims).keys():
+                if key not in keystotranspose:
+                    keystotranspose.append(key)
+
+            datasetxr = datasetxr.transpose(*keystotranspose)
+            datasetxr = datasetxr.sortby('lat',ascending=False)
+
+            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+            lonleft = lonleft - 360.
+            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+            label = 'worldmap'
+            axes[label].clear()
+            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+
+        if (only is None) or ('worldmap' in only):
+            #if 'axmap' not in self.__dict__ :
+            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
+            #else:
+
+            #stations = self.stations
+
+
+            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
+            #     resolution = 'l', 
+            # area_thresh = 0.1,
+            #     llcrnrlon=-180., llcrnrlat=-90.0,
+            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
+            # 
+            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
+            # self.gmap.drawcountries(color='white',linewidth=0.3)
+            # #self.gmap.fillcontinents(color = 'gray')
+            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
+            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
+            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
+            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # #self.ax5.shadedrelief()
+
+           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
+
+
+            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+
+            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
+            if 'lev' in field.dims:
+                field = field.isel(lev=-1)
+
+            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+            axes[label].axis('off')
+
+            from matplotlib import cm
+            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+            
+            
+            title=frames['worldmap']['inputkey']
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            axes[label].set_title(title)
+
+            label ='worldmap_colorbar'
+            axes[label].clear()
+            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+
+
+            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+            # x,y = self.gmap(lons,lats)
+            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+
+        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+
+            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            store_xlim = {}
+            store_ylim = {}
+            for ikey, key in enumerate(statskeys_out):
+                if (only is not None) and ('stats_lightupdate' in only):
+                    store_xlim[key] = axes['stats_'+key].get_xlim()
+                    store_ylim[key] = axes['stats_'+key].get_ylim()
+                self.axes['stats_'+key].clear()    
+
+            label = 'times'
+            self.axes[label].clear()
+
+            key = 'dthetadt'
+            x = self.frames['stats']['records_all_stations_ini']['datetime']
+            #print(x)
+            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            #print(y)
+            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            #print(z)
+
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            self.axes[label].data[label] = self.axes[label].scatter(x.values,
+                                                                    y.values,
+                                                                    c=z.values,
+                                                                    cmap=self.statsviewcmap,
+                                                                    s=2,
+                                                                    vmin=0.,
+                                                                    vmax=1.,
+                                                                    alpha=alpha_cloud_pixels)
+
+            
+            x = self.frames['stats']['records_current_station_ini']['datetime']
+            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+
+            x = self.frames['profiles']['records_current_station_ini']['datetime']
+            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
+            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
+
+            for ikey, key in enumerate(statskeys_out):
+
+                # show data of all stations
+                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                qvalmax = x.quantile(0.999)
+                qvalmin = x.quantile(0.001)
+                print('applying extra filter over extreme values for plotting stats')
+                selx = (x >= qvalmin) & (x < qvalmax)
+                sely = (x >= qvalmin) & (x < qvalmax)
+                x = x[selx & sely]
+                y = y[selx & sely]
+                z = z[selx & sely]
+                self.axes['stats_'+key].data['stats_'+key] = \
+                       self.axes['stats_'+key].scatter(x,y, c=z,\
+                                cmap=self.statsviewcmap,\
+                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
+
+                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
+
+                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
+                y = self.frames['stats']['current_record_mod_stats'][key]
+                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
+                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
+                    axes['stats_'+key].annotate(text, \
+                                               xy=(x,y),\
+                                               xytext=(0.05,0.05),\
+                                               textcoords='axes fraction',\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               color='white',\
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                # self.axes['stats_'+key].data[key+'_current_record'] = \
+                #        self.axes['stats_'+key].scatter(x,y, c=z,\
+                #                 cmap=self.statsviewcmap,\
+                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
+
+                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
+                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
+                # # highlight data for curent station
+                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+
+                if ikey == len(statskeys_out)-1:
+                    self.axes['stats_'+key].set_xlabel('external')
+                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
+                axes['stats_'+key].set_ylabel('model')
+
+
+                if (only is not None) and ('stats_lightupdate' in only):
+                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
+                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
+                else:
+                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
+                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
+                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
+                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
+                self.abline(1,0,axis=self.axes['stats_'+key])
+
+        if (only is None) or ('stats_colorbar' in only):
+            label ='stats_colorbar'
+            axes[label].clear()
+            import matplotlib as mpl
+            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
+            self.axes[label].fields[label] = \
+             mpl.colorbar.ColorbarBase(self.axes[label],\
+                        orientation='horizontal',\
+                        label="percentile of "+self.frames['worldmap']['inputkey'],
+                        alpha=1.,
+                                cmap=self.statsviewcmap,\
+                                       norm=norm
+                         )
+
+        #print('r1')
+        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
+            #print('r2')
+            label = 'worldmap_stations'
+            axes[label].clear()
+            
+            stations = self.frames['worldmap']['stations'].table
+            globaldata = self.globaldata
+            
+            key = label
+
+            #print('r3')
+            if (stations is not None):
+                xlist = []
+                ylist = []
+                #print('r4')
+                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
+            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    xlist.append(x)
+                    ylist.append(y)
+                #picker is needed to make it clickable (pick_event)
+                axes[label].data[label] = axes[label].scatter(xlist,ylist,
+                                                              c='r', s=15,
+                                                              picker = 15,
+                                                              label=key,
+                                                              edgecolor='k',
+                                                              linewidth=0.8)
+
+            # cb.set_label('Wilting point [kg kg-3]')
+                #print('r5')
+
+                
+            #     xseries = []
+            #     yseries = []
+            #     for iSTN,STN in stations.iterrows():
+            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
+            #         xseries.append(x)                    
+            #         yseries.append(y)
+            #         
+            #         
+            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
+                    
+                if ('current_station' in frames['worldmap']):
+                    #print('r5')
+                    STN = frames['stats']['current_station']
+                    STNID = frames['stats']['STNID']
+                    #print('r5')
+
+                    x,y = len(axes['worldmap'].lon)* \
+                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
+                          len(axes['worldmap'].lat)* \
+                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    #print('r6')
+                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
+                    #                          == \
+                    #                          self.frames['worldmap']['STNID'])\
+                    #                         & \
+                    #                         (self.seltablestats['DT'] \
+                    #                          == self.axes['statsview0].focus['DT']) \
+                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
+                    #print('r7')
+                    text = 'STNID: '+ format(STNID,'10.0f') + \
+                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
+                            ', LON: '+format(STN['longitude'],'3.3f')+ \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+
+                            #+', VAL: '+format(VAL,'.3e')
+
+                    axes[label].scatter(x,y, c='r', s=30,\
+                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
+                    #print('r8')
+            
+                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
+                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
+                    #colorstation = max((min((1.,colorstation)),0.))
+                    colorstation =0.2
+                    from matplotlib import cm
+                    axes[label].annotate(text,
+                                         xy=(x,y),
+                                         xytext=(0.05,0.05),
+                                         textcoords='axes fraction', 
+                                         bbox=dict(boxstyle="round",
+                                         fc = cm.viridis(colorstation)),
+                                         arrowprops=dict(arrowstyle="->",
+                                                         linewidth=1.1),
+                                         color='white' if colorstation < 0.5 else 'black')
+                    #print('r9')
+
+                    # #pos = sc.get_offsets()[ind["ind"][0]]
+                    # 
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label].data[label+'statannotate'].set_text(text)
+                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
+                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
+            #print('r9')
+            axes[label].axis('off')
+            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
+            axes[label].set_ylim((len(axes['worldmap'].lat),0))
+            #print('r10')
+
+        if (only is None) or ('profiles' in only): 
+            #print('r11')
+
+            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
+            # # self.update_station(goto_first_sounding=False)
+            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
+            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
+            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
+            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
+
+            label = 'air_ap:theta'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # +\
+                # ' -> '+ \
+                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+            
+            
+            
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+            #print('r12')
+
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
+            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
+            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            #print('r13')
+            # 
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r14')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+
+            #print('r15')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+                          
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r16')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r17')
+            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            print(hmax)
+            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            if valid_mod:
+
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="mod "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+
+            #print('r18')
+            axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('theta [K]')
+
+            label = 'air_ap:q'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+
+            #print('r19')
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            if valid_mod:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            else:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            # 
+            #print('r20')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r21')
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            #print('r23')
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r24')
+            if valid_mod:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="fit ")#+\
+                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #+'LT')
+            #print('r25')
+            #axes[label].legend()
+
+            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            #axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('q [kg/kg]')
+
+            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+
+            # #pl.subplots_adjust(right=0.6)
+
+            # label = 'q_pro'
+            # axes[label].clear()
+
+            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
+            # 
+            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
+            # 
+            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
+
+            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
+            # #pl.subplots_adjust(right=0.6)
+            # axes[label].set_xlabel('specific humidity [kg/kg]')
+ 
+
+            #print('r26')
+            time = self.frames['profiles']['record_yaml_mod'].out.time
+            for ilabel,label in enumerate(['h','theta','q']):
+                axes["out:"+label].clear()
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].set_ylabel(label)
+                if ilabel == 2:
+                    axes["out:"+label].set_xlabel('local sun time [h]')
+                
+            #print('r27')
+            label = 'SEB'
+            axes[label].clear()
+            
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
+            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
+            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
+                
+            #print('r28')
+            
+            axes[label].legend()
+            
+            #         for ax in self.fig_timeseries_axes:
+#             ax.clear()
+#         
+#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
+#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
+#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
+#         #print(self.morning_sounding.c4gl.out.Swin)
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
+#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
+#         self.fig_timeseries_axes[3].legend()
+#         self.fig.canvas.draw()
+            
+
+
+
+
+
+
+        #self.ready()
+        #print('r29')
+        fig.canvas.draw()
+        #fig.show()
+
+        self.axes = axes
+        self.tbox = tbox
+        self.fig = fig
+
+    def on_pick(self,event):
+        """Matplotlib pick-event handler: refocus the GUI on the clicked item.
+
+        Dispatches on which panel the picked artist belongs to:
+        - worldmap / worldmap_stations: the hover handler has already set the
+          station, so only ``update_station`` needs to run if it differs from
+          the profiles frame.
+        - stats panels: copy the hovered selection from the 'stats' frame into
+          the 'worldmap' and 'profiles' frames, reopen the per-station yaml
+          files, scroll a fresh records iterator to the selected record, and
+          refresh via ``update_record``.
+
+        Parameters
+        ----------
+        event : matplotlib.backend_bases.PickEvent
+            Pick event; ``event.artist`` carries the data-key label and
+            ``event.ind`` the indices of the picked points.
+        """
+        #print("HELLO")
+        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
+        #self.axes['theta_pro'].clear()
+        #self.axes['q_pro'].clear()
+        
+
+        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
+        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
+        # Map each artist label (e.g. 'dthetadt') to its axes key (e.g. 'stats_dthetadt').
+        keys_to_axes = {}
+        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
+
+        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
+        keys_to_axes['worldmap'] = 'worldmap'
+        
+        axes = self.axes
+        #nstatsview = self.nstatsview
+        #statsviewcmap = self.statsviewcmap
+        stations = self.frames['worldmap']['stations'].table
+
+
+        #print("p1")
+        current = event
+        artist = event.artist
+        
+        # The artist label tells us which data key (and hence which axes) was picked.
+        selkey = artist.get_label()
+        
+        #print(keys_to_axes)
+        
+        label = keys_to_axes[selkey]
+        #print("HELLO",selkey,label)
+
+        # # Get to know in which axes we are
+        # label = None
+        # for axeskey in axes.keys():
+        #     if event.inaxes == axes[axeskey]:
+        #         label = axeskey
+        #         
+
+        # cont, pos = None, None
+        
+        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
+        ind = event.ind
+        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
+        # NOTE(review): assumes the first collection on these axes is the pickable
+        # scatter whose offsets are the plotted data points — confirm against the
+        # corresponding plotting code.
+        d = axes[label].collections[0]
+        #d.set_offset_position('data')
+        xy = d.get_offsets()
+        x, y =  xy[:,0],xy[:,1]
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+
+        #print("p2")
+        if len(ind) > 0:
+            #print("p3")
+            # Data coordinates of the first picked point.
+            pos = x[ind[0]], y[ind[0]]
+
+            #if label[:-1] == 'statsview':
+            #    #seltablestatsstdrel = self.seltablestatsstdrel
+            #    #seltablestatspct = self.seltablestatspct
+
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    
+            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
+            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+            #    
+            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
+            #el
+            if (label == 'worldmap') or (label == 'worldmap_stations'):
+                self.hover_active = False
+                if (self.frames['worldmap']['STNID'] !=
+                    self.frames['profiles']['STNID']):
+                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
+                # so we just need to perform update_station
+                    self.update_station()
+            elif (label[:5] == 'stats'):
+
+                self.hover_active = False
+                # Only act when the hovered stats selection differs from the
+                # record currently shown in the profiles frame.
+                if (self.frames['stats']['STNID'] !=
+                self.frames['profiles']['STNID']) or \
+                   (self.frames['stats']['current_record_chunk'] != 
+                    self.frames['profiles']['current_record_chunk']) or \
+                   (self.frames['stats']['current_record_index'] != 
+                    self.frames['profiles']['current_record_index']):
+
+
+
+                    # Propagate the hovered station selection to the worldmap
+                    # frame, and the full stats frame state into profiles.
+                    for key in ['STNID','current_station','stations_iterator']: 
+                        self.frames['worldmap'][key] = self.frames['stats'][key] 
+
+                    for key in self.frames['stats'].keys():
+                        self.frames['profiles'][key] = self.frames['stats'][key]
+
+                    # Re-open the yaml files of the newly selected station/chunk,
+                    # closing any previously opened handles first.
+                    STNID = self.frames['profiles']['STNID']
+                    chunk = self.frames['profiles']['current_record_chunk']
+                    if 'current_station_file_ini' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_ini'].close()
+                    self.frames['profiles']['current_station_file_ini'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+                    if 'current_station_file_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_mod'].close()
+                    self.frames['profiles']['current_station_file_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_afternoon'].close()
+                    self.frames['profiles']['current_station_file_afternoon'] = \
+                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+                    # go to hovered record of current station
+                    self.frames['profiles']['records_iterator'] = \
+                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # ... and go to the record of the profile window (last one that
+                    # was picked by the user)
+                    # Linear scan of the iterator until the (STNID, chunk, index)
+                    # triple matches the current profiles selection.
+                    found = False
+                    EOF = False
+                    while (not found) and (not EOF):
+                        try:
+                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
+                            #print("hello*")
+                            #print(self.frames['profiles']['current_record_index'])
+                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
+                               (index == self.frames['profiles']['current_record_index']) and \
+                               (STNID == self.frames['profiles']['STNID']):
+                                #print('found!')
+                                found = True
+                        except StopIteration:
+                            EOF = True
+                    if found:
+                        # NOTE(review): the found record is written back into the
+                        # 'stats' frame (whose keys were just copied into
+                        # 'profiles' above) — presumably both frames are meant to
+                        # share the selection; confirm this is not meant to be
+                        # self.frames['profiles'].
+                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_chunk'] = chunk
+                        self.frames['stats']['current_record_index'] = index
+                    # # for the profiles we make a distinct record iterator, so that the
+                    # # stats iterator can move independently
+                    # self.frames['profiles']['records_iterator'] = \
+                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # (self.frames['profiles']['STNID'] , \
+                    # self.frames['profiles']['current_record_index']) , \
+                    # self.frames['profiles']['current_record_mod'] = \
+                    #                 self.frames['profiles']['records_iterator'].__next__()
+
+
+                    # for the profiles we make a distinct record iterator, so that the
+                    # stats iterator can move independently
+
+                    self.update_record()
+
+
+
+    def on_plot_hover(self,event):
+        axes = self.axes
+        #print('h1')
+
+        # Get to know in which axes we are
+        label = None
+        for axeskey in axes.keys():
+            if event.inaxes == axes[axeskey]:
+                label = axeskey
+                
+        #print('h2')
+
+        cont, pos = None, None
+        #print (label)
+        
+        if label is not None:
+            if  ('data' in axes[label].__dict__.keys()) and \
+                (label in axes[label].data.keys()) and \
+                (axes[label].data[label] is not None):
+                
+                #print('h3')
+                cont, ind =  axes[label].data[label].contains(event)
+                selkey = axes[label].data[label].get_label()
+                if len(ind["ind"]) > 0:
+                    #print('h4')
+                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
+                    #print('pos',pos,selkey)
+
+
+                    #if label[:-1] == 'statsview':
+                    #    seltablestatsstdrel = self.seltablestatsstdrel
+                    #    seltablestatspct = self.seltablestatspct
+
+                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
+                    #    self.hover_active = True
+                    #    
+                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
+                    #    
+                    #el
+                    #print(label[:5])
+                    if (label[:5] == 'stats') or (label == 'times'):
+                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
+                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
+                        
+
+                        if label[:5] == 'stats':
+                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            (self.frames['stats']['STNID'] ,
+                             self.frames['stats']['current_record_chunk'], 
+                             self.frames['stats']['current_record_index']) = \
+                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                        # elif label[:5] == 'stats':
+                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
+                        #     (self.frames['stats']['STNID'] ,
+                        #      self.frames['stats']['current_record_chunk'], 
+                        #      self.frames['stats']['current_record_index']) = \
+                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+
+
+                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+                        
+                        # # TO TEST: should be removed, since it's is also done just below
+                        # self.frames['stats']['stations_iterator'] = \
+                        #     self.frames['worldmap']['stations_iterator'] 
+                
+                
+                        # self.goto_datetime_worldmap(
+                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
+                        #     'after')
+
+
+                        # scrolling to the right station
+                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                        EOF = False
+                        found = False
+                        while (not found and not EOF):
+                            if (STNID == self.frames['stats']['STNID']):
+                                   found = True 
+                            if not found:
+                                try:
+                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                                except (StopIteration):
+                                    EOF = True
+                        if found:
+                        #    self.frames['stats']['STNID'] = STNID
+                            self.frames['stats']['current_station'] =  station
+
+                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
+                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
+
+
+                        # generate index of the current station
+                        self.frames['stats']['records_current_station_index'] = \
+                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                             == self.frames['stats']['STNID'])
+
+
+                        tab_suffixes = \
+                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            self.frames['stats']['records_current_station'+tab_suffix] = \
+                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+
+                        # go to hovered record of current station
+                        self.frames['stats']['records_iterator'] = \
+                                        records_iterator(self.frames['stats']['records_current_station_mod'])
+
+
+                        # ... and go to the record of the profile window (last one that
+                        # was picked by the user)
+                        found = False
+                        EOF = False
+                        while (not found) and (not EOF):
+                            try:
+                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                                #print("hello*")
+                                #print(self.frames['profiles']['current_record_index'])
+                                if (index == self.frames['stats']['current_record_index']) and \
+                                   (chunk == self.frames['stats']['current_record_chunk']) and \
+                                   (STNID == self.frames['stats']['STNID']):
+                                    #print('found!')
+                                    found = True
+                            except StopIteration:
+                                EOF = True
+                        if found:
+                            #print('h5')
+                            self.frames['stats']['current_record_mod'] = record
+                            self.frames['stats']['current_record_chunk'] = chunk
+                            self.frames['stats']['current_record_index'] = index
+
+                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
+                        tab_suffixes = \
+                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            #print(tab_suffix)
+                            #print(self.frames['stats']['records_current_station'+tab_suffix])
+                            self.frames['stats']['current_record'+tab_suffix] =  \
+                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                      (self.frames['stats']['STNID'] , \
+                                       self.frames['stats']['current_record_chunk'] , \
+                                       self.frames['stats']['current_record_index'])]
+
+
+                        self.hover_active = True
+                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                        # print('h13')
+                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
+                        #     self.goto_datetime_worldmap(
+                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                        #         'after')
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap',
+                        #                                           'profiles'])
+                        # else:
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap_stations',
+                        #                                           'profiles'])
+
+
+
+                    elif label in ['worldmap_stations','worldmap']:
+                        #print('h5')
+
+                        if (self.axes['worldmap'].lat is not None) and \
+                           (self.axes['worldmap'].lon is not None):
+
+
+                            #self.loading()
+                            self.fig.canvas.draw()
+                            self.fig.show()
+
+
+                            # get position of 
+                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
+                                                                 self.axes['worldmap'].lat[0]) + \
+                                           self.axes['worldmap'].lat[0],4)
+                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
+                                                                 self.axes['worldmap'].lon[0]) + \
+                                           self.axes['worldmap'].lon[0],4)
+                        
+                            stations = self.frames['worldmap']['stations'].table
+                            #print('h7')
+                        
+                            #reset stations iterator:
+                            # if 'stations_iterator' in self.frames['worldmap'].keys():
+                            #     self.frames['worldmap']['stations_iterator'].close()
+                            #     del(self.frames['worldmap']['stations_iterator'])
+                            # if 'stations_iterator' in self.frames['stats'].keys():
+                            #     self.frames['stats']['stations_iterator'].close()
+                            #     del(self.frames['stats']['stations_iterator'])
+                            self.frames['worldmap']['stations_iterator'] =\
+                               stations_iterator(self.frames['worldmap']['stations'])
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                            EOF = False
+                            found = False
+                            while (not found and not EOF):
+                                #print('h8',station.latitude,latmap)
+                                #print('h8',station.longitude,lonmap)
+                                if (round(station.latitude,3) == round(latmap,3)) and \
+                                    (round(station.longitude,3) == round(lonmap,3)):
+                                       found = True 
+                                if not found:
+                                    try:
+                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                                    except (StopIteration):
+                                        EOF = True
+                            if found:
+                                self.frames['worldmap']['STNID'] = STNID
+                                self.frames['worldmap']['current_station'] = \
+                                        station
+                        
+                            self.frames['stats']['stations_iterator'] = \
+                                self.frames['worldmap']['stations_iterator'] 
+                            #print('h8')
+                            # inherit station position for the stats frame...
+                            for key in self.frames['worldmap'].keys():
+                                self.frames['stats'][key] = self.frames['worldmap'][key]
+                                
+                            ## fetch records of current station...
+                            #self.frames['stats']['records_current_station_mod'] =\
+                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                            # ... and their indices
+                            self.frames['stats']['records_current_station_index'] = \
+                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                                     == \
+                                     self.frames['stats']['current_station'].name)
+
+
+                            tab_suffixes = \
+                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['records_current_station'+tab_suffix] = \
+                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+                            # ... create a record iterator ...
+                            #self.frames['stats']['records_iterator'].close()
+                            del(self.frames['stats']['records_iterator'])
+                            self.frames['stats']['records_iterator'] = \
+                                self.frames['stats']['records_current_station_mod'].iterrows()
+
+
+
+                        
+                            #print('h9')
+                            # ... and go to to the first record of the current station
+                            (self.frames['stats']['STNID'] , \
+                             self.frames['stats']['current_record_chunk'] , \
+                             self.frames['stats']['current_record_index']) , \
+                            self.frames['stats']['current_record_mod'] = \
+                                self.frames['stats']['records_iterator'].__next__()
+                        
+
+
+
+                            #print('h10')
+                            # cash the current record
+                            tab_suffixes = \
+                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['current_record'+tab_suffix] =  \
+                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                          (self.frames['stats']['STNID'] , \
+                                           self.frames['stats']['current_record_chunk'] , \
+                                           self.frames['stats']['current_record_index'])]
+
+                            #print('h11')
+                            
+                            self.hover_active = True
+                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                            #print('h13')
+
+                        
+
+            #if (stations is not None):
+            #    for iSTN,STN in stations.iterrows():
+            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
+            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
+
+        # self.fig.show()
+ 
+        # we are hovering on nothing, so we are going back to the position of
+        # the profile sounding
+        if pos is None:
+            if self.hover_active == True:
+                #print('h1*')
+                
+                #self.loading()
+                # to do: reset stations iterators
+
+                # get station and record index from the current profile
+                for key in ['STNID', 'current_station']:
+                    self.frames['stats'][key] = self.frames['profiles'][key]
+
+                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
+                self.frames['stats']['current_station'] = \
+                        self.frames['profiles']['current_station']
+                #print('h3a*')
+                self.frames['stats']['records_current_station_mod'] = \
+                        self.frames['profiles']['records_current_station_mod']
+                #print('h3b*')
+
+                # the next lines recreate the records iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+
+                # reset stations iterator...
+                #self.frames['stats']['records_iterator'].close()
+                del(self.frames['stats']['records_iterator'])
+                self.frames['stats']['records_iterator'] = \
+                    self.frames['stats']['records_current_station_mod'].iterrows()
+                #print('h4*')
+
+                # ... and go to the record of the profile window (last one that
+                # was picked by the user)
+                found = False
+                EOF = False
+                while (not found) and (not EOF):
+                    try:
+                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                        #print("hello*")
+                        #print(self.frames['profiles']['current_record_index'])
+                        #print(self.frames['profiles']['STNID'])
+                        #print(STNID,index)
+                        if (index == self.frames['profiles']['current_record_index']) and \
+                            (chunk == self.frames['profiles']['current_record_chunk']) and \
+                            (STNID == self.frames['profiles']['STNID']):
+                            #print('found!')
+                            found = True
+                    except StopIteration:
+                        EOF = True
+                if found:
+                    #print('h5*')
+                    self.frames['stats']['current_record_mod'] = record
+                    self.frames['stats']['current_record_chunk'] = chunk
+                    self.frames['stats']['current_record_index'] = index
+
+                #print('h6*')
+
+
+
+                # # fetch records of current station...
+                # self.frames['stats']['records_current_station_mod'] =\
+                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                # ... and their indices
+                self.frames['stats']['records_current_station_index'] = \
+                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                         == \
+                         self.frames['stats']['current_station'].name)
+
+
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['records_current_station'+tab_suffix] = \
+                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+                
+
+                # cash the records of the current stations
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['current_record'+tab_suffix] =  \
+                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                              (self.frames['stats']['STNID'] , \
+                               self.frames['stats']['current_record_chunk'] , \
+                               self.frames['stats']['current_record_index'])]
+
+
+                # the next lines recreate the stations iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+                #print('h7*')
+
+                # reset the stations iterators
+                for framekey in ['stats','worldmap']:
+                    ##print(framekey)
+                    if 'stations_iterator' in self.frames[framekey]:
+                        #self.frames[framekey]['stations_iterator'].close()
+                        del(self.frames[framekey]['stations_iterator'])
+
+                self.frames['worldmap']['current_station'] = \
+                        self.frames['profiles']['current_station']
+
+                #recreate the stations iterator for the worldmap...
+                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+
+                # ... and go the position of the profile
+                #print('h8*')
+                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                EOF = False
+                found = False
+                while (not found and not EOF):
+                    if STNID == self.frames['profiles']['STNID'] :
+                        found = True 
+                    if not found:
+                        try:
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                        except (StopIteration):
+                            EOF = True
+                if found:
+                    self.frames['worldmap']['current_station'] = station
+                    self.frames['worldmap']['STNID'] = STNID
+                #print('h9*')
+                self.frames['stats']['stations_iterator'] = \
+                    self.frames['worldmap']['stations_iterator'] 
+
+                # the stats window now inherits the current station from the
+                # worldmap
+                for key in ['STNID','current_station','stations_iterator']: 
+                    self.frames['stats'][key] = self.frames['worldmap'][key] 
+                #print('h10*')
+
+                # # we now only need inherit station position and go to first record
+                # for key in self.frames['worldmap'].keys():
+                #     self.frames['stats'][key] = self.frames['worldmap'][key]
+
+                # self.frames['stats']['records_current_station'] =\
+                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
+
+                # #print(self.frames['stats']['records_current_station'])
+                # self.frames['stats']['records_iterator'] = \
+                #                 self.frames['stats']['records_current_station'].iterrows()
+                # (self.frames['stats']['STNID'] , \
+                # self.frames['stats']['current_record_index']) , \
+                # self.frames['stats']['current_record_mod'] = \
+                #                 self.frames['stats']['records_iterator'].__next__()
+                
+
+
+
+
+
+
+                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
+                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
+                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
+                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+                self.hover_active = False
+                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
+    # def loading(self):
+    #     self.tbox['loading'].set_text('Loading...')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+    #     sleep(0.1)
+    # def ready(self):
+    #     self.tbox['loading'].set_text('Ready')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+
+
+
diff --git a/build/lib/lib/model.py b/build/lib/lib/model.py
new file mode 100644
index 0000000..8760411
--- /dev/null
+++ b/build/lib/lib/model.py
@@ -0,0 +1,2214 @@
+# 
+# CLASS
+# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
+# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
+# Copyright (c) 2011-2015 Chiel van Heerwaarden
+# Copyright (c) 2011-2015 Bart van Stratum
+# Copyright (c) 2011-2015 Kees van den Dries
+# 
+# This file is part of CLASS
+# 
+# CLASS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# 
+# CLASS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with CLASS.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import copy as cp
+import numpy as np
+import sys
+import warnings
+import pandas as pd
+from ribtol_hw import zeta_hs2 , funcsche
+import logging
+#from SkewT.thermodynamics import Density
+#import ribtol
+
grav = 9.81  # gravitational acceleration [m s-2]
def esat(T):
    """Return the saturation vapour pressure [Pa] at temperature T [K].

    Tetens-type exponential formula; T is in kelvin and the reference
    point is the triple point (273.16 K), where the result is 611 Pa.
    """
    excess = T - 273.16
    return 0.611e3 * np.exp(17.2694 * excess / (T - 35.86))
+
def qsat(T, p):
    """Return the saturation specific humidity [kg kg-1].

    Computed from the saturation vapour pressure at temperature T [K]
    and the ambient pressure p [Pa], with 0.622 the ratio of the gas
    constants of dry air and water vapour.
    """
    e_s = esat(T)
    return 0.622 * e_s / p
+
+
def ribtol(Rib, zsl, z0m, z0h):
    """Invert the bulk Richardson number to an Obukhov length L [m].

    Solves Rib = rib_of_L(L) with a Newton iteration whose derivative is
    approximated by a central finite difference over +/-0.1% of L.

    Parameters: Rib  bulk Richardson number [-]; zsl surface-layer height
    [m]; z0m / z0h roughness lengths for momentum / heat [m].
    Returns L, positive for stable (Rib > 0) and negative for unstable
    conditions; the iteration aborts once |L| exceeds 1e12 (near-neutral).
    """
    Rib = np.float64(Rib)
    zsl = np.float64(zsl)
    z0m = np.float64(z0m)
    z0h = np.float64(z0h)

    def rib_of_L(Lval):
        # bulk Richardson number implied by a candidate Obukhov length
        return zsl / Lval * (np.log(zsl / z0h) - psih(zsl / Lval) + psih(z0h / Lval)) / (np.log(zsl / z0m) - psim(zsl / Lval) + psim(z0m / Lval))**2.

    # start on the correct side of neutrality so the iteration converges
    # toward a root with the right sign
    if Rib > 0.:
        L, L0 = 1., 2.
    else:
        L, L0 = -1., -2.

    while abs(L - L0) > 0.001:
        L0 = L
        fx = Rib - rib_of_L(L)
        Lstart = L - 0.001 * L
        Lend = L + 0.001 * L
        # finite-difference slope of -rib_of_L between Lstart and Lend
        fxdif = (rib_of_L(Lend) - rib_of_L(Lstart)) / (Lstart - Lend)
        L = L - fx / fxdif
        if abs(L) > 1e12:
            break

    return L
+  
def psim(zeta):
    """Integrated stability correction for momentum as a function of
    the dimensionless stability parameter zeta = z/L.

    zeta <= 0 uses the classic unstable-side expression; zeta > 0 uses
    the stable-side exponential form.
    """
    if zeta > 0:
        # stable branch
        return -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
    # unstable (and neutral) branch
    x = (1. - 16. * zeta)**(0.25)
    return 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+  
def psih(zeta):
    """Integrated stability correction for heat as a function of the
    dimensionless stability parameter zeta = z/L.

    zeta <= 0 uses the classic unstable-side expression; zeta > 0 uses
    the stable-side exponential form.
    """
    if zeta > 0:
        # stable branch
        return -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
    # unstable (and neutral) branch
    x = (1. - 16. * zeta)**(0.25)
    return 2. * np.log( (1. + x*x) / 2.)
+ 
+class model:
+    def __init__(self, model_input = None,debug_level=None):
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        self.logger = logging.getLogger('model')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        """ initialize the different components of the model """ 
+
+        if model_input is not None:
+            # class4gl style input
+            if 'pars' in model_input.__dict__.keys():
+
+                # we make a reference to the full input first, so we can dump it
+                # afterwards
+                self.input_c4gl = model_input
+
+                # we copy the regular parameters first. We keep the classical input
+                # format as self.input so that we don't have to change the entire
+                # model code.
+                self.input = cp.deepcopy(model_input.pars)
+
+                # we copy other sections we are interested in, such as profile
+                # data, and store it also under input
+
+                # I know we mess up a bit the structure of the class4gl_input, but
+                # we will make it clean again at the time of dumping data
+
+                # So here, we copy the profile data into self.input
+                # 1. Air circulation data 
+                if 'sw_ac' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ac']:
+                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
+                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
+
+                    # correct pressure of levels according to surface pressure
+                    # error (so that interpolation is done in a consistent way)
+
+                    p_e = self.input.Ps - self.input.sp
+                    for irow in self.input.air_ac.index[::-1]:
+                       self.input.air_ac.p.iloc[irow] =\
+                        self.input.air_ac.p.iloc[irow] + p_e
+                       p_e = p_e -\
+                       (self.input.air_ac.p.iloc[irow]+p_e)/\
+                        self.input.air_ac.p.iloc[irow] *\
+                        self.input.air_ac.delpdgrav.iloc[irow]*grav
+
+
+
+                # 2. Air circulation data 
+                if 'sw_ap' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ap']:
+                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
+
+            # standard class input
+            else:
+                self.input = cp.deepcopy(model_input)
+
+    def load_yaml_dict(self,yaml_dict):
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                for keydata,value in data.items():
+                    self.__dict__[keydata] = value
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            #elif key == 'sources':
+            #    self.__dict__[key] = data
+            elif key == 'out':
+                # lets convert it to a list of dictionaries
+                dictouttemp = pd.DataFrame(data).to_dict('list')
+            else: 
+                 warnings.warn("Key '"+key+"' is be implemented.")
+            #     self.__dict__[key] = data
+
+
+        self.tsteps = len(dictouttemp['h'])
+        self.out = model_output(self.tsteps)
+        for keydictouttemp in dictouttemp.keys():
+            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+
+
+  
+    def run(self):
+        # initialize model variables
+        self.init()
+  
+        # time integrate model 
+        #for self.t in range(self.tsteps):
+        while self.t < self.tsteps:
+          
+            # time integrate components
+            self.timestep()
+  
+        # delete unnecessary variables from memory
+        self.exitmodel()
+    
+    def init(self):
+        # assign variables from input data
+        # initialize constants
+        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
+        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        self.rho        = 1.2                   # density of air [kg m-3]
+        self.k          = 0.4                   # Von Karman constant [-]
+        self.g          = 9.81                  # gravity acceleration [m s-2]
+        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+        self.bolz       = 5.67e-8               # Bolzman constant [-]
+        self.rhow       = 1000.                 # density of water [kg m-3]
+        self.S0         = 1368.                 # solar constant [W m-2]
+
+        # A-Gs constants and settings
+        # Plant type:       -C3-     -C4-
+        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
+        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
+        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
+        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
+        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
+        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
+        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
+        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
+        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
+        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
+        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
+
+        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
+        self.mair       =  28.9;                # molecular weight air [g mol -1]
+        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
+
+        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
+        self.wmax       =  0.55;                # upper reference value soil water [-]
+        self.wmin       =  0.005;               # lower reference value soil water [-]
+        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
+        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
+
+        # Read switches
+        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
+        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
+        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
+        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
+        self.sw_sl      = self.input.sw_sl      # surface layer switch
+        self.sw_rad     = self.input.sw_rad     # radiation switch
+        self.sw_ls      = self.input.sw_ls      # land surface switch
+        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
+        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
+
+        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
+        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
+        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+  
+        # initialize mixed-layer
+        self.h          = self.input.h          # initial ABL height [m]
+        self.Ps         = self.input.Ps         # surface pressure [Pa]
+        self.sp         = self.input.sp         # This is also surface pressure
+                                                #but derived from the global data [Pa]
+        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
+        self.ws         = None                  # large-scale vertical velocity [m s-1]
+        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
+        self.we         = -1.                   # entrainment velocity [m s-1]
+       
+         # Temperature 
+        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
+        
+        
+        self.substep    = False
+        self.substeps   = 0
+
+
+
+        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
+        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
+        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
+        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
+        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
+ 
+        self.wstar      = 0.                    # convective velocity scale [m s-1]
+ 
+        # 2m diagnostic variables 
+        self.T2m        = None                  # 2m temperature [K]
+        self.q2m        = None                  # 2m specific humidity [kg kg-1]
+        self.e2m        = None                  # 2m vapor pressure [Pa]
+        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
+        self.u2m        = None                  # 2m u-wind [m s-1]
+        self.v2m        = None                  # 2m v-wind [m s-1]
+ 
+        # Surface variables 
+        self.thetasurf  = self.input.theta      # surface potential temperature [K]
+        self.thetavsurf = None                  # surface virtual potential temperature [K]
+        self.qsurf      = None                  # surface specific humidity [g kg-1]
+
+        # Mixed-layer top variables
+        self.P_h        = None                  # Mixed-layer top pressure [pa]
+        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
+        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
+        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
+        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
+        self.dz_h       = None                  # Transition layer thickness [-]
+        self.lcl        = None                  # Lifting condensation level [m]
+
+        # Virtual temperatures and fluxes
+        self.thetav     = None                  # initial mixed-layer potential temperature [K]
+        self.dthetav    = None                  # initial virtual temperature jump at h [K]
+        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
+        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
+       
+        
+        
+        
+        
+        
+        # Moisture 
+        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
+
+        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
+        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
+        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
+  
+        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
+        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
+        self.e          = None                  # mixed-layer vapor pressure [Pa]
+        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
+        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
+      
+        
+        
+        # CO2
+        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
+        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
+        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
+        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
+        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
+        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
+        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
+        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
+        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
+       
+        # Wind 
+        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
+        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
+        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = self.input.advu       # advection of u-wind [m s-2]
+        
+        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
+        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
+        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = self.input.advv       # advection of v-wind [m s-2]
+         
+  # BEGIN -- HW 20170606
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        if self.sw_ac:
+        # this is the data frame with the grided profile on the L60 grid
+        # (subsidence, and advection) 
+            self.air_ac      = self.input.air_ac  # full level air circulation
+                                                  # forcing
+            # self.air_ach     = self.input.air_ach # half level air circulation
+            #                                       # forcing
+            
+
+        if self.sw_ap:
+        # this is the data frame with the fitted profile (including HAGL,
+        # THTA,WSPD, SNDU,WNDV PRES ...)
+            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
+
+            # just for legacy reasons...
+            if 'z' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
+            if 'p' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
+
+            indexh = np.where(self.air_ap.z.values == self.h)
+            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
+                raise ValueError("Error input profile consistency: mixed- \
+                                 layer height needs to be equal to the second \
+                                 and third \
+                                 level of the vertical profile input!")
+            # initialize q from its profile when available
+            p_old = self.Ps
+            p_new = self.air_ap.p[indexh[0][0]]
+            
+            if ((p_old is not None) & (p_old != p_new)):
+                print("Warning: Ps input was provided ("+str(p_old)+\
+                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+                    +str(p_new)+"Pa).")
+                                    
+            self.Ps = p_new
+            # these variables/namings are more convenient to work with in the code
+            # we will update the original variables afterwards
+            #self.air_ap['q'] = self.air_ap.QABS/1000.
+
+            self.air_ap = \
+                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
+            # we require the temperature fields, since we need to consider
+            # advection
+            # if self.sw_ac:
+            #     #self.air_ap['theta'] = self.air_ap['t'] *
+
+            #     # we consider self.sp in case of air-circulation input (for
+            #     # consistence)
+            #     self.air_ap['t'] = \
+            #                 self.air_ap.theta *  \
+            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
+            # else:
+            # we consider self.Ps in case of balloon input only 
+            self.air_ap = self.air_ap.assign(t = lambda x: \
+                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
+
+            #self.air_ap['theta'] = self.air_ap.THTA
+            if 'u' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
+            if 'v' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
+
+            for var in ['theta','q','u','v']:
+
+                
+                if self.air_ap[var][1] != self.air_ap[var][0]:
+                    raise ValueError("Error input profile consistency: two \
+                                     lowest profile levels for "+var+" should \
+                                     be equal.")
+                
+                # initialize the value from its profile when available
+                value_old = self.__dict__[var]
+                value_new = self.air_ap[var][indexh[0][0]]
+                
+                if ((value_old is not None) & (value_old != value_new)):
+                    warnings.warn("Warning:  input was provided \
+                                     ("+str(value_old)+ "kg kg-1), \
+                                     but it is now overwritten by the first \
+                                     level (index 0) of air_ap]var\ which is \
+                                     different (" +str(value_new)+"K).")
+                                        
+                self.__dict__[var] = value_new
+
+                # make a profile of the stratification 
+                # please note that the stratification between z_pro[i] and
+                # z_pro[i+1] is given by air_ap.GTHT[i]
+
+                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
+                # np.gradient(self.z_pro)
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
+
+
+                self.__dict__['gamma'+var] = \
+                    self.air_ap['gamma'+var][np.where(self.h >= \
+                                                     self.air_ap.z)[0][-1]]
+
+
+
+        # the variable p_pro is just for diagnosis of lifted index
+            
+            
+
+            # input Ph is wrong, so we correct it according to hydrostatic equation
+            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+
+            #if self.sw_ac:
+                # note that we use sp as surface pressure, which is determined
+                # from era-interim instead of the observations. This is to
+                # avoid possible failure of the interpolation routine
+                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
+                #                          + \
+                #                          list(self.air_ap.p[3:]))
+
+            # else:
+                # in the other case, it is updated at the time of calculting
+                # the statistics 
+
+# END -- HW 20170606      
+        #print(self.air_ap)
+
+        if self.sw_ac and not self.sw_ap:
+            raise ValueError("air circulation switch only possible when air \
+                             profiles are given")
+        
+        if self.sw_ac:
+
+            # # # we comment this out, because subsidence is calculated
+            # according to advection
+            # #interpolate subsidence towards the air_ap height coordinate
+            # self.air_ap['w'] = np.interp(self.air_ap.p,\
+            #                               self.air_ac.p,\
+            #                               self.air_ac.w) 
+            # #subsidence at the mixed-layer top
+            # self.w = self.air_ap.w[1]
+        
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+                # in case we didn't find any points, we just take the lowest one.
+                # actually, this can happen if ERA-INTERIM pressure levels are
+                # inconsistent with 
+                if in_ml.sum() == 0:
+                    warnings.warn(" no circulation points in the mixed layer \
+                                  found. We just take the bottom one.")
+                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+
+                for var in ['t','q','u','v']:
+    
+                   # calculation of the advection variables for the mixed layer
+                   # we weight by the hydrostatic thickness of each layer and
+                   # divide by the total thickness
+                   self.__dict__['adv'+var] = \
+                            ((self.air_ac['adv'+var+'_x'][in_ml] \
+                             + \
+                             self.air_ac['adv'+var+'_y'][in_ml])* \
+                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                            self.air_ac['delpdgrav'][in_ml].sum()
+
+                   # calculation of the advection variables for the profile above
+                   # (lowest 3 values are not used by class)
+                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
+                   self.air_ap['adv'+var] = \
+                           np.interp(self.air_ap.p,\
+                                     self.air_ac.p,\
+                                     self.air_ac['adv'+var+'_x']) \
+                           + \
+                           np.interp(self.air_ap.p, \
+                                       self.air_ac.p, \
+                                       self.air_ac['adv'+var+'_y'])
+
+                # as an approximation, we consider that advection of theta in the
+                # mixed layer is equal to advection of t. This is a sufficient
+                # approximation since theta and t are very similar at the surface
+                # pressure.
+                self.__dict__['advtheta'] = self.__dict__['advt']
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # self.wrho = np.interp(self.P_h,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) 
+            # self.ws   = self.air_ap.w.iloc[1]
+
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                self.air_ap = self.air_ap.assign(wp = 0.)
+                self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                              self.air_ac.p, \
+                                              self.air_ac['wp'])
+                self.air_ap = self.air_ap.assign(R = 0.)
+                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                     self.Rv*self.air_ap.q)
+                self.air_ap = self.air_ap.assign(rho = 0.)
+                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+                
+                self.air_ap = self.air_ap.assign(w = 0.)
+                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+                #print('hello w ini')
+
+                # Note: in case of sw_ac is False, we update it from prescribed
+                # divergence
+                self.ws   = self.air_ap.w[1]
+
+                # self.ws   = self.wrho/self.rho
+                # self.ws   = self.wrho/(self.P_h/ \
+                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
+                #                         self.theta) # this should be T!!!
+
+                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+                #                         + \
+                #                         self.air_ac['divU_y'][in_ml])* \
+                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                #             self.air_ac['delpdgrav'][in_ml].sum() \
+        
+
+        # Tendencies 
+        self.htend      = None                  # tendency of CBL [m s-1]
+        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
+        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
+        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
+        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
+        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
+        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
+        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
+        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
+        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
+        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
+        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
+  
+        # initialize surface layer
+        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
+        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
+        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
+        self.z0m        = self.input.z0m        # roughness length for momentum [m]
+        self.z0h        = self.input.z0h        # roughness length for scalars [m]
+        self.Cm         = 1e12                  # drag coefficient for momentum [-]
+        self.Cs         = 1e12                  # drag coefficient for scalars [-]
+        self.L          = None                  # Obukhov length [m]
+        self.Rib        = None                  # bulk Richardson number [-]
+        self.ra         = None                  # aerodynamic resistance [s m-1]
+  
+        # initialize radiation
+        self.lat        = self.input.lat        # latitude [deg]
+        #self.fc         = self.input.fc         # coriolis parameter [s-1]
+        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
+        self.lon        = self.input.lon        # longitude [deg]
+        self.doy        = self.input.doy        # day of the year [-]
+        self.tstart     = self.input.tstart     # time of the day [-]
+        self.cc         = self.input.cc         # cloud cover fraction [-]
+        self.Swin       = None                  # incoming short wave radiation [W m-2]
+        self.Swout      = None                  # outgoing short wave radiation [W m-2]
+        self.Lwin       = None                  # incoming long wave radiation [W m-2]
+        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
+        self.Q          = self.input.Q          # net radiation [W m-2]
+        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
+  
+        # initialize land surface
+        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
+        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
+        self.T2         = self.input.T2         # temperature deeper soil layer [K]
+                           
+        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
+        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
+        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
+        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
+                           
+        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
+        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
+        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
+                           
+        self.C1sat      = self.input.C1sat      
+        self.C2ref      = self.input.C2ref      
+
+        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = self.input.LAI        # leaf area index [-]
+        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
+        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = self.input.alpha      # surface albedo [-]
+  
+        self.rs         = 1.e6                  # resistance transpiration [s m-1]
+        self.rssoil     = 1.e6                  # resistance soil [s m-1]
+                           
+        self.Ts         = self.input.Ts         # surface temperature [K]
+                           
+        self.cveg       = self.input.cveg       # vegetation fraction [-]
+        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
+        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
+        self.cliq       = None                  # wet fraction [-]
+                          
+        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
+  
+        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
+        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
+        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
+  
+        self.H          = None                  # sensible heat flux [W m-2]
+        self.LE         = None                  # evapotranspiration [W m-2]
+        self.LEliq      = None                  # open water evaporation [W m-2]
+        self.LEveg      = None                  # transpiration [W m-2]
+        self.LEsoil     = None                  # soil evaporation [W m-2]
+        self.LEpot      = None                  # potential evaporation [W m-2]
+        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
+        self.G          = None                  # ground heat flux [W m-2]
+
+        # initialize A-Gs surface scheme
+        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
+
+        # initialize cumulus parameterization
+        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
+        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
+        self.ac         = 0.                    # Cloud core fraction [-]
+        self.M          = 0.                    # Cloud core mass flux [m s-1] 
+        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
+  
+        # initialize time variables
+        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
+        self.dt     = self.input.dt
+        self.dtcur      = self.dt
+        self.firsttime = True
+        self.t      = 0
+ 
+        # Some sanity checks for valid input
+        if (self.c_beta is None): 
+            self.c_beta = 0                     # Zero curvature; linear response
+        assert(self.c_beta >= 0 or self.c_beta <= 1)
+
+        # initialize output
+        self.out = model_output(self.tsteps)
+ 
+        self.statistics()
+  
+        # calculate initial diagnostic variables
+        if(self.sw_rad):
+            self.run_radiation()
+ 
+        if(self.sw_sl):
+            for i in range(10): 
+                self.run_surface_layer()
+  
+        if(self.sw_ls):
+            self.run_land_surface()
+
+        if(self.sw_cu):
+            self.run_mixed_layer()
+            self.run_cumulus()
+        
+        if(self.sw_ml):
+            self.run_mixed_layer()
+
+    def timestep(self):
+        """Advance the model by one (sub-)timestep.
+
+        Runs the diagnostic/physics schemes in order (statistics,
+        radiation, surface layer, land surface, cumulus, mixed layer),
+        then — when an external vertical profile is used (sw_ap) —
+        limits the timestep so the mixed-layer top h cannot overshoot
+        the next profile level, and finally time-integrates the
+        prognostic equations.  If the requested step self.dtcur exceeds
+        the limit self.dtmax, the step is split and the remainder is
+        carried into a sub-step (self.substep).
+        """
+
+        # upper bound on the current (sub-)timestep; tightened below
+        self.dtmax = +np.inf
+        self.logger.debug('before stats') 
+        self.statistics()
+
+        # run radiation model
+        self.logger.debug('before rad') 
+        if(self.sw_rad):
+            self.run_radiation()
+  
+        # run surface layer model
+        if(self.sw_sl):
+            self.logger.debug('before surface layer') 
+            self.run_surface_layer()
+        
+        # run land surface model
+        if(self.sw_ls):
+            self.logger.debug('before land surface') 
+            self.run_land_surface()
+ 
+        # run cumulus parameterization
+        if(self.sw_cu):
+            self.logger.debug('before cumulus') 
+            self.run_cumulus()
+   
+        self.logger.debug('before mixed layer') 
+        # run mixed-layer model
+        if(self.sw_ml):
+            self.run_mixed_layer()
+        self.logger.debug('after mixed layer') 
+ 
+        #get first profile data point above mixed layer
+        if self.sw_ap:
+            # index of the first profile level strictly above current h;
+            # assumes such a level exists (profile extends above h) —
+            # otherwise this raises IndexError. TODO confirm upstream guard.
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                # here we correct for the fact that the upper profile also
+                # shifts in the vertical.
+
+                # growth of h relative to the (subsiding) profile level;
+                # only a positive relative growth can cross the level
+                diffhtend = self.htend - self.air_ap.w[zidx_first]
+                if diffhtend > 0:
+                    # time at which h would exactly reach the next level
+                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            else:
+                if self.htend > 0:
+                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            #print(self.h,zidx_first,self.ws,self.air_ap.z)
+
+        
+        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
+        self.logger.debug('before store') 
+        # split the step: integrate only up to dtmax now, keep the rest
+        self.substep =  (self.dtcur > self.dtmax)
+        if self.substep:
+            dtnext = self.dtcur - self.dtmax
+            self.dtcur = self.dtmax
+
+        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
+
+        # HW: this will be done multiple times in case of a substep is needed
+        # store output before time integration
+        # (firsttime is True only at the start of a full output step self.dt,
+        # so output keeps exactly one record per dt regardless of sub-steps)
+        if self.firsttime:
+            self.store()
+  
+        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
+        # time integrate land surface model
+        if(self.sw_ls):
+            self.integrate_land_surface()
+        self.logger.debug('before integrate mixed layer') 
+        # time integrate mixed-layer model
+        if(self.sw_ml):
+            self.integrate_mixed_layer() 
+        self.logger.debug('after integrate mixed layer') 
+        if self.substep:
+            # remainder of the step is processed in the next call
+            self.dtcur = dtnext
+            self.firsttime = False
+            # NOTE(review): assumes self.substeps was initialized elsewhere
+            # (e.g. in __init__) before the first sub-step — TODO confirm
+            self.substeps += 1
+        else:
+            # full step completed: reset the sub-step bookkeeping
+            self.dtcur = self.dt
+            self.t += 1 
+            self.firsttime = True
+            self.substeps = 0
+        self.logger.debug('going to next step')
+        
+        
+  
+  
+    def statistics(self):
+        """Compute diagnostic mixed-layer quantities.
+
+        Computes virtual (potential) temperatures and their fluxes/jumps,
+        the pressure, temperature and relative humidity at the mixed-layer
+        top, and starts an iterative search for the lifting condensation
+        level (LCL).
+
+        NOTE(review): this routine appears truncated in the patch — see the
+        garbled ``while`` line below; the trailing statements (q2_h, ac, M,
+        wqM) look like they belong to a cumulus scheme rather than here.
+        Verify against the upstream source before relying on this block.
+        """
+        # Calculate virtual temperatures 
+        # (0.61 is the standard moisture correction factor for Tv)
+        self.thetav   = self.theta  + 0.61 * self.theta * self.q
+        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
+        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
+        # Mixed-layer top properties
+        # hydrostatic pressure at h, using a single (surface) density rho
+        self.P_h    = self.Ps - self.rho * self.g * self.h
+        # else:
+            # in the other case, it is updated at the time that the profile is
+            # updated (and at the initialization
+
+        # dry-adiabatic temperature at h
+        self.T_h    = self.theta - self.g/self.cp * self.h
+
+        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
+        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
+
+        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
+
+        # Find lifting condensation level iteratively
+        if(self.t == 0):
+            # first step: start the LCL search from the mixed-layer height
+            self.lcl = self.h
+            RHlcl = 0.5
+        else:
+            RHlcl = 0.9998 
+
+        itmax = 30
+        it = 0
+        # NOTE(review): the next line is garbled — presumably it originally
+        # read "... and it<itmax):" followed by the LCL iteration body;
+        # content appears to be missing from the patch at this point.
+        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0):
+            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
+            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
+        else:
+            self.q2_h   = 0.
+            self.CO22_h = 0.
+
+        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
+        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
+        self.M      = self.ac * self.wstar
+        self.wqM    = self.M * self.q2_h**0.5
+
+        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
+        if(self.dCO2 < 0):
+            self.wCO2M  = self.M * self.CO22_h**0.5
+        else:
+            self.wCO2M  = 0.
+
+    def run_mixed_layer(self):
+        """Compute the mixed-layer (slab) tendencies for one timestep.
+
+        Determines subsidence, entrainment velocity and entrainment fluxes,
+        then fills in the tendencies of h, theta, q, CO2, the winds and
+        their inversion jumps, and the transition-layer thickness.
+
+        Entrainment can be temporarily disabled (l_entrainment False) when
+        the potential-temperature jump at h would fall below its 0.1 K
+        lower limit; in that case a simplified formulation is used
+        (per the comment below, Wouters et al. 2013, section 2.2.1).
+        """
+        if(not self.sw_sl):
+            # decompose ustar along the wind components
+            # (no surface-layer scheme: derive uw/vw directly from ustar)
+            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
+            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
+
+
+
+        # calculate large-scale vertical velocity (subsidence)
+        # from the prescribed divergence, unless the air-circulation
+        # forcing already supplies the subsidence ('w' in sw_ac)
+        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
+            self.ws = -self.divU * self.h
+        # else:
+        #     in case the air circulation switch is turned on, subsidence is
+        #     calculated from the circulate profile at the initialization and
+        #     in the integrate_mixed_layer routine
+              
+        # calculate compensation to fix the free troposphere in case of subsidence 
+        if(self.sw_fixft):
+            w_th_ft  = self.gammatheta * self.ws
+            w_q_ft   = self.gammaq     * self.ws
+            w_CO2_ft = self.gammaCO2   * self.ws 
+        else:
+            w_th_ft  = 0.
+            w_q_ft   = 0.
+            w_CO2_ft = 0. 
+      
+        # calculate mixed-layer growth due to cloud top radiative divergence
+        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
+       
+        # calculate convective velocity scale w* 
+        if(self.wthetav > 0.):
+            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
+        else:
+            # stable/neutral surface flux: keep a tiny positive w* to avoid
+            # division by zero downstream
+            self.wstar  = 1e-6;
+      
+        # Virtual heat entrainment flux 
+        self.wthetave    = -self.beta * self.wthetav 
+        
+        # compute mixed-layer tendencies
+        if(self.sw_shearwe):
+            # entrainment velocity including a shear (ustar^3) enhancement
+            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
+        else:
+            self.we    = -self.wthetave / self.dthetav
+        # Don't allow boundary layer shrinking if wtheta < 0 
+        if(self.we < 0):
+            self.we = 0.
+
+        # Calculate entrainment fluxes
+        self.wthetae     = -self.we * self.dtheta
+        self.wqe         = -self.we * self.dq
+        self.wCO2e       = -self.we * self.dCO2
+        
+        # preliminary tendencies assuming entrainment is active
+        htend_pre       = self.we + self.ws + self.wf - self.M
+        
+        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+        
+ 
+        #print('thetatend_pre',thetatend_pre)
+        
+        #preliminary boundary-layer top chenage
+        #htend_pre = self.we + self.ws + self.wf - self.M
+        #preliminary change in temperature jump
+        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
+                          thetatend_pre + w_th_ft
+        
+        # jump value the preliminary tendency would yield after a full dt
+        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
+        l_entrainment = True
+
+        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
+            # jump already at its 0.1 K floor and still shrinking:
+            # switch entrainment off for this (sub)timestep
+            l_entrainment = False
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! temperature jump is at the lower limit \
+                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+        elif dtheta_pre < 0.1:
+            # jump would cross the floor within this step: shorten the
+            # timestep so it lands exactly on 0.1 K instead
+            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
+            l_entrainment = True
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          " Warning! Potential temperature jump at mixed- \
+                          layer height would become too low limiting timestep \
+                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+            self.dtmax = min(self.dtmax,dtmax_new)
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "next subtimestep, entrainment will be disabled")
+            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
+
+
+
+        # when entrainment is disabled, we just use the simplified formulation
+        # as in Wouters et al., 2013 (section 2.2.1)
+        # (l_entrainment is a bool used as a 0/1 weight to blend the two
+        # formulations without branching)
+
+        self.dthetatend = l_entrainment*dthetatend_pre + \
+                        (1.-l_entrainment)*0.
+        self.thetatend = l_entrainment*thetatend_pre + \
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+        self.htend = l_entrainment*htend_pre + \
+                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
+        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
+        #stop
+
+
+        # entrainment terms are zeroed via the l_entrainment factor
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+
+
+        # self.qtend = l_entrainment*qtend_pre + \
+        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
+        # self.CO2tend = l_entrainment*CO2tend_pre + \
+        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+
+
+        #     # part of the timestep for which the temperature mixed-layer jump
+        #     # was changing, and for which entrainment took place. For the other
+        #     # part, we don't assume entrainment anymore, and we use the
+        #     # simplified formulation  of Wouters et al., 2013
+
+        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
+        #   
+        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
+        #                      self.dthetatend + w_th_ft) + \
+        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
+        #     self.htend = fac*self.htend + \
+        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
+        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
+        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+
+        # else:
+        #     #self.htend = htend_pre
+        #     self.dthetatend = dthetatend_pre
+        #     self.thetatend = thetatend_pre
+        
+        # tendencies of the moisture and CO2 jumps at h
+        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
+        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
+     
+        # assume u + du = ug, so ug - u = du
+        if(self.sw_wind):
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
+  
+            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
+            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
+        
+        # tendency of the transition layer thickness
+        # (relaxes dz_h towards lcl - h with a 7200 s timescale when cloud
+        # core exists or the LCL is within 300 m of the mixed-layer top)
+        if(self.ac > 0 or self.lcl - self.h < 300):
+            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
+        else:
+            self.dztend = 0.
+
+   
+
+   
    def integrate_mixed_layer(self):
        """Advance the mixed-layer state one (sub-)time step with explicit Euler.

        Integrates h, theta/dtheta, q/dq, CO2/dCO2, the transition-layer
        depth dz_h and (when sw_wind) u/du and v/dv, using the tendencies
        computed beforehand.  When air-circulation forcing is active
        (sw_ac) the free-atmosphere profile table ``air_ap`` is advected
        ('adv') and/or shifted vertically by the pressure-velocity field
        ('w'), and when profile mode is active (sw_ap) the profile is
        re-gridded above the new mixed-layer top and the lapse rates
        (``gamma*``) are re-diagnosed for the next step.

        NOTE(review): assumes the corresponding tendency routine ran first
        in the same step so all ``*tend`` attributes are current — confirm
        against the caller.  Also relies on module-level ``warnings``,
        ``pd`` and ``np`` being imported elsewhere in the file.
        """
        # set values previous time step
        h0      = self.h
        
        theta0  = self.theta
        dtheta0 = self.dtheta
        q0      = self.q
        dq0     = self.dq
        CO20    = self.CO2
        dCO20   = self.dCO2
        
        u0      = self.u
        du0     = self.du
        v0      = self.v
        dv0     = self.dv

        dz0     = self.dz_h
  
        # integrate mixed-layer equations
        
            

# END -- HW 20170606        
        # explicit (forward) Euler step over the current sub-step dtcur
        self.h        = h0      + self.dtcur * self.htend
        # print(self.h,self.htend)
        # stop
        self.theta    = theta0  + self.dtcur * self.thetatend
        #print(dtheta0,self.dtcur,self.dthetatend)
        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
        self.q        = q0      + self.dtcur * self.qtend
        self.dq       = dq0     + self.dtcur * self.dqtend
        self.CO2      = CO20    + self.dtcur * self.CO2tend
        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
        self.dz_h     = dz0     + self.dtcur * self.dztend
            
        # Limit dz to minimal value
        # NOTE(review): dz0 is reused here as the 50 m floor for the
        # transition-layer depth, shadowing the "previous dz_h" meaning above.
        dz0 = 50
        if(self.dz_h < dz0):
            self.dz_h = dz0 
  
        if(self.sw_wind):
            self.u        = u0      + self.dtcur * self.utend
            self.du       = du0     + self.dtcur * self.dutend
            self.v        = v0      + self.dtcur * self.vtend
            self.dv       = dv0     + self.dtcur * self.dvtend

        if (self.sw_ac is not None) and ('adv' in self.sw_ac):

            for var in ['t','q','u','v']:
                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):

            # take into account advection for the whole profile
                
                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]

            var = 'z'
            #print(self.air_ap[var])
                #     print(self.air_ap['adv'+var])




            #moving the profile vertically according to the vertical wind
                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):


            # air_apvarold = pd.Series(np.array(self.air_ap.z))
            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
            # stop


                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)

            #As t is updated, we also need to recalculate theta (and R)
            # specific gas constant of moist air per profile level
            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                                 self.Rv*self.air_ap.q)

            # air_aptheta_old = pd.Series(self.air_ap['theta'])
            # Poisson equation: theta = t * (Ps/p)^(R/cp)
            self.air_ap['theta'] = \
                        self.air_ap.t * \
                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
        if (self.sw_ac is not None) and ('w' in self.sw_ac):
            # first profile level above the (new) mixed-layer top
            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
                                         self.dtcur * self.air_ap.w[zidx_first:]

#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
#            print(self.t, self.dtcur,self.dt,self.htend)

            # # the pressure levels of the profiles are recalculated according to
            # # there new height (after subsidence)
            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
            #         * self.dtcur *  self.air_ap.w[zidx_first:]

            # advance pressure levels with the pressure velocity wp [Pa/s]
            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
                    self.dtcur * self.air_ap.wp[zidx_first:]

            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
        # note that theta and q itself are updatet by class itself

    
        if self.sw_ap:
            # Just for model consistency preservation purposes, we set the
            # theta variables of the mixed-layer to nan values, since the
            # mixed-layer values should overwritte by the mixed-layer
            # calculations of class.
            self.air_ap['theta'][0:3] = np.nan 
            self.air_ap['p'][0:3] = np.nan 
            self.air_ap['q'][0:3] = np.nan 
            self.air_ap['u'][0:3] = np.nan 
            self.air_ap['v'][0:3] = np.nan 
            self.air_ap['t'][0:3] = np.nan 
            self.air_ap['z'][0:3] = np.nan 

            # Update the vertical profiles: 
            #   - new mixed layer properties( h, theta, q ...)
            #   - any data points below the new ixed-layer height are removed

            # Three data points at the bottom that describe the mixed-layer
            # properties
            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
                                           # columns as air_ap
            # air_ap_head['z'].iloc[0] = 2.
            # air_ap_head['z'].iloc[1] = self.__dict__['h']
            # air_ap_head['z'].iloc[2] = self.__dict__['h']
            # row 0: near-surface (2 m); rows 1 and 2: just below/above h
            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
                        [2.,self.__dict__['h'],self.__dict__['h']]
            for var in ['theta','q','u','v']:

                # mixed-layer value twice, then mixed-layer value + jump at h
                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
                        [self.__dict__[var], \
                         self.__dict__[var], \
                         self.__dict__[var] + self.__dict__['d'+var]]
                
            #print(self.air_ap)

            # This is the remaining profile considering the remaining
            # datapoints above the mixed layer height
            air_ap_tail = self.air_ap.iloc[3:]
            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]

            # print('h',self.h)
            # # only select samples monotonically increasing with height
            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
            # air_ap_tail = pd.DataFrame()
            # theta_low = self.theta
            # z_low =     self.h
            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            # for ibottom in range(1,len(air_ap_tail_orig)):
            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)




            # make theta increase strong enough to avoid numerical
            # instability
            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
            air_ap_tail = pd.DataFrame()
            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            theta_low = self.theta
            z_low =     self.h
            ibottom = 0
            itop = 0
            # print(air_ap_tail_orig)
            # stop

            # HW: this is the lower limit that we use for gammatheta, which is
            # there to avoid model crashes. Besides on this limit, the upper
            # air profile is modified in a way that is still conserves total
            # quantities of moisture and temperature. The limit is set by trial
            # and error. The numerics behind the crash should be investigated
            # so that a cleaner solution can be provided.
            gammatheta_lower_limit = 0.002
            # Merge consecutive profile points (by averaging) until the local
            # theta lapse rate exceeds the lower limit, so theta increases
            # monotonically enough with height.
            # NOTE(review): DataFrame.append is deprecated in modern pandas
            # (use pd.concat) — verify against the pinned pandas version.
            while ((itop in range(0,1)) or (itop != ibottom)):
                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
                if (
                    #(z_mean > (z_low+0.2)) and \
                    #(theta_mean > (theta_low+0.02) ) and \
                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
                  (itop >= (len(air_ap_tail_orig)-1)) \
                   :

                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
                    ibottom = itop+1
                    theta_low = air_ap_tail.theta.iloc[-1]
                    z_low =     air_ap_tail.z.iloc[-1]
    

                itop +=1
                # elif  (itop > len(air_ap_tail_orig)-10):
                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
                #print(itop,ibottom)

            if itop > 1:
                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          "Warning! Temperature profile was too steep. \
                                  Modifying profile: "+ \
                                  str(itop - 1)+ " measurements were dropped \
                                  and replaced with its average \
                                  Modifying profile. \
                                  mean with next profile point(s).") 


            # reassemble: mixed-layer head + regularized tail + untouched rest
            self.air_ap = pd.concat((air_ap_head,\
                                     air_ap_tail,\
                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
                                                                      axis=1)

            if  self.sw_ac:
                qvalues = \
                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]

                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
                # hydrostatic estimate of the pressure at the mixed-layer top
                self.P_h    = self.Ps - self.rho * self.g * self.h
                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
                        [self.Ps,  self.P_h, self.P_h-0.1]

                # back out absolute temperature from theta (inverse Poisson)
                self.air_ap.t = \
                            self.air_ap.theta * \
                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)


        # WARNING: self.sw_ac always requires self.sw_ap for now!!!




        # else:
            # in the other case, it is updated at the time the statistics are
            # calculated 

        if (self.sw_ac is not None) and ('adv' in self.sw_ac):


            self.P_h    = self.Ps - self.rho * self.g * self.h
            # circulation levels whose pressure lies inside the mixed layer
            in_ml = (self.air_ac.p >= self.P_h)

            if in_ml.sum() == 0:
                warnings.warn(" no circulation points in the mixed layer \
                              found. We just take the bottom one.")
                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
            for var in ['t','q','u','v']:

                # calculation of the advection variables for the mixed-layer
                # these will be used for the next timestep
                # Warning: w is excluded for now.

                # mass-weighted (delpdgrav) mean of x- and y-advection
                self.__dict__['adv'+var] = \
                        ((self.air_ac['adv'+var+'_x'][in_ml] \
                         + \
                         self.air_ac['adv'+var+'_y'][in_ml])* \
                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
                        self.air_ac['delpdgrav'][in_ml].sum()

                # calculation of the advection variables for the profile above
                # the mixed layer (also for the next timestep)
                self.air_ap['adv'+var] = \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p,\
                                              self.air_ac['adv'+var+'_x']) \
                                    + \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p, \
                                              self.air_ac['adv'+var+'_y'])
                # if var == 't':
                #     print(self.air_ap['adv'+var])
                #     stop

            # as an approximation, we consider that advection of theta in the
            # mixed layer is equal to advection of t. This is a sufficient
            # approximation since theta and t are very similar at the surface
            # pressure.

            self.__dict__['advtheta'] = self.__dict__['advt']

        if (self.sw_ac is not None) and ('w' in self.sw_ac):
            # update the vertical wind profile
            self.air_ap['wp'] = np.interp(self.air_ap.p, \
                                          self.air_ac.p, \
                                          self.air_ac['wp'])
            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                                 self.Rv*self.air_ap.q)
            # ideal-gas density per profile level
            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
            
            air_apwold = self.air_ap['w']
            # convert pressure velocity wp [Pa/s] to vertical wind w [m/s]
            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
            #print('hello w upd')

            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
            # # interpolate subsidence x density
            # self.air_ap['wrho'] = \
            #            np.interp(self.air_ap.p,\
            #                      self.air_ach.p,\
            #                      self.air_ach['wrho']) \
            #     
            # self.air_ap['w'] = \
            #     self.air_ap['wrho']/(self.air_ap.p/ \
            #                          (self.Rd*(1.-self.air_ap.q) + \
            #                           self.Rv*self.air_ap.q)* \
            #                          self.air_ap.TEMP)
            # # self.wrho = np.interp(self.P_h,\
            # #                      self.air_ach.p,\
            # #                      self.air_ach['wrho']) \



            # Also update the vertical wind at the mixed-layer height
            # (subsidence)
            self.ws   = self.air_ap.w[1]
        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])

            ## Finally, we update he 
            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
            #                        + \
            #                        self.air_ac['divU_y'][in_ml])* \
            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
            #            self.air_ac['delpdgrav'][in_ml].sum() 
            

        if self.sw_ap:
            for var in ['theta','q','u','v']:

                # update of the slope (gamma) for the different variables, for
                # the next timestep!

                # there is an warning message that tells about dividing through
                # zero, which we ignore

                with np.errstate(divide='ignore'):
                    gammavar = list(np.array(self.air_ap[var][1:].values - \
                                             self.air_ap[var][:-1].values) \
                                    / np.array(self.air_ap['z'][1:].values - \
                                               self.air_ap['z'][:-1].values))

                    # add last element twice (since we have one element less)
                gammavar.append(gammavar[-1])
                gammavar = np.array(gammavar)
                self.air_ap['gamma'+var] = gammavar

                # Based on the above, update the gamma value at the mixed-layer
                # top
                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
                                                                     self.air_ap.z)[0][-1]]
+            
+    def run_radiation(self):
+        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
+        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
+        sinlea = max(sinlea, 0.0001)
+        
+        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
+  
+        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
+  
+        self.Swin  = self.S0 * Tr * sinlea
+        self.Swout = self.alpha * self.S0 * Tr * sinlea
+        
+        
+        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
+        self.Lwout = self.bolz * self.Ts ** 4.
+          
+        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
+        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
+  
    def run_surface_layer(self):
        """Diagnose surface-layer transfer coefficients and 2-m quantities.

        Computes the surface values (thetasurf, qsurf, thetavsurf), the bulk
        Richardson number Rib, the Obukhov length L / stability parameter
        zeta, the drag coefficients Cm and Cs, the friction velocity ustar,
        the momentum fluxes uw/vw and the 2-m diagnostics (T2m, q2m, u2m,
        v2m, esat2m, e2m).  Two stability closures are available: the
        classical iterative similarity solution (sw_lit True) or the
        non-iterative Wouters et al. (2012) formulation.

        NOTE(review): relies on module-level helpers ``qsat``, ``ribtol``,
        ``psim``, ``psih``, ``zeta_hs2`` and ``funcsche`` defined elsewhere
        in this file — and mixes bare ``psim``/``psih`` calls with
        ``self.psim``/``self.psih`` (e.g. in Cs, v2m); presumably both
        implementations agree, but this inconsistency should be confirmed
        and unified.
        """
        # HW: I had to raise the minimum wind speed to make the simulation with
        # the non-iterative solution stable (this solution was a wild guess, so I don't
        # know the exact problem of the instability in case of very low wind
        # speeds yet)
        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))

        # version of 20180730 where there are still some runs crashing. Maybe
        # an upper limit should be set on the monin-obukhov length instead of
        # a lower limmit on the wind speed?
        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))

        # effective wind speed including the convective velocity scale wstar,
        # floored at 0.5 m/s for numerical stability (see notes above)
        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))

        
        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
        qsatsurf       = qsat(self.thetasurf, self.Ps)
        # cq blends mixed-layer humidity with the saturated surface value
        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf

        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
  
        # surface-layer depth taken as 10% of the mixed-layer height
        zsl       = 0.1 * self.h
        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
        


        if self.sw_lit:
            self.Rib  = min(self.Rib, 0.2)
            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
            self.zeta  = zsl/self.L
            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
            
        
            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
            
            
            self.ustar = np.sqrt(self.Cm) * ueff
            self.uw    = - self.Cm * ueff * self.u
            self.vw    = - self.Cm * ueff * self.v
        
     
            # diagnostic meteorological variables
            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
            
            # diagnostic meteorological variables
        else:
            
            ## circumventing any iteration with Wouters et al., 2012
            self.zslz0m = np.max((zsl/self.z0m,10.))
            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
            self.L = zsl/self.zeta
            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
        
            self.Cm = self.k**2.0/funm/funm
            self.Cs = self.k**2.0/funm/funh
            
            self.ustar = np.sqrt(self.Cm) * ueff
            self.uw    = - self.Cm * ueff * self.u
            self.vw    = - self.Cm * ueff * self.v
            
            # extrapolation from mixed layer (instead of from surface) to 2meter
            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
            self.u2m    =                - self.uw     / self.ustar / self.k * funm
            self.v2m    =                - self.vw     / self.ustar / self.k * funm
        
        
        # 2-m saturated vapour pressure (Tetens) and actual vapour pressure
        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
        self.e2m    = self.q2m * self.Ps / 0.622
+     
+    def ribtol(self, Rib, zsl, z0m, z0h): 
+        if(Rib > 0.):
+            L    = 1.
+            L0   = 2.
+        else:
+            L  = -1.
+            L0 = -2.
+        #print(Rib,zsl,z0m,z0h)
+        
+        while (abs(L - L0) > 0.001):
+            L0      = L
+            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
+            Lstart  = L - 0.001*L
+            Lend    = L + 0.001*L
+            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
+                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+            L       = L - fx / fxdif
+            #print(L)
+            if(abs(L) > 1e12):
+                break
+
+        return L
+      
+    def psim(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psim = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+        return psim
+      
+    def psih(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psih  = 2. * np.log( (1. + x*x) / 2.)
+            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+        return psih
+ 
+    def jarvis_stewart(self):
+        # calculate surface resistances using Jarvis-Stewart model
+        if(self.sw_rad):
+            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
+        else:
+            f1 = 1.
+  
+        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
+            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
+        else:
+            f2 = 1.e8
+ 
+        # Limit f2 in case w2 > wfc, where f2 < 1
+        f2 = max(f2, 1.);
+ 
+        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
+        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
+  
+        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
+
+    def factorial(self,k):
+        factorial = 1
+        for n in range(2,k+1):
+            factorial = factorial * float(n)
+        return factorial;
+
+    def E1(self,x):
+        E1sum = 0
+        for k in range(1,100):
+            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
+        return -0.57721566490153286060 - np.log(x) - E1sum
+ 
    def ags(self):
        """A-gs photosynthesis/stomatal-conductance scheme.

        Computes the canopy surface resistance ``rs`` and the surface CO2
        fluxes (assimilation wCO2A, soil respiration wCO2R and their sum
        wCO2) for a C3 or C4 vegetation type, following the A-gs approach
        (leaf-to-canopy upscaling with the exponential integral E1, soil
        moisture stress via Combe et al. 2016 when c_beta != 0).

        NOTE(review): relies on module-level ``esat`` and ``sys`` imported
        elsewhere in this file; coefficient arrays (CO2comp298, Q10CO2,
        gm298, ...) are indexed with c = 0 (C3) or 1 (C4).
        """
        # Select index for plant type
        if(self.c3c4 == 'c3'):
            c = 0
        elif(self.c3c4 == 'c4'):
            c = 1
        else:
            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)

        # calculate CO2 compensation concentration
        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  

        # calculate mesophyll conductance
        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
        gm            = gm / 1000. # conversion from mm s-1 to m s-1
  
        # calculate CO2 concentration inside the leaf (ci)
        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
  
        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
        D0            = (self.f0[c] - fmin) / self.ad[c]
  
        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
        ci            = cfrac * (co2abs - CO2comp) + CO2comp
  
        # calculate maximal gross primary production in high light conditions (Ag)
        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
  
        # calculate effect of soil moisture stress on gross assimilation rate
        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
  
        # calculate stress function
        if (self.c_beta == 0):
            fstr = betaw;
        else:
            # Following Combe et al (2016)
            if (self.c_beta < 0.25):
                P = 6.4 * self.c_beta
            elif (self.c_beta < 0.50):
                P = 7.6 * self.c_beta - 0.3
            else:
                P = 2**(3.66 * self.c_beta + 0.34) - 1
            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
  
        # calculate gross assimilation rate (Am)
        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
        Rdark        = (1. / 9.) * Am
        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
  
        # calculate  light use efficiency
        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
  
        # calculate gross primary productivity
        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
  
        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
  
        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
        a1           = 1. / (1. - self.f0[c])
        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
  
        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
  
        # calculate surface resistance for moisture and carbon dioxide
        self.rs      = 1. / (1.6 * gcco2)
        rsCO2        = 1. / gcco2
  
        # calculate net flux of CO2 into the plant (An)
        An           = -(co2abs - ci) / (self.ra + rsCO2)
  
        # CO2 soil surface flux
        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
  
        # CO2 flux
        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
        self.wCO2    = self.wCO2A + self.wCO2R
+ 
    def run_land_surface(self):
        """Solve the surface energy balance and soil/vegetation tendencies.

        Computes the aerodynamic resistance ra, the surface resistance rs
        (via Jarvis-Stewart or A-gs depending on ``ls_type``), an implicit
        skin temperature Ts, the energy-balance terms (H, LE and its
        vegetation/liquid/soil components, G, LEpot, LEref) and the
        tendencies of soil temperature (Tsoiltend), top-layer soil moisture
        (wgtend) and canopy water (Wltend).  Finally converts H and LE to
        the kinematic fluxes wtheta and wq used by the mixed-layer scheme.

        NOTE(review): relies on module-level ``esat``/``qsat`` helpers and
        ``sys`` defined elsewhere in this file.
        """
        # compute ra
        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
        #print('ueff',self.u,self.v,self.wstar)

        if(self.sw_sl):
          self.ra = (self.Cs * ueff)**-1.
        else:
          self.ra = ueff / max(1.e-3, self.ustar)**2.

        #print('ra',self.ra,self.ustar,ueff)

        # first calculate essential thermodynamic variables
        self.esat    = esat(self.theta)
        self.qsat    = qsat(self.theta, self.Ps)
        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
        self.dqsatdT = 0.622 * desatdT / self.Ps
        self.e       = self.q * self.Ps / 0.622

        # surface resistance from the selected land-surface scheme
        if(self.ls_type == 'js'): 
            self.jarvis_stewart() 
        elif(self.ls_type == 'ags'):
            self.ags()
        else:
            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)

        # recompute f2 using wg instead of w2
        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
        else:
          f2        = 1.e8
        self.rssoil = self.rssoilmin * f2 
 
        # fraction of vegetation covered by liquid water (capped at 1)
        Wlmx = self.LAI * self.Wmax
        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
        self.cliq = min(1., self.Wl / Wlmx) 
     
        # calculate skin temperature implictly
        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)

        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
        #print('Ts',self.rs)

        esatsurf      = esat(self.Ts)
        self.qsatsurf = qsat(self.Ts, self.Ps)

        # latent heat flux partitioned over dry vegetation, wet vegetation
        # (interception) and bare soil
        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
  
        # canopy water evaporates at the expense of the liquid reservoir
        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
  
        self.LE     = self.LEsoil + self.LEveg + self.LEliq
        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
        #print('H',self.ra,self.Ts,self.theta)
        self.G      = self.Lambda * (self.Ts - self.Tsoil)
        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
        
        # force-restore soil heat capacity coefficient
        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
  
        # force-restore soil temperature tendency (restore towards T2)
        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
   
        d1          = 0.1
        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
  
        # calculate kinematic heat fluxes
        self.wtheta   = self.H  / (self.rho * self.cp)
        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
        self.wq       = self.LE / (self.rho * self.Lv)
+ 
+    def integrate_land_surface(self):
+        # integrate soil equations
+        Tsoil0        = self.Tsoil
+        wg0           = self.wg
+        Wl0           = self.Wl
+  
+        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
+        self.wg       = wg0     + self.dtcur * self.wgtend
+        self.Wl       = Wl0     + self.dtcur * self.Wltend
+  
+    # store model output
+    def store(self):
+        t                      = self.t
+        
+        self.out.time[t]          = t * self.dt / 3600. + self.tstart
+
+        # in case we are at the end of the simulation, we store the vertical
+        # profiles to the output
+        
+        # if t == (len(self.out.time) - 1):
+        #     self.out.air_ac = self.air_ac
+        #     self.out.air_ap = self.air_ap
+
+        
+        # this way, we only need to define the output variables in the output class, so we don't need to specify them again here.
+        #  for key in self.out.__dict__.keys():
+        #      if key in self.__dict__:
+        #          self.out.__dict__[key][t]  = self.__dict__[key]
+        
+        self.out.h[t]          = self.h
+        
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
+        
+        self.out.gammatheta[t] = self.gammatheta
+        self.out.gammau[t]     = self.gammau
+        self.out.gammav[t]     = self.gammav
+        self.out.gammaq[t]     = self.gammaq
+        self.out.theta[t]      = self.theta
+        self.out.thetav[t]     = self.thetav
+        self.out.dtheta[t]     = self.dtheta
+        self.out.dthetav[t]    = self.dthetav
+        self.out.wtheta[t]     = self.wtheta
+        self.out.wthetav[t]    = self.wthetav
+        self.out.wthetae[t]    = self.wthetae
+        self.out.wthetave[t]   = self.wthetave
+        
+        self.out.q[t]          = self.q
+        self.out.dq[t]         = self.dq
+        self.out.wq[t]         = self.wq
+        self.out.wqe[t]        = self.wqe
+        self.out.wqM[t]        = self.wqM
+      
+        self.out.qsat[t]       = self.qsat
+        self.out.e[t]          = self.e
+        self.out.esat[t]       = self.esat
+      
+        fac = (self.rho*self.mco2)/self.mair
+        self.out.CO2[t]        = self.CO2
+        self.out.dCO2[t]       = self.dCO2
+        self.out.wCO2[t]       = self.wCO2  * fac
+        self.out.wCO2e[t]      = self.wCO2e * fac
+        self.out.wCO2R[t]      = self.wCO2R * fac
+        self.out.wCO2A[t]      = self.wCO2A * fac
+
+        self.out.u[t]          = self.u
+        self.out.du[t]         = self.du
+        self.out.uw[t]         = self.uw
+        
+        self.out.v[t]          = self.v
+        self.out.dv[t]         = self.dv
+        self.out.vw[t]         = self.vw
+        
+        self.out.T2m[t]        = self.T2m
+        self.out.q2m[t]        = self.q2m
+        self.out.u2m[t]        = self.u2m
+        self.out.v2m[t]        = self.v2m
+        self.out.e2m[t]        = self.e2m
+        self.out.esat2m[t]     = self.esat2m
+
+
+        self.out.Tsoil[t]      = self.Tsoil
+        self.out.T2[t]         = self.T2
+        self.out.Ts[t]         = self.Ts
+        self.out.wg[t]         = self.wg
+        
+        self.out.thetasurf[t]  = self.thetasurf
+        self.out.thetavsurf[t] = self.thetavsurf
+        self.out.qsurf[t]      = self.qsurf
+        self.out.ustar[t]      = self.ustar
+        self.out.Cm[t]         = self.Cm
+        self.out.Cs[t]         = self.Cs
+        self.out.L[t]          = self.L
+        self.out.Rib[t]        = self.Rib
+  
+        self.out.Swin[t]       = self.Swin
+        self.out.Swout[t]      = self.Swout
+        self.out.Lwin[t]       = self.Lwin
+        self.out.Lwout[t]      = self.Lwout
+        self.out.Q[t]          = self.Q
+  
+        self.out.ra[t]         = self.ra
+        self.out.rs[t]         = self.rs
+        self.out.H[t]          = self.H
+        self.out.LE[t]         = self.LE
+        self.out.LEliq[t]      = self.LEliq
+        self.out.LEveg[t]      = self.LEveg
+        self.out.LEsoil[t]     = self.LEsoil
+        self.out.LEpot[t]      = self.LEpot
+        self.out.LEref[t]      = self.LEref
+        self.out.G[t]          = self.G
+
+        self.out.zlcl[t]       = self.lcl
+        self.out.RH_h[t]       = self.RH_h
+
+        self.out.ac[t]         = self.ac
+        self.out.M[t]          = self.M
+        self.out.dz[t]         = self.dz_h
+        self.out.substeps[t]   = self.substeps
+  
+    # delete class variables to facilitate analysis in ipython
+    def exitmodel(self):
+        del(self.Lv)
+        del(self.cp)
+        del(self.rho)
+        del(self.k)
+        del(self.g)
+        del(self.Rd)
+        del(self.Rv)
+        del(self.bolz)
+        del(self.S0)
+        del(self.rhow)
+  
+        del(self.t)
+        del(self.dt)
+        del(self.tsteps)
+         
+        del(self.h)          
+        del(self.Ps)        
+        del(self.fc)        
+        del(self.ws)
+        del(self.we)
+        
+        del(self.theta)
+        del(self.dtheta)
+        del(self.gammatheta)
+        del(self.advtheta)
+        del(self.beta)
+        del(self.wtheta)
+    
+        del(self.T2m)
+        del(self.q2m)
+        del(self.e2m)
+        del(self.esat2m)
+        del(self.u2m)
+        del(self.v2m)
+        
+        del(self.thetasurf)
+        del(self.qsatsurf)
+        del(self.thetav)
+        del(self.dthetav)
+        del(self.thetavsurf)
+        del(self.qsurf)
+        del(self.wthetav)
+        
+        del(self.q)
+        del(self.qsat)
+        del(self.dqsatdT)
+        del(self.e)
+        del(self.esat)
+        del(self.dq)
+        del(self.gammaq)
+        del(self.advq)
+        del(self.wq)
+        
+        del(self.u)
+        del(self.du)
+        del(self.gammau)
+        del(self.advu)
+        
+        del(self.v)
+        del(self.dv)
+        del(self.gammav)
+        del(self.advv)
+  
+        del(self.htend)
+        del(self.thetatend)
+        del(self.dthetatend)
+        del(self.qtend)
+        del(self.dqtend)
+        del(self.utend)
+        del(self.dutend)
+        del(self.vtend)
+        del(self.dvtend)
+     
+        del(self.Tsoiltend) 
+        del(self.wgtend)  
+        del(self.Wltend) 
+  
+        del(self.ustar)
+        del(self.uw)
+        del(self.vw)
+        del(self.z0m)
+        del(self.z0h)        
+        del(self.Cm)         
+        del(self.Cs)
+        del(self.L)
+        del(self.Rib)
+        del(self.ra)
+  
+        del(self.lat)
+        del(self.lon)
+        del(self.doy)
+        del(self.tstart)
+   
+        del(self.Swin)
+        del(self.Swout)
+        del(self.Lwin)
+        del(self.Lwout)
+        del(self.cc)
+  
+        del(self.wg)
+        del(self.w2)
+        del(self.cveg)
+        del(self.cliq)
+        del(self.Tsoil)
+        del(self.T2)
+        del(self.a)
+        del(self.b)
+        del(self.p)
+        del(self.CGsat)
+  
+        del(self.wsat)
+        del(self.wfc)
+        del(self.wwilt)
+  
+        del(self.C1sat)
+        del(self.C2ref)
+  
+        del(self.LAI)
+        del(self.rs)
+        del(self.rssoil)
+        del(self.rsmin)
+        del(self.rssoilmin)
+        del(self.alpha)
+        del(self.gD)
+  
+        del(self.Ts)
+  
+        del(self.Wmax)
+        del(self.Wl)
+  
+        del(self.Lambda)
+        
+        del(self.Q)
+        del(self.H)
+        del(self.LE)
+        del(self.LEliq)
+        del(self.LEveg)
+        del(self.LEsoil)
+        del(self.LEpot)
+        del(self.LEref)
+        del(self.G)
+  
+        del(self.sw_ls)
+        del(self.sw_rad)
+        del(self.sw_sl)
+        del(self.sw_wind)
+        del(self.sw_shearwe)
+
+# class for storing mixed-layer model output data
+class model_output:
+    def __init__(self, tsteps):
+        self.time          = np.zeros(tsteps)    # time [s]
+
+        # mixed-layer variables
+        self.h          = np.zeros(tsteps)    # ABL height [m]
+        
+        self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
+        self.gammatheta = np.zeros(tsteps)    # free atmosphere potential temperature lapse rate [K m-1]
+        self.gammaq     = np.zeros(tsteps)    # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.gammau     = np.zeros(tsteps)
+        self.gammav     = np.zeros(tsteps)
+        self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
+        self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
+        self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
+        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
+        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
+        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
+        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
+        
+        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
+        self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
+        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
+        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]
+
+        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
+        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
+        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]
+
+        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
+        self.dCO2       = np.zeros(tsteps)    # initial CO2 jump at h [ppm]
+        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
+        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
+        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
+        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
+        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
+        
+        self.u          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
+        self.du         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
+        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
+        
+        self.v          = np.zeros(tsteps)    # initial mixed-layer v-wind speed [m s-1]
+        self.dv         = np.zeros(tsteps)    # initial v-wind jump at h [m s-1]
+        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]
+
+        # diagnostic meteorological variables
+        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
+        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
+        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
+        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
+        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
+        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
+
+        # ground variables
+        self.Tsoil       = np.zeros(tsteps)
+        self.T2          = np.zeros(tsteps)
+        self.Ts          = np.zeros(tsteps)
+        self.wg          = np.zeros(tsteps)
+
+        # surface-layer variables
+        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
+        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
+        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
+        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
+        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
+        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
+        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
+        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
+        self.L          = np.zeros(tsteps)    # Obukhov length [m]
+        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]
+
+        # radiation variables
+        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
+        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
+        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
+        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
+        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]
+
+        # land surface variables
+        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
+        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
+        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
+        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
+        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
+        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
+        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
+        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
+        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
+        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]
+
+        # Mixed-layer top variables
+        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
+        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]
+
+        # cumulus variables
+        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
+        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
+        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
+        
+        
+        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
+
+# class for storing mixed-layer model input data
+class model_input:
+    def __init__(self):
+
+        # NOTE: see explanation below.
+        # we comment out the initialization, because there is a problem when
+        # inheriting values from one class4gl_input to another. We also expect
+        # that the user specifies all the required parameters (if not, an error
+        # is raised). 
+
+        # general model variables
+        self.runtime    = None  # duration of model run [s]
+        self.dt         = None  # time step [s]
+
+        # mixed-layer variables
+        self.sw_ml      = None  # mixed-layer model switch
+        self.sw_shearwe = None  # Shear growth ABL switch
+        self.sw_fixft   = None  # Fix the free-troposphere switch
+        self.h          = None  # initial ABL height [m]
+        self.Ps         = None  # surface pressure [Pa]
+        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
+        self.fc         = None  # Coriolis parameter [s-1]
+        
+        self.theta      = None  # initial mixed-layer potential temperature [K]
+        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
+
+        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
+
+        self.dtheta     = None  # initial temperature jump at h [K]
+        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = None  # advection of heat [K s-1]
+        self.beta       = None  # entrainment ratio for virtual heat [-]
+        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
+        
+        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
+        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
+
+        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = None  # advection of moisture [kg kg-1 s-1]
+        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
+
+        self.CO2        = None  # initial mixed-layer CO2 [ppm]
+        self.dCO2       = None  # initial CO2 jump at h [ppm]
+        self.gammaCO2   = None  # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = None  # advection of CO2 [ppm s-1]
+        self.wCO2       = None  # surface kinematic CO2 flux [ppm m s-1]
+        
+        self.sw_wind    = None  # prognostic wind switch
+        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
+        self.du         = None  # initial u-wind jump at h [m s-1]
+        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = None  # advection of u-wind [m s-2]
+
+        self.v          = None  # initial mixed-layer v-wind speed [m s-1]
+        self.dv         = None  # initial v-wind jump at h [m s-1]
+        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = None  # advection of v-wind [m s-2]
+
+        # surface layer variables
+        self.sw_sl      = None  # surface layer switch
+        self.ustar      = None  # surface friction velocity [m s-1]
+        self.z0m        = None  # roughness length for momentum [m]
+        self.z0h        = None  # roughness length for scalars [m]
+        self.Cm         = None  # drag coefficient for momentum [-]
+        self.Cs         = None  # drag coefficient for scalars [-]
+        self.L          = None  # Obukhov length [m]
+        self.Rib        = None  # bulk Richardson number [-]
+
+        # radiation parameters
+        self.sw_rad     = None  # radiation switch
+        self.lat        = None  # latitude [deg]
+        self.lon        = None  # longitude [deg]
+        self.doy        = None  # day of the year [-]
+        self.tstart     = None  # time of the day [h UTC]
+        self.cc         = None  # cloud cover fraction [-]
+        self.Q          = None  # net radiation [W m-2] 
+        self.dFz        = None  # cloud top radiative divergence [W m-2] 
+
+        # land surface parameters
+        self.sw_ls      = None  # land surface switch
+        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
+        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = None  # temperature top soil layer [K]
+        self.T2         = None  # temperature deeper soil layer [K]
+        
+        self.a          = None  # Clapp and Hornberger retention curve parameter a
+        self.b          = None  # Clapp and Hornberger retention curve parameter b
+        self.p          = None  # Clapp and Hornberger retention curve parameter p 
+        self.CGsat      = None  # saturated soil conductivity for heat
+        
+        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
+        self.wfc        = None  # volumetric water content field capacity [-]
+        self.wwilt      = None  # volumetric water content wilting point [-]
+        
+        self.C1sat      = None 
+        self.C2ref      = None
+
+        self.c_beta     = None  # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = None  # leaf area index [-]
+        self.gD         = None  # correction factor transpiration for VPD [-]
+        self.rsmin      = None  # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = None  # surface albedo [-]
+        
+        self.Ts         = None  # initial surface temperature [K]
+        
+        self.cveg       = None  # vegetation fraction [-]
+        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
+        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
+        
+        self.Lambda     = None  # thermal diffusivity skin layer [-]
+
+        # A-Gs parameters
+        self.c3c4       = None  # Plant type ('c3' or 'c4')
+
+        # Cumulus parameters
+        self.sw_cu      = None  # Cumulus parameterization switch
+        self.dz_h       = None  # Transition layer thickness [m]
+        
+# BEGIN -- HW 20171027
+        # self.cala       = None      # soil heat conductivity [W/(K*m)]
+        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+# END -- HW 20171027
diff --git a/class4gl/__init__.py b/class4gl/__init__.py
new file mode 100644
index 0000000..a21583b
--- /dev/null
+++ b/class4gl/__init__.py
@@ -0,0 +1,7 @@
+from . import model,class4gl,interface_multi,data_air,data_global
+
+__version__ = '0.1.0'
+
+__author__ = 'Hendrik Wouters '
+
+__all__ = []
diff --git a/class4gl/__pycache__/__init__.cpython-36.pyc b/class4gl/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75cd64362b5997bc8f56bf6abf2c3fb3e5409324
GIT binary patch
literal 376
zcmX|-Jx>E67{|F6+OtjL11yq2ML1G1j#YvamEu%E
z3?sl|++qoCmjGuhoq+-Th`eP09aNfe>9U2Sb$wMzcPJ{$&5|xSQH`|1WsF)%s4%Yk
ztxBoqRPJGspXbH?^p;o5h;{!_H{MtGhpV5)*5AjaqC(~itws-n0|!h9Y?6R900y)?
zT$hlIcMDw+;yQ%T#;&ySy6v@;guDUM4X*<}@6meKtaXJ#AXM_oqH?2)qWC?o3pBl$
uqFOKQmKu(3?k2Mt`uif1VvYzAm9T{5&pVe5{SX?-M`0#)$a4~>et!YuvuOta

literal 0
HcmV?d00001

diff --git a/class4gl/__pycache__/class4gl.cpython-36.pyc b/class4gl/__pycache__/class4gl.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d84b87586ff096df14c9205c6ba38d2385d9224e
GIT binary patch
literal 28681
zcmdUY3vguFSzh1Ux1V~9Mx&858qLg|r=;~rn#Vrn*_CGJu`|1~+VPC+v3tGlZS`%n
zy4C8IZeP8c+wo$piHRArfN=ula#dUj7zYTUDrFN~;BtTfF_eo0;#@E+5DK;oVF{Qx
z8^HO#|J=Uat-$)takH8;EMZ_O1MN?6vjh14mn7reuxV#go
zguIifWG-IpuwqvHt|~WL>P&S>Tz6`Zyn9l6<=vawC-1&gAKr=5{^k9t{ir9I>VGO^
zby%G*hpf)rz;-w_XmwfL)*h<|e|z!QjlVAW>b3S+ed62y$-Suq$k%TTAm4!G8^ZUX
zbpYQ7w)NCOd<|I#@pVvQhph?guyy2REp^B;t)nl8QirWc>$r6SAxEr})+vOT)`&HV
z_fhMN^?>yb#2iB@r>$|6GM>}Ww&Paxc4+Ku=8zr=C6kxzT*k>+W~FRCi|6S~**y1{
zdG_4Y`SVi`oi)#$Is0fb`E12nE9K0E0-cI&W=olB^}&4Uw7IcZ$S#^@Zqvz?ovP_9
z=2YbPa>dG(Os8Vb=giDJ^1C~qnRp3c{w*$#hksx@8l@w@a?56<<@hh
zx1W4DSFL99C>w2y0&JHv(&(09$shUf*WdN|PaU7hva5|B@In!uK6rAWlm_CX1L1X`
zRoDsxc?_>BpDX9=LRLXs78FW;d^7tcJk=x|OLIeMP%mWCXXiLbg>r$-FsixI!jFaQ
z4kUWx*~z7?wWUgVl5Lwr(avOky>jNv8UOXznaRuNE>Bh~3(iKy&P@thOg6A^5?~d4
zOss5q@idxMaMI~%HiPX6X|?{g7Ed&i%%U?$&rwaMy<|F#IR&o6-WORe?P<%N_Dn6Zl%HJ6zoCuSqRZ#agn06XJprNc~9TqbIyy*RV#&(r_Y`9!nHHYUL;dmSTe1^3OSAX(WO22ZoZBoN%;tZQvh7(NckQKHJ(8#bAv7(q>gX;j^5i4OO
z@s3&tg|2H@JwVYB5+_y1J7Eo5pykM$v<`uy?}x7g)S1*=OLZzLj&(%G%pdW(8*`=L
zWo|ah_VCAd05R{yhXa_|*0hkL6?IGBCj5k*kQ*w78S3g*cqw6@<9E?;5s#FSZN#Ue
zPE6kM0>2WjX3rs2ayx_-N`SWZj9JY&=Gux`s^s%I+Z?Uta#QA_g`3!G^5j2ZXvT)a;jrt
zo9JXe00-lon_tVPg&gbzN5bg~c&bF+P&BS}YDo?5P&^zDM|DH14YbcifC&K-vvoJ|
zT*6cRDjZIU^`K<{NB|}T5rPK*BW`udLjMqaaV+Y4t%t=IM_Puo{u#3HKZ2P3)}z({
z03>c5z=H)If5QNxLtNasuv^EtsB=N*VvaOpT*|SC$E|U&01+(uXYgm>?+m`q-kq&$
zjzhNab~*GyZ$0d4ZukJerKr2%3!&AYx+5TDodW=!zpDcz&HykUtG!b#LYz^>
z37EO%6=zGWM+#W6>RJuJ$!4m#00i30nd$uB{McXo#zy1dg4^69b
zUaU3BxjK_SFTejZo&S!$pM2`~YagG^zyEu`^o`NKxjvKs{9)4geSYFEt~3%KY-Hb>
z@$_Of|NAv&{ncr|tgrjktTdYY5-8*(fyBgR-_i`qRm;{n``x#qaww
zclx2HNTd{q_HO`wxF{I)CGl@4oorAANB;|7&07?{EAnK$ujQUV6UK
zCbXL)77Bf3I-fcB?lTYk*zHE812ZW-dtOCS8WqZ{$*O_!kPB3yw{s@bsHi5W2$pyY2n>1dS8wnM&p}%UB{wh*R;5wXR6AG0*Sl
zhJ4xm%%EGVsi-7gO&LhIn$kB`Q{hX`okOv7;}S_<6wwmCK{tkWY!u3tr>&uoHNTKG
zDP+yl))Bbw2d+!ty7Z=6N=4SH4%nPStR9QNVk(NF?2P4wOBn}FrR;^R%9a;(n9i{W
z*iY?UpzBv_saUC+cD7b>_5p<2Lv#k|9E6k7=c@KFJf4Q-XWr9RQjtqffsaWU8+5}P
z3)xg;W1~=V5OJ14kxS?7+=6{Xa_29nB6bxlWGZ1-xlX}rA7+$sXF0Q(((jbK=#9+s
zyp>5A+4I>4rAJxx*{Y=%5tJ!qQjx5^SV1-DQ>p6lm03}*taP+gaJ+ax33V^3=%jrL
z^%P#517;~?x%u-x)Li|dAM)aK1OC4_X4oT0uV^n;e9~R4^3k;wFdaF>3Vr2DF6DaR
zrQFs|;!5iKJ$R}=0w)yFHT;FerLWzu2jNYhO7#!o#`P!&f3IE(m}y1LChl2U0=my-
zn9H%y{nPMl9+rg{3-NYnn{>6aj^((hx!U8Q)gDDrWqH-e-qjv}_*1A-s>!ZdYc?ez
z;9rYS1c}KfspPth6673v2K9Ib#=0sj)|js5;YkGV7=14y<^36AswB9fa32=8THlW4
zOcG_;7ye>+m{|H01$*jhMW4ZdPu2_xQH+FM1b^%51@sVWOXMzu>&>sA$3_t>2KxV{
z_v`g=-EhNJbV;{AX+;@lMGwF&X!S@TRF7_sx{+d3QV(Mlif|ROVqjT{%I`*HJu2P_
zIj9X^K~1X{oS18DYfcay)HB#zFqD`|
zghysyRT9Jye
z?+}7_;HP~9>HZ2&^%R^?f^26p+!yZC<9d%~Xx+fp9=#WC-00QGmUbJpeLJw1%?@OP
z2#N`i_m}YD>h|efh*DUHQW!+3uSt|PKMuq;1Q9DjBP~gIc$#V_I;dGVf#xWvHzLNjT#H)DddiKJJf-%qNrnZQ~v*83#^c
zF_}^?)zbMKKwBXzk;MPCHRt|ldw+yIZRYY5r_Is(Bi-q-Cf=#_JF0Ng>);z2w^9X6
zk_yavF^SABDufyZXw@Rmr2&$zL!8~{o8B)PG`!@4Y
zJ)#^c8@z4;NjgYB3NWgghzKPOjjLfC_(hSg8?N%<3t_cGJMgT%oM5>m4@8W622Zsg
zi|8x*W;cYW5b@|0{e@5ilZY8hG5digbUV5%UU~zcwzZ|Oy$QSl&Q&ZwHthS!L||dW
zVrMMrogsVEiC7TSfHiowwGV0eP(6Y?&2+#>;M$iq|JsSWMlm7mh*^)Kek{L@bZ!(F
z9VsT=C^2vKlXpVpUur7_wY>tpfZrPR)!oRAHW@|j6G5$MEX52dc98w4N6MF*y+A2X
zw&uGhS0hhP&@)5zto&eGi!dT6>m!Y_Tw~F%|3XM=_sfAe8!C3PB}*}+|8$;x?sT;a
zp-nIrU`^)hu}!w9Q_a;24`Qa`^#sO0G7xfOcoIud}T$TRy#q#s6CHmD&VYkvJYVXz#l9_%MB$9xoT>$)tcA33R9kT>`ZnEs1T3_
zB*a<^rMOy3A%K&Ctd!StWv+6f84s{g@hH&27xHCdC#new3YL4nq{(WoTrD_-^#a8D
z+$NSkNQuaD{c_bD9U)x_vW_%ljFHpk2-=}E;y&#_Sf;#XLIT1nizOBPQp-42@Y5jM
z1+3^z-GFF@!639&j`4tApDe=IggLhe;wjt*NRwH0&|M2fg1c&VI%wDiDAGp3S>#5+
z4n+1j59r6|$b2K^X<=H{s!(ucv0;!YAq?3n7Ad30w6o#O6
zrCU(#5heR+*?mzwAq$1rV6JCMYq@G$VGvKzd2YBMO=G-NG30+R?OaGW$!d*g_?j~<
zlSIW&Te&PwRcB(TGiD}_MYyfGqQG7tL&bixqm*nWvz5}?a=E>cqaz#nk<%kLZ(X^K
z7qNxv3p53r9bij=Bh2nsH=4ujqfb?gANG<|XcKTb*uiEJ^ZN^CvaR`&iUQf==6A4i
z@||bSJYt#{h>dfonV%2U@0(}Enz)Q6N%uxo!>7#W&mzyn>Bk;Acm6bLenCo`yEb?0d$bCX~y*ld<0dL
zV(MKNp9H!RM_;%Xut%P{apmH(BN+HbnS$b+?I}U9-LtW@w)tcmn`jV3313@F3MCZM
zSDWI1kyZ7HY?XvZNE}9y)T&bEI@Lx8(@K`Fy>Q>WjUjrI_gfbKhBh@j)@=(HJOgIZ
zo~6@Nz$NB@r&nydVmJ9?;iJ8nPb|T!udHxOmV&GB>0Z>%Edy-9af_~wyL(<#tr|_1
z89P(}RFYjUs*3Vren={zzz&hn56e0wI;eIg+rEf$uaL8*;2Rq4k0iBZtY7cb`t+WN
z5rGSix(AYNH)Plz{PEqbb?cCKb;!F~r_rM&laPJEgCEvw`x{G;uYps%QNVLkNC@Mh
zoKU5L85|naR12D`e+oSD$24qKaQ{@Bh0wnb+e&GlW~6-(_H~yj^kLk{E5^XCM~iU_8+p)MbQC+C
zE~Z58y?M>*?E_nD?Q^<|d$|30HwwL6v#w^1Y*Bmee(V_yCsH)~SIIzpRl9A!`(&htV^`8p9XF
ztKyNtkoAs(A@tMi3)Lx*9EJaw8{gI>-DxS2VaJ28apaw-C#=bO(mGS`aFf<4ahpim7`j#Ryp?W9$V{Ru#;emRW
z^A5L5sW*zJi#=`^M@Rbn5X-b6U|0`J9gj#I;~0a75N{NvHtFwKd?yZu_OVrl^{C`~
z6!}hBkKudd0Kx}U+!R_i!TQh=rAp;c+TKCx*dLh80EhcC4{XD;yx~YIm=L#
z_{4&mo9Q6*LQ5!epx;dUcuS}%;T%hpej#oUVj#X}B>j2#F1q>vdQEu27wRvMqNE3z
zN7B%z%Arr?qYo)%q@{J+8hSM6=fmi=#_S2bV%TR~j{GR|9SALjFcaPQ9&@{8CieIv
z6OobmRebHiI6b)(L8(6p@tCO|0!H?fhmgykDa=>oKuF+S#a#$W(UI>@nG#qPX-P3e
zj3#W<_exv$G6lwZZ@tIuDN;ek8SE*ZfDe!i-y`r5I(v$b!UvcPMh7YV{vxICLrUL=
zl$ifMt4}CIFZ$5zy#boB4=s5NX{W49C>LX*+&t(C=BHP14DJDz$5{gIafSemy;clm
z_bOawZNR8LcVF?HgkFmy7_q-=^QWvl1nSS!_q#prew5zp?nkbD^dl$6E$|~3uzFu{
zv=J^o2?d0!wy9VN1DrQX!8h6?<)ef?sR8@qPzFToMYQa!b=mDJJ}hwCmwuRSKnt%#
zh+{~7AoK{}jG->QA9DMP4+CBfxImtK!pPuCAw&(sg2w+~_LBG!4CMMAC$IpmVw%3Sqv
zHXA7-_e)`2Zb@}TQr$qwVe47(KP&!OP~hhfc2mOMC1JP3Zvrk46fZhYBK8*M^ftz0
zt}kT0yALyFz2~k5xCmQ!8flA{(4)%;c`xweO7SXuv#K<4QQ9_X-}@x?sr$sHB=%bI
zx{8;U-9g>YZ5!_=<$7Lfdp}aiy2cPH=TuA(_I^K%>AH(g6H1W+<*4*NjYTTo_d=r&
zh<`y+A?+aHK5TuU5!c#3m9F@V|Gr_p`=CmV+{I_DwEy)YlwP1W#o2A{$vpr?*O9vE
zJm;F2|C?3@I25-(q^Vtyzk$K_dNcxpX{k0-JEid
z`lU~x^!@c?ph3j`^X@T>3jAn&q$LD?so~W-WZrJU=nXq|fF$cKO>zw|Z^cX>_`G&8
zBelQx!A^7>pE@creYnceKt;`1^3Q-NSGyfxl&tObc27RExT&
z@6FCWkDK$5+UB94yQg9mT|#swo2}Tg`G-M4orJfdqi_>J82kU(>A!)^V~eN(CZmxaSrWft>fg@
z$>o#Q#KgEuevjy
zc1y*E-Zx`UAuTPHGcd@vCz)}9zur|ObHsKmPt&@?AO6E>7+%qCY+Ve@QsKq)8l93~
z)`=N@&C|)H^u@Fv9B*_1_HWAm?`Ni8qe$Mg25gx8VRSpO*MUs}5Oi1x$i}>HsrQ%D
zff;wLr!itM3oAIV|F_ShJTKn(^lQdIJc#h-0K^C^OY{nujVh}oUm
zfyM_{C#3>wZVAWy^)8iIz{IA@I7mV}4!Du(3hmyEVx>^_;ykR8Uf5Xh^g_jp-*#YZ
zoWK5@7sk5j>19~U!Bhmcc}AgBf&I8;S5~0xf~rWM#na}R$f;O?s{JnX1UAT9PR$Fi
zIay3i1;%)VTFxG4=U-xU9L2A2TSASl45y4F#}pPM1sOTC0!<8OD^)d`ON@*Zs^ttq
zv1g_R`YJ=CYUjc}iVrnyoG}Y*J${%%Y6tT&Ao>J{a^=
z^fo9BET$uJ2n2(+m|1}(Om)@92-=x4%)HZ5F=$g31JofK?EbJM*fku}f#rRe#@q7k
ztU+H6bC~7y7U0o~`DaE_9Z%()0IHMxP%!NoV2Dbo9)$R?tpM8#14BGx5o$&+s*nYS
zi#fEzQfg7ri0)zI3|i=j+Hvf-G6}$%ARL(ey@*dmEUubOP((Qb0QLtM8$&N~U||zS
z2Ua}&mX*?Pt*7FJY8qo*uu@&}Vvh^3P)hYQBGU6YS{E&*_O~Snf@81^;fNC)msbLa
zhDogze832$lroAmHJWErdX$zt&G0mL5K{@Ei)qx9iqzy4E6i6Mr?L!KbpWMFm4G#+
zqBW^86^1#er{$GS_k%1L)}LAMO0W)%`Mw>xP1{1U-@p=ArAF(
zllqW08tH?n=3Ml!)*0?acpqZ=sMIwK7$+I}FwBt-8#JMSf)^ntv|+tlKcV+){2MZc
zbf|&(7lv{-(reI=2})n6eUTF-CGlgqS)b7-9rD#DBr>II|u<-lB0!g2DD;w)k#Fl~f#2BwV(d&Wsn9sD89NkU-@
zwXRrKKoRUlVSBY`!J>n2sEVPa5*4Ek+pC2A%I0A=3W2Y`9&=-^4(pS3r0I5JV(YOE
z(Tg@(3H$SH>B1=MQ!VK}btklWQ_8s}<)8&i(aP&=i5r86w})j^UboQRMu>#>EJdq7
zhhFW4byp01?JeL0Bo7j-{+tMXR21V#L~%b=)UadeWWQlm5!)PQZLh>|D1q&k`e27~
zx>-JJWXNuHLK)X@b*Yh3?NhCRbq%dKJ`t4C50zOrPjChy(i4cnPoT8}&CueY%*_ER
zGp#*KP$}<-ACmZkL44xA_0t|M7$vB=jU}r4{c*&1M3j|Dlz+%gqP8B0=DPj;`1aw3
zkm4NnVP)?@wx|Iy9jzrn_1sxJ;&$-F5tKSzt#NMGl3q`ip)jlqJhhI}cZZzhM+kr2
zZg;WU5=i;Sp#=>`Q2JkvyWN)=x2IW}$tvI*1K`kyy`*Kjz{bmbLuwyn(SqA}?$D4Bo^=qg&;?f;;
z_W}-pOQODnO0nL1PwwF$x6J69=H7Qt?v}c(Jp-Zuf725Bf*$TEHd>B$?GoBcRW_8{
zPrJSKzI*CzY4zKxcYj+ADmN51yM*rZ>;00uufBgry@QQ$w)PCZbzl42N>?M*zoSkS
z%Dz&y3^gv)uGngRtMVPVFO0|RUvBT8)g|y}kea+Hcc2~)E#n%soty)Ve@ekG&p7tvTcKC8`I%EjF>YLbIKh;t}*8EM-C??>xV7UkaxJ)
zim-a>J)1AOgILwxTR(zz=>UA!>gMK*dkDTq>qp%l)OxhpKA%Dzb&t3vkICINipRG#
zj7*PvSVrjZE3wrV?_j-IeUG>|usR{`U_iOB2K5wAZ-*M|S9x8v2`!nGmRv6L6fj2{)eWZNoXdXWjb6aY-TW-makupl8JnFy2z)g%xsrsm{WXeG=pqf^{mis!Y&|Z`*xx&P6Gs?)A%P=H{vK)q}
zN|_Xt5qQ+rQhTe%TZ-U;6R61XkIiep79=Qy%^8OYTWKpzb1v9)!po~xU^@=O#ucap
zL}Sm$9hq{Qa{Gy&58FR9k}H&9LW}L1`{bljQ|V`_wS#wTXhfAM9c=5A$(`R!FOHiR
zRkKSW{kI+Sq)fjLkJy{>EWr!i
zb?qZ!cebs4pT3GMOHr_(PU`K(bWfi+lZtc8fSMX=#jOGkm5JpXl>%=hFVK`OTUlOt
z;<9=dVvU#`ZmJ
zsq7*@n`fMPyjhMm&p~0^UZDNMh(k40j!O*^P{Gifsf0TnDI^Tao-vk}eO~&ZsT@qrLsq89Au9QV4)vET3aCngg
z)U3bE?~8OkOXmb*uxo^iD(oMCKh-hMU8WqawLs#}Fo}%*N9f~bg#F!g1Z>0AwPpLK
z@d?dx4hlx>7nJOO#qWq{XYGH%q)jDyVm)IQc*zp8+F--k=jbZ4H81
zF7~4uk^C~Ssff|fLS2s=0t!++U#K51#MvD0l(Ax6d=hu1EM`gzc)^aKWdAxM8pK9`
zi1ftkp(?VKQjC$RY*M+~2?eU&*9%o`xSN)?3E162W!eo@s=R5hONFO47{wVlC~-Tr
z-tb^}7;Z0AyYh#xPHoWWF_PLa?09)LH4fWeo|(g``Eai`95G|VIH%eR1#l-+!QzJd
zk$YG>rx`6iDpw_pR>8iHiq>u@f$0t-_pp8#Eum_*PrmU5^(|W)M+*$3GEgrc{ym@#
zA!bk%)BGNVYB!EDU|Af7LKr2mg>03$P(?RdwTrLWDzt4l+|n{p7DKVDo!HAWx<+^q
z3TKoZK^>7nD58gBhkK(KNA=W>2n+;=_$PMGP+%rZ>udwhB|OzXfn#YfnZ@Z{Xn?h)
zxZUsSd9AFwI!$)LZBc`ab8Yqv{Ll#EBp@}YauSf{GxhN1rJ1m)FqUE+zk{+m)HijN$ah1f0SC=HhGLn$fKbKUH0^h?i@EF)%y+Ow
z%tJiDn+z@XGcBBmvPLIEB)c&!mO9sGZ@zo3{gk^(>>t`?VSaAdxNKx_v<>DdQe_)c
zBq3>`S>STG+b&0~)J~EKmXqtRI%U>~XF)va!sfSFGCcS0xqF&q=Wu?bTs7zHHE@Nt
zc@;}((;Q^37P~Fx_GWDljB&sjlOL0d?ylPJ2Y}3ub*eSzpW;Iu%DqhA2{<0E48uvg
z@8|anoq+JGyF8-UX#^c^?EfI{J%E-q*SHugs1
zdhOs2Tx%^a;N(CNLlHcJXEyPTy#4Q+fQoADO
z+$H_CR=p8?!L;1|CoOrSeqQLgn|VGI0vt;iGqoJC$#p!_v(-M
zrJ@~H7x3weL7ixq{c1~{yS7()kYvu4Zw5KBvw)qD!!3Ec{Wc+{rA-s6|J?IH`Wt@x
zdv+TaDT|ky?5%CS1v1k<%4gLBD?Ekag{Dw~V|-WDsD*t%W0B26v{#$rp(bdw6R(*R_z_Zwf#h8n9{VTwtoR|q7JRom_azd1{?i*
z2$-$)lHqO76x=4&(g3F-wr^;_Scl>tg^eSk+Fzq17%Uu?;OXx&O!&J$pzjDA|1Pss
zpF$6Rl_ypKHSJfKOyml2gc~Kvr!b1|q
zn8Uz1y!G0tH^Hx#294=+ICTT#COHR#D-7|)b4}fB|4gUsT?cZy%b2Z=
zv|JQp!%)PwcE2g6HlVZ
zan?)J2<=*vqzTql*21(>lL?Q^C@zb_Omgmc5C*FsS5SUQD{z^|A0wsQB?=W4ZVV;k
zYx53SAqBfP(Ejg;KgqW7){PKGqoyfGc=1Q_=Vw{!6T}nfPUaS9jFJX2vkF3F)xa%yl`iw=v)F!
z;h3>-8Mkxr2p3K~QDX-FU3{<-5FfA4;;q#BUHH;*!GrLuTK9^7LMuZ9vgHbhlSqT5
z!U7CxecLVEmIU>Ym}PolTt<`%&z)OV_wz+octWyadPJ{&<2n{Q0!S(@LbQ`xX&EDd
z&i{efNe)i~N>mR;%4w~6R%uf-^)hzMh|OYJif#`7>xh_Wl#hF2`Avwy@6~DF^Ofl=
zQpk^s3t>XWvIlJfZwJY!9)(t3!y~k354boB_o(oQB<)~uH>zP@!2Y9zflfhgsmIIA
zW7KgeryIwyB+ZS>K|ma>LwLm1aU=pqFYjW3*oVV8^#p5xjDxcL(vv8i_8SNR=kz*`
z6SDlI8*-DJM|`71ISQ4K5V(yH9)m(z!8eZD1nCDt^$ye(#gRl+Qgkqs=iX{3LSrga
zmE%SSR7{uOHsd~R`|96g+q$J~ox8TJtJ#aTwncGNuLJkCbhzEzBlKIX`qR}K5)YmRPbpyBL%6SHCY{(G^R97{hUw)x2AxzKTGi9z9
zYzNmSV>^OZqGPQ$TkH2LG@pN_JuhvxadP9HmZ6f#>I=>G?RHY($s5f9d=`#p;GUTk`$zfxIXa(#GqxvXaE_Ew^M??UGHw-~
z#C@7{b14Ij&!xh-l`8aooTK!aRMdZmkwg*{Qeh+$FYe}Kcg4-LkTRi?KZ4?)Clw=k
z5A}e%hJFaTGicpF1d=QcU_t*9!6k}OT9x4q$Gt0t
z{-OdnH$51dGhVt3Q@x$hD$B=}4v#=B%Z?y{Y3V1RkH5jU1%z;ROpx~|pE
zT&|cIs0PV-@#t4JSHc>SDQ#7ui~x{Qg8{L;`F}?M-Z;@xGx*hJcmFp-rO%O@0!)94
z051jxkDcAwzYgi2s_@I~pzz0uo^Ypb^BUKHhA^Cubm{}}utP3*8_X#!7yv)GN8X2y
z1;}f4WAhyN8AFBo_sB(HhlI;o0WokD#HgzWTpR;lJxA@XYX~JUY8$xlP$@gvdG2J4
z%T4UEwsJ?ql+SU_XccR&k|tZ$O{#v&rfdV6dAor9^~N?M3zQLEvljqPSe{#oxCaVf
zoyNu{$)*jXg-{dP#-ckmuWXNrej5(AgmOFY1+jk%F<;{#lS2*}ogm+RnyE?nA2TQf
zZagb906(#opd=&yBs?<802?cWKmEht4MDBx57SRz!i{&pJ6If&PgOG%*#<{S`Z`D)
zGiC+71gjd1HGAeo<$hRPN<9Z4c}Awb4T%U<*j2(fsgofBU>yW1(#2eP9sj+Jf-v}P
z{HHjr%z41~f1RBtB?`gVp7)Xp8=qxB;}?c!22XVeP9PRhAOfARA`p$RYN01FFWwNK
zTEz2ISCwN8Q)jYV%&O4D;mNw;8!fX0nxq>
zJ127SiB6k(XmoL3Az)dq3ue3S;tFG2OB~sJ8YMI?CcZ%Qwg#}nag(8PgCblU>yD8M
za?b?X3fNU6UcPbPA`d(x#p=yFEH8!!x13ttNb}Py!%D!;Q_9Eg3Rcm2V)JD;0b1;$
zt{`4&!mi5&T-`#yH3(RV`!(a;tQm4foG~bsbvM%ZJ2rmnzQF%-;AP5`e*H{On{Jkl
zwtfMZ18_Wgkw5YYy>SV$wGTOxsW
z_H)Db7rDPIxvB4o@Dh}rxg{3+NkLn>0W$|OU&Ay5Hb7IsAp57j@4bxKH@AVJ3|hGV
zSFT~Eh%j!7R4qrbWQ%DkI1}BBFd?NwWIt`PV`9tGCOxp6U4Fi?PS($j?J<9=zb*4_
zVgU>L|MV_sF}lG`(DqZH19Ho3i|YQb81|QRPQdZhePC&7&+P9(fTE;=NURPbS~4Gr6W3dc0-NOyWneV
z4Bmo2)unFm<}q5pVrowlJOyrL;9=>4TgbtCROJL3{_7ijQpMEad+@)1IjI;z0F1s3
zRCA2j$Zv8zxd~6G3k(@{_#q#K`yz(cV_>Nc{&0i~`+*FHGERq}B187Sr6coljK25M`6L{#
zLlo_oD&>VjUcgi_E-x{`I1@x+i)dx4vbmdzs{hj~rEl8y4>Ae)4g0@HI%RUG{+p%9
z8vdK7s_3spkts?wkujT0sPGKJ$|&6&d)orn!N7R&3pk*Q|N7|(uJF||93w^%27eG>
z{J*dfjvK)jEK7`VPb~PK9=PYU%m4Ne5BG4}rv2}7_fXGx@@l+4xj())-knVT9|4iZ
AF#rGn

literal 0
HcmV?d00001

diff --git a/class4gl/__pycache__/model.cpython-36.pyc b/class4gl/__pycache__/model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e0fc2f4cee0c8fa20820b403d5cd75c7bb16d59
GIT binary patch
literal 36319
zcmdUYd0<>ub@#mYX47c3B+J@tk7UQTyhyU*IEmv#a=gSz9OKwdoD4~4#=4`?jFyq!
zj4XK=QHm3wK)~!I+_C_`c`!{8_+9VDhg9P(Ec$`eKHB&6p)$f6Oo6
zKrA5NU@U~MIT@a+iPcO+Vi7~ftButvD^{ocDsYc4RuY@+x-1>pnZWtLg7z%&<24MEXaq`VRJA_ETx@lnI?A%0ddO#%$$$@lgx-gJFlN%fybUs%P4jdRdFp$rU
z73Pz6YGD77eJ4%~j7}!=`CaMBfyvD2fvKEIP4>^6E&EgXWMLyx&6_NcVb=hT3J!!{
zcA397g{3oQoD
z8w;vnmi(naJJM193IBP+epAW67%2HlM#Jp_J8v{%!Xy21|bBFZo-Ehg^J(Z!uglYJH{93VFh!hQOFO5ocf&mBhIyzjXmc#YLSvJ;WMTKN
zvXwlYFI$tjblE?X&x{M4>Wh@Em%8AHr0nP~i$qc&-f+o*2C4~{r44-cpR?aQA&
z@#~%%*9Coo2>6S-NZ3VMN77^R_fk`@!Eg#?%X+s7nzz_&qlsxEvRRkvD;
zZJ&8k;zz_&$hRF~n~sy3<3_^wu4R3E-=YOC6Y?;5pT?ZCHP*7<>Q
zK$gOzqpTJGIF2e~>4>uoDa_fDFOD8p=0uGhEn)6r;lM&4Bkv@!WhNH^9J|>2&{iVQR835_SG;vRX&6
zE+p-IIgp=^Cr3eHGB&=ClN@Td8OLqr@!#UFucaJvQY+V_Q-vc^jB-#HryNG41R-lDg7)nN&J-|e;%mZ}K0%~0Y0^mHnknVOkQ
zO@ZM~ss6Gtj#Z6`*;f&;$L*^D`WniCOd&Oux3|JpHfL11<^VYLLw0g1RSp$$GOo&D
z@aEGP)A_QW7#N+@bU9ea7g96%a-ALyC<(@#y@yGYSJ8`TAZgplvwdOv3OL1T&L*cO
zb&?haYS~YzAQl2jq#QVtoSaKxNMS4^Ih1QA)u~m<#r4cLZGv^{jiTyD7egO0!MwQ`PXqQm|YC7Q)s%p`d
zeGYd&ap^hZDCY^sdf6!02arHHT*yqN*jDA5)Iz2p*+T$fi!85>WzS8&n>llM`^>Pv
z!K$;0HM+#`s}z&s2zd}L1^JmeN;}W-#j)4}8p;ARl^@Vj0YEHpfB_W(#GnNXsT#nr
ziU8KAT2+^3%!sO2^@7!M$rG%Oi=JThSoWF)Yfvq!Rj@|2TD1w*RPkM<);PY+SPL>;OZuv2XS>{1&6yVYfYYt<&e
zb!s!fDZl_@DlowMfWZLU3JeC=HefKo
zwgZC!wgVUpuzp}Lzy^T902>4b18gU-oyrF|B=B-I27H(JT>O?Yw+K8c@F@b13A|O{aY^?C(uG0UP6C4gb{jAlV7CK<0d@-5DT#N7z&i!T
z1U^;ZT>|eG_(zD-34>9Z?P#T(hw+Y7;z$JsSOORfup}@TV5fn>02?I}4cn-+u@ab)
zl*a^5OQ>;#fI;{SFc@H2U@*WYfWZKp1O@|a3K$HqX<#tGa=>7++s|9}>&0aTE-=|0
z=Y#gE#bp*Qvua1dhHW;RKW9vY5V|b30vOZ*Z1D`|=$Z+I`M=CR=x4
z5p)DOJ*^tA!w2RKn_m+(+;f<)Mtq2Kf1#wVEG%;2#A0~W8Xs^--N3Kb3uDQTdT>sl
z)X(;f8EOwo9#+@rl6&0uiu<)ry{}tV?_cVWYFO$$;)L3_Bvim&=Y+amLhaX~u)DIL
z(g(n}-aWktikO`(fpt}=cLhhwpalYU3@vb|6hP^3VCkhLp3cyVK{xD@_TYlg_P}B;
z?GdowQTXHRGt^t>XS{IC@D*&>pXuhK
zb}8SzW`|FDRF~MN#D_F0ej>mTh7wG4_@q6buEXv1
zoe#`zI>i>=j&c89e0S9Oh@pj#IxV~XeAxaCd^kEx80_LG&zm^lo5Ci^tN!xMqU
z8hp32tr4d|J%u(1FThJR*?Z1mTw|1n?5)!J$5<$DVGnv`R^
zxwi0DCtu7+~)K1_SJ`fx!Sn
zZ^hYTJz31$IA?E9_HTf}0DB)W7+~)Q1_SH^z+ixV5Eu-w4*`P#_P4-bfPEMk46wfg
z1_SIPz+iwq2<$;M3iyz~kBaNZV26SK$AQ5B`vfo;V1Ezn@1-q1DfUmn{wcA4THt2{
zepcW=2>hJD&kOuVf&V1%VS!%|_(e(UOGpa_xsXSL0d@zzPX*sK
zVGu8T>zPL3+i)pMy8jI9Q3@aUZa!aUH{tso$G!>nT??Is@2b=}0~D`eF#_N3(f54a
zY_Cv_EeLY}uHSd!Y=!+U*ni;IA9KFFa3{wj?jq<+>pk>`oA>Y2`+wsv97$g;fB!z5
ze)X&P_x0h*`I8Rw1dfa#vG{zClt$9`XFu|T&&*6;r(H%6UAz)6bCBq*Zi1`aEyl{G
zW4Gh-jGZG+UWOa^*Y3uoyNj7{Gkw%W9=4t6$MOzx-u0S+%hOtUto74jr#e3wPXF}Q
z53IZQC+~J$K0chj{}a)7+2b!7PM>{z)6VeQo__Hb1u4Eh=`V
zckJ)}$#-|h+=lwju#;(FI6Zr9`kay!8f~8Tc$b%GHX->f8
zxi3Po6&nc)BJGGrp<{9ctMx|7AOYzQQ$-eE;({qJr2LJQX>D+uY
zhveq%3^q-sqo>bG24fjJUx?~Fw`8WVy%!zqW9~C?Bo>_?&y0>UI?kNNQZ{yqkhC93
z_b=&J?l82#`Bz)${Cw%)Q5`L{G
z;X9GAeTv{DKvDh9c~Y3q{qAAS5R!dc48JhslO*VRi0E@E6gX^jh(ssOwzuqI;);kV&17|
z-Q3q-4rbERIXeX%ilEAHzLx{(LR{yD@mABDz6G@Utv&0H&~&J=T?Buny`$z`SsBtk+@guF&I@w{aE
z;yIR^n^yhNQ>kcn4r8d0oJdJ3r*nluZVKg2_3OE2#uv(F{H%RB=Z7EVD3rs5oXcfn
z9!+7+&)B0J-<;2|TlRR)R)X87@!FZ7dB&JVo^enAzLSrWP&kXz
z6Axm8yfTMmuG%uME<+f5DL_aP37KuxkUF9E{}}1}A&&RY2-%TBmW_$Bk+!)u+P49e
z{ik!2#j+ojm@k_r2FsECPZ=7`P0b8leN{Pl>&`(~(d$#RhWDZG7l;Mmk)A61Pwo^r
z1W2cS;v{eg&>VwaK~muqX>w*f3H;^-9NtW2rpx}R(cBOq21hwKJqHVD^Zt2k=pzF{
zgt3nAM3jSrHut^dKx%5T9Kl&tems@5=TkUwnvaiV7RCzY5U_cyDH00W4MEs#WXzMT
z$vlp&^6|pi8H`@oN9V+QGE=alY=jL2dkKU{4+6tUK<3PC=xCfs&5yyPq%)As+-cc4
z4?9R6^Foh|h|CL3@spCoX%3j+y!H<1FG?^^6^pWUatP`dvji!CAa#jonIkv@P$Tj3
zbM{y{BIp^x1GDn6j>U1>auUElI|LX+C-Lh)ewa`d0j@4(Vc^DLVx@gF_VU
zm6Cx|Hq~r7D0P^XQIGDJpDkPSvnhgGZ0_)jhEv%lC9+BI5t|QBmK41*Y*dQ!rD_^kX;K|;{
z%3jZ~;gk)oC0R(>hiDBZXJ)7xW98?j?49C*m??WkukA7qNdn4&ad193y^=N-G@^Sz
z@W8Bmf=YiwXaaEvv;B04IVXhv2F^HE_gi#A$zU?)Fmun$+s(+?CX0fpgvqx*Z@1Eg
z+Gl0cPGUqP3piBEp!k?x=GZ7)a+qf-cUF$isQP1$j+Xr=Fqg`K6Z3MMcSI~l#A4iv
z(1(f`8)+DH>B)R9gG1euLuCWh@pL(`{|1Z)OkhH59$5tD=QEQ9cSv8Y>4!
z<1#(0Bl~VFo9P4PfSu=*ud(w?kZ3??2Fi^&rd^Q@PW(pCq@dMwie$n(H3|D<1|#=~
zhy&#sDUWWe`LzC?nv}0`7#(MvUJ7`NcQs4U7%f~
zW#g8z#nBOfZdJ}Mhj8r012l|E=R7T-3Zrv&xkmU~@oLzY&y$~+BU+=n?8|fnJpA)2(4x@8Z
zbCYxVzFJ{y%NCM|1%=Tbj|FDpxk<&(bZQ&#Xgf_)Ry91Z-R)jfuQVyCy
zZ{7&av?Q!sBG(p<=oC>7_*ZkrxxSlsLnV&m;>HMstw<2YYBpMNhT97Dwjt;QFqKxb
z*)Yv@W-!=Z-|lz)8?1GPiPR9sXhR6Uf1Mc#HyG_k8~$yw8jxqZztz~(iP$JjxVF=1
zFxzn=3(?FF~q`b%h5TQ9=4tJsN2y44od`JlBbsaxI1ElhU{N`OYURcD5+
zun~sBU<2x{F%656s<2;?OsG>ticaF4za1lvnrIOSUC=6m-~}BBY-&N9$VdGl`O-~m
z7-2iuz6w4e$0uwbcjDl9NW#`QJ`sD;@xft{_&_^CzIFDq93LDlh)caE9~`QPTZ8=o
zLN$7PaKs`$P0&w@q0Fn`-<)j$Y@MJQC^UX(7Ae54=0QU4yNVhbt3?ND3$Qkr=Iol1
z(SZX9YR##zDB{M%y@l?;THAdk2%(EspTw=laYcEkH^rQ2?}8ue-|eY?ZDB23*2zIr9S)sn>kwPLbL<4ID#UvP
z?(04A8r&9z-_PL}_4qY9ex2t{dm~h9c_a_b&UjY|LnPayR$U2oiFnU+FXF+lY;a%a
zxUW;q3=2IOgh2?_wS_6l7GmqsV%_N!#7S(nfK2C$5yu81U2JuIG-=jyN3q3mC3TKE
zRJ+*VI6CyW*zDLn0!wjKatv&P-XhXz$!40!51vw>EXDOsbTS%Hd@7_6zM300NzCoh
zUm1PH?hC?sGwjkCRul1f9+GXXlf~vV1gtbSoTgI?1vElxrf-90rmzH#m9f%c*|oP|
z9s;MLmAwvvKMsun))RX+u^wW7O01XI^N4LEc8=HvV#0HOQalJ`3r|?^D6RiQAZ(Nc
zPQdPBj2F?Z8yI-_0-DACEFENbq^(k^rK{xXs=H^cv|r4Kok*!Kpcl$WF{2hBNPy9U
zP9=7q9NtZ8%k?q_CZ8>`d`smPr1S{rQGOhV4}=mbWJ2P?SayhepjihJ2?a9{zHTW0
zaM=O*hE{aD(E!!62KpJ{Aj#ksl0;}s3)F-YAxSJrYF!mHSBs}YCb=0zBV_WKs2>Kn
z-W|i96mZ!W!@$Rsg*r5ej&{cxCr(zOk3$!jAkKxMss@ZEM73%f`nvAg1!M<0J8&n_
zion!Ls5+jO!l#}w=nB&ydt|N;l!CKH0n;{1tp1$rpv8qeDhujupN1<`isLQHzi3QD
znG-7D!9^&^LW#2krP0uYk$()|@I)Z@Q;old0Z96=kRy*R=YC6p-95EnkZ$
zW|DX
z-zI0+LQ?9Frg`CI-M!y)x8I)__U@WLI-K5n^b0S#>%V_C{5ZE2)9c>&qr?5b{Ta61
z-}bz3j92af;50(!XmS0K{YRqMrLF9{Oy>&Rb;fpUI;DDgiW`m}hTIpuHhS6E_Gl64
zwM0Y=5o{K)Z?$J{;myd_emTLr2;NHY5`woAyoEsIVs3EQ9O$-8)YlW+PVg55A|Aeh
zn8-kHBK8>o+)N6>@^suPU4mc
z=7x-7%nINf4MFPnU^G&RS4O)NE^?7@wbOMNgVbpwtS!K%pxRxihvG5QYm|{Mqy3!8
znUW3ZTO;}e7E2@Jaoo}%ci38i(MU*fL6~d=<@ed&cKmRui@VPVT{9h&ylZ3(2A8PW
zSGJfi^mwp?)1Be;=Re24Uk+nnNdHsk&wdYq3+`jU;u8v6Z}~brr~8GdjQ3
zEUv2PAG#t>rI)#dMs`tp`O7E@+Qyf+3kDgLELcA2VK9s>^a&_az81e-xNo+G5LZs5
z6@yMJFnvp6NoY6r1+iq52s3w&^mce6n14A!*GL*NX5yrXb@s;qNh3iug$2a53Bg%X
z(nY#k7)wEi`UQDk3+m+#BgrymL9=9Ra%`-IYA|nU5hvrQ$LDGZw+7{0&AP_hF*eBl
zP<^yR$|NC#!6Or=;NDpa`iU=;i*;LM*GMW(s$H^PWV3aTCw+ty_a^FTCPjkR)HHFhI1oa*2%1i$eMB)u9I+9u_YO&
z5*jJt?iw!%Ed~pl;L`$~D4a@j?Sf`eNY&zA8C+EzPO?q=F6d1KQD$JOelc9wlI<&O
zh5t4%>J4CCiEkI?wNW##)6Xg4LWacneqj@@z+_vHUr;ogpu>8}G@6_gcSwq>#D;4Q
zI^{w?(%Mm~5jwEr5+!NY;Y;BXs06N1;KrE(Cad`XTm!Bv(qp?-TFePEh%lgYIwt%%
zF44|_gD~~%XD8%N*oP1r%(!0NvzL4NV3*{01$=i)m|iE$m7Xxr!$Z$sjW8VNYfEAF
z^pf84Wv@VZP+^4I13%7zmBU?)aB@i_ehtFxh0foL@x4|?*maU_P|C4Z@L^pC)}Co+
zMB&z?_d7rfnR*`U5=iEuXRKil9P_!!Uu;B{mK|9-shZ)^+c)mo`Tph2eBwS
zH#HN*5p5C&C^(Yld7K@c%uM0j_u_vPR^(K;5KZGM1|*CqG1JF-p@=2T*
zDLKo-;dLH&Tjoypxh!sf@qYIdx+i)phwZ#Vh6m%zONL?zqkpM0_a%qz*we(3;c1+#
zPvJBumt&1|eMAm&^yDvIwdk0Qv!kM)B2%$n3r%yU-C}QFQcdiFPV(TT1e=w&`w70lWFICFu}_##VLjeSw=WWWh(LIa4-)$_!IucWLhvkt
z?-P8Cfcuj6M+v@4@CkxP2p%H%8o|d2zD^+Qh%l?d-jw0i7ttJRXg*#TkB=2%!TIgVqo!Uh$M{ymkcUB#&
z2C&hk!Eu2!=D0LEF1(|z!!$WAmRzS|tW}N+Z(!>%xRfWzdCE@`h&dHS|Fp+hCsh%VrjB*k!vwWCYB<5E)+&JQlQGaz9Y
zqEmE;4g1^V*|VLp`S$txmZbqP)TjAHp&Lg0no
z>;{QV&cY`fnswOFgjwVY-vy4NLF<6SCMC~a=DN6Lal<$zin{)l7@J&|N(?8A#Mlh|
z`0t|}TbLiOx$%PRy(YBSUAKl6+;GM7f^gKnUmiEOf*Y=kUJwqK=3F#MzG{^o0c(6K
zr&g-hshziGkDcP|?8^4xCY9*!FE*9%Pq;z!8OqpH7y!4|1dezWv2$=i3}X=+rI^*=
zju)}RvA9|_FSac_R9aowiQ3_@4~zkS-d|c>YL$B$Zz`?I8ri;5YiYG=S)*?W3?Z#G
zaNoFsdwXe330Lq-?IrNc7=3NR58Z|IFE6bE#|>BLn&K`8%DM|_T#z!aI^Soatdiei
zE5adv)rT52vwpz00&k%|c5harEq;u)fQ>oXj}@-S?k-%3I%0cTwSoU8ewA?JYs7|-
zywiZauy%(lAIM%^YDDX;LMl?jG02nIJ?DJcYjB&aMRneX`OA%HY?NbM;@jovY1(vO
zW9xqmw=30q|*uq&}=lJk@o$6}m=y1z`
zS%J}3Ed#dLowT}S@1rXlW$cU50g>1=6$shuDs&1sW5KLknqf)xI^31$EOll_a3`V;
zyJtcBI9p$JJ6rj~*?#moIB}=n&Be|I=o(`lyhrB2u%>uwt=xa;l)2=(t#jQ-O|!#l
zJ$w-YcOsCR*M)Ye-Bz8N&4E2iGdyAG0Ouf%+^v^**`xnB2QcAe9Y8zhY`Rg2NaaoMGizDP-3
zYJ+oW{aPn%tqz;LOtrA=@%;$D8Mj#;T6k1#M_)X$SdS7P;1dDP-JtETQ{s`;3FBYS
z`fU{#pV}e!eYVNnASW!Zye|zaE5=<;1~lmG3hj$W%LzK0XAd#=e_C_Aq7p>JD0k
z?LG!fv+EGDw7SPBdY&c#-{yN9jGIAvDEaey{i9ONc7o$_DB#x=%B6dCs
z=?(ITor6xxlTn#q9;=g6nI)^CgPRb
z9HI)iCEwRCS~%3?PA+8f6&bcT@js`&~+Ze^{)lzF^-WEh);t6){i-xd?UdW6?yYlQA!1lni{4szf}JgE*1WL|zlg
zO$0O%#zZU=iHu^D7Ry8g6PZgyFp;^U^xa5sg5Wj+nQ#NdxPz<3R6ns#5^(D2c_t0I
z0oJlLjkj=Q#`1{3{uBX~!R)^$*hz35!7xD-^9#y`7)qi6W`B;3`v|Tl_$M(Z
z9%K`FaWH1i4&uER06%2PK|I(dA9Ieb0OlDY0*Zt7CKTTu0*J}m3(1){6d+Z_Qb=Yd
zV@=ScjKhbPc+SqGV_NqtjxCK?peUbEMr*s`O^iZrCe~I}S)_N$j>}d~OzRuQ*<7(;
zQGY|Tjt1Qp79*lFn}s#A)~S(sOTXiODOGjT7N_^LGU;MZ)w>-CYB-i34uko
zpA!2Q07pwAX602f7KGyHWGWdmPtw@S=RI{mPGbz`TM^M`^cv5?yxN61h52DYcg`%f
zRQHxDs*jb?&ygR5k@0{K7j1E19vCgO7fXV3{WiyOA19lm;_~I!jo{BL{
zyawY^pa0i#Z1Pz-?T;_C5is8B@xUDBR0+E!aN~Hz8Nc3bh;vlc@az^}*@2QV*eK_E
z*lm*0Dd)Ht(fYiYQnNfc$EU4$rie1|6t@+xDMSmafwichKHn|yG!0`LkmI!d95ARW
z(cU5PI9?%JwD`~h-8vjOV^O2(l-CJ)-nE9WDB|16(__@f&6Seb?;RUAf!(~?nHSQa
zbSJqvc>B`)q@-b|k0494?NN^?La&3QAvS%YE(%vwH)*eUFT!!(jJt7BsYjPC17s?+
ziPU#Ja8Dia{2$A-tQ?LEya7J!wT&?U6X{)sbb3T$x-d1Isj@dNDBTLZB5kq>Y4!eo
zX(7hNQrmpqnBI+c$H+q{p6`anpz?^e-%{v<3rF1+=}l*Rv9y#eanj2eozin1a<0jd
zwnA7uPNjElWgL}2@5Siz+o~H*Jdx`E-xzS`Zl#rZqA9eQJ<1hfNsm^1SLh#}dZIil
zw6^23S+T6!WPRG8aQ4RV*$t}JePtH0xmUupaR#qsy^8NpK1hiT?#UapCrIM(=d1`k
zd8?@UXp#>4zl`j=Zxden@!HC>uuc}Z4AigbJ4hlAg=XzS^`ei~$q>r_a(LVSOz`If
zwFI&n7^LXJ>c5U|4-m)-A&Y~Y{WQ_-`2=z*(@CtEpoM_+-0mh|ne@|$jl{l9&_Eyu
zRNo{fXH%~t_AY`~6Z{#$YXQJf<|py?sBO~4A|TUa-%s!wfWDB{%Ja{q#>VXT!X;*o
z&f2Sp`0uFarAOwp!4uBhzO>o!Z#sr}kVMo_D*>m=rbcR{t^M
zm~q579&9tCI6&#ep$V0_0dWX|V-Ta$+GrHlRF|tII{Bk4GymAel_iU}AVK)~q;4~w
z)D1vkEhtaw;;E}eylc2nDqsqgf@GVVCv{(0;uJG*(1GbCWYl>U7lRKA5nV~@#fHat
zX(K!4gp|j3;pblxk`M4YA+=%=R3}2E0G2UK4d74=&^S~hYDy+Z5+1KzA#dk-TPJVl
zf!h`Hu7+gZPDq`%q>txsS4bbOp1O(cB6X{VWcp4>w{AF!sgb94BWT-D3CH%RV}LD<
zR_0T?>+sa>I&91ZFoDDF1omv^RA=&&d1^Oc*P+j~no~V78U36L&czG4ZzJP%`2)B%)p7+sn&|P!^P+F;euF*5ZHwv~yY4jxyuCcN
zq&JwdmhFHzy`i7l_4Wn^uROJj9uOW?CbMXR@u^*7$y2*dn?uL2k=zz~Z0S?Gx)1)>
zJhgj4tIOV`~24@m<#@
zBo@R;^nam$jMvkJ2!hapoKRmh0E1WQT~{GLrf4dgg&-th$|QjDh$}c;;rb|vfX6^?
zq9ex37bpR=ndQ6|Pt>yKIQ!s34vkz7^`<$aDE(S6nrm(pUL@j@GH@Q6aLu?3$D7?J
zLL8~`p0B;ac@@QXrz@u7u@AI{&-Z})_y|~;z9y~9FD!t_hfg4c{Y!#h5QvmXvZ5F5
z{~}gH@Ye*wg#0(L-w=@NaCPj>v7NYZI21F-E{EB*jN+9WvPt_)CLhP{_B?JO{*0YV
zo&$SF#&#(C)r>L)laW-g7gWSX&r(%}?&(S7DZFGNWwhAVXKITObjO(0HY
z0qO)KSc~RtE7k`S#L7A0YX&dsvYypd&}|=5%2?pwPTr4Y+RIu8*SUArcpC?Ak{iXaZk&TV
zE3>v6QQ3adi6wL=>W7xW-GC-AR>pKx?7S-wl{*4WWG_T6_i62W_~S`wIqkumDR91m
z&B7UwNE#}{d3X-9O7^R;V}LUwhN)R84B9c0trd9z)K}8f@iCY@ew1PB=`U$JG=yuQ
z0!xA~B~u-{5z<-*=QGww{N6z=@<#4{Rd>#m-7g7;p2c1Q=0CzAp9c8V5$DbZ*c;hD
z6A2wFt>nO(=__H>d*xmiazoB6m$-w@;8j4fM@SoNC~>AO8adpX;En_&Ny!GzC0Qw`}M%7uu
zPUm8iYCvt+C*0%M;EXnwZlkn|+paa}Raxbcb~E)0xDrX8%_ZnBX&22XBh{7?>v0)V
zfS<=78W1{eSXxg)Amk>kAD-P@YLzsdxUwF>kHk;L5d3xdr4Fm{@I3oKMvc_E4UDzk
zuR_k$t7f{;?m!MOo^g#(+(V;Ixztd`9L5wGdkq)ck$PJRI#IH3umvHo&jW3Uni6Dj
zDOq2sgPQn@9FtpNwjniWU&HJWckWm8BOZ2dF}K-2c(GAo=LjjYOoMui3u*6yj*8v^
z;hG9aVXLbwR%`t%*_Rv&TX|{B*X!;j_(mS*66O$R>@|1=6L>uA
zF7zEE&O2Z0Mr6rKAI9(ByyJ#?=O=MK_7yiE-uRN=rFah>7CZU9zk1Cl9*B;lyFWFH
z{~sQ%BENa|&%be{di_Y>F1r=<9S;SZ#g%(L6Kzi-UwsI66Dj``t+LH9O>BmMW!8+r
zEHRs)NU%sCe1hyP3$t*BZu121BX}A?i6BRiC!ok;&k+dUAzaBahUC}u6;8#^QZ3MW
zmOwa=o9QN-ACJ=Q=>&gF@H2vE5c~x_OzR|!}QDSC#
zDi#>Sy+oYd@e?x-IoIi;6J(*M!^V8|7rI
z&01&m;^gZHHc8sd-MFb9wptN)J50ObzWj}xDl0WNKv)P_sXq+9P2@<*kNS=ccwA?{
zUT)oRzh1cM0CXs2O$G&SlA9@^>9&$l2+JNl?mjS{P>7K1;p4U)_T?f4Y7%n3REhZZg+Cp6;=J-05aoJutmeBPk{KC57kPATkS9NAbgIhHx8oIAFLN!#c^1
zIAQ>UlmUnvEW%Sz-qH6xmdEV8bWFyBk=r|5LxDTFUMfF?;%Xr`aPVDTrzcsLCtGs2
zg^<%l%jtyKq-%?vbN1&JjL?9LGwPMJ>Jow%0K{C;
zBVAJ71(z#ecEemL{#VKA`qkjO4YmZOZ*0brXWI#SV1SbC!B;2+I4J0OvQC^qc;mi7
z_-fuDVtRN=0TAhd0)bPOl)`YKF6Namk|!wua^QKF>!xoXiA_=%k)N}2{E-%E(?~1Q
zW6mNkL`q?&|8PPtaklIE2fW59Ya2N!r=|5
zws015K=T>bb8Wu^ep)^6mBgM+AO|3i5)-cB8^nb7knM@;p~&CR<0OIbBhMvvh>5cJ
z_ALZQ34~XuZh`!k;Y@l6ixD6uoC$SZ><|Hu%{8mU{Rdk(lQCk#ii8)a){D
zL|%RvB{#wb_L&=@Q-j|;!9xd4#vlweXyhOG6WmKN{oKeln}d&-FAqk*j~q6xGp@(c
zO{>A%EC-C3xe5RLMigAgb;h8DTflAAqZ#kpNEOC}%%}w;jL8uo81&1psTgphVD8ro
zK^$VP(7gHt*99(%#ax>_2B)Bo>r+si4y}p9VNJ
zIk>#N1`+tL*AK+TX^S2+k{0EB8v;ovSGwS
zc?(DL+Min$7MM0|TkkNK78ZK#60NvI9haVpOONByTXE@iTsBCm8yp*1P>Wj!QufQl
z1|eT;T!t+y#@fb(%EEG`ZCsu$EYjLW#Q+PJDYT8uhy?*#+qQ{qn`0w8V1X&neqm#;gWz3$z|2bUhmlUi*3JSJ0P|Lj_sh>4m!3&
zVmsv6ZV=lIj_t764m-9R#df1(yGd*}JGLWYJL1@G5!)?}?Wov}I<}{X?U-Y`RcyD?
zCX~&6ARC%(eaDxpn(YCZDf@ZB)!qwKUo?G)*vARpK_JWQn~A-I;4K6%CwMEtD+u03
zz?DJYCw&*OcN4saK$d7(lV#afbgOO;ypPuR6Hr3e?@)-qE~2|I1P{{Vn*?_fe2m~*
z1g|1^HNl?|P#-~b4($7h$(k&SrYy`t*#Dd!4-mYLK$wYd6B90@l~@yjY(e~(*iQ(4
zN+4{&qr_z0msMQWb6MzR$(Pk$7Ij(AWsQFmV&IoO`6|jA7`gIfhW$qRn5rmG81-5D
zfRam_eHoUI~R3Y5UQlfY_|z#5al%96l(lE5mGz}k_(ijlxNk-+MZz?zW2N{|5k
zPk`ztK&0yHK8%8~#*Nq~wZKsyqk7zxmc1gJv-G$C;_45&c@G$4VQpTK-i
z9D~73PaKCi0do@OHkjLCPQly(b0kRu%q)xzlZPq5%)y+2nTJ_`ISW&Sc^XW~<+2v-F$@KLBS0uY
zn$~fG69l&rWC$h*gmacDA^Ncg=q8kEfY|p5UO;f3Kq!?Esh854BzPHtkgF=Ocn#fz
zL{!O!5Qg{CLr9%axd&){9f6QKA#g&>MAWEKw6`;e(8_lb6Y^IjTo2LhqXbpzCDip3
zbo(cQhY7ww@I`_z5qy>4YXpxFe4St^GrC+wtC>Ms2v(wKkJ95i1V12njNpd^E0MY%
z(L)I46U0Q~5mBg0Q~#YFKPQL~R7ICx&?@rPe-aZh}d=_LXQj`zC^?5Zp{~3xT{MxRG7Efq>UX^m~Fm#5x(Ik)VM<
zWOO-wmhF9!)_WO_GL+3#&t_kFmP7QK&khr;1Goz&zXt#pHTH(v!wum-_UV6X!>7WV
z!>zD)h}-FKOZb*>M>v9it$3R+7(N+37Oo9XNjjrQqVh*=#t|6NTB!W8GZakld&y8Q
z$I-jrJ_!HHxj!S&>}RBxTF$;X&g!v0W6B=7#w`!-_u!%<#It>b*ScF84|&3bJ$Q`=
zk9hD}4_@cN>pggb2XFM?O&)xe2XFS^EgrnpgRl1BZ617$2XFV_9Ui>XgLirGZV$fJ
zgRk@8>pghXgZFswUJt&(gKzZUmwE6_9(=P0-{QghJor`*zRiPg_uxA`c)tf9@Zf_U
ze5VH=^5B9gJ0>vukzqmd+%M`-tqMJN(Cx)=gF4m~^b(#<
zNP3m7b7lD$7w1<3@!@(OwiD%O^8#3P*I3og@Lo6Pa8y!oarf&@yXscOs{VA-?w0eg
zPF(*qb-()?;zzMG+{sGD2%bv7>a4haaS8b->&c}fZ0IEgXyuRYBQTd*hjzn%6xqlh
zzm;|9+psB!TKMH=LPH=nJb_yR{Q?65@jIh%4+#tl#Cwx)i3qF}SSJu~kH8(T!2m*c
z5wJ<%DuK-cTLiWW#QPcu(;W87bhmk^x5q;q;n*`#eID|hU
z5VQ>VQJ11U#qQuB69MTQx?i|n{cxC2wgI}`MLu8-3y;?A>)$*#+#iIDK|-(lco(uznXLXQ5G
zzQXiBmzYpHkyff?@b{8Ye$7;VLm=|VG8siEs>nghWEASv=r^(7h`1LKHb?-rf@96U
z7KpEg51`7C0FMRaXdoD<*v%j=M?0=n|K-@y_{-Bb=74;{5Rir0S^S
z;;#J6T;UHQYNSVftgJ)S#J(Co!I7IeD}sMHFpY-?&+2nOIhfmrDCOV`UZhCo<(_}6
zM&EsikI#E<7HvW>dlx|sfym;W#2N`22q+N<8MT@1N<pBp^ksNr@od24P
Yu-^|QnXo^+mU2RaD@&~O%{PqyKlMa}-T(jq

literal 0
HcmV?d00001

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
new file mode 100644
index 0000000..7baaa51
--- /dev/null
+++ b/class4gl/class4gl.py
@@ -0,0 +1,1611 @@
+# -*- coding: utf-8 -*-
+
+"""
+
+Created on Mon Jan 29 12:33:51 2018
+
+Module file for class4gl, which extends the class-model to be able to take
+global air profiles as input. It exists of:
+
+CLASSES:
+    - an input object, namely class4gl_input. It includes:
+        - a function to read Wyoming sounding data from a wyoming stream object
+        - a function to read global data from a globaldata library object 
+    - the model object: class4gl
+    - ....    
+
+DEPENDENCIES:
+    - xarray
+    - numpy
+    - data_global
+    - Pysolar
+    - yaml
+
+@author: Hendrik Wouters
+
+"""
+
+
+
+""" Setup of envirnoment """
+
+# Standard modules of the standard class boundary-layer model
+from model import model
+from model import model_output as class4gl_output
+from model import model_input
+from model import qsat
+#from data_soundings import wyoming 
+import Pysolar
+import yaml
+import logging
+import warnings
+import pytz
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
+
+# Generic Python Packages
+import numpy as np
+import datetime as dt
+import pandas as pd
+import xarray as xr
+import io
+#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
+from data_global import data_global
+grav = 9.81
+
class generic_input(object):
    """Bare container object; callers attach arbitrary input attributes to it."""

    def __init__(self):
        # Marker flag indicating that the container has been initialized.
        self.init = True
+
+
# Units of every variable handled by CLASS(4GL); all units used in the model
# should be registered in this mapping.
units = dict(
    h='m',
    theta='K',
    q='kg/kg',
    cc='-',
    cveg='-',
    wg='m3 m-3',   # (an older revision used 'kg/kg' here)
    w2='m3 m-3',
    Tsoil='K',
    T2='K',
    z0m='m',
    alpha='-',
    LAI='-',
    dhdt='m/h',
    dthetadt='K/h',
    dqdt='kg/kg/h',
    BR='-',
    EF='-',
)
+
+class class4gl_input(object):
+# this was the way it was defined previously.
+#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+
+    def __init__(self,set_pars_defaults=True,debug_level=None):
+
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        print('hello')
+        self.logger = logging.getLogger('class4gl_input')
+        print(self.logger)
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # # create logger
+        # self.logger = logging.getLogger('class4gl_input')
+        # self.logger.setLevel(debug_level)
+
+        # # create console handler and set level to debug
+        # ch = logging.StreamHandler()
+        # ch.setLevel(debug_level)
+
+        # # create formatter
+        # formatter = logging.Formatter('%(asctime)s - \
+        #                                %(name)s - \
+        #                                %(levelname)s - \
+        #                                %(message)s')
+        # add formatter to ch
+        # ch.setFormatter(formatter)
+     
+        # # add ch to logger
+        # self.logger.addHandler(ch)
+
+        # """ end set up logger """
+
+
+
+        # these are the standard model input single-value parameters for class
+        self.pars = model_input()
+
+        # diagnostic parameters of the initial profile
+        self.diag = dict()
+
+        # In this variable, we keep track of the different parameters from where it originates from. 
+        self.sources = {}
+
+        if set_pars_defaults:
+            self.set_pars_defaults()
+
    def set_pars_defaults(self):

        """ 
        Fill self.pars with the default CLASS(4GL) single-value input
        parameters (time stepping, mixed-layer, surface-layer, radiation and
        land-surface settings).  They are registered through self.update()
        under the source name 'defaults', so later data sources may overwrite
        them while their origin stays traceable in self.sources.
        """
        defaults = dict( 
        dt         = 60.    , # time step [s] 
        runtime    = 6*3600 ,  # total run time [s]
        
        # mixed-layer input
        sw_ml      = True   ,  # mixed-layer model switch
        sw_shearwe = False  ,  # shear growth mixed-layer switch
        sw_fixft   = False  ,  # Fix the free-troposphere switch
        h          = 200.   ,  # initial ABL height [m]
        Ps         = 101300.,  # surface pressure [Pa]
        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
        
        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
        dtheta     = 1.     ,  # initial temperature jump at h [K]
        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
        advtheta   = 0.     ,  # advection of heat [K s-1]
        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
        
        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
        
        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
        sw_wind    = True  ,  # prognostic wind switch
        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
        du         = 0.     ,  # initial u-wind jump at h [m s-1]
        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
        advu       = 0.     ,  # advection of u-wind [m s-2]
        v          = 0.0    , # initial mixed-layer v-wind speed [m s-1]
        dv         = 0.0    ,  # initial v-wind jump at h [m s-1]
        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
        advv       = 0.     ,  # advection of v-wind [m s-2]
        sw_sl      = True   , # surface layer switch
        ustar      = 0.3    ,  # surface friction velocity [m s-1]
        z0m        = 0.02   ,  # roughness length for momentum [m]
        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
        sw_rad     = True   , # radiation switch
        lat        = 51.97  ,  # latitude [deg]
        lon        = -4.93  ,  # longitude [deg]
        doy        = 268.   ,  # day of the year [-]
        tstart     = 6.8    ,  # time of the day [h UTC]
        cc         = 0.0    ,  # cloud cover fraction [-]
        Q          = 400.   ,  # net radiation [W m-2] 
        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
        cveg       = 0.85   ,  # vegetation fraction [-]
        Tsoil      = 295.   ,  # temperature top soil layer [K]
        Ts         = 295.   ,    # initial surface temperature [K]
        T2         = 296.   ,  # temperature deeper soil layer [K]
        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
        p          = 4.     ,  # Clapp and Hornberger retention curve parameter p
        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
        wfc        = 0.323  ,  # volumetric water content field capacity [-]
        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
        C1sat      = 0.132  ,  # presumably a force-restore soil-moisture coefficient -- TODO confirm
        C2ref      = 1.8    ,  # presumably a force-restore soil-moisture coefficient -- TODO confirm
        LAI        = 2.     ,  # leaf area index [-]
        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
        rssoilmin  = 50.    ,  # minimum resistance soil evaporation [s m-1]
        alpha      = 0.25   ,  # surface albedo [-]
        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
        sw_cu      = False  ,  # Cumulus parameterization switch
        dz_h       = 150.   ,  # Transition layer thickness [m]
        cala       = None   ,  # soil heat conductivity [W/(K*m)]
        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
        sw_ls      = True   ,  # land-surface scheme switch (cf. ls_type) -- TODO confirm
        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsidence) fields as input from eg., ERA-INTERIM
        sw_lit     = False,
        )
        # copy the defaults into a model_input container and register them
        # under the source name 'defaults'
        pars = model_input()
        for key in defaults:
            pars.__dict__[key] = defaults[key]
        
        self.update(source='defaults',pars=pars)
+        
+    def clear(self):
+        """ this procudure clears the class4gl_input """
+
+        for key in list(self.__dict__.keys()):
+            del(self.__dict__[key])
+        self.__init__()
+
+    def dump(self,file):
+        """ this procedure dumps the class4gl_input object into a yaml file
+            
+            Input: 
+                - self.__dict__ (internal): the dictionary from which we read 
+            Output:
+                - file: All the parameters in self.__init__() are written to
+                the yaml file, including pars, air_ap, sources etc.
+        """
+        file.write('---\n')
+        index = file.tell()
+        file.write('# CLASS4GL input; format version: 0.1\n')
+
+        # write out the position of the current record
+        yaml.dump({'index':index}, file, default_flow_style=False)
+
+        # we do not include the none values
+        for key,data in self.__dict__.items():
+            #if ((type(data) == model_input) or (type(class4gl_input):
+            if key == 'pars':
+
+                pars = {'pars' : self.__dict__['pars'].__dict__}
+                parsout = {}
+                for key in pars.keys():
+                    if pars[key] is not None:
+                        parsout[key] = pars[key]
+
+                yaml.dump(parsout, file, default_flow_style=False)
+            elif type(data) == dict:
+                if key == 'sources':
+                    # in case of sources, we want to have a
+                    # condensed list format as well, so we leave out
+                    # 'default_flow_style=False'
+                    yaml.dump({key : data}, file)
+                else: 
+                    yaml.dump({key : data}, file,
+                              default_flow_style=False)
+            elif type(data) == pd.DataFrame:
+                # in case of dataframes (for profiles), we want to have a
+                # condensed list format as well, so we leave out
+                # 'default_flow_style=False'
+                yaml.dump({key: data.to_dict(orient='list')},file)
+
+                # # these are trials to get it into a more human-readable
+                # fixed-width format, but it is too complex
+                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
+                #file.write(stream)
+                
+                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
+                #file.write(key+': !!str |\n')
+                #file.write(str(data)+'\n')
+       
+    def load_yaml_dict(self,yaml_dict,reset=True):
+        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
+            
+            Input: 
+                - yaml_dict: the dictionary from which we read 
+                - reset: reset data before reading        
+            Output:
+                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
+        """
+        
+        if reset:
+            for key in list(self.__dict__.keys()):
+                del(self.__dict__[key])
+            self.__init__()
+
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                self.__dict__[key] = model_input()
+                self.__dict__[key].__dict__ = data
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            elif key == 'sources':
+                self.__dict__[key] = data
+            elif key == 'diag':
+                self.__dict__[key] = data
+            else: 
+                warnings.warn("Key '"+key+"' may not be implemented.")
+                self.__dict__[key] = data
+
+    def update(self,source,**kwargs):
+        """ this procedure is to make updates of input parameters and tracking
+        of their source more convenient. It implements the assignment of
+        parameter source/sensitivity experiment IDs ('eg.,
+        'defaults', 'sounding balloon', any satellite information, climate
+        models, sensitivity tests etc.). These are all stored in a convenient
+        way with as class4gl_input.sources.  This way, the user can always consult with
+        from where parameters data originates from.  
+        
+        Input:
+            - source:    name of the underlying dataset
+            - **kwargs: a dictionary of data input, for which the key values
+            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
+            the values is a again a dictionary/dataframe of datakeys/columns
+            ('wg','PRES','datetime', ...) and datavalues (either single values,
+            profiles ...), eg., 
+
+                pars = {'wg': 0.007  , 'w2', 0.005}
+                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
+                                     300.,...]}
+            
+        Output:
+            - self.__dict__[datatype] : object to which the parameters are
+                                        assigned. They can be consulted with
+                                        self.pars, self.profiles, etc.
+                                        
+            - self.sources[source] : It supplements the overview overview of
+                                     data sources can be consulted with
+                                     self.sources. The structure is as follows:
+                                     as:
+                self.sources = { 
+                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
+                'GLEAM' :  ['pars:wg','pars:w2', ...],
+                 ...
+                }
+        
+        """
+
+        #print(source,kwargs)
+
+        for key,data in kwargs.items():
+
+            #print(key)
+            # if the key is not in class4gl_input object, then just add it. In
+            # that case, the update procedures below will just overwrite it 
+            if key not in self.__dict__:
+                self.__dict__[key] = data
+
+
+            
+
+            #... we do an additional check to see whether there is a type
+            # match. I not then raise a key error
+            if (type(data) != type(self.__dict__[key]) \
+                # we allow dict input for model_input pars
+                and not ((key == 'pars') and (type(data) == dict) and \
+                (type(self.__dict__[key]) == model_input))):
+
+                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
+
+
+            # This variable keeps track of the added data that is supplemented
+            # by the current source. We add this to class4gl_input.sources
+            datakeys = []
+
+            #... and we update the class4gl_input data, and this depends on the
+            # data type
+
+            if type(self.__dict__[key]) == pd.DataFrame:
+                # If the data type is a dataframe, then we update the columns
+                for column in list(data.columns):
+                    #print(column)
+                    self.__dict__[key][column] = data[column]
+                    datakeys.append(column)
+                    
+
+            elif type(self.__dict__[key]) == model_input:
+                # if the data type is a model_input, then we update its internal
+                # dictionary of parameters
+                if type(data) == model_input:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data.__dict__}
+                    datakeys = list(data.__dict__.keys())
+                elif type(data) == dict:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data}
+                    datakeys = list(data.keys())
+                else:
+                    raise TypeError('input key '+key+' is not of the same type\
+                                    as the one in the class4gl_object')
+
+            elif type(self.__dict__[key]) == dict:
+                # if the data type is a dictionary, we update the
+                # dictionary 
+                self.__dict__[key] = {self.__dict__[key] , data}
+                datakeys = list(data.keys())
+
+
+            # if source entry is not existing yet, we add it
+            if source not in self.sources.keys():
+                self.sources[source] = []
+
+
+            # self.logger.debug('updating section "'+\
+            #                  key+' ('+' '.join(datakeys)+')'\
+            #                  '" from source \
+            #                  "'+source+'"')
+
+            # Update the source dictionary: add the provided data keys to the
+            # specified source list
+            for datakey in datakeys:
+                # At first, remove the occurences of the keys in the other
+                # source lists
+                for sourcekey,sourcelist in self.sources.items():
+                    if key+':'+datakey in sourcelist:
+                        self.sources[sourcekey].remove(key+':'+datakey)
+                # Afterwards, add it to the current source list
+                self.sources[source].append(key+':'+datakey)
+
+
+        # # in case the datatype is a class4gl_input_pars, we update its keys
+        # # according to **kwargs dictionary
+        # if type(self.__dict__[datatype]) == class4gl_input_pars:
+        #     # add the data parameters to the datatype object dictionary of the
+        #     # datatype
+        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
+        #                                        **kwargs}
+        # # in case, the datatype reflects a dataframe, we update the columns according
+        # # to the *args list
+        # elif type(self.__dict__[datatype]) == pd.DataFrame:
+        #     for dataframe in args:
+        #         for column in list(dataframe.columns):
+        #             self.__dict__[datatype][column] = dataframe[column]
+        
+
+    def get_profile(self,IOBJ, *args, **argv):
+        # if type(IOBJ) == wyoming:
+        self.get_profile_wyoming(IOBJ,*args,**argv)
+        # else:
+        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
+        
+    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
+        """ 
+            Purpose: 
+                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
+
+            Input:
+                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
+                function will take the profile at the stream's current
+                position. 
+                2. air_ap_mode: which air profile do we take? 
+                    - b : best
+                    - l : according to lower limit for the mixed-layer height
+                            estimate
+                    - u : according to upper limit for the mixed-layer height
+                            estimate
+
+
+            Output:
+                1. all single-value parameters are stored in the
+                   class4gl_input.pars object
+                2. the souding profiles are stored in the in the
+                   class4gl_input.air_balloon dataframe
+                3. modified sounding profiles for which the mixed layer height
+                   is fitted
+                4. ...
+
+        """
+
+
+        # Raise an error in case the input stream is not the correct object
+        # if type(wy_strm) is not wyoming:
+        #    raise TypeError('Not a wyoming type input stream')
+
+        # Let's tell the class_input object that it is a Wyoming fit type
+        self.air_ap_type = 'wyoming'
+        # ... and which mode of fitting we apply
+        self.air_ap_mode = air_ap_mode
+
+        """ Temporary variables used for output """
+        # single value parameters derived from the sounding profile
+        dpars = dict()
+        # profile values
+        air_balloon = pd.DataFrame()
+        # fitted profile values
+        air_ap = pd.DataFrame()
+        
+        string = wy_strm.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = wy_strm.current.find_next('pre').find_next('pre').text
+        
+        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
+        dpars = {**dpars,
+                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
+               }
+        
+        # we get weird output when it's a numpy Timestamp, so we convert it to
+        # pd.datetime type
+
+        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
+        dpars['STNID'] = dpars['Station number']
+
+        # altitude above ground level
+        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
+        # absolute humidity in g/kg
+        air_balloon['q']= (air_balloon.MIXR/1000.) \
+                              / \
+                             (air_balloon.MIXR/1000.+1.)
+        # convert wind speed from knots to m/s
+        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
+        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+        
+        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
+        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
+
+        
+
+        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+
+        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
+        air_balloon['p'] = air_balloon.PRES*100.
+
+
+        # Therefore, determine the sounding that are valid for 'any' column 
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        #is_valid = (air_balloon.z >= 0)
+        # # this is an alternative pipe/numpy method
+        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
+        valid_indices = air_balloon.index[is_valid].values
+        print(valid_indices)
+
+        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+
+        air_balloon['t'] = air_balloon['TEMP']+273.15
+        air_balloon['theta'] = (air_balloon.t) * \
+                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
+        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
+
+        if len(valid_indices) > 0:
+            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
+            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            
+            # the final mixed-layer height that will be used by class. We round it
+            # to 1 decimal so that we get a clean yaml output format
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+
+
+        if np.isnan(dpars['h']):
+            dpars['Ps'] = np.nan
+
+
+
+
+        if ~np.isnan(dpars['h']):
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u 
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+            
+
+
+
+        # First 3 data points of the mixed-layer fit. We create a empty head
+        # first
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+        
+        #calculate mixed-layer jump ( this should be larger than 0.1)
+        
+        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        air_ap_head['HGHT'] = air_ap_head['z'] \
+                                + \
+                                np.round(dpars[ 'Station elevation'],1)
+        
+        # make a row object for defining the jump
+        jump = air_ap_head.iloc[0] * np.nan
+            
+        if air_ap_tail.shape[0] > 1:
+
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = dpars['theta']
+        z_low =     dpars['h']
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                (z_mean > (z_low+10.)) and \
+                (theta_mean > (theta_low+0.2) ) and \
+                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+
+
+
+
+
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        #print(air_ap['PRES'].iloc[0])
+
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+
+        
+        dpars['lat'] = dpars['Station latitude']
+        dpars['latitude'] = dpars['lat']
+        
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        dpars['longitude'] = dpars['Station longitude']
+        
+        dpars['ldatetime'] = dpars['datetime'] \
+                            + \
+                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+
+        # # we make a pars object that is similar to the destination object
+        # pars = model_input()
+        # for key,value in dpars.items():
+        #     pars.__dict__[key] = value
+
+
+        # we round the columns to a specified decimal, so that we get a clean
+        # output format for yaml
+        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
+                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
+                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
+# 
+        for column,decimal in decimals.items():
+            air_balloon[column] = air_balloon[column].round(decimal)
+            air_ap[column] = air_ap[column].round(decimal)
+
+        self.update(source='wyoming',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+
+        
+    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
+    
+        """
+        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
+                 according to the position (lat lon) and the class datetime and timespan
+                 globaldata should be a globaldata multifile object
+        
+        Input: 
+            - globaldata: this is the library object
+            - only_keys: only extract specified keys
+            - exclude_keys: do not inherit specified keys
+        """
+        classdatetime      = np.datetime64(self.pars.datetime_daylight)
+        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
+                                           + \
+                                           dt.timedelta(seconds=self.pars.runtime)\
+                                          )
+
+
+        # # list of variables that we get from global ground data
+        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
+        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
+        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
+        #                 'texture', 'itex', 'isoil', 'BR',
+        #                 'b', 'cveg',
+        #                 'C1sat', 
+        #                 'C2ref', 'p', 'a',
+        #                 ] #globaldata.datasets.keys():
+
+        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
+        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
+
+
+        if type(globaldata) is not data_global:
+            raise TypeError("Wrong type of input library") 
+
+        # by default, we get all dataset keys
+        keys = list(globaldata.datasets.keys())
+
+        # We add LAI manually, because it is not listed in the datasets and
+        #they its retreival is hard coded below based on LAIpixel and cveg
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            keys.append('LAI')
+
+        # # In case there is surface pressure, we also calculate the half-level
+        # # and full-level pressure fields
+        # if ('sp' in keys):
+        #     keys.append('pfull')
+        #     keys.append('phalf')
+
+        # If specified, we only take the keys that are in only_keys
+        if only_keys is not None:
+            for key in keys:
+                if key not in only_keys:
+                    keys.remove(key)
+                
+        # If specified, we take out keys that are in exclude keys
+        if exclude_keys is not None:
+            for key in keys:
+                if key in exclude_keys:
+                    keys.remove(key)
+
+        # we set everything to nan first in the pars section (non-profile parameters
+        # without lev argument), so that we can check afterwards whether the
+        # data is well-fetched or not.
+        for key in keys:
+            if not ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None) and \
+                ('lev' in globaldata.datasets[key].page[key].dims)):
+                self.update(source='globaldata',pars={key:np.nan})
+            # # we do not check profile input for now. We assume it is
+            # # available
+            #else:
+            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
+
+        self.logger.debug('getting keys "'+', '.join(keys)+'\
+                          from global data')
+
+        for key in keys:
+            # If we find it, then we obtain the variables
+            if ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None)):
+
+                # check first whether the dataset has a height coordinate (3d space)
+                if 'lev' in globaldata.datasets[key].page[key].dims:
+
+                    # first, we browse to the correct file that has the current time
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+                        globaldata.datasets[key].browse_page(time=classdatetime)
+
+                    
+                    if (globaldata.datasets[key].page is not None):
+                        # find longitude and latitude coordinates
+                        ilats = (np.abs(globaldata.datasets[key].page.lat -
+                                        self.pars.latitude) < 0.5)
+                        ilons = (np.abs(globaldata.datasets[key].page.lon -
+                                        self.pars.longitude) < 0.5)
+                        
+                        # if we have a time dimension, then we look up the required timesteps during the class simulation
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            itimes = ((globaldata.datasets[key].page.time >= \
+                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
+
+                            # In case we didn't find any correct time, we take the
+                            # closest one.
+                            if np.sum(itimes) == 0.:
+
+
+                                classdatetimemean = \
+                                    np.datetime64(self.pars.datetime_daylight + \
+                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
+                                                ))
+
+                                dstimes = globaldata.datasets[key].page.time
+                                time = dstimes.sel(time=classdatetimemean,method='nearest')
+                                itimes = (globaldata.datasets[key].page.time ==
+                                          time)
+                                
+                        else:
+                            # we don't have a time coordinate so it doesn't matter
+                            # what itimes is
+                            itimes = 0
+
+                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
+
+                        # over which dimensions we take a mean:
+                        dims = globaldata.datasets[key].page[key].dims
+                        namesmean = list(dims)
+                        namesmean.remove('lev')
+                        idxmean = [dims.index(namemean) for namemean in namesmean]
+                        
+                        value = \
+                        globaldata.datasets[key].page[key].isel(time=itimes,
+                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
+
+                        # Ideally, source should be equal to the datakey of globaldata.library 
+                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
+                        #  but therefore the globaldata class requires a revision to make this work
+                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
+
+                else:
+                    # this procedure is for reading the ground fields (2d space). 
+                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
+
+    
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+    
+                       # first, we browse to the correct file
+                       #print(key)
+                       globaldata.datasets[key].browse_page(time=classdatetime)
+    
+                    if globaldata.datasets[key].page is not None:
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - self.pars.latitude))
+                        ilat = np.where((DIST) == np.min(DIST))[0][0]
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - self.pars.longitude))
+                        ilon = np.where((DIST) == np.min(DIST))[0][0]
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - (self.pars.latitude + 0.5)))
+                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmax = ilat
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - (self.pars.longitude  + 0.5)))
+                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmax = ilon
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lat.values\
+                                - (self.pars.latitude - 0.5)))
+                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmin = ilat
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lon.values\
+                                - (self.pars.longitude  - 0.5)))
+                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmin = ilon        
+                        
+                        if ilatmin < ilatmax:
+                            ilatrange = range(ilatmin,ilatmax+1)
+                        else:
+                            ilatrange = range(ilatmax,ilatmin+1)
+                            
+                        if ilonmin < ilonmax:
+                            ilonrange = range(ilonmin,ilonmax+1)
+                        else:
+                            ilonrange = range(ilonmax,ilonmin+1)     
+                            
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                            
+                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                idatetime += 1
+                            
+                            classdatetimeend = np.datetime64(\
+                                                             self.pars.datetime +\
+                                                             dt.timedelta(seconds=self.pars.runtime)\
+                                                            ) 
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
+                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
+                                idatetimeend -= 1
+                            idatetime = np.min((idatetime,idatetimeend))
+                            #for gleam, we take the previous day values
+                            if key in ['wg', 'w2']:
+                                idatetime = idatetime - 1
+                                idatetimeend = idatetimeend - 1
+
+                            # in case of soil temperature, we take the exact
+                            # timing (which is the morning)
+                            if key in ['Tsoil','T2']:
+                                idatetimeend = idatetime
+                            
+                            idts = range(idatetime,idatetimeend+1)
+                            
+                            count = 0
+                            self.__dict__[key] = 0.
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    for iidts in idts:
+                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
+                                        count += 1
+                            value = value/count
+                            self.update(source='globaldata',pars={key:value.item()})
+                                
+                        else:
+                                
+                            count = 0
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
+                                    count += 1
+                            value = value/count                        
+
+                            self.update(source='globaldata',pars={key:value.item()})
+
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            self.logger.debug('also update LAI based on LAIpixel and cveg') 
+            # I suppose LAI pixel is already determined in the previous
+            # procedure. Anyway...
+            key = 'LAIpixel'
+
+            if globaldata.datasets[key].page is not None:
+                # first, we browse to the correct file that has the current time
+                if 'time' in list(globaldata.datasets[key].page[key].dims):
+                    globaldata.datasets[key].browse_page(time=classdatetime)
+            
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - self.pars.latitude))
+                ilat = np.where((DIST) == np.min(DIST))[0][0]
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - self.pars.longitude))
+                ilon = np.where((DIST) == np.min(DIST))[0][0]
+                 
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude + 0.5)))
+                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmax = ilat
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values \
+                        - (self.pars.longitude  + 0.5)))
+                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmax = ilon
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude - 0.5)))
+                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmin = ilat
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - (self.pars.longitude  - 0.5)))
+                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmin = ilon        
+                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                
+                
+                if ilatmin < ilatmax:
+                    ilatrange = range(ilatmin,ilatmax+1)
+                else:
+                    ilatrange = range(ilatmax,ilatmin+1)
+                    
+                if ilonmin < ilonmax:
+                    ilonrange = range(ilonmin,ilonmax+1)
+                else:
+                    ilonrange = range(ilonmax,ilonmin+1)           
+                
+                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
+                LAIpixel = 0.
+                count = 0
+                for iilat in [ilat]: #ilatrange
+                    for iilon in [ilon]: #ilonrange
+                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
+                        
+                                        
+                        # if np.isnan(tarray[idatetime]):
+                        #     print("interpolating GIMMS LAIpixel nan value")
+                        #     
+                        #     mask = np.isnan(tarray)
+                        #     
+                        #     #replace each nan value with a interpolated value
+                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+                        #         
+                        #     else:
+                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
+                    
+                        #         tarray *= np.nan 
+                        
+                        count += 1
+                        #tarray_res += tarray
+                LAIpixel = LAIpixel/count
+                
+                count = 0
+                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
+  
+                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
+                #print('LAIpixel:',self.__dict__['LAIpixel'])
+                #print('cveg:',self.__dict__['cveg'])
+                
+                # finally, we rescale the LAI according to the vegetation
+                # fraction
+                value = 0. 
+                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
+                   value =self.pars.LAIpixel/self.pars.cveg
+                else:
+                    # in case of small vegetation fraction, we take just a standard 
+                    # LAI value. It doesn't have a big influence anyway for
+                    # small vegetation
+                    value = 2.
+                #print('LAI:',self.__dict__['LAI'])
+                self.update(source='globaldata',pars={'LAI':value}) 
+
+
+        # in case we have 'sp', we also calculate the 3d pressure fields at
+        # full level and half level
+        if ('sp' in keys) and ('sp' in self.pars.__dict__):
+            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
+
+            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # hydrostatic thickness of each model layer
+            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
+            # # dz = rhodz/(R * T / pfull)
+
+
+            # # subsidence multiplied by density. We calculate the subsidence of
+            # # the in class itself
+            # wrho = np.zeros_like(phalf)
+            # wrho[-1] = 0. 
+
+            # for ihlev in range(0,wrho.shape[0]-1):
+            #     # subsidence multiplied by density is the integral of
+            #     # divergences multiplied by the layer thicknessies
+            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
+            #                     self.air_ac['divU_y'][ihlev:]) * \
+            #                    delpdgrav[ihlev:]).sum()
+
+
+            
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'p':list(pfull)}))
+            self.update(source='globaldata',\
+                        air_ach=pd.DataFrame({'p':list(phalf)}))
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
+            # self.update(source='globaldata',\
+            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
+
+    def check_source(self,source,check_only_sections=None):
+        """ this procedure checks whether data of a specified source is valid.
+
+        INPUT:
+            source: the data source we want to check
+            check_only_sections: a string or list with sections to be checked
+        OUTPUT:
+            returns True or False
+        """
+
+        # we set source ok to false as soon as we find a invalid input
+        source_ok = True
+
+        # convert to a single-item list in case of a string
+        check_only_sections_def = (([check_only_sections]) if \
+                                   type(check_only_sections) is str else \
+                                    check_only_sections)
+                                  
+        if source not in self.sources.keys():
+            self.logger.info('Source '+source+' does not exist')
+            source_ok = False
+
+        for sectiondatakey in self.sources[source]:                             
+            section,datakey = sectiondatakey.split(':')                         
+            if ((check_only_sections_def is None) or \
+                (section in check_only_sections_def)):                          
+                checkdatakeys = []
+                if type(self.__dict__[section]) is pd.DataFrame:
+                    checkdata = self.__dict__[section]
+                elif type(self.__dict__[section]) is model_input:
+                    checkdata = self.__dict__[section].__dict__
+
+                if (datakey not in checkdata):                              
+                    # self.logger.info('Expected key '+datakey+\
+                    #                  ' is not in parameter input')                        
+                    source_ok = False                                           
+                elif (checkdata[datakey] is None) or \
+                     (pd.isnull(checkdata[datakey]) is True):                    
+        
+                    # self.logger.info('Key value of "'+datakey+\
+                    #                  '" is invalid: ('+ \
+                    # str(self.__dict__[section].__dict__[datakey])+')')         
+                    source_ok = False
+
+        return source_ok
+
+    def check_source_globaldata(self):
+        """ this procedure checks whether all global parameter data is
+        available, according to the keys in the self.sources"""
+
+        source_globaldata_ok = True
+
+        #self.get_values_air_input()
+
+        # and now we can get the surface values
+        #class_settings = class4gl_input()
+        #class_settings.set_air_input(input_atm)
+        
+        # we only allow non-polar stations
+        if not (self.pars.lat <= 60.):
+            source_globaldata_ok = False
+            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+        
+        # check lat and lon
+        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
+            source_globaldata_ok = False
+            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
+            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
+        else:
+            # we only check the ground parameter data (pars section). The 
+            # profile data (air_ap section) are supposed to be valid in any 
+            # case.
+            source_ok = self.check_source(source='globaldata',\
+                                          check_only_sections=['air_ac',\
+                                                               'air_ap',\
+                                                               'pars'])
+            if not source_ok:
+                source_globaldata_ok = False
+        
+            # Additional check: we exclude desert-like
+            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
+                source_globaldata_ok = False
+                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
+                source_globaldata_ok = False
+                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
+            elif self.pars.cveg < 0.02:
+                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
+                source_globaldata_ok = False
+
+        return source_globaldata_ok
+
+
class c4gli_iterator():
    """Iterate over an entire CLASS4GL yaml file, yielding a class4gl_input
    object loaded from each record in turn.

    For background on writing such iterator classes, see:
    https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
    """
    def __init__(self,file):
        # keep a handle on the IO stream; yaml parses it lazily
        self.file = file
        self.yaml_generator = yaml.load_all(file)
        self.current_dict = {}
        self.current_class4gl_input = class4gl_input()
        _dummy = self.file.readline()  # skip the dummy separator line
        self.header = file.readline()
        expected = '# CLASS4GL record; format version: 0.1\n'
        if self.header != expected:
            raise NotImplementedError("Wrong format version: '" + self.header + "'")
    def __iter__(self):
        return self
    def __next__(self):
        # advance the yaml generator; StopIteration propagates naturally
        self.current_dict = next(self.yaml_generator)
        self.current_class4gl_input.load_yaml_dict(self.current_dict)
        return self.current_class4gl_input
+
+
+
+#get_cape and lift_parcel are adapted from the SkewT package
+    
class gl_dia(object):
    """Diagnostics helper for a model run."""

    def get_lifted_index(self,timestep=-1):
        """Compute the lifted index for the given output timestep and store
        the result on self.LI (relies on the module-level get_lifted_index
        helper)."""
        surface_pressure = self.input.Ps
        temperature_2m = self.out.T2m[timestep]
        humidity_2m = self.out.q[timestep]
        self.LI = get_lifted_index(surface_pressure,
                                   temperature_2m,
                                   humidity_2m,
                                   self.p_pro,
                                   self.theta_pro,
                                   endp=50000.)
+    
+#from SkewT
+#def get_lcl(startp,startt,startdp,nsteps=101):
+#    from numpy import interp
+#    #--------------------------------------------------------------------
+#    # Lift a parcel dry adiabatically from startp to LCL.
+#    # Init temp is startt in K, Init dew point is stwrtdp,
+#    # pressure levels are in Pa    
+#    #--------------------------------------------------------------------
+#
+#    assert startdp<=startt
+#
+#    if startdp==startt:
+#        return np.array([startp]),np.array([startt]),np.array([startdp]),
+#
+#    # Pres=linspace(startp,60000.,nsteps)
+#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
+#
+#    # Lift the dry parcel
+#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
+#    # Mixing ratio isopleth
+#    starte=VaporPressure(startdp)
+#    startw=MixRatio(starte,startp)
+#    e=Pres*startw/(.622+startw)
+#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
+#
+#    # Solve for the intersection of these lines (LCL).
+#    # interp requires the x argument (argument 2)
+#    # to be ascending in order!
+#    P_lcl=interp(0.,T_iso-T_dry,Pres)
+#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
+#
+#    # # presdry=linspace(startp,P_lcl)
+#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
+#
+#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
+#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
+#
+#    return P_lcl,T_lcl
+
+
+
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """ Calculate mixed-layer height from temperature and wind speed profile
        with the bulk-Richardson method.

        Input:
            HAGL: height coordinates [m]
            THTV: virtual potential temperature profile [K]
            WSPD: wind speed profile [m/s]
            RiBc: critical bulk Richardson number for the best guess
            RiBce: alternative critical bulk Richardson number, used to
                   estimate the uncertainty range

        Output:
            BLH: best-guess mixed-layer height
            BLHu: upper limit of mixed-layer height
            BLHl: lower limit of mixed-layer height
    """

    eps = 2.  # security margin [m] around the interpolation levels

    # reference virtual potential temperature: first non-NaN level
    ivalid = np.where(~np.isnan(THTV))[0]
    if len(ivalid) > 0:
        THTV_0 = THTV[ivalid[0]]
    else:
        THTV_0 = np.nan

    # bulk Richardson number profile; wind speed is clipped from below to
    # avoid division by zero at calm levels
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.

    # best guess: interpolate the height where RiB crosses RiBc
    icross = np.where(RiB > RiBc)[0]
    if len(icross) > 0:
        itop = icross[0]
        BLH = (HAGL[itop] - HAGL[itop-1])/(RiB[itop] - RiB[itop-1]) \
              * (RiBc - RiB[itop-1]) + HAGL[itop-1]

        # the error range spans at least the height levels used for the
        # interpolation (narrowed by the security margin eps)
        BLHu = np.max([BLH,HAGL[itop]-eps])
        BLHl = np.min([BLH,HAGL[itop-1]+eps])

        # calculate an alternative BLH based on another critical Richardson
        # number (RiBce)
        icross = np.where(RiB > RiBce)[0]
        if len(icross) > 0:
            itop = icross[0]
            # NOTE(review): the original interpolated towards RiBc here even
            # though the crossing indices above were obtained with RiBce;
            # interpolating towards RiBce makes the estimate consistent with
            # solving RiB == RiBce at this crossing
            BLHa = (HAGL[itop] - HAGL[itop-1])/(RiB[itop] - RiB[itop-1]) \
                   * (RiBce - RiB[itop-1]) + HAGL[itop-1]
            BLHu = np.max([BLHu,HAGL[itop]-eps])
            BLHl = np.min([BLHl,HAGL[itop-1]+eps])

            # widen the range with the spread between the two estimates
            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])

        else:
            BLH,BLHu,BLHl = np.nan, np.nan,np.nan

    else:
        BLH,BLHu,BLHl = np.nan, np.nan,np.nan

    return BLH,BLHu,BLHl
+
+
+
+#from class
+def get_lcl(startp,startt,startqv):
+        # Find lifting condensation level iteratively
+    lcl = 20.
+    RHlcl = 0.5
+    
+    itmax = 30
+    it = 0
+    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0:
+        iTHTV_0 = iTHTV_0[0]
+        THTV_0 = THTV[iTHTV_0]
+    else:
+        THTV_0 = np.nan
+    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
+    
+    
+    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
+    #RiB - RiBc = 0
+    
+    #best guess of BLH
+    
+    #print("RiB: ",RiB)
+    #print("RiBc: ",RiBc)
+    
+    
+    
+    BLHi = np.where(RiB > RiBc)[0]
+    if len(BLHi ) > 0:
+        BLHi = BLHi[0]
+        #print("BLHi: ",BLHi)
+        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+        
+        # possible error is calculated as the difference height levels used for the interpolation
+        BLHu = np.max([BLH,HAGL[BLHi]-eps])
+        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
+        # calculate an alternative BLH based on another critical Richardson number (RiBce):
+        BLHi =np.where(RiB > RiBce)[0]
+        if len(BLHi ) > 0:    
+            BLHi = BLHi[0]
+                
+            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
+            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
+            
+            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
+            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
+        
+        else:
+            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
+
+    else:
+        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
+        
+    return BLH,BLHu,BLHd
+
def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
    """Return the datetimes from STARTTIME (inclusive) up to ENDTIME
    (exclusive), spaced TIMEJUMP apart; a trailing partial interval is
    dropped (integer truncation of the step count)."""
    total_seconds = (ENDTIME - STARTTIME).total_seconds()
    nsteps = int(total_seconds / TIMEJUMP.total_seconds())
    stamps = []
    istep = 0
    while istep < nsteps:
        stamps.append(STARTTIME + TIMEJUMP*istep)
        istep += 1
    return stamps
+
+
+#from os import listdir
+#from os.path import isfile #,join
+import glob
+
+
+class wyoming(object):
+    def __init__(self):
+       self.status = 'init'
+       self.found = False
+       self.DT = None
+       self.current = None
+       #self.mode = 'b'
+       self.profile_type = 'wyoming'  
+       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
+       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+         
+    def set_STNM(self,STNM):
+        self.__init__()
+        self.STNM = STNM
+        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
+        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
+        self.current = None
+        self.found = False
+        self.FILES.sort()
+        
+    def find_first(self,year=None,get_atm=False):
+        self.found = False    
+                
+        # check first file/year or specified year
+        if year == None:
+            self.iFN = 0
+            self.FN = self.FILES[self.iFN]
+        else:
+            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
+            self.iFN = self.FILES.index(self.FN)
+        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+        self.current = self.sounding_series.find('h2')
+        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
+        
+        # go through other files and find first sounding when year is not specified
+        self.iFN=self.iFN+1
+        while keepsearching:
+            self.FN = self.FILES[self.iFN]
+            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+            self.current = self.sounding_series.find('h2')
+            self.iFN=self.iFN+1
+            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
+        self.found = (self.current is not None)
+
+        self.status = 'fetch'
+        if self.found:
+            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+        
+        if self.found and get_atm:
+            self.get_values_air_input()
+        
+    
    def find(self,DT,get_atm=False):
        """Scroll to the sounding taken exactly at datetime DT.

        INPUT:
            DT: requested sounding datetime (matched exactly)
            get_atm: when True, immediately parse the profile data of the
                     sounding found (calls self.get_values_air_input).
        OUTPUT:
            sets self.current (None when DT is not in the archive),
            self.found and self.status; may update self.FN/self.iFN/
            self.sounding_series when another file had to be opened.
        """
        self.found = False
        keepsearching = True
        #print(DT)
        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
            self.DT = DT
            # NOTE(review): .index() raises ValueError when the year's file
            # is missing from self.FILES -- confirm callers handle this
            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')
            
        keepsearching = (self.current is not None)
        while keepsearching:
            # parse the sounding datetime from the tail of the <h2> title
            # (appears to end with '<HH>Z <DD> <Mon> <YYYY>')
            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            if DTcurrent == DT:
                self.found = True
                keepsearching = False
                if get_atm:
                    self.get_values_air_input()
                    # NOTE(review): self.DT is only refreshed when get_atm
                    # is True -- confirm this asymmetry is intended
                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            elif DTcurrent > DT:
                # we scrolled past DT: the requested sounding does not exist
                keepsearching = False
                self.current = None
            else:
                # not there yet: advance to the next sounding title
                self.current = self.current.find_next('h2')
                if self.current is None:
                    keepsearching = False
        self.found = (self.current is not None)
        self.status = 'fetch'
+
+    def find_next(self,get_atm=False):
+        self.found = False
+        self.DT = None
+        if self.current is None:
+            self.find_first()
+        else:                
+            self.current = self.current.find_next('h2')
+            self.found = (self.current is not None)
+            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
+            while keepsearching:
+                self.iFN=self.iFN+1
+                self.FN = self.FILES[self.iFN]
+                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+                self.current = self.sounding_series.find('h2')
+                
+                self.found = (self.current is not None)
+                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
+        if self.found:        
+            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+        if self.found and get_atm:
+            self.get_values_air_input()
+       
+
+
+    def get_values_air_input(self,latitude=None,longitude=None):
+
+        # for iDT,DT in enumerate(DTS):
+        
+            #websource = urllib.request.urlopen(webpage)
+        #soup = BeautifulSoup(open(webpage), "html.parser")
+        
+       
        #workaround: the last line of the sounding table contains a stray "&#10;" (HTML newline entity), which results in a string-like first column
+        string = self.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = self.current.find_next('pre').find_next('pre').text
+
+        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
+        #PARAMS.insert(0,'date',DT)
+
+        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
+        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
+        
+        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
+        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
+        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
+        HAGL = HGHT - np.float(PARAMS['Station elevation'])
+        ONE_COLUMN.insert(0,'HAGL',HAGL)
+
+        
+        
+        
+        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
+        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
+        ONE_COLUMN.insert(0,'QABS',QABS)
+        
+        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
+
+        #mixed layer potential temperature
+        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
+
+        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
+        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
+        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
+
+        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
+        BLHV = np.max((BLHV,10.))
+        BLHVu = np.max((BLHVu,10.))
+        BLHVd = np.max((BLHVd,10.))
+        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
+
+        #security values for mixed-layer jump values dthetav, dtheta and dq
+        
+        # fit new profiles taking the above-estimated mixed-layer height
+        ONE_COLUMNNEW = []
+        for BLH in [BLHV,BLHVu,BLHVd]:
+            ONE_COLUMNNEW.append(pd.DataFrame())
+            
+            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
+            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
+            
+            listHAGLNEW = list(HAGLNEW)
+            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
+                
+                # get index of lowest valid observation. This seems to vary
+                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
+                if len(idxvalid) > 0:
+                    #print('idxvalid',idxvalid)
+                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
+                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
+                    else:
+                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
+                else:
+                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
+                    #print(col,meanabl)
+               
+                
+                # if col == 'PRES':
+                #     meanabl =  
+            
+                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
+                #THTVM = np.nanmean(THTV[HAGL <= BLH])
+                #print("new_pro_h",new_pro_h)
+                # calculate jump ath the top of the mixed layer
+                if col in ['THTA','THTV',]:
+                    #for moisture
+                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
+                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
+                    if len(listHAGLNEW) > 4:
+                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
+                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
+                        dtheta = np.max((0.1,dtheta_pre))
+                        #meanabl = meanabl - (dtheta - dtheta_pre)
+                        #print('dtheta_pre',dtheta_pre)
+                        #print('dtheta',dtheta)
+                        #print('meanabl',meanabl)
+                        #stop
+                        
+                    else:
+                        dtheta = np.nan
+                else:
+                    if len(listHAGLNEW) > 4:
+                        #for moisture (it can have both negative and positive slope)
+                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
+                    else:
+                        dtheta = np.nan
+                #print('dtheta',dtheta)
+                
+                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
+            
+                
+                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
+                
+            #QABSM = np.nanmean(QABS[HAGL <= BLH])
+            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
+            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
+            
+        # we just make a copy of the fields, so that it can be read correctly by CLASS 
+        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
+            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
+            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+            
+            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
+        
+            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
+            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
+
+
+        # assign fields adopted by CLASS
+        if self.mode == 'o': #original 
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'b':
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'u':
+            PARAMS.insert(0,'h',   BLHVu)
+        elif self.mode == 'd':
+            PARAMS.insert(0,'h',   BLHVd)
+        else:
+            PARAMS.insert(0,'h',   BLHV)
+            
+
+        try:
+            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
+            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
+        except:
+            print("could not convert latitude coordinate")
+            PARAMS.insert(0,'latitude', np.nan)
+            PARAMS.insert(0,'lat', np.nan)
+        try:
+            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
+            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwich)
+            PARAMS.insert(0,'lon', 0.)
+        except:
+            print("could not convert longitude coordinate")
+            PARAMS.insert(0,'longitude', np.nan)
+            PARAMS.insert(0,'lon', 0.)
+
+        if latitude is not None:
+            print('overwriting latitude with specified value')
+            PARAMS['latitude'] = np.float(latitude)
+            PARAMS['lat'] = np.float(latitude)
+        if longitude is not None:
+            print('overwriting longitude with specified value')
+            PARAMS['longitude'] = np.float(longitude)
+        try:
+            # this is the local sun-time datetime from which we calculate the hour of the day (assuming we would be in Greenwich, hence taking lon=0)
+            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
+            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            # This is the nearest datetime when sun is up (for class)
+            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
+            # apply the same time shift for UTC datetime
+            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
+            
+        except:
+            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
+            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
+            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
+            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
+
+        
+
+        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
+        # as we are forcing lon equal to zero, this is expressed in local sun time
+        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
+
+           
+        ONE_COLUMNb = ONE_COLUMNNEW[0]
+        ONE_COLUMNu = ONE_COLUMNNEW[1]
+        ONE_COLUMNd = ONE_COLUMNNEW[2]
+        
+
+        THTVM = np.nanmean(THTV[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
+        
+        QABSM = np.nanmean(QABS[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
+        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
+        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
+
+        BLHVe = abs(BLHV - BLHVu)
+        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
+
+        #PARAMS.insert(0,'dq',0.)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
+        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
+        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
+        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
+        
+        if self.mode == 'o': #original 
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
+        elif self.mode == 'b': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNb
+            BLCOLUMN = ONE_COLUMNb
+        elif self.mode == 'u': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNu
+            BLCOLUMN = ONE_COLUMNu
+        elif self.mode == 'd': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNd
+            BLCOLUMN = ONE_COLUMNd
+        else:
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb
+
+        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
+        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
+        # print(BLCOLUMN['HAGL'][lt6000])
+        # print(BLCOLUMN['HAGL'][lt2500])
+        # 
+        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
+
+        #print(BLCOLUMN['HAGL'][lt2500])
+        PARAMS.insert(0,'OK',
+                      ((BLHVe < 200.) and 
+                       ( len(np.where(lt6000)[0]) > 5) and
+                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate must reach a height of at least 6000 m
+                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
+                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
+                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
+                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
+                      )
+                     )
+
+        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
+        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
+        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
+        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
+        
+        
+        PARAMS = PARAMS.T
+
+        
+        self.PARAMS = PARAMS
+        self.ONE_COLUMN = USE_ONECOLUMN
+        # if self.mode == 'o': #original 
+        #     self.ONE_COLUMN = ONE_COLUMN
+        # elif self.mode == 'b': # best BLH
+        #     self.ONE_COLUMN = ONE_COLUMNb
+        # elif self.mode == 'u':# upper BLH
+        #     self.ONE_COLUMN = ONE_COLUMNu
+        # elif self.mode == 'd': # lower BLH
+        #     self.ONE_COLUMN=ONE_COLUMNd
+        # else:
+        #     self.ONE_COLUMN = ONE_COLUMN
+
diff --git a/class4gl/data_global.py b/class4gl/data_global.py
new file mode 100644
index 0000000..9c3d9b5
--- /dev/null
+++ b/class4gl/data_global.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: Hendrik Wouters
+
+Purpose: provides class routines for ground and atmosphere conditions used for
+the CLASS mixed-layer model
+
+Usage:
+    from data_global import data_global
+    from class4gl import class4gl_input
+    from data_soundings import wyoming
+
+    # create a data_global object and load initial data pages
+    globaldata = data_global()
+    globaldata.load_datasets()
+    # create a class4gl_input object
+    c4gli = class4gl_input()
+    # Initialize it with profile data. We need to do this first. Actually this
+    # will set the coordinate parameters (datetime, latitude, longitude) in
+    # class4gl_input.pars.__dict__, which is required to read point data from
+    # the data_global object.
+
+    # open a Wyoming stream for a specific station
+    wy_strm = wyoming(STNM=91376)
+    # load the first profile
+    wy_strm.find_first()
+    # load the profile data into the class4gl_input object
+    c4gli.get_profile_wyoming(wy_strm)
+    
+    # and finally, read the global input data for this profile
+    c4gli.get_global_input(globaldata)
+
+
+"""
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+#import pynacolada as pcd
+import pandas as pd
+import xarray as xr
+import os
+import glob
+import sys
+import errno
+import warnings
+import logging
+
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
+class book(object):
+    """ This is a class for a dataset spread over multiple files. It has a
+    similar purpose as open_mfdataset, but only one file (the current 'page')
+    is loaded at a time. This saves precious memory. """
+    def __init__(self,fn,concat_dim = None,debug_level=None):
+        self.logger = logging.getLogger('book')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # filenames are expanded as a list and sorted by filename
+        self.pages = glob.glob(fn); self.pages.sort()
+        # In case length of the resulting list is zero, this means no file was found that matches fn. In that case we raise an error.
+        if len(self.pages) == 0:
+            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
+        self.ipage = -1; self.page = None
+        self.renames = {} # each time when opening a file, a renaming should be done.
+        self.set_page(0)
+
+        # we consider that the outer dimension is the one we concatenate
+        self.concat_dim = concat_dim
+        if self.concat_dim is None:
+            self.concat_dim = self.concat_dim=list(self.page.dims.keys())[0]
+
+    # this wraps the xarray sel command (fix: bind self; use self.page)
+    def sel(self, *args, **kwargs):
+        for dim in kwargs.keys():
+            if dim == self.concat_dim:
+                self.browse_page(**{dim: kwargs[dim]})
+        return self.page.sel(*args, **kwargs)
+
+
+    ## this wraps the xarray class -> some issues with that, so I just copy the sel command (which I do not use yet)
+    #def __getattr__(self,attr):
+    #    orig_attr = self.page.__getattribute__(attr)
+    #    if callable(orig_attr):
+    #        def hooked(*args, **kwargs):
+    #            for dim in kwargs.keys():
+    #                if dim == self.concat_dim:
+    #                    self.browse_page(**{dim: kwargs[dim]})
+    #
+    #            result = orig_attr(*args, **kwargs)
+    #            # prevent wrapped_class from becoming unwrapped
+    #            if result == self.page:
+    #                return self
+    #            self.post()
+    #            return result
+    #        return hooked
+    #    else:
+    #        return orig_attr
+
+    def set_renames(self,renames):
+        #first, we convert back to original names, and afterwards, we apply the update of the renames.
+        reverse_renames = dict((v,k) for k,v in self.renames.items())
+        self.renames = renames
+        if self.page is not None:
+            self.page = self.page.rename(reverse_renames)
+            self.page = self.page.rename(self.renames)
+
+    def set_page(self,ipage,page=None):
+        """ this sets the right page according to ipage:
+                - We do not switch the page if we are already at the right one
+                - we set the correct renamings (level -> lev, latitude -> lat,
+                etc.)
+                - The dataset is also squeezed.
+        """
+
+        if ((ipage != self.ipage) or (page is not None)):
+
+            if self.page is not None:
+                self.page.close()
+
+            self.ipage = ipage
+            if page is not None:
+                self.page = page
+            else:
+                if self.ipage == -1:
+                   self.page = None
+                else:
+                    #try:
+
+                    self.logger.info("Switching to page "+str(self.ipage)+': '\
+                                     +self.pages[self.ipage])
+                    self.page = xr.open_dataset(self.pages[self.ipage])
+
+
+            # do some final corrections to the dataset to make them uniform
+            if self.page is not None:
+               if 'latitude' in self.page.dims:
+#    sel       f.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
+               if 'level' in self.page.dims:
+                   self.page = self.page.rename({'level':'lev'})
+
+               self.page = self.page.rename(self.renames)
+               self.page = self.page.squeeze(drop=True)
+
+    def browse_page(self,rewind=2,**args):
+
+        # at the moment, this is only tested with files that are stacked according to the time dimension.
+        dims = args.keys()
+
+
+        if self.ipage == -1:
+            self.set_page(0)
+
+        found = False
+        iipage = 0
+        startipage = self.ipage - rewind
+        while (iipage < len(self.pages)) and not found:
+            ipage = (iipage+startipage) % len(self.pages)
+            for dim in args.keys():
+                this_file = True
+
+                # here we store the datetimes in a directly-readable dictionary, so that we don't need to load it every time again
+                if 'dims' not in self.__dict__:
+                    self.dims = {}
+                if dim not in self.dims.keys():
+                    self.dims[dim] = [None]*len(self.pages)
+
+                if self.dims[dim][ipage] is None:
+                    self.logger.info('Loading coordinates of dimension "'+dim+\
+                                     '" of page "' +str(ipage)+'".')
+                    self.set_page(ipage)
+                    # print(ipage)
+                    # print(dim)
+                    # print(dim,self.page[dim].values)
+                    self.dims[dim][ipage] = self.page[dim].values
+
+                # determine current time range of the current page
+                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
+                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.
+
+                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
+                    this_file = False
+
+            if this_file:
+                found = True
+                self.set_page(ipage)
+            else:
+
+                #if ((args[dim] >= self.page[dim].min().values) and (args[dim] < self.page[dim].max().values)):
+                #    iipage = len(self.pages) # we stop searching
+
+                iipage += 1
+
+        if not found:
+            self.logger.info("Page not found. Setting to page -1")
+            #iipage = len(self.pages) # we stop searching further
+            self.set_page(-1)
+
+        if self.ipage != -1:
+            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
+        else:
+            self.logger.debug("I'm now at page "+ str(self.ipage))
+
+
+class data_global(object):
+    def __init__(self,sources= {
+        # # old gleam
+        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
+        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
+        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
+        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
+        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
+        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
+        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
+        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
+        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
+        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
+        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
+        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
+        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
+        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
+        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
+        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
+        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
+        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
+        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
+        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
+        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
+        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
+        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
+        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
+        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
+        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
+        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
+        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
+        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
+        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
+        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
+        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
+        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
+        },debug_level=None):
+        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
+        self.sources = sources
+        self.datarefs = {}
+        self.datasets = {}
+        self.datetime = dt.datetime(1981,1,1)
+
+        self.logger = logging.getLogger('data_global')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+        self.debug_level = debug_level
+
+        warnings.warn('omitting pressure field p and advection')
+
+    def in_library(self,fn):
+        if fn not in self.library.keys():
+            return False
+        else:
+            print("Warning: "+fn+" is already in the library.")
+            return True
+
+    def add_to_library(self,fn):
+        if not self.in_library(fn):
+            print("opening: "+fn)
+            self.library[fn] = \
+                book(fn,concat_dim='time',debug_level=self.debug_level)
+
+            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
+            #if 'latitude' in self.library[fn].variables:
+            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+
+    # default procedure for loading datasets into the globaldata library
+    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
+        if type(varssource) is str:
+            varssource = [varssource]
+        if type(varsdest) is str:
+            varsdest = [varsdest]
+
+        self.add_to_library(input_fn)
+
+        if varssource is None:
+            varssource = []
+            for var in self.sources[input_fn].variables:
+                avoid = \
+                ['lat','lon','latitude','longitude','time','lev','level']
+                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
+                    varssource.append(var)
+
+        if varsdest is None:
+            varsdest = varssource
+
+        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
+        for ivar,vardest in enumerate(varsdest):
+            varsource = varssource[ivar]
+            print('setting '+vardest+' as '+varsource+' from '+input_fn)
+
+            if vardest in self.datarefs.keys():
+                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
+            #self.add_to_library(fn,varsource,vardest)
+            if vardest != varsource:
+                libkey = input_fn+'.'+varsource+'.'+vardest
+                if libkey not in self.library.keys():
+                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
+                    self.library[libkey] = book(input_fn,\
+                                                debug_level=self.debug_level)
+                    self.library[libkey].set_renames({varsource: vardest})
+
+                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
+                self.datasets[vardest] =self.library[self.datarefs[vardest]]
+            else:
+                self.datarefs[vardest] = input_fn
+                self.datasets[vardest] =self.library[self.datarefs[vardest]]
+
+            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
+            #     print('Warning: '+ vardest "not in " + input_fn)
+
+
+
+    def load_datasets(self,sources = None,recalc=0):
+
+        if sources is None:
+            sources = self.sources
+        for key in sources.keys():
+            #datakey,vardest,*args = key.split(':')
+            datakey,vardest = key.split(':')
+            #print(datakey)
+
+            fnvarsource = sources[key].split(':')
+            if len(fnvarsource) > 2:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource,fnargs = fnvarsource
+                fnargs = [fnargs]
+            elif len(fnvarsource) > 1:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource = fnvarsource
+                fnargs = []
+            else:
+                fn = sources[key]
+                varsource = vardest
+            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
+
+    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
+            # the default way of loading a 2d dataset
+            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
+                self.load_dataset_default(fn,varsource,vardest)
+            elif datakey == 'IGBPDIS':
+                if vardest == 'alpha':
+                    ltypes = ['W','B','H','TC']
+                    for ltype in ltypes:
+                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
+                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
+
+
+                    # landfr = {}
+                    # for ltype in ['W','B','H','TC']:
+                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
+
+
+
+                    keytemp = 'alpha'
+                    fnkeytemp = fn+':IGBPDIS:alpha'
+                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
+                        self.library[fnkeytemp]  = book(fnkeytemp,
+                                                        debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+                    else:
+                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
+                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
+                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
+                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
+                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
+                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
+                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
+
+                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+
+                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
+                        for ltype in ltypes:
+                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
+
+                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
+                        print('writing file to: '+fnkeytemp)
+                        os.system('rm '+fnkeytemp)
+                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
+                        self.library[fnkeytemp].close()
+
+
+                        self.library[fnkeytemp]  = \
+                            book(fnkeytemp,debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+
+
+                else:
+                    self.load_dataset_default(fn,varsource,vardest)
+
+
+            elif datakey == 'GLAS':
+                self.load_dataset_default(fn,varsource,vardest)
+                if vardest == 'z0m':
+                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
+                elif vardest == 'z0h':
+                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
+            elif datakey == 'DSMW':
+
+
+                # Procedure of the thermal properties:
+                # 1. determine soil texture from DSMW/10.
+                # 2. soil type with look-up table (according to DWD/EXTPAR)
+                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
+                #    with parameter look-up table from Noilhan and Planton (1989).
+                #    Note: The look-up table is inspired on DWD/COSMO
+
+                # TODO: implement inheritance, so that the preliminary output of DSMW or any other dataset can be calculated first
+
+
+
+                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
+                self.load_dataset_default(fn,'DSMW')
+                print('calculating texture')
+                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
+                TEMP  = {}
+                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
+                TEMP3 = {}
+                for SPKEY in SPKEYS:
+
+
+                    keytemp = SPKEY+'_values'
+                    fnoutkeytemp = fnout+':DSMW:'+keytemp
+                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                    else:
+                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
+                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
+                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+                        # for faster computation, we need to get it to memory out of Dask.
+                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
+                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
+
+                # yes, I know I only check the last file.
+                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
+                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
+                        print('idx',idx,SPKEY)
+                        SEL = (TEMP2 == idx)
+                    #     print(idx,len(TEMP3))
+                        for SPKEY in SPKEYS:
+                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
+
+                    for SPKEY in SPKEYS:
+                        keytemp = SPKEY+'_values'
+                        fnoutkeytemp = fnout+':DSMW:'+keytemp
+                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
+                        os.system('rm '+fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].close()
+
+
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                keytemp = 'texture'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+                else:
+                    self.library[fn+':DSMW:texture'] = xr.Dataset()
+                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
+                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
+                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
+                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+
+                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
+
+                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
+                    zundef[zundef < 0] = np.nan
+                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
+                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
+
+                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+
+
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+                print('calculating texture type')
+
+
+
+                keytemp = 'itex'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+                else:
+                    self.library[fnoutkeytemp] = xr.Dataset()
+                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+                    X = self.datasets['texture'].page['texture'].values*100
+                    X[pd.isnull(X)] = -9
+
+
+                    self.datasets[keytemp][keytemp].values = X
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
+                    self.datasets['itex'].close()
+
+
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+
+                keytemp = 'isoil'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                isoil_reprocessed = False
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+                else:
+                    isoil_reprocessed = True
+                    print('calculating soil type')
+                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    ITEX = self.datasets['itex'].page['itex'].values
+                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
+                    LOOKUP = [
+                              [-10 ,9],# ocean
+                              [0 ,7],# fine textured, clay (soil type 7)
+                              [20,6],# medium to fine textured, loamy clay (soil type 6)
+                              [40,5],# medium textured, loam (soil type 5)
+                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                              [80,3],# coarse textured, sand (soil type 3)
+                              [100,9],# coarse textured, sand (soil type 3)
+                            ]
+                    for iitex,iisoil in LOOKUP:
+                        ISOIL[ITEX > iitex] = iisoil
+                        print('iitex,iisoil',iitex,iisoil)
+
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    LOOKUP = [
+                              [9001, 1 ], # ice, glacier (soil type 1)
+                              [9002, 2 ], # rock, lithosols (soil type 2)
+                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                              [9,    9 ], # undefined (ocean)
+                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                              [9000, 9 ], # undefined (inland lake)
+                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                            ]
+                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
+
+                    CODE_VALUES[ITEX == 901200] = 9012
+                    for icode,iisoil in LOOKUP:
+                        ISOIL[CODE_VALUES == icode] = iisoil
+
+                    self.datasets['isoil']['isoil'].values = ISOIL
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+                    print('saved inbetween file to: '+fnoutkeytemp)
+
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                #adopted from data_soil.f90 (COSMO5.0)
+                SP_LOOKUP = {
+                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
+                  # (by index)                                           loam                    loam                                water      ice
+                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+                  # Important note: For peat, the unknown values below are set equal to that of loam
+                  #supplement Noilhan and Planton 1989 soil texture parameters for the force-restore method.
+                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
+                  #error in table 2 of NP89: values need to be multiplied by e-6
+                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
+
+                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
+                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
+                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
+                }
+
+
+                # isoil_reprocessed = False
+                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+
+                #     self.library[fn+':DSMW:isoil'] = \
+                #             book(fnoutkeytemp,debug_level=self.debug_level)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+                # else:
+                #     isoil_reprocessed = True
+                #     print('calculating soil type')
+                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+
+
+
+                # this should become cleaner in future but let's hard code it for now.
+                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
+                print('calculating soil parameter')
+                DATATEMPSPKEY = {}
+                if (recalc < 1) and (isoil_reprocessed == False): 
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        keytemp = SPKEY
+                        fnoutkeytemp=fnout+':DSMW:'+keytemp
+                        self.library[fn+':DSMW:'+SPKEY] =\
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
+                        self.datarefs[SPKEY] =fnoutkeytemp
+                else:
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+
+                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
+                    ISOIL = self.datasets['isoil'].page['isoil'].values
+                    print(np.where(ISOIL>0.))
+                    for i in range(11):
+                        SELECT = (ISOIL == i)
+                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
+
+                        os.system('rm '+fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].close()
+                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
+
+                        self.library[fn+':DSMW:'+SPKEY] = \
+                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+
+
+            else:
+                self.load_dataset_default(fn,varsource,vardest)
+
+
+
+
+
+
+#
+#                 # only print the last parameter value in the plot
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'cala'
+#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'crhoc'
+#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#     key = "CERES"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         CERES_start_date = dt.datetime(2000,3,1)
+#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+#
+#         var = 'cc'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#         print(class_settings.lat,class_settings.lon)
+#
+#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
+#
+#         input_nc.close()
+#
+
+
+#     key = "GIMMS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
+#         print("Reading Leag Area Index from "+input_fn)
+#         var = 'LAI'
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+#
+#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+#
+#         if np.isnan(tarray[idatetime]):
+#             print("interpolating GIMMS cveg nan value")
+#
+#             mask = np.isnan(tarray)
+#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+#             else:
+#                 print("Warning. Could not interpolate GIMMS cveg nan value")
+#
+#         class_settings.__dict__[var] = tarray[idatetime]
+#
+#         input_nc.close()
+#
+#     key = "IGBPDIS_ALPHA"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         var = 'alpha'
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+#         print("Reading albedo from "+input_fn)
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#
+#         landfr = {}
+#         for ltype in ['W','B','H','TC']:
+#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+#
+#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+#
+#         alpha=0.
+#         for ltype in landfr.keys():
+#             alpha += landfr[ltype]*aweights[ltype]
+#
+#
+#         class_settings.__dict__[var] = alpha
+#         input_nc.close()
+#
+#
+#     key = "ERAINT_ST"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         print("Reading soil temperature from "+input_fn)
+#
+#         var = 'Tsoil'
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         var = 'T2'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+#
+#
+#         input_nc.close()
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #var = 'T2'
+#     #valold = class_settings.__dict__[var]
+#     #
+#     #class_settings.__dict__[var] = 305.
+#     #class_settings.__dict__['Tsoil'] = 302.
+#     #valnew = class_settings.__dict__[var]
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #
+#     #var = 'Lambda'
+#     #valold = class_settings.__dict__[var]
+#
+#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
+#     ## I need to ask Chiel.
+#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+#     #
+#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
+#     #class_settings.__dict__[var] = valnew
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     key = "GLAS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+#         print("Reading canopy height for determining roughness length from "+input_fn)
+#         var = 'z0m'
+#
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+#
+#         lowerlimit = 0.01
+#         if testval < lowerlimit:
+#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+#             class_settings.__dict__[var] = lowerlimit
+#         else:
+#             class_settings.__dict__[var] = testval
+#
+#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+#
+#
+#         input_nc.close()
+
+
+
+
+
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
new file mode 100644
index 0000000..3e483f3
--- /dev/null
+++ b/class4gl/interface_functions.py
@@ -0,0 +1,506 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+#from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+
class records_iterator(object):
    """Bidirectional iterator over the rows of a records table.

    Yields ``(index_label, row)`` pairs like ``DataFrame.iterrows()``, and
    additionally supports stepping one row backwards via ``__prev__``.
    """

    def __init__(self, records):
        # records: a pandas DataFrame (anything exposing .index and .iloc).
        self.records = records
        # Position pointer; -1 means "before the first row" so that the
        # first __next__ lands on row 0.
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance the pointer by ``jump`` rows and return (label, row).

        Raises StopIteration when stepping past the last row.
        NOTE(review): a negative pointer is not range-checked, so stepping
        back before the first row silently wraps via negative indexing.
        """
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration

        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # Bug fix: the original called self.__next__(self, jump=-1), which
        # passed `self` twice to the bound method and raised
        # "TypeError: got multiple values for argument 'jump'".
        return self.__next__(jump=-1)
+
+
+#'_afternoon.yaml'
def get_record_yaml(yaml_file, index_start, index_end, mode='mod'):
    """Extract one record from an already-open multi-document YAML file.

    The byte span [index_start, index_end) of *yaml_file* is written to a
    temporary ``.buffer.yaml.<index_start>`` file, converted to JSON by an
    external Ruby one-liner (hard-coded HPC path), and parsed back with
    ``json.load``.  Presumably this detour exists because Ruby's YAML
    parser is much faster than PyYAML on these records — TODO confirm.

    Parameters
    ----------
    yaml_file : open file object positioned anywhere; must have a ``name``.
    index_start, index_end : byte offsets of the record within the file.
    mode : 'mod' -> return a ``class4gl`` model-output object;
           'ini' -> return a ``class4gl_input`` object with datetimes and
           NaN sentinels restored.
    """
    filename = yaml_file.name
    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
    #yaml_file = open(filename)

    #print('going to next observation',filename)
    yaml_file.seek(index_start)

    # Sanitize tokens JSON cannot represent: inf/nan become the sentinel
    # 9e19, and the YAML document separator '---' is removed.
    # NOTE(review): this is a blind substring replace — any literal
    # 'inf'/'nan' inside a value string would also be mangled; confirm the
    # records never contain such strings.
    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')

    # Write the sanitized snippet to a per-record buffer file (suffixing
    # index_start keeps concurrent calls on the same file from colliding).
    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
    filebuffer.write(buf)
    filebuffer.close()
    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
    
    # YAML -> JSON via an external Ruby interpreter (site-specific path).
    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '

    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
    print(command)
    os.system(command)
    jsonstream = open(filename+'.buffer.json.'+str(index_start))
    record_dict = json.load(jsonstream)
    jsonstream.close()
    # Clean up the YAML buffer; the JSON buffer is removed per-branch below.
    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))


    if mode =='mod':
        # Model output record: hand the parsed dict straight to class4gl.
        modelout = class4gl()
        modelout.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))

        return modelout
    elif mode == 'ini':

 
        # datetimes are incorrectly converted to strings. We need to convert them
        # again to datetimes
        for key,value in record_dict['pars'].items():
            # we don't want the key with columns that have none values
            if value is not None: 
                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
               # elif (type(value) == str):
                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")

            # Restore the NaN sentinel (written above as 9e19) in scalars.
            if (value == 0.9e19) or (value == '.9e19'):
                record_dict['pars'][key] = np.nan
        # Restore NaN sentinels inside the profile tables as well.
        for key in record_dict.keys():
            #print(key)
            if key in ['air_ap','air_balloon',]:
                #NNprint('check')
                for datakey,datavalue in record_dict[key].items():
                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]

        #os.system('rm '+filename+'.buffer.json.'+str(index_start))

        c4gli = class4gl_input()
        print(c4gli.logger,'hello')
        c4gli.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))
        return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
+
class stations(object):
    """Registry of the sounding stations available in a directory of yaml files.

    The station list (STNID, latitude, longitude, filename) is cached in
    ``stations_list.csv`` inside ``path``; pass ``refetch_stations=True`` to
    rebuild the cache from the yaml record files.
    """
    def __init__(self,path,suffix='ini',refetch_stations=False):

        self.path = path

        self.file = self.path+'/stations_list.csv'
        # reuse the cached csv when present, unless a refetch is forced
        if (os.path.isfile(self.file)) and (not refetch_stations):
            self.table = pd.read_csv(self.file)
        else:
            self.table = self.get_stations(suffix=suffix)
            self.table.to_csv(self.file)

        self.table = self.table.set_index('STNID')

    def get_stations(self,suffix):
        """Scan the yaml files in ``self.path`` and build the stations table.

        Station metadata is taken from the first yaml document of each file.
        Returns a DataFrame with columns STNID, latitude, longitude, filename.
        Raises ValueError when no matching yaml files are found.
        """
        # the chunked naming (<STNID>_0_<suffix>.yaml) takes precedence over
        # the legacy single-chunk naming (<STNID>_<suffix>.yaml)
        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
        if len(stations_list_files) == 0:
            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
        stations_list_files.sort()
        print(stations_list_files)
        if len(stations_list_files) == 0:
            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
        stations_list = []
        for stations_list_file in stations_list_files:
            # NOTE(review): yaml.load_all without an explicit Loader is
            # deprecated/unsafe on untrusted input; safe_load_all is the
            # recommended alternative for plain data files.
            with open(stations_list_file,'r') as thisfile:
                yamlgen = yaml.load_all(thisfile)
                try:
                    first_record = next(yamlgen)
                except (StopIteration, yaml.YAMLError):
                    # empty or unreadable file: no station metadata to take
                    first_record = None
                if first_record is not None:
                    stations_list.append({})
                    for column in ['STNID','latitude','longitude']:
                        stations_list[-1][column] = first_record['pars'][column]
                    stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
                yamlgen.close()

        print(stations_list)
        return pd.DataFrame(stations_list)
+
class stations_iterator(object):
    """Bidirectional iterator over the rows of a ``stations`` table.

    Yields ``(STNID, row)`` tuples; the cursor can also be positioned
    directly via ``set_row`` (positional) or ``set_STNID`` (by station id).
    """
    def __init__(self,stations):
        self.stations = stations
        self.ix = -1  # cursor position; -1 = before the first row
    def __iter__(self):
        return self
    def __next__(self,jump=1):
        """Move the cursor by ``jump`` rows and return (STNID, row)."""
        self.ix = (self.ix+jump) 
        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
            raise StopIteration
        self.ix = np.mod(self.ix,len(self.stations.table)) 
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
    def set_row(self,row):
        """Jump to the given positional row number and return (STNID, row)."""
        self.ix = row
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
    def set_STNID(self,STNID):
        """Jump to the row of station ``STNID`` and return (STNID, row)."""
        self.ix = np.where((self.stations.table.index == STNID))[0][0]
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def __prev__(self):
        # bugfix: the original called self.__next__(self, jump=-1), passing
        # ``self`` twice and raising TypeError on every use
        return self.__next__(jump=-1)
    def close(self):
        # bugfix: the original signature was ``def close():`` (no self),
        # so calling it on an instance raised TypeError
        del(self.ix)
+
class records_iterator(object):
    """Bidirectional iterator over a records DataFrame, yielding (index, row)."""
    def __init__(self,records):

        self.records = records
        self.ix = -1  # cursor position; -1 = before the first record

    def __iter__(self):
        return self

    def __next__(self,jump=1):
        """Move the cursor by ``jump`` records and return (index, row).

        Note: a negative cursor wraps around via np.mod (so stepping back
        from the first record lands on the last one), matching the original
        behaviour.
        """
        self.ix = (self.ix+jump) 
        if self.ix >= len(self.records.index):
            raise StopIteration
        self.ix = np.mod(self.ix,len(self.records))
        return self.records.index[self.ix], self.records.iloc[self.ix]
    def __prev__(self):
        # bugfix: the original called self.__next__(self, jump=-1), passing
        # ``self`` twice and raising TypeError on every use
        return self.__next__(jump=-1)
+
+
+# #'_afternoon.yaml'
+# def get_record_yaml(yaml_file,index_start,index_end):
+#     filename = yaml_file.name
+#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
+#     #yaml_file = open(filename)
+# 
+#     #print('going to next observation',filename)
+#     yaml_file.seek(index_start)
+# 
+#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
+# 
+#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+#     filebuffer.write(buf)
+#     filebuffer.close()
+#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
+#     
+#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+# 
+#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
+#     print(command)
+#     os.system(command)
+#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
+#     record_dict = json.load(jsonstream)
+#     jsonstream.close()
+#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+#  
+#     # datetimes are incorrectly converted to strings. We need to convert them
+#     # again to datetimes
+#     for key,value in record_dict['pars'].items():
+#         # we don't want the key with columns that have none values
+#         if value is not None: 
+#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
+#            # elif (type(value) == str):
+#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+#                 
+#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
+# 
+#         if (value == 0.9e19) or (value == '.9e19'):
+#             record_dict['pars'][key] = np.nan
+#     for key in record_dict.keys():
+#         print(key)
+#         if key in ['air_ap','air_balloon',]:
+#             print('check')
+#             for datakey,datavalue in record_dict[key].items():
+#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
+# 
+#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
+# 
+#     c4gli = class4gl_input()
+#     c4gli.load_yaml_dict(record_dict)
+#     return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
    """Collect the per-station record index tables into a single DataFrame.

    For every station in ``stations``, the yaml record files in ``path_yaml``
    are located (legacy single-chunk naming ``<STNID>_<subset>.yaml`` or
    chunked naming ``<STNID>_<chunk>_<subset>.yaml``).  Each yaml file is
    converted record by record to a table row holding the scalar parameters
    plus the byte offsets (index_start/index_end) of the record inside the
    yaml file, and the result is cached as a pickle next to the yaml file.
    A cached pickle is reused when it is newer than its yaml file, unless
    ``refetch_records`` forces regeneration.

    INPUT:
        stations: DataFrame of stations, indexed by STNID
        path_yaml: directory holding the yaml record files
        getchunk: 'all', or a specific chunk number
        subset: record subset suffix ('morning', 'afternoon', 'ini', 'mod', ...)
        refetch_records: force regeneration of the pickle caches
    OUTPUT:
        DataFrame indexed by (STNID, chunk, index)
    """
    records = pd.DataFrame()
    for STNID,station in stations.iterrows():
        dictfnchunks = []
        # bugfix: use '==' instead of 'is' -- 'is' compares object identity
        # and only worked for string literals by CPython interning accident
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually for
            # original profile pairs)
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn,chunk=chunk))

            # otherwise, we use the new multi-chunk filename format.
            # NOTE: the scan stops at the first missing chunk number, so a
            # gap in the numbering hides the chunks after it.
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1

        else:
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
            dictfnchunks.append(dict(fn=fn,chunk=getchunk))

        if len(dictfnchunks) > 0:
            for dictfnchunk in dictfnchunks:
                yamlfilename = dictfnchunk['fn']
                chunk = dictfnchunk['chunk']
                print(chunk)

                # the pickle cache sits next to its yaml source
                pklfilename = yamlfilename.replace('.yaml','.pkl')

                # decide whether the pickle cache has to be (re)built
                generate_pkl = False
                if not os.path.isfile(pklfilename): 
                    print('pkl file does not exist. I generate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                elif not (os.path.getmtime(yamlfilename) <  \
                    os.path.getmtime(pklfilename)):
                    print('pkl file older than yaml file, so I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True

                if refetch_records:
                    print('refetch_records flag is True. I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                if not generate_pkl:
                    records = pd.concat([records,pd.read_pickle(pklfilename)])
                else:
                    with open(yamlfilename) as yaml_file:

                        dictout = {}

                        # skip ahead to the first record separator ('---')
                        next_record_found = False
                        end_of_file = False
                        while (not next_record_found) and (not end_of_file):
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        next_tell = yaml_file.tell()

                        while not end_of_file:

                            print(' next record:',next_tell)
                            current_tell = next_tell
                            next_record_found = False
                            yaml_file.seek(current_tell)
                            # copy the record into a temporary buffer file,
                            # replacing inf/nan which yaml/json cannot digest
                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                            linebuffer = ''
                            while ( (not next_record_found) and (not end_of_file)):
                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
                                linebuffer = yaml_file.readline()
                                next_record_found = (linebuffer == '---\n')
                                end_of_file = (linebuffer == '')
                            filebuffer.close()

                            next_tell = yaml_file.tell()
                            index_start = current_tell
                            index_end = next_tell

                            # convert the yaml buffer to json with an external
                            # ruby one-liner (much faster than PyYAML here).
                            # NOTE(review): the ruby path is hard-coded for a
                            # specific cluster installation.
                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                            print(command)

                            os.system(command)
                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
                            record = json.load(jsonstream)
                            dictouttemp = {}
                            for key,value in record['pars'].items():
                                # we don't want the keys with None values
                                if value is not None: 
                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
                                   if (type(value) in regular_numeric_types):
                                        dictouttemp[key] = value
                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:
                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                            recordindex = record['index']
                            # remember where the record lives so it can be
                            # re-read later without reparsing the whole file
                            dictouttemp['chunk'] = chunk
                            dictouttemp['index_start'] = index_start
                            dictouttemp['index_end'] = index_end
                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
                            for key,value in dictouttemp.items():
                                if key not in dictout.keys():
                                    dictout[key] = {}
                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                            print(' obs record registered')
                            jsonstream.close()
                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
                    records_station = pd.DataFrame.from_dict(dictout)
                    # NOTE(review): this raises when the yaml file contained
                    # no records at all (empty dictout) -- TODO confirm
                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                    print('writing table file ('+pklfilename+') for station '\
                          +str(STNID))
                    records_station.to_pickle(pklfilename)
                    records = pd.concat([records,records_station])
    return records
+
def stdrel(mod,obs,columns):
    """Station-relative standardisation of model values against observations.

    For every requested column, each model value is expressed as the station
    bias (model mean minus observed mean) plus the model anomaly around the
    station's model mean, both scaled by the per-station standard deviation
    of the observations.  Grouping is done on the 'STNID' column.
    """
    result = pd.DataFrame(columns = columns)
    for column in columns:
        mod_mean  = mod.groupby('STNID')[column].transform('mean')
        obs_mean  = obs.groupby('STNID')[column].transform('mean')
        obs_sigma = obs.groupby('STNID')[column].transform('std')
        result[column] = (mod_mean - obs_mean) / obs_sigma + \
                         (mod[column] - mod_mean) / obs_sigma
    return result
+
def pct(obs,columns):
    """Return the percentile rank (in (0, 1]) of each row of ``obs``.

    One output column per entry of ``columns``; ranking is ascending, so the
    largest value of a column gets rank 1.0.
    """
    # renamed the local from 'pct' to avoid shadowing the function itself,
    # and dropped the dead `pct[column] = ""` pre-assignment that was
    # immediately overwritten
    ranks = pd.DataFrame(columns=columns)
    for column in columns:
        ranks[column] = obs[column].rank(pct=True)
    return ranks
+
def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
    """Hourly tendencies d<key>/dt between morning and afternoon records.

    For each key, computes (afternoon value - morning observation) divided by
    the elapsed local time between the morning and afternoon soundings,
    expressed per hour.  The frames must be row-aligned and ``obs_morning``/
    ``obs_afternoon`` must carry an ``ldatetime`` column.
    """
    stats = pd.DataFrame()
    for key in keys: 
        # bugfix: use total_seconds() instead of .dt.seconds -- the latter
        # only returns the seconds *component* of the timedelta, which wraps
        # for spans >= 1 day and misbehaves for negative spans.  (Dropped the
        # dead `stats[...] = ""` pre-assignment as well.)
        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
                              (obs_afternoon.ldatetime - \
                               obs_morning.ldatetime).dt.total_seconds()*3600.
    return stats
+
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
new file mode 100644
index 0000000..83148e5
--- /dev/null
+++ b/class4gl/interface_multi.py
@@ -0,0 +1,2061 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+# from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
# Diverging red -> grey -> blue colormap for the statistics view.
# Each channel maps a list of (position, value_below, value_above) anchor
# points, as expected by matplotlib's LinearSegmentedColormap.
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


# NOTE(review): almost certainly ineffective -- os.system runs 'module load
# Ruby' in a child shell, so any environment change does not persist into
# this Python process or later os.system calls (the ruby binary is invoked
# via a hard-coded absolute path elsewhere anyway). TODO confirm and drop.
os.system('module load Ruby')
+
+class c4gl_interface_soundings(object):
    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
        """ creates an interactive interface for analysing class4gl experiments

        INPUT:
            path_exp : path of the experiment output
            path_obs : path of the observations 
            globaldata: global data that is being shown on the map
            refetch_records: rebuild the cached (pickled) record tables?
            refetch_stations: do we need to build the list of the stations again?
            inputkeys: input variables shown/ranked in the interface.
                NOTE(review): mutable default argument -- harmless as long
                as it is never mutated in place, TODO confirm.
        OUTPUT:
            the procedure returns an interface object with interactive plots

        """
        
        # set the ground
        self.globaldata = globaldata

 
        self.path_exp = path_exp
        self.path_obs = path_obs
        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')

        # # get the list of stations
        # stationsfile = self.path_exp+'/stations_list.csv'
        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
        #     stations = pd.read_csv(stationsfile)
        # else:
        #     stations = get_stations(self.path_exp)
        #     stations.to_csv(stationsfile)

        # stations = stations.set_index('STNID')

        # the interface state is organised in three "frames":
        # 'worldmap' (station map), 'stats' (tables/statistics) and
        # 'profiles' (individual soundings)
        self.frames = {}

        self.frames['stats'] = {}
        self.frames['worldmap'] = {}
                
        self.frames['profiles'] = {}
        self.frames['profiles'] = {}  # NOTE(review): duplicate of the previous line; kept as-is
        self.frames['profiles']['DT'] = None
        self.frames['profiles']['STNID'] = None

        #self.frames['worldmap']['stationsfile'] = stationsfile
        # build (or read back from cache) the table of available stations
        self.frames['worldmap']['stations'] = stations(self.path_exp, \
                                                       suffix='ini',\
                                                       refetch_stations=refetch_stations)

        # Initially, the stats frame inherets the values/iterators of
        # worldmap
        for key in self.frames['worldmap'].keys():
            self.frames['stats'][key] = self.frames['worldmap'][key]

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_ini'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='ini',\
                                           refetch_records=refetch_records
                                           )
        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_mod'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='mod',\
                                           refetch_records=refetch_records
                                           )

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_obs_afternoon'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_obs,\
                                           subset='afternoon',\
                                           refetch_records=refetch_records
                                           )

        # model records are assumed row-aligned with their initial-state
        # records; give them the same index
        self.frames['stats']['records_all_stations_mod'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        self.frames['stats']['records_all_stations_ini']['dates'] = \
            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date

        # align the afternoon observations with the (STNID, date) pairs of
        # the initial records, then restore the shared index
        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)


        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index

        self.frames['stats']['records_all_stations_obs_afternoon'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]

        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        self.frames['stats']['viewkeys'] = ['h','theta','q']
        print('Calculating table statistics')
        # hourly tendencies of the view keys: model vs afternoon observations
        self.frames['stats']['records_all_stations_mod_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_mod'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )
        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )

        self.frames['stats']['inputkeys'] = inputkeys
        
        # self.frames['stats']['inputkeys'] = \
        #     [ key for key in \
        #       self.globaldata.datasets.keys() \
        #       if key in \
        #       list(self.frames['stats']['records_all_stations_obs'].columns)]


        # get units from the class4gl units database
        self.units = dict(units)
        # for those that don't have a definition yet, we just ask a question
        # mark
        for var in self.frames['stats']['inputkeys']:
            self.units[var] = '?'

        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
        # percentile rank of every record's input values
        self.frames['stats']['records_all_stations_ini_pct'] = \
                  pct(self.frames['stats']['records_all_stations_ini'], \
                      columns = self.frames['stats']['inputkeys'])

        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
        #     mod['

        # 
        # 
        # \
        #        self.frames['stats']['records_all_stations_mod'], \



        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
        #               columns = [ 'd'+key+'dt' for key in \
        #                           self.frames['stats']['viewkeys']], \
        #              )

        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
        #               obs = self.frames['stats']['records_all_stations_ini'], \
        #               columns = self.frames['stats']['viewkeys'], \
        #              )
        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
        
        print('filtering pathological data')
        # some observational sounding still seem problematic, which needs to be
        # investigated. In the meantime, we filter them
        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))

        # we filter ALL data frames!!!
        for key in self.frames['stats'].keys():
            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
               (self.frames['stats'][key].index.names == indextype):
                self.frames['stats'][key] = self.frames['stats'][key][valid]
        print(str(len(valid) - np.sum(valid))+' soundings are filtered')

        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index


        print("filtering stations from interface that have no records")
        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                    == STNID).sum() == 0):
                print("dropping", STNID)
                self.frames['worldmap']['stations'].table = \
                        self.frames['worldmap']['stations'].table.drop(STNID)
                    
        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
        
        # TO TEST: should be removed, since it's is also done just below
        self.frames['stats']['stations_iterator'] = \
            self.frames['worldmap']['stations_iterator'] 

        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
        # NOTE(review): the next line is a no-op self-assignment; presumably
        # the 'stats' frame was meant on the left-hand side -- TODO confirm
        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
        self.next_station()

        # self.goto_datetime_worldmap(
        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
        #     'after')
+    def sel_station(self,STNID=None,rownumber=None):
+
+        if (STNID is not None) and (rownumber is not None):
+            raise ValueError('Please provide either STNID or rownumber, not both.')
+
+        if (STNID is None) and (rownumber is None):
+            raise ValueError('Please provide either STNID or rownumber.')
+            
+        if STNID is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
+            print(
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+            )
+            self.update_station()
+        elif rownumber is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
+            self.update_station()
+
+
+
+    def next_station(self,event=None,jump=1):
+        with suppress(StopIteration):
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+                = self.frames['worldmap']['stations_iterator'].__next__(jump)
+            # self.frames['worldmap']['stations_iterator'].close()
+            # del(self.frames['worldmap']['stations_iterator'])
+            # self.frames['worldmap']['stations_iterator'] = \
+            #                 selfself.frames['worldmap']['stations'].iterrows()
+            # self.frames['worldmap']['STNID'],\
+            # self.frames['worldmap']['current_station'] \
+            #     = self.frames['worldmap']['stations_iterator'].__next__()
+
+        self.update_station()
+
+    def prev_station(self,event=None):
+        self.next_station(jump = -1,event=event)
    def update_station(self):
        """Refresh all frames after the worldmap station cursor has moved.

        Copies the selection from the worldmap frame into the stats frame,
        rebuilds the per-station record tables, resets the record iterators,
        (re)opens the station yaml files, and finally refreshes the current
        record display via ``update_record``.
        """
        # the stats frame follows the station selected on the worldmap
        for key in ['STNID','current_station','stations_iterator']: 
            self.frames['stats'][key] = self.frames['worldmap'][key] 



        # generate index of the current station
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station.  The records index is a
        # (STNID, chunk, index) tuple, hence the nested unpacking below.
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts from a copy of the stats frame entries
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        # (re)open the yaml files of the selected station/chunk, closing any
        # file handles left over from the previously selected station
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently

        self.update_record()
+
+    def next_record(self,event=None,jump=1):
+        """Advance the profiles frame by `jump` records (negative values step
+        backwards).  A StopIteration at either end is silently suppressed, so
+        the view simply stays on the current record."""
+        with suppress(StopIteration):
+            (self.frames['profiles']['STNID'] , \
+            self.frames['profiles']['current_record_chunk'] , \
+            self.frames['profiles']['current_record_index']) , \
+            self.frames['profiles']['current_record_mod'] = \
+                      self.frames['profiles']['records_iterator'].__next__(jump)
+        # except (StopIteration):
+        #     self.frames['profiles']['records_iterator'].close()
+        #     del( self.frames['profiles']['records_iterator'])
+        #     self.frames['profiles']['records_iterator'] = \
+        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
+        #     (self.frames['profiles']['STNID'] , \
+        #     self.frames['profiles']['current_record_index']) , \
+        #     self.frames['profiles']['current_record_mod'] = \
+        #                     self.frames['profiles']['records_iterator'].__next__()
+
+        # keep the stats frame in lockstep with the profiles frame
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        self.update_record()
+
+    def prev_record(self,event=None):
+        """Step to the previous record by delegating to next_record with a negative jump."""
+        self.next_record(jump=-1,event=event)
+
+    def update_record(self):
+        """Refresh all per-record views for the record currently selected in
+        the profiles frame: the ini/mod/afternoon table rows, the associated
+        yaml profile records (read by byte offset from the open station
+        files), and — when the worldmap dataset is time-dependent — the
+        worldmap datetime.  Finally triggers a (partial) plot refresh."""
+        # look up the table rows for the current (STNID, chunk, record) key
+        self.frames['profiles']['current_record_ini'] =  \
+            self.frames['profiles']['records_current_station_ini'].loc[\
+                  (self.frames['profiles']['STNID'] , \
+                  self.frames['profiles']['current_record_chunk'],\
+                  self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_obs_afternoon'] =  \
+            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
+                  (self.frames['profiles']['STNID'] , \
+                  self.frames['profiles']['current_record_chunk'] , \
+                  self.frames['profiles']['current_record_index'])]
+
+        self.frames['profiles']['current_record_mod_stats'] = \
+                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
+                    self.frames['profiles']['STNID'], \
+                    self.frames['profiles']['current_record_chunk'], \
+                    self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
+                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
+                    self.frames['profiles']['STNID'],\
+                    self.frames['profiles']['current_record_chunk'],\
+                    self.frames['profiles']['current_record_index'])]
+        self.frames['profiles']['current_record_ini_pct'] = \
+                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
+                    self.frames['profiles']['STNID'],\
+                    self.frames['profiles']['current_record_chunk'],\
+                    self.frames['profiles']['current_record_index'])]
+
+        # keep the stats frame in lockstep with the profiles frame
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        # frame
+        # note that the current station, record is the same as the stats frame for initialization
+
+        # select first 
+        #self.frames['profiles']['current_record_index'], \
+        #self.frames['profiles']['record_yaml_mod'] = \
+        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
+        #                   self.frames['stats']['current_record_index'])
+        # read the yaml record between the stored byte offsets
+        # (index_start/index_end) of the open model-output file
+        self.frames['profiles']['record_yaml_mod'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_mod'], \
+               self.frames['profiles']['current_record_mod'].index_start,
+               self.frames['profiles']['current_record_mod'].index_end,
+               mode='mod')
+                                
+        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
+                       (self.frames['stats']['STNID'] , \
+                        self.frames['stats']['current_record_chunk'] , \
+                        self.frames['stats']['current_record_index'])]
+
+        self.frames['profiles']['record_yaml_ini'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_ini'], \
+               record_ini.index_start,
+               record_ini.index_end,
+                mode='ini')
+
+        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
+                       (self.frames['stats']['STNID'] , \
+                        self.frames['stats']['current_record_chunk'] , \
+                        self.frames['stats']['current_record_index'])]
+
+        # NOTE(review): the afternoon observations are read with mode='ini' —
+        # presumably they share the ini yaml layout; confirm in get_record_yaml.
+        self.frames['profiles']['record_yaml_obs_afternoon'] = \
+           get_record_yaml(
+               self.frames['profiles']['current_station_file_afternoon'], \
+               record_afternoon.index_start,
+               record_afternoon.index_end,
+                mode='ini')
+
+
+        key = self.frames['worldmap']['inputkey']
+        # only redraw the map if the current world map has a time
+        # dimension
+        if 'time' in self.globaldata.datasets[key].page[key].dims:
+            self.goto_datetime_worldmap(
+                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                'after')
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only=['stats_lightupdate',
+                                                  'worldmap',
+                                                  'profiles'])
+        else:
+            # time-independent dataset: redraw only the station overlay, not the map
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only=['stats_lightupdate',
+                                                  'worldmap_stations',
+                                                  'profiles'])
+
+    def abline(self,slope, intercept,axis):
+        """Draw the straight line y = slope*x + intercept across the current
+        x-limits of *axis*, as a dashed black line ('k--')."""
+        #axis = plt.gca()
+        x_vals = np.array(axis.get_xlim())
+        y_vals = intercept + slope * x_vals
+        axis.plot(x_vals, y_vals, 'k--')
+
+    def plot(self):
+        """Build the full interactive figure: stats scatter axes, worldmap and
+        colorbars, profile/output axes, navigation buttons and event hooks.
+
+        Stores the figure, axes, buttons and text boxes on self (self.fig,
+        self.axes, self.btns, self.tbox) and performs an initial refresh.
+        """
+        import pylab as pl
+        from matplotlib.widgets import Button
+        import matplotlib.pyplot as plt
+        import matplotlib as mpl
+        '''
+        Definition of the axes for the sounding table stats
+        '''
+        
+        fig = pl.figure(figsize=(14,9))
+        axes = {} #axes
+        btns = {} #buttons
+
+        # frames, which sets attributes for a group of axes, buttens, 
+        # one stats subplot per model-stats column, stacked in the first grid column
+        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
+            label = 'stats_'+str(key)
+            axes[label] = fig.add_subplot(\
+                            len(self.frames['stats']['viewkeys']),\
+                            5,\
+                            5*ikey+1,label=label)
+            # Actually, the axes should be a part of the frame!
+            #self.frames['stats']['axes'] = axes[
+
+            # pointer to the axes' point data
+            axes[label].data = {}
+
+            # pointer to the axes' color fields
+            axes[label].fields = {}
+
+
+        fig.tight_layout()
+        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)
+
+        label ='stats_colorbar'
+        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
+        axes[label].fields = {}
+
+        # custom red->gray->blue colormap used to color points by percentile
+        from matplotlib.colors import LinearSegmentedColormap
+        cdictpres = {'blue': (\
+                           (0.,    0.,  0.),
+                           (0.25,  0.25, 0.25),
+                           (0.5,  .70, 0.70),
+                           (0.75, 1.0, 1.0),
+                           (1,     1.,  1.),
+                           ),
+               'green': (\
+                           (0. ,   0., 0.0),
+                           (0.25,  0.50, 0.50),
+                           (0.5,  .70, 0.70),
+                           (0.75,  0.50, 0.50),
+                           (1  ,    0,  0.),
+                           ),
+               'red':  (\
+                          (0 ,  1.0, 1.0),
+                          (0.25 ,  1.0, 1.0),
+                           (0.5,  .70, 0.70),
+                          (0.75 , 0.25, 0.25),
+                          (1,    0., 0.),
+                          )}
+        
+        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
+
+
+        # time-series axes (records vs. datetime) at the top middle
+        label = 'times'
+               
+        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
+        # add pointers to the data of the axes
+        axes[label].data = {}
+        # add pointers to color fields (for maps and colorbars) in the axes
+        axes[label].fields = {}
+
+
+        label = 'worldmap'
+               
+        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
+        # add pointers to the data of the axes
+        axes[label].data = {}
+        # add pointers to color fields (for maps and colorbars) in the axes
+        axes[label].fields = {}
+        axes[label].lat = None
+        axes[label].lon = None
+
+        label = 'worldmap_colorbar'
+        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
+        axes[label].fields = {}
+
+        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
+        label = 'worldmap_stations'
+        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
+        axes[label].data = {}
+
+        # hook up interactive picking and hover callbacks
+        fig.canvas.mpl_connect('pick_event', self.on_pick)
+        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)
+
+
+        """ buttons definitions """
+        
+        label = 'bprev_dataset'
+        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous dataset')
+        btns[label].on_clicked(self.prev_dataset)
+
+        label = 'bnext_dataset'
+        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next dataset')
+        btns[label].on_clicked(self.next_dataset)
+
+        label = 'bprev_datetime'
+        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous datetime')
+        btns[label].on_clicked(self.prev_datetime)
+
+        label = 'bnext_datetime'
+        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next datetime')
+        btns[label].on_clicked(self.next_datetime)
+
+
+        label = 'bprev_station'
+        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous station')
+        btns[label].on_clicked(self.prev_station)
+
+        label = 'bnext_station'
+        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next station')
+        btns[label].on_clicked(self.next_station)
+
+        label = 'bprev_record'
+        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
+        btns[label] = Button(axes[label], 'Previous record')
+        btns[label].on_clicked(self.prev_record)
+
+        label = 'bnext_record'
+        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
+        btns[label] = Button(axes[label], 'Next record')
+        btns[label].on_clicked(self.next_record)
+
+
+        # self.nstatsview = nstatsview
+        # self.statsviewcmap = statsviewcmap
+        self.fig = fig
+        self.axes = axes
+        self.btns = btns
+        self.tbox = {}
+        # self.hover_active = False
+
+        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
+        #                                transform=plt.gcf().transFigure)
+
+        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
+                                          transform=plt.gcf().transFigure)
+
+        # profile axes (right column) and model-output time-series axes
+        label = 'air_ap:theta'
+        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)
+
+        label = 'air_ap:q'
+        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
+
+        label = 'out:h'
+        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
+
+        label = 'out:theta'
+        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
+
+        label = 'out:q'
+        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
+
+        label = 'SEB'
+        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
+
+
+        self.hover_active = False
+        self.fig = fig
+        self.fig.show()
+        self.fig.canvas.draw()
+        self.refresh_plot_interface()
+
+
+    # def scan_stations(self):
+    #     blabla
+        
+
+
+    # def get_records(current_file):
+    #     records = pd.DataFrame()
+
+    #     # initial position
+    #     next_record_found = False
+    #     while(not next_record_found):
+    #         next_record_found = (current_file.readline() == '---\n')
+    #     next_tell = current_file.tell() 
+    #     end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #     while not end_of_file:
+    #         current_tell = next_tell
+    #         next_record_found = False
+    #         current_file.seek(current_tell)
+    #         while ( (not next_record_found) and (not end_of_file)):
+    #             current_line = current_file.readline()
+    #             next_record_found = (currentline == '---\n')
+    #             end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #         # we store the position of the next record
+    #         next_tell = current_file.tell() 
+    #         
+    #         # we get the current record. Unfortunately we need to reset the
+    #         # yaml record generator first.
+    #         current_yamlgen.close()
+    #         current_yamlgen = yaml.load_all(current_file)
+    #         current_file.seek(current_tell)
+    #         current_record_mod = current_yamlgen.__next__()
+    #     current_yamlgen.close()
+
+    #     return records
+
+       #      next_record_found = False
+       #      while(not record):
+       #          next_record_found = (self.current_file.readline() == '---\n')
+       #      self.current_tell0 = self.current_file.tell() 
+
+       #  
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell0 = self.current_file.tell() 
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell1 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell0)
+       #  self.r0 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell1)
+       #  next_record_found = False
+       #  while ( (not next_record_found) and (not end_of_file):
+       #      current_line = self.current_file.readline()
+       #      next_record_found = (currentline == '---\n')
+       #      end_of_file = (currentline == '') # an empty line means we are at the end
+
+       #  self.current_tell2 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell1)
+       #  self.r1 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell2)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell3 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell2)
+       #  self.r2 = self.current_yamlgen.__next__()
+
+       #  # go to position of next record in file
+       #  self.current_file.seek(self.current_tell3)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell4 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell3)
+       #  self.r3 = self.current_yamlgen.__next__()
+ 
+       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
+
+    def goto_datetime_worldmap(self,DT,shift=None):
+        """Point the worldmap frame at the dataset time step nearest to DT.
+
+        shift='after' / 'before' moves one step forward/backward when the
+        nearest step falls on the wrong side of DT.  Only acts when the
+        current worldmap dataset has a 'time' dimension; stores the selected
+        index/value in frames['worldmap']['iDT'] and ['DT']."""
+        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+            # nearest time step to DT on the current page
+            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                self.frames['worldmap']['iDT'] += 1
+            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                self.frames['worldmap']['iDT'] -= 1 
+            # for gleam, we take the values of the previous day
+            # NOTE(review): hard-coded two-step offset for 'wg'/'w2'; no bounds
+            # check — could go negative near the start of the page. Verify.
+            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                self.frames['worldmap']['iDT'] -= 2 
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+        #else:
+        #    self.frames['worldmap'].pop('DT')
+
+    def next_datetime(self,event=None):
+        """Advance the worldmap one time step (wrapping around at the end of
+        the current page) and redraw the map.  No-op for time-independent
+        datasets."""
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def prev_datetime(self,event=None):
+        """Step the worldmap one time step back (wrapping around at the start
+        of the current page) and redraw the map.  No-op for time-independent
+        datasets."""
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def next_dataset(self,event=None):
+        """Cycle forward through the configured worldmap input datasets (wraps around)."""
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+    def prev_dataset(self,event=None):
+        """Cycle backward through the configured worldmap input datasets (wraps around)."""
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+
+
+    def sel_dataset(self,inputkey):
+        """Select *inputkey* as the active worldmap dataset, re-align the map
+        datetime with the current profile record, and refresh the affected
+        panels."""
+        self.frames['worldmap']['inputkey'] = inputkey
+        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
+        self.goto_datetime_worldmap(
+            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+            'after')# get nearest datetime of the current dataset to the profile
+        if "fig" in self.__dict__.keys():
+            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+       
+    # def prev_station(self,event=None):
+    #     self.istation = (self.istation - 1) % self.stations.shape[0]
+    #     self.update_station()
+
+
+
+
+    #def update_datetime(self):
+    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
+    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
+    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
+    #        print(self.worldmapfocus['DT'])
+    #        self.refresh_plot_interface(only='worldmap')
+
+    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
+
+        #print('r1')
+        for argkey in args.keys():
+            self.__dict__[arg] = args[argkey]
+
+        axes = self.axes
+        tbox = self.tbox
+        frames = self.frames
+        fig = self.fig
+ 
+        if (only is None) or ('worldmap' in only):
+            globaldata = self.globaldata
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+            else:
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+            keystotranspose = ['lat','lon']
+            for key in dict(datasetxr.dims).keys():
+                if key not in keystotranspose:
+                    keystotranspose.append(key)
+
+            datasetxr = datasetxr.transpose(*keystotranspose)
+            datasetxr = datasetxr.sortby('lat',ascending=False)
+
+            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+            lonleft = lonleft - 360.
+            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+            label = 'worldmap'
+            axes[label].clear()
+            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+
+        if (only is None) or ('worldmap' in only):
+            #if 'axmap' not in self.__dict__ :
+            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
+            #else:
+
+            #stations = self.stations
+
+
+            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
+            #     resolution = 'l', 
+            # area_thresh = 0.1,
+            #     llcrnrlon=-180., llcrnrlat=-90.0,
+            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
+            # 
+            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
+            # self.gmap.drawcountries(color='white',linewidth=0.3)
+            # #self.gmap.fillcontinents(color = 'gray')
+            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
+            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
+            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
+            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # #self.ax5.shadedrelief()
+
+           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
+
+
+            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+
+            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
+            if 'lev' in field.dims:
+                field = field.isel(lev=-1)
+
+            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+            axes[label].axis('off')
+
+            from matplotlib import cm
+            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+            
+            
+            title=frames['worldmap']['inputkey']
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            axes[label].set_title(title)
+
+            label ='worldmap_colorbar'
+            axes[label].clear()
+            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+
+
+            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+            # x,y = self.gmap(lons,lats)
+            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+
+        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+
+            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            store_xlim = {}
+            store_ylim = {}
+            for ikey, key in enumerate(statskeys_out):
+                if (only is not None) and ('stats_lightupdate' in only):
+                    store_xlim[key] = axes['stats_'+key].get_xlim()
+                    store_ylim[key] = axes['stats_'+key].get_ylim()
+                self.axes['stats_'+key].clear()    
+
+            label = 'times'
+            self.axes[label].clear()
+
+            key = 'dthetadt'
+            x = self.frames['stats']['records_all_stations_ini']['datetime']
+            #print(x)
+            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            #print(y)
+            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            #print(z)
+
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            self.axes[label].data[label] = self.axes[label].scatter(x.values,
+                                                                    y.values,
+                                                                    c=z.values,
+                                                                    cmap=self.statsviewcmap,
+                                                                    s=2,
+                                                                    vmin=0.,
+                                                                    vmax=1.,
+                                                                    alpha=alpha_cloud_pixels)
+
+            
+            x = self.frames['stats']['records_current_station_ini']['datetime']
+            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+
+            x = self.frames['profiles']['records_current_station_ini']['datetime']
+            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
+            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
+
+            for ikey, key in enumerate(statskeys_out):
+
+                # show data of all stations
+                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                qvalmax = x.quantile(0.999)
+                qvalmin = x.quantile(0.001)
+                print('applying extra filter over extreme values for plotting stats')
+                selx = (x >= qvalmin) & (x < qvalmax)
+                sely = (x >= qvalmin) & (x < qvalmax)
+                x = x[selx & sely]
+                y = y[selx & sely]
+                z = z[selx & sely]
+                self.axes['stats_'+key].data['stats_'+key] = \
+                       self.axes['stats_'+key].scatter(x,y, c=z,\
+                                cmap=self.statsviewcmap,\
+                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
+
+                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
+
+                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
+                y = self.frames['stats']['current_record_mod_stats'][key]
+                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
+                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
+                    axes['stats_'+key].annotate(text, \
+                                               xy=(x,y),\
+                                               xytext=(0.05,0.05),\
+                                               textcoords='axes fraction',\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               color='white',\
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                # self.axes['stats_'+key].data[key+'_current_record'] = \
+                #        self.axes['stats_'+key].scatter(x,y, c=z,\
+                #                 cmap=self.statsviewcmap,\
+                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
+
+                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
+                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
+                # # highlight data for curent station
+                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+
+                if ikey == len(statskeys_out)-1:
+                    self.axes['stats_'+key].set_xlabel('external')
+                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
+                axes['stats_'+key].set_ylabel('model')
+
+
+                if (only is not None) and ('stats_lightupdate' in only):
+                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
+                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
+                else:
+                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
+                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
+                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
+                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
+                self.abline(1,0,axis=self.axes['stats_'+key])
+
+        if (only is None) or ('stats_colorbar' in only):
+            label ='stats_colorbar'
+            axes[label].clear()
+            import matplotlib as mpl
+            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
+            self.axes[label].fields[label] = \
+             mpl.colorbar.ColorbarBase(self.axes[label],\
+                        orientation='horizontal',\
+                        label="percentile of "+self.frames['worldmap']['inputkey'],
+                        alpha=1.,
+                                cmap=self.statsviewcmap,\
+                                       norm=norm
+                         )
+
+        #print('r1')
+        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
+            #print('r2')
+            label = 'worldmap_stations'
+            axes[label].clear()
+            
+            stations = self.frames['worldmap']['stations'].table
+            globaldata = self.globaldata
+            
+            key = label
+
+            #print('r3')
+            if (stations is not None):
+                xlist = []
+                ylist = []
+                #print('r4')
+                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
+            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    xlist.append(x)
+                    ylist.append(y)
+                #picker is needed to make it clickable (pick_event)
+                axes[label].data[label] = axes[label].scatter(xlist,ylist,
+                                                              c='r', s=15,
+                                                              picker = 15,
+                                                              label=key,
+                                                              edgecolor='k',
+                                                              linewidth=0.8)
+
+            # cb.set_label('Wilting point [kg kg-3]')
+                #print('r5')
+
+                
+            #     xseries = []
+            #     yseries = []
+            #     for iSTN,STN in stations.iterrows():
+            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
+            #         xseries.append(x)                    
+            #         yseries.append(y)
+            #         
+            #         
+            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
+                    
+                if ('current_station' in frames['worldmap']):
+                    #print('r5')
+                    STN = frames['stats']['current_station']
+                    STNID = frames['stats']['STNID']
+                    #print('r5')
+
+                    x,y = len(axes['worldmap'].lon)* \
+                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
+                          len(axes['worldmap'].lat)* \
+                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    #print('r6')
+                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
+                    #                          == \
+                    #                          self.frames['worldmap']['STNID'])\
+                    #                         & \
+                    #                         (self.seltablestats['DT'] \
+                    #                          == self.axes['statsview0].focus['DT']) \
+                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
+                    #print('r7')
+                    text = 'STNID: '+ format(STNID,'10.0f') + \
+                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
+                            ', LON: '+format(STN['longitude'],'3.3f')+ \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+
+                            #+', VAL: '+format(VAL,'.3e')
+
+                    axes[label].scatter(x,y, c='r', s=30,\
+                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
+                    #print('r8')
+            
+                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
+                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
+                    #colorstation = max((min((1.,colorstation)),0.))
+                    colorstation =0.2
+                    from matplotlib import cm
+                    axes[label].annotate(text,
+                                         xy=(x,y),
+                                         xytext=(0.05,0.05),
+                                         textcoords='axes fraction', 
+                                         bbox=dict(boxstyle="round",
+                                         fc = cm.viridis(colorstation)),
+                                         arrowprops=dict(arrowstyle="->",
+                                                         linewidth=1.1),
+                                         color='white' if colorstation < 0.5 else 'black')
+                    #print('r9')
+
+                    # #pos = sc.get_offsets()[ind["ind"][0]]
+                    # 
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label].data[label+'statannotate'].set_text(text)
+                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
+                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
+            #print('r9')
+            axes[label].axis('off')
+            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
+            axes[label].set_ylim((len(axes['worldmap'].lat),0))
+            #print('r10')
+
+        if (only is None) or ('profiles' in only): 
+            #print('r11')
+
+            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
+            # # self.update_station(goto_first_sounding=False)
+            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
+            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
+            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
+            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
+
+            label = 'air_ap:theta'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # +\
+                # ' -> '+ \
+                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+            
+            
+            
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+            #print('r12')
+
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
+            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
+            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            #print('r13')
+            # 
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r14')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+
+            #print('r15')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+                          
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r16')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r17')
+            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            print(hmax)
+            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            if valid_mod:
+
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="mod "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+
+            #print('r18')
+            axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('theta [K]')
+
+            label = 'air_ap:q'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+
+            #print('r19')
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            if valid_mod:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            else:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            # 
+            #print('r20')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r21')
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            #print('r23')
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r24')
+            if valid_mod:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="fit ")#+\
+                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #+'LT')
+            #print('r25')
+            #axes[label].legend()
+
+            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            #axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('q [kg/kg]')
+
+            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+
+            # #pl.subplots_adjust(right=0.6)
+
+            # label = 'q_pro'
+            # axes[label].clear()
+
+            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
+            # 
+            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
+            # 
+            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
+
+            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
+            # #pl.subplots_adjust(right=0.6)
+            # axes[label].set_xlabel('specific humidity [kg/kg]')
+ 
+
+            #print('r26')
+            time = self.frames['profiles']['record_yaml_mod'].out.time
+            for ilabel,label in enumerate(['h','theta','q']):
+                axes["out:"+label].clear()
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].set_ylabel(label)
+                if ilabel == 2:
+                    axes["out:"+label].set_xlabel('local sun time [h]')
+                
+            #print('r27')
+            label = 'SEB'
+            axes[label].clear()
+            
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
+            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
+            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
+                
+            #print('r28')
+            
+            axes[label].legend()
+            
+            #         for ax in self.fig_timeseries_axes:
+#             ax.clear()
+#         
+#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
+#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
+#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
+#         #print(self.morning_sounding.c4gl.out.Swin)
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
+#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
+#         self.fig_timeseries_axes[3].legend()
+#         self.fig.canvas.draw()
+            
+
+
+
+
+
+
+        #self.ready()
+        #print('r29')
+        fig.canvas.draw()
+        #fig.show()
+
+        self.axes = axes
+        self.tbox = tbox
+        self.fig = fig
+
+    def on_pick(self,event):
+        #print("HELLO")
+        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
+        #self.axes['theta_pro'].clear()
+        #self.axes['q_pro'].clear()
+        
+
+        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
+        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
+        keys_to_axes = {}
+        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
+
+        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
+        keys_to_axes['worldmap'] = 'worldmap'
+        
+        axes = self.axes
+        #nstatsview = self.nstatsview
+        #statsviewcmap = self.statsviewcmap
+        stations = self.frames['worldmap']['stations'].table
+
+
+        #print("p1")
+        current = event
+        artist = event.artist
+        
+        selkey = artist.get_label()
+        
+        #print(keys_to_axes)
+        
+        label = keys_to_axes[selkey]
+        #print("HELLO",selkey,label)
+
+        # # Get to know in which axes we are
+        # label = None
+        # for axeskey in axes.keys():
+        #     if event.inaxes == axes[axeskey]:
+        #         label = axeskey
+        #         
+
+        # cont, pos = None, None
+        
+        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
+        ind = event.ind
+        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
+        d = axes[label].collections[0]
+        #d.set_offset_position('data')
+        xy = d.get_offsets()
+        x, y =  xy[:,0],xy[:,1]
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+
+        #print("p2")
+        if len(ind) > 0:
+            #print("p3")
+            pos = x[ind[0]], y[ind[0]]
+
+            #if label[:-1] == 'statsview':
+            #    #seltablestatsstdrel = self.seltablestatsstdrel
+            #    #seltablestatspct = self.seltablestatspct
+
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    
+            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
+            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+            #    
+            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
+            #el
+            if (label == 'worldmap') or (label == 'worldmap_stations'):
+                self.hover_active = False
+                if (self.frames['worldmap']['STNID'] !=
+                    self.frames['profiles']['STNID']):
+                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
+                # so we just need to perform update_station
+                    self.update_station()
+            elif (label[:5] == 'stats'):
+
+                self.hover_active = False
+                if (self.frames['stats']['STNID'] !=
+                self.frames['profiles']['STNID']) or \
+                   (self.frames['stats']['current_record_chunk'] != 
+                    self.frames['profiles']['current_record_chunk']) or \
+                   (self.frames['stats']['current_record_index'] != 
+                    self.frames['profiles']['current_record_index']):
+
+
+
+                    for key in ['STNID','current_station','stations_iterator']: 
+                        self.frames['worldmap'][key] = self.frames['stats'][key] 
+
+                    for key in self.frames['stats'].keys():
+                        self.frames['profiles'][key] = self.frames['stats'][key]
+
+                    STNID = self.frames['profiles']['STNID']
+                    chunk = self.frames['profiles']['current_record_chunk']
+                    if 'current_station_file_ini' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_ini'].close()
+                    self.frames['profiles']['current_station_file_ini'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+                    if 'current_station_file_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_mod'].close()
+                    self.frames['profiles']['current_station_file_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_afternoon'].close()
+                    self.frames['profiles']['current_station_file_afternoon'] = \
+                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+                    # go to hovered record of current station
+                    self.frames['profiles']['records_iterator'] = \
+                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # ... and go to the record of the profile window (last one that
+                    # was picked by the user)
+                    found = False
+                    EOF = False
+                    while (not found) and (not EOF):
+                        try:
+                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
+                            #print("hello*")
+                            #print(self.frames['profiles']['current_record_index'])
+                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
+                               (index == self.frames['profiles']['current_record_index']) and \
+                               (STNID == self.frames['profiles']['STNID']):
+                                #print('found!')
+                                found = True
+                        except StopIteration:
+                            EOF = True
+                    if found:
+                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_chunk'] = chunk
+                        self.frames['stats']['current_record_index'] = index
+                    # # for the profiles we make a distinct record iterator, so that the
+                    # # stats iterator can move independently
+                    # self.frames['profiles']['records_iterator'] = \
+                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # (self.frames['profiles']['STNID'] , \
+                    # self.frames['profiles']['current_record_index']) , \
+                    # self.frames['profiles']['current_record_mod'] = \
+                    #                 self.frames['profiles']['records_iterator'].__next__()
+
+
+                    # for the profiles we make a distinct record iterator, so that the
+                    # stats iterator can move independently
+
+                    self.update_record()
+
+
+
    def on_plot_hover(self,event):
        """Mouse-motion callback for the interactive plot window.

        While the cursor hovers over a point in one of the stats panels (or
        over a station pixel on the worldmap), the stats/worldmap frames are
        temporarily pointed at the hovered station and record and the plot
        interface is refreshed in "hover" mode (``self.hover_active = True``).
        When the cursor is over no artist, the frames are restored to the
        record currently shown in the profiles window.

        NOTE(review): assumes ``self.frames`` and ``self.axes`` (worldmap,
        stats and profiles entries) were populated elsewhere -- not visible
        in this chunk; confirm against the class initializer.
        """
        axes = self.axes
        #print('h1')

        # Get to know in which axes we are
        label = None
        for axeskey in axes.keys():
            if event.inaxes == axes[axeskey]:
                label = axeskey

        #print('h2')

        cont, pos = None, None
        #print (label)

        if label is not None:
            # only axes that carry hoverable scatter data are considered
            if  ('data' in axes[label].__dict__.keys()) and \
                (label in axes[label].data.keys()) and \
                (axes[label].data[label] is not None):

                #print('h3')
                cont, ind =  axes[label].data[label].contains(event)
                selkey = axes[label].data[label].get_label()
                if len(ind["ind"]) > 0:
                    #print('h4')
                    # data coordinates of the first hovered point
                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
                    #print('pos',pos,selkey)


                    #if label[:-1] == 'statsview':
                    #    seltablestatsstdrel = self.seltablestatsstdrel
                    #    seltablestatspct = self.seltablestatspct

                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
                    #    self.hover_active = True
                    #    
                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
                    #    
                    #el
                    #print(label[:5])
                    # hovering over a stats scatter (or the times panel):
                    # point the stats frame at the hovered record
                    if (label[:5] == 'stats') or (label == 'times'):
                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
                        

                        if label[:5] == 'stats':
                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
                            # look up the (STNID, chunk, index) key of the
                            # record whose (obs, mod) pair equals the hovered
                            # position
                            (self.frames['stats']['STNID'] ,
                             self.frames['stats']['current_record_chunk'], 
                             self.frames['stats']['current_record_index']) = \
                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
                        # elif label[:5] == 'stats':
                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
                        #     (self.frames['stats']['STNID'] ,
                        #      self.frames['stats']['current_record_chunk'], 
                        #      self.frames['stats']['current_record_index']) = \
                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]


                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
                        
                        # # TO TEST: should be removed, since it's is also done just below
                        # self.frames['stats']['stations_iterator'] = \
                        #     self.frames['worldmap']['stations_iterator'] 
                
                
                        # self.goto_datetime_worldmap(
                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
                        #     'after')


                        # scrolling to the right station
                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
                        EOF = False
                        found = False
                        while (not found and not EOF):
                            if (STNID == self.frames['stats']['STNID']):
                                   found = True 
                            if not found:
                                try:
                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
                                except (StopIteration):
                                    EOF = True
                        if found:
                        #    self.frames['stats']['STNID'] = STNID
                            self.frames['stats']['current_station'] =  station

                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]


                        # generate index of the current station
                        self.frames['stats']['records_current_station_index'] = \
                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                             == self.frames['stats']['STNID'])


                        tab_suffixes = \
                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                        for tab_suffix in tab_suffixes:
                            self.frames['stats']['records_current_station'+tab_suffix] = \
                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]



                        # go to hovered record of current station
                        self.frames['stats']['records_iterator'] = \
                                        records_iterator(self.frames['stats']['records_current_station_mod'])


                        # ... and go to the record of the profile window (last one that
                        # was picked by the user)
                        found = False
                        EOF = False
                        while (not found) and (not EOF):
                            try:
                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
                                #print("hello*")
                                #print(self.frames['profiles']['current_record_index'])
                                if (index == self.frames['stats']['current_record_index']) and \
                                   (chunk == self.frames['stats']['current_record_chunk']) and \
                                   (STNID == self.frames['stats']['STNID']):
                                    #print('found!')
                                    found = True
                            except StopIteration:
                                EOF = True
                        if found:
                            #print('h5')
                            self.frames['stats']['current_record_mod'] = record
                            self.frames['stats']['current_record_chunk'] = chunk
                            self.frames['stats']['current_record_index'] = index

                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
                        tab_suffixes = \
                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                        for tab_suffix in tab_suffixes:
                            #print(tab_suffix)
                            #print(self.frames['stats']['records_current_station'+tab_suffix])
                            self.frames['stats']['current_record'+tab_suffix] =  \
                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
                                      (self.frames['stats']['STNID'] , \
                                       self.frames['stats']['current_record_chunk'] , \
                                       self.frames['stats']['current_record_index'])]


                        self.hover_active = True
                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
                        # print('h13')
                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
                        #     self.goto_datetime_worldmap(
                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                        #         'after')
                        #     if "fig" in self.__dict__.keys():
                        #         self.refresh_plot_interface(only=['stats_lightupdate',
                        #                                           'worldmap',
                        #                                           'profiles'])
                        # else:
                        #     if "fig" in self.__dict__.keys():
                        #         self.refresh_plot_interface(only=['stats_lightupdate',
                        #                                           'worldmap_stations',
                        #                                           'profiles'])



                    # hovering over the worldmap: translate the hovered pixel
                    # to lat/lon and scroll to the matching station
                    elif label in ['worldmap_stations','worldmap']:
                        #print('h5')

                        if (self.axes['worldmap'].lat is not None) and \
                           (self.axes['worldmap'].lon is not None):


                            #self.loading()
                            self.fig.canvas.draw()
                            self.fig.show()


                            # get position of 
                            # (linear mapping from pixel index to the lat/lon
                            # range of the worldmap grid, rounded to 4 decimals)
                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
                                                                 self.axes['worldmap'].lat[0]) + \
                                           self.axes['worldmap'].lat[0],4)
                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
                                                                 self.axes['worldmap'].lon[0]) + \
                                           self.axes['worldmap'].lon[0],4)
                        
                            stations = self.frames['worldmap']['stations'].table
                            #print('h7')
                        
                            #reset stations iterator:
                            # if 'stations_iterator' in self.frames['worldmap'].keys():
                            #     self.frames['worldmap']['stations_iterator'].close()
                            #     del(self.frames['worldmap']['stations_iterator'])
                            # if 'stations_iterator' in self.frames['stats'].keys():
                            #     self.frames['stats']['stations_iterator'].close()
                            #     del(self.frames['stats']['stations_iterator'])
                            self.frames['worldmap']['stations_iterator'] =\
                               stations_iterator(self.frames['worldmap']['stations'])
                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
                            EOF = False
                            found = False
                            while (not found and not EOF):
                                #print('h8',station.latitude,latmap)
                                #print('h8',station.longitude,lonmap)
                                if (round(station.latitude,3) == round(latmap,3)) and \
                                    (round(station.longitude,3) == round(lonmap,3)):
                                       found = True 
                                if not found:
                                    try:
                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
                                    except (StopIteration):
                                        EOF = True
                            if found:
                                self.frames['worldmap']['STNID'] = STNID
                                self.frames['worldmap']['current_station'] = \
                                        station
                        
                            self.frames['stats']['stations_iterator'] = \
                                self.frames['worldmap']['stations_iterator'] 
                            #print('h8')
                            # inherit station position for the stats frame...
                            for key in self.frames['worldmap'].keys():
                                self.frames['stats'][key] = self.frames['worldmap'][key]
                                
                            ## fetch records of current station...
                            #self.frames['stats']['records_current_station_mod'] =\
                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)

                            # ... and their indices
                            self.frames['stats']['records_current_station_index'] = \
                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                                     == \
                                     self.frames['stats']['current_station'].name)


                            tab_suffixes = \
                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                            for tab_suffix in tab_suffixes:
                                self.frames['stats']['records_current_station'+tab_suffix] = \
                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]


                            # ... create a record iterator ...
                            #self.frames['stats']['records_iterator'].close()
                            del(self.frames['stats']['records_iterator'])
                            self.frames['stats']['records_iterator'] = \
                                self.frames['stats']['records_current_station_mod'].iterrows()



                        
                            #print('h9')
                            # ... and go to to the first record of the current station
                            (self.frames['stats']['STNID'] , \
                             self.frames['stats']['current_record_chunk'] , \
                             self.frames['stats']['current_record_index']) , \
                            self.frames['stats']['current_record_mod'] = \
                                self.frames['stats']['records_iterator'].__next__()
                        



                            #print('h10')
                            # cache the current record
                            tab_suffixes = \
                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                            for tab_suffix in tab_suffixes:
                                self.frames['stats']['current_record'+tab_suffix] =  \
                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
                                          (self.frames['stats']['STNID'] , \
                                           self.frames['stats']['current_record_chunk'] , \
                                           self.frames['stats']['current_record_index'])]

                            #print('h11')
                            
                            self.hover_active = True
                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
                            #print('h13')

                        

            #if (stations is not None):
            #    for iSTN,STN in stations.iterrows():
            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)

        # self.fig.show()
 
        # we are hovering on nothing, so we are going back to the position of
        # the profile sounding
        if pos is None:
            if self.hover_active == True:
                #print('h1*')
                
                #self.loading()
                # to do: reset stations iterators

                # get station and record index from the current profile
                for key in ['STNID', 'current_station']:
                    self.frames['stats'][key] = self.frames['profiles'][key]

                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
                self.frames['stats']['current_station'] = \
                        self.frames['profiles']['current_station']
                #print('h3a*')
                self.frames['stats']['records_current_station_mod'] = \
                        self.frames['profiles']['records_current_station_mod']
                #print('h3b*')

                # the next lines recreate the records iterator. Probably it's
                # better to just copy the profile iterator and its position to
                # the worldmap/stats 

                # reset the records iterator...
                #self.frames['stats']['records_iterator'].close()
                del(self.frames['stats']['records_iterator'])
                self.frames['stats']['records_iterator'] = \
                    self.frames['stats']['records_current_station_mod'].iterrows()
                #print('h4*')

                # ... and go to the record of the profile window (last one that
                # was picked by the user)
                found = False
                EOF = False
                while (not found) and (not EOF):
                    try:
                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
                        #print("hello*")
                        #print(self.frames['profiles']['current_record_index'])
                        #print(self.frames['profiles']['STNID'])
                        #print(STNID,index)
                        if (index == self.frames['profiles']['current_record_index']) and \
                            (chunk == self.frames['profiles']['current_record_chunk']) and \
                            (STNID == self.frames['profiles']['STNID']):
                            #print('found!')
                            found = True
                    except StopIteration:
                        EOF = True
                if found:
                    #print('h5*')
                    self.frames['stats']['current_record_mod'] = record
                    self.frames['stats']['current_record_chunk'] = chunk
                    self.frames['stats']['current_record_index'] = index

                #print('h6*')



                # # fetch records of current station...
                # self.frames['stats']['records_current_station_mod'] =\
                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)

                # ... and their indices
                self.frames['stats']['records_current_station_index'] = \
                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                         == \
                         self.frames['stats']['current_station'].name)


                tab_suffixes = \
                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                for tab_suffix in tab_suffixes:
                    self.frames['stats']['records_current_station'+tab_suffix] = \
                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

                

                # cache the records of the current stations
                tab_suffixes = \
                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                for tab_suffix in tab_suffixes:
                    self.frames['stats']['current_record'+tab_suffix] =  \
                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
                              (self.frames['stats']['STNID'] , \
                               self.frames['stats']['current_record_chunk'] , \
                               self.frames['stats']['current_record_index'])]


                # the next lines recreate the stations iterator. Probably it's
                # better to just copy the profile iterator and its position to
                # the worldmap/stats 
                #print('h7*')

                # reset the stations iterators
                for framekey in ['stats','worldmap']:
                    ##print(framekey)
                    if 'stations_iterator' in self.frames[framekey]:
                        #self.frames[framekey]['stations_iterator'].close()
                        del(self.frames[framekey]['stations_iterator'])

                self.frames['worldmap']['current_station'] = \
                        self.frames['profiles']['current_station']

                #recreate the stations iterator for the worldmap...
                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 

                # ... and go the position of the profile
                #print('h8*')
                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
                EOF = False
                found = False
                while (not found and not EOF):
                    if STNID == self.frames['profiles']['STNID'] :
                        found = True 
                    if not found:
                        try:
                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
                        except (StopIteration):
                            EOF = True
                if found:
                    self.frames['worldmap']['current_station'] = station
                    self.frames['worldmap']['STNID'] = STNID
                #print('h9*')
                self.frames['stats']['stations_iterator'] = \
                    self.frames['worldmap']['stations_iterator'] 

                # the stats window now inherits the current station from the
                # worldmap
                for key in ['STNID','current_station','stations_iterator']: 
                    self.frames['stats'][key] = self.frames['worldmap'][key] 
                #print('h10*')

                # # we now only need inherit station position and go to first record
                # for key in self.frames['worldmap'].keys():
                #     self.frames['stats'][key] = self.frames['worldmap'][key]

                # self.frames['stats']['records_current_station'] =\
                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))

                # #print(self.frames['stats']['records_current_station'])
                # self.frames['stats']['records_iterator'] = \
                #                 self.frames['stats']['records_current_station'].iterrows()
                # (self.frames['stats']['STNID'] , \
                # self.frames['stats']['current_record_index']) , \
                # self.frames['stats']['current_record_mod'] = \
                #                 self.frames['stats']['records_iterator'].__next__()
                




                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
                self.hover_active = False
                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
+    # def loading(self):
+    #     self.tbox['loading'].set_text('Loading...')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+    #     sleep(0.1)
+    # def ready(self):
+    #     self.tbox['loading'].set_text('Ready')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+
+
+
diff --git a/class4gl/model.py b/class4gl/model.py
new file mode 100644
index 0000000..8760411
--- /dev/null
+++ b/class4gl/model.py
@@ -0,0 +1,2214 @@
+# 
+# CLASS
+# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
+# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
+# Copyright (c) 2011-2015 Chiel van Heerwaarden
+# Copyright (c) 2011-2015 Bart van Stratum
+# Copyright (c) 2011-2015 Kees van den Dries
+# 
+# This file is part of CLASS
+# 
+# CLASS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# 
+# CLASS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with CLASS.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import copy as cp
+import numpy as np
+import sys
+import warnings
+import pandas as pd
+from ribtol_hw import zeta_hs2 , funcsche
+import logging
+#from SkewT.thermodynamics import Density
+#import ribtol
+
grav = 9.81  # gravitational acceleration [m s-2]
def esat(T):
    """Saturation vapour pressure [Pa] at temperature T [K].

    Tetens-type formula over liquid water, referenced to the triple
    point (273.16 K), where it returns exactly 611 Pa.
    """
    exponent = 17.2694 * (T - 273.16) / (T - 35.86)
    return 0.611e3 * np.exp(exponent)
+
def qsat(T, p):
    """Saturation specific humidity [kg kg-1] at temperature T [K] and pressure p [Pa]."""
    # 0.622 = ratio of molar masses of water vapour and dry air (eps)
    e_s = esat(T)
    return 0.622 * e_s / p
+
+
def ribtol(Rib, zsl, z0m, z0h): 
    """Convert a bulk Richardson number into the Obukhov length L [m].

    Solves, by Newton-Raphson iteration with a finite-difference
    derivative,
        Rib = zsl/L * (ln(zsl/z0h) - psih(zsl/L) + psih(z0h/L))
                    / (ln(zsl/z0m) - psim(zsl/L) + psim(z0m/L))**2

    Arguments:
        Rib -- bulk Richardson number [-]
        zsl -- surface-layer reference height [m]
        z0m -- roughness length for momentum [m]
        z0h -- roughness length for heat/scalars [m]

    Returns:
        L -- Obukhov length [m]; the iteration is aborted (break) once
             |L| > 1e12, i.e. effectively neutral conditions.
    """
    # work in double precision regardless of the input types
    Rib = np.float64(Rib)
    zsl = np.float64(zsl)
    z0m = np.float64(z0m)
    z0h = np.float64(z0h)
    #print(Rib,zsl,z0m,z0h)
    # first guess: positive L (stable) for Rib > 0, negative (unstable)
    # otherwise; L0 only needs to differ from L to enter the loop
    if(Rib > 0.):
        L    = 1.
        L0   = 2.
    else:
        L  = -1.
        L0 = -2.
    #print(Rib,zsl,z0m,z0h)
    # iterate until successive estimates differ by less than 0.001 m
    while (abs(L - L0) > 0.001):
        L0      = L
        # fx = Rib - Rib(L): the residual we drive to zero
        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
        # central finite difference of the residual over +/- 0.1% of L
        Lstart  = L - 0.001*L
        Lend    = L + 0.001*L
        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
        # Newton step; NOTE(review): fxdif == 0 would raise
        # ZeroDivisionError here -- presumably never happens for sane
        # inputs, but unguarded.
        L       = L - fx / fxdif
        #print(L,fx/fxdif)
        if(abs(L) > 1e12):
            break

    return L
+  
def psim(zeta):
    """Integrated stability correction for momentum, psi_m(zeta).

    zeta = z/L is the dimensionless stability parameter. The unstable
    branch (zeta <= 0) uses the classical Businger-Dyer form; the
    stable branch uses the Beljaars-Holtslag (1991) form.
    """
    if zeta > 0:
        # stable side
        return (-2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta)
                - zeta - (10./3.) / 0.35)
    # unstable side
    x = (1. - 16. * zeta)**(0.25)
    return (3.14159265 / 2. - 2. * np.arctan(x)
            + np.log((1. + x)**2. * (1. + x**2.) / 8.))
+  
def psih(zeta):
    """Integrated stability correction for heat/scalars, psi_h(zeta).

    zeta = z/L is the dimensionless stability parameter. The unstable
    branch (zeta <= 0) uses the classical Businger-Dyer form; the
    stable branch uses the Beljaars-Holtslag (1991) form.
    """
    if zeta > 0:
        # stable side
        return (-2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta)
                - (1. + (2./3.) * zeta)**(1.5) - (10./3.) / 0.35 + 1.)
    # unstable side
    x = (1. - 16. * zeta)**(0.25)
    return 2. * np.log((1. + x * x) / 2.)
+ 
+class model:
    def __init__(self, model_input = None,debug_level=None):
        """Create a model instance.

        Arguments:
            model_input -- either a classical CLASS input object (deep-
                copied verbatim onto self.input), or a class4gl-style
                input, which is recognized by having a 'pars' attribute;
                in that case the scalar parameters are copied to
                self.input and the profile/forcing tables (air_ac when
                sw_ac is set, air_ap when sw_ap is set) are attached to
                it as well.
            debug_level -- optional logging level for the 'model' logger
                (e.g. logging.DEBUG); left untouched when None.
        """

        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
        """

        self.logger = logging.getLogger('model')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        """ initialize the different components of the model """ 

        if model_input is not None:
            # class4gl style input
            if 'pars' in model_input.__dict__.keys():

                # we make a reference to the full input first, so we can dump it
                # afterwards
                self.input_c4gl = model_input

                # we copy the regular parameters first. We keep the classical input
                # format as self.input so that we don't have to change the entire
                # model code.
                self.input = cp.deepcopy(model_input.pars)

                # we copy other sections we are interested in, such as profile
                # data, and store it also under input

                # I know we mess up a bit the structure of the class4gl_input, but
                # we will make it clean again at the time of dumping data

                # So here, we copy the profile data into self.input
                # 1. Air circulation data 
                if 'sw_ac' in self.input.__dict__.keys() \
                   and self.input.__dict__['sw_ac']:
                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']

                    # correct pressure of levels according to surface pressure
                    # error (so that interpolation is done in a consistent way)

                    # p_e: surface-pressure error = observed Ps minus the
                    # gridded surface pressure sp; it is propagated upward
                    # through the levels (bottom of the table first).
                    # NOTE(review): the chained indexing
                    # `self.input.air_ac.p.iloc[irow] = ...` may hit pandas'
                    # SettingWithCopy behaviour -- verify the assignment
                    # actually updates air_ac in place.
                    # NOTE(review): the p_e update below reads the already
                    # corrected p at irow (it was overwritten one statement
                    # earlier) -- confirm this ordering is intended.
                    p_e = self.input.Ps - self.input.sp
                    for irow in self.input.air_ac.index[::-1]:
                       self.input.air_ac.p.iloc[irow] =\
                        self.input.air_ac.p.iloc[irow] + p_e
                       p_e = p_e -\
                       (self.input.air_ac.p.iloc[irow]+p_e)/\
                        self.input.air_ac.p.iloc[irow] *\
                        self.input.air_ac.delpdgrav.iloc[irow]*grav



                # 2. Air profile data (e.g. fitted balloon soundings)
                if 'sw_ap' in self.input.__dict__.keys() \
                   and self.input.__dict__['sw_ap']:
                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']

            # standard class input
            else:
                self.input = cp.deepcopy(model_input)
+
+    def load_yaml_dict(self,yaml_dict):
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                for keydata,value in data.items():
+                    self.__dict__[keydata] = value
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            #elif key == 'sources':
+            #    self.__dict__[key] = data
+            elif key == 'out':
+                # lets convert it to a list of dictionaries
+                dictouttemp = pd.DataFrame(data).to_dict('list')
+            else: 
+                 warnings.warn("Key '"+key+"' is be implemented.")
+            #     self.__dict__[key] = data
+
+
+        self.tsteps = len(dictouttemp['h'])
+        self.out = model_output(self.tsteps)
+        for keydictouttemp in dictouttemp.keys():
+            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+
+
+  
    def run(self):
        """Run the complete simulation.

        Calls init() to set up the model state, advances the model until
        self.t reaches self.tsteps, and finally calls exitmodel() to
        release temporary variables.
        """
        # initialize model variables
        self.init()
  
        # time integrate model 
        # NOTE(review): a while-loop is used instead of the commented-out
        # for-loop, presumably because timestep() itself advances self.t
        # (possibly by more than one step when sub-stepping; cf.
        # self.substep / self.substeps set in init) -- confirm against
        # timestep().
        #for self.t in range(self.tsteps):
        while self.t < self.tsteps:
          
            # time integrate components
            self.timestep()
  
        # delete unnecessary variables from memory
        self.exitmodel()
+    
+    def init(self):
+        # assign variables from input data
+        # initialize constants
+        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
+        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        self.rho        = 1.2                   # density of air [kg m-3]
+        self.k          = 0.4                   # Von Karman constant [-]
+        self.g          = 9.81                  # gravity acceleration [m s-2]
+        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+        self.bolz       = 5.67e-8               # Bolzman constant [-]
+        self.rhow       = 1000.                 # density of water [kg m-3]
+        self.S0         = 1368.                 # solar constant [W m-2]
+
+        # A-Gs constants and settings
+        # Plant type:       -C3-     -C4-
+        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
+        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
+        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
+        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
+        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
+        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
+        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
+        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
+        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
+        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
+        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
+
+        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
+        self.mair       =  28.9;                # molecular weight air [g mol -1]
+        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
+
+        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
+        self.wmax       =  0.55;                # upper reference value soil water [-]
+        self.wmin       =  0.005;               # lower reference value soil water [-]
+        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
+        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
+
+        # Read switches
+        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
+        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
+        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
+        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
+        self.sw_sl      = self.input.sw_sl      # surface layer switch
+        self.sw_rad     = self.input.sw_rad     # radiation switch
+        self.sw_ls      = self.input.sw_ls      # land surface switch
+        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
+        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
+
+        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
+        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
+        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+  
+        # initialize mixed-layer
+        self.h          = self.input.h          # initial ABL height [m]
+        self.Ps         = self.input.Ps         # surface pressure [Pa]
+        self.sp         = self.input.sp         # This is also surface pressure
+                                                #but derived from the global data [Pa]
+        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
+        self.ws         = None                  # large-scale vertical velocity [m s-1]
+        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
+        self.we         = -1.                   # entrainment velocity [m s-1]
+       
+         # Temperature 
+        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
+        
+        
+        self.substep    = False
+        self.substeps   = 0
+
+
+
+        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
+        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
+        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
+        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
+        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
+ 
+        self.wstar      = 0.                    # convective velocity scale [m s-1]
+ 
+        # 2m diagnostic variables 
+        self.T2m        = None                  # 2m temperature [K]
+        self.q2m        = None                  # 2m specific humidity [kg kg-1]
+        self.e2m        = None                  # 2m vapor pressure [Pa]
+        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
+        self.u2m        = None                  # 2m u-wind [m s-1]
+        self.v2m        = None                  # 2m v-wind [m s-1]
+ 
+        # Surface variables 
+        self.thetasurf  = self.input.theta      # surface potential temperature [K]
+        self.thetavsurf = None                  # surface virtual potential temperature [K]
+        self.qsurf      = None                  # surface specific humidity [g kg-1]
+
+        # Mixed-layer top variables
+        self.P_h        = None                  # Mixed-layer top pressure [pa]
+        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
+        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
+        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
+        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
+        self.dz_h       = None                  # Transition layer thickness [-]
+        self.lcl        = None                  # Lifting condensation level [m]
+
+        # Virtual temperatures and fluxes
+        self.thetav     = None                  # initial mixed-layer potential temperature [K]
+        self.dthetav    = None                  # initial virtual temperature jump at h [K]
+        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
+        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
+       
+        
+        
+        
+        
+        
+        # Moisture 
+        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
+
+        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
+        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
+        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
+  
+        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
+        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
+        self.e          = None                  # mixed-layer vapor pressure [Pa]
+        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
+        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
+      
+        
+        
+        # CO2
+        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
+        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
+        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
+        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
+        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
+        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
+        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
+        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
+        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
+       
+        # Wind 
+        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
+        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
+        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = self.input.advu       # advection of u-wind [m s-2]
+        
+        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
+        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
+        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = self.input.advv       # advection of v-wind [m s-2]
+         
+  # BEGIN -- HW 20170606
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        if self.sw_ac:
+        # this is the data frame with the grided profile on the L60 grid
+        # (subsidence, and advection) 
+            self.air_ac      = self.input.air_ac  # full level air circulation
+                                                  # forcing
+            # self.air_ach     = self.input.air_ach # half level air circulation
+            #                                       # forcing
+            
+
+        if self.sw_ap:
+        # this is the data frame with the fitted profile (including HAGL,
+        # THTA,WSPD, SNDU,WNDV PRES ...)
+            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
+
+            # just for legacy reasons...
+            if 'z' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
+            if 'p' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
+
+            indexh = np.where(self.air_ap.z.values == self.h)
+            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
+                raise ValueError("Error input profile consistency: mixed- \
+                                 layer height needs to be equal to the second \
+                                 and third \
+                                 level of the vertical profile input!")
+            # initialize q from its profile when available
+            p_old = self.Ps
+            p_new = self.air_ap.p[indexh[0][0]]
+            
+            if ((p_old is not None) & (p_old != p_new)):
+                print("Warning: Ps input was provided ("+str(p_old)+\
+                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+                    +str(p_new)+"Pa).")
+                                    
+            self.Ps = p_new
+            # these variables/namings are more convenient to work with in the code
+            # we will update the original variables afterwards
+            #self.air_ap['q'] = self.air_ap.QABS/1000.
+
+            self.air_ap = \
+                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
+            # we require the temperature fields, since we need to consider
+            # advection
+            # if self.sw_ac:
+            #     #self.air_ap['theta'] = self.air_ap['t'] *
+
+            #     # we consider self.sp in case of air-circulation input (for
+            #     # consistence)
+            #     self.air_ap['t'] = \
+            #                 self.air_ap.theta *  \
+            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
+            # else:
+            # we consider self.Ps in case of balloon input only 
+            self.air_ap = self.air_ap.assign(t = lambda x: \
+                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
+
+            #self.air_ap['theta'] = self.air_ap.THTA
+            if 'u' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
+            if 'v' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
+
+            for var in ['theta','q','u','v']:
+
+                
+                if self.air_ap[var][1] != self.air_ap[var][0]:
+                    raise ValueError("Error input profile consistency: two \
+                                     lowest profile levels for "+var+" should \
+                                     be equal.")
+                
+                # initialize the value from its profile when available
+                value_old = self.__dict__[var]
+                value_new = self.air_ap[var][indexh[0][0]]
+                
+                if ((value_old is not None) & (value_old != value_new)):
+                    warnings.warn("Warning:  input was provided \
+                                     ("+str(value_old)+ "kg kg-1), \
+                                     but it is now overwritten by the first \
+                                     level (index 0) of air_ap]var\ which is \
+                                     different (" +str(value_new)+"K).")
+                                        
+                self.__dict__[var] = value_new
+
+                # make a profile of the stratification 
+                # please note that the stratification between z_pro[i] and
+                # z_pro[i+1] is given by air_ap.GTHT[i]
+
+                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
+                # np.gradient(self.z_pro)
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
+
+
+                self.__dict__['gamma'+var] = \
+                    self.air_ap['gamma'+var][np.where(self.h >= \
+                                                     self.air_ap.z)[0][-1]]
+
+
+
+        # the variable p_pro is just for diagnosis of lifted index
+            
+            
+
+            # input Ph is wrong, so we correct it according to hydrostatic equation
+            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+
+            #if self.sw_ac:
+                # note that we use sp as surface pressure, which is determined
+                # from era-interim instead of the observations. This is to
+                # avoid possible failure of the interpolation routine
+                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
+                #                          + \
+                #                          list(self.air_ap.p[3:]))
+
+            # else:
+                # in the other case, it is updated at the time of calculting
+                # the statistics 
+
+# END -- HW 20170606      
+        #print(self.air_ap)
+
+        if self.sw_ac and not self.sw_ap:
+            raise ValueError("air circulation switch only possible when air \
+                             profiles are given")
+        
+        if self.sw_ac:
+
+            # # # we comment this out, because subsidence is calculated
+            # according to advection
+            # #interpolate subsidence towards the air_ap height coordinate
+            # self.air_ap['w'] = np.interp(self.air_ap.p,\
+            #                               self.air_ac.p,\
+            #                               self.air_ac.w) 
+            # #subsidence at the mixed-layer top
+            # self.w = self.air_ap.w[1]
+        
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+                # in case we didn't find any points, we just take the lowest one.
+                # actually, this can happen if ERA-INTERIM pressure levels are
+                # inconsistent with 
+                if in_ml.sum() == 0:
+                    warnings.warn(" no circulation points in the mixed layer \
+                                  found. We just take the bottom one.")
+                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+
+                for var in ['t','q','u','v']:
+    
+                   # calculation of the advection variables for the mixed layer
+                   # we weight by the hydrostatic thickness of each layer and
+                   # divide by the total thickness
+                   self.__dict__['adv'+var] = \
+                            ((self.air_ac['adv'+var+'_x'][in_ml] \
+                             + \
+                             self.air_ac['adv'+var+'_y'][in_ml])* \
+                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                            self.air_ac['delpdgrav'][in_ml].sum()
+
+                   # calculation of the advection variables for the profile above
+                   # (lowest 3 values are not used by class)
+                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
+                   self.air_ap['adv'+var] = \
+                           np.interp(self.air_ap.p,\
+                                     self.air_ac.p,\
+                                     self.air_ac['adv'+var+'_x']) \
+                           + \
+                           np.interp(self.air_ap.p, \
+                                       self.air_ac.p, \
+                                       self.air_ac['adv'+var+'_y'])
+
+                # as an approximation, we consider that advection of theta in the
+                # mixed layer is equal to advection of t. This is a sufficient
+                # approximation since theta and t are very similar at the surface
+                # pressure.
+                self.__dict__['advtheta'] = self.__dict__['advt']
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # self.wrho = np.interp(self.P_h,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) 
+            # self.ws   = self.air_ap.w.iloc[1]
+
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                self.air_ap = self.air_ap.assign(wp = 0.)
+                self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                              self.air_ac.p, \
+                                              self.air_ac['wp'])
+                self.air_ap = self.air_ap.assign(R = 0.)
+                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                     self.Rv*self.air_ap.q)
+                self.air_ap = self.air_ap.assign(rho = 0.)
+                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+                
+                self.air_ap = self.air_ap.assign(w = 0.)
+                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+                #print('hello w ini')
+
+                # Note: in case of sw_ac is False, we update it from prescribed
+                # divergence
+                self.ws   = self.air_ap.w[1]
+
+                # self.ws   = self.wrho/self.rho
+                # self.ws   = self.wrho/(self.P_h/ \
+                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
+                #                         self.theta) # this should be T!!!
+
+                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+                #                         + \
+                #                         self.air_ac['divU_y'][in_ml])* \
+                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                #             self.air_ac['delpdgrav'][in_ml].sum() \
+        
+
+        # Tendencies 
+        self.htend      = None                  # tendency of CBL [m s-1]
+        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
+        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
+        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
+        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
+        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
+        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
+        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
+        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
+        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
+        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
+        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
+  
+        # initialize surface layer
+        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
+        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
+        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
+        self.z0m        = self.input.z0m        # roughness length for momentum [m]
+        self.z0h        = self.input.z0h        # roughness length for scalars [m]
+        self.Cm         = 1e12                  # drag coefficient for momentum [-]
+        self.Cs         = 1e12                  # drag coefficient for scalars [-]
+        self.L          = None                  # Obukhov length [m]
+        self.Rib        = None                  # bulk Richardson number [-]
+        self.ra         = None                  # aerodynamic resistance [s m-1]
+  
+        # initialize radiation
+        self.lat        = self.input.lat        # latitude [deg]
+        #self.fc         = self.input.fc         # coriolis parameter [s-1]
+        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
+        self.lon        = self.input.lon        # longitude [deg]
+        self.doy        = self.input.doy        # day of the year [-]
+        self.tstart     = self.input.tstart     # time of the day [-]
+        self.cc         = self.input.cc         # cloud cover fraction [-]
+        self.Swin       = None                  # incoming short wave radiation [W m-2]
+        self.Swout      = None                  # outgoing short wave radiation [W m-2]
+        self.Lwin       = None                  # incoming long wave radiation [W m-2]
+        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
+        self.Q          = self.input.Q          # net radiation [W m-2]
+        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
+  
+        # initialize land surface
+        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
+        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
+        self.T2         = self.input.T2         # temperature deeper soil layer [K]
+                           
+        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
+        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
+        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
+        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
+                           
+        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
+        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
+        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
+                           
+        self.C1sat      = self.input.C1sat      
+        self.C2ref      = self.input.C2ref      
+
+        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = self.input.LAI        # leaf area index [-]
+        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
+        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = self.input.alpha      # surface albedo [-]
+  
+        self.rs         = 1.e6                  # resistance transpiration [s m-1]
+        self.rssoil     = 1.e6                  # resistance soil [s m-1]
+                           
+        self.Ts         = self.input.Ts         # surface temperature [K]
+                           
+        self.cveg       = self.input.cveg       # vegetation fraction [-]
+        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
+        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
+        self.cliq       = None                  # wet fraction [-]
+                          
+        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
+  
+        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
+        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
+        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
+  
+        self.H          = None                  # sensible heat flux [W m-2]
+        self.LE         = None                  # evapotranspiration [W m-2]
+        self.LEliq      = None                  # open water evaporation [W m-2]
+        self.LEveg      = None                  # transpiration [W m-2]
+        self.LEsoil     = None                  # soil evaporation [W m-2]
+        self.LEpot      = None                  # potential evaporation [W m-2]
+        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
+        self.G          = None                  # ground heat flux [W m-2]
+
+        # initialize A-Gs surface scheme
+        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
+
+        # initialize cumulus parameterization
+        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
+        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
+        self.ac         = 0.                    # Cloud core fraction [-]
+        self.M          = 0.                    # Cloud core mass flux [m s-1] 
+        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
+  
+        # initialize time variables
+        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
+        self.dt     = self.input.dt
+        self.dtcur      = self.dt
+        self.firsttime = True
+        self.t      = 0
+ 
+        # Some sanity checks for valid input
+        if (self.c_beta is None): 
+            self.c_beta = 0                     # Zero curvature; linear response
+        assert(self.c_beta >= 0 or self.c_beta <= 1)
+
+        # initialize output
+        self.out = model_output(self.tsteps)
+ 
+        self.statistics()
+  
+        # calculate initial diagnostic variables
+        if(self.sw_rad):
+            self.run_radiation()
+ 
+        if(self.sw_sl):
+            for i in range(10): 
+                self.run_surface_layer()
+  
+        if(self.sw_ls):
+            self.run_land_surface()
+
+        if(self.sw_cu):
+            self.run_mixed_layer()
+            self.run_cumulus()
+        
+        if(self.sw_ml):
+            self.run_mixed_layer()
+
+    def timestep(self):
+        """Advance the model state by one (sub)timestep.
+
+        Runs the enabled component models (radiation, surface layer, land
+        surface, cumulus, mixed layer) to obtain tendencies, limits the
+        admissible timestep (self.dtmax) so that the mixed-layer top cannot
+        jump past the first upper-air profile point within a single step
+        (when sw_ap is enabled), and then time-integrates.  If the remaining
+        time self.dtcur exceeds self.dtmax, the step is split (substep
+        mechanism): the leftover time is carried over and integration
+        continues on the next call before the step counter self.t advances.
+        """
+
+        # no limit on the admissible (sub)timestep yet; may be reduced below
+        self.dtmax = +np.inf
+        self.logger.debug('before stats') 
+        self.statistics()
+
+        # run radiation model
+        self.logger.debug('before rad') 
+        if(self.sw_rad):
+            self.run_radiation()
+  
+        # run surface layer model
+        if(self.sw_sl):
+            self.logger.debug('before surface layer') 
+            self.run_surface_layer()
+        
+        # run land surface model
+        if(self.sw_ls):
+            self.logger.debug('before land surface') 
+            self.run_land_surface()
+ 
+        # run cumulus parameterization
+        if(self.sw_cu):
+            self.logger.debug('before cumulus') 
+            self.run_cumulus()
+   
+        self.logger.debug('before mixed layer') 
+        # run mixed-layer model
+        if(self.sw_ml):
+            self.run_mixed_layer()
+        self.logger.debug('after mixed layer') 
+ 
+        #get first profile data point above mixed layer
+        if self.sw_ap:
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                # here we correct for the fact that the upper profile also
+                # shifts in the vertical.
+
+                # growth of the mixed layer relative to the (subsiding)
+                # first profile point above it
+                diffhtend = self.htend - self.air_ap.w[zidx_first]
+                if diffhtend > 0:
+                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            else:
+                if self.htend > 0:
+                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            #print(self.h,zidx_first,self.ws,self.air_ap.z)
+
+        
+        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
+        self.logger.debug('before store') 
+        # a substep is needed when the remaining time in this model step
+        # exceeds the admissible timestep computed above
+        self.substep =  (self.dtcur > self.dtmax)
+        if self.substep:
+            dtnext = self.dtcur - self.dtmax
+            self.dtcur = self.dtmax
+
+        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
+
+        # HW: this will be done multiple times in case of a substep is needed
+        # store output before time integration
+        if self.firsttime:
+            self.store()
+  
+        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
+        # time integrate land surface model
+        if(self.sw_ls):
+            self.integrate_land_surface()
+        self.logger.debug('before integrate mixed layer') 
+        # time integrate mixed-layer model
+        if(self.sw_ml):
+            self.integrate_mixed_layer() 
+        self.logger.debug('after integrate mixed layer') 
+        if self.substep:
+            # carry the leftover time into the next call; output is not
+            # stored again until the full step completes (firsttime=False)
+            self.dtcur = dtnext
+            self.firsttime = False
+            # NOTE(review): self.substeps is only (re)set to 0 in the
+            # else-branch below — verify it is initialized before the very
+            # first substep can occur, otherwise this raises AttributeError.
+            self.substeps += 1
+        else:
+            # full model step completed: restore the nominal timestep and
+            # advance the step counter
+            self.dtcur = self.dt
+            self.t += 1 
+            self.firsttime = True
+            self.substeps = 0
+        self.logger.debug('going to next step')
+        
+        
+  
+    def statistics(self):
+        # Calculate virtual temperatures 
+        self.thetav   = self.theta  + 0.61 * self.theta * self.q
+        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
+        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
+        # Mixed-layer top properties
+        self.P_h    = self.Ps - self.rho * self.g * self.h
+        # else:
+            # in the other case, it is updated at the time that the profile is
+            # updated (and at the initialization
+
+        self.T_h    = self.theta - self.g/self.cp * self.h
+
+        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
+        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
+
+        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
+
+        # Find lifting condensation level iteratively
+        if(self.t == 0):
+            self.lcl = self.h
+            RHlcl = 0.5
+        else:
+            RHlcl = 0.9998 
+
+        itmax = 30
+        it = 0
+        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0):
+            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
+            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
+        else:
+            self.q2_h   = 0.
+            self.CO22_h = 0.
+
+        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
+        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
+        self.M      = self.ac * self.wstar
+        self.wqM    = self.M * self.q2_h**0.5
+
+        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
+        if(self.dCO2 < 0):
+            self.wCO2M  = self.M * self.CO22_h**0.5
+        else:
+            self.wCO2M  = 0.
+
+    def run_mixed_layer(self):
+        """Compute the mixed-layer (slab) tendencies.
+
+        Diagnoses subsidence, the convective velocity scale w*, the
+        entrainment velocity we and the entrainment fluxes, and from those
+        the tendencies of h, theta, q, CO2, the jumps at the mixed-layer
+        top, the wind components and the transition-layer thickness.  When
+        the potential-temperature jump would collapse below its 0.1 K lower
+        limit, entrainment is disabled for this (sub)timestep and the
+        simplified formulation of Wouters et al. (2013, section 2.2.1) is
+        used instead.
+        """
+        if(not self.sw_sl):
+            # decompose ustar along the wind components
+            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
+            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
+
+
+
+        # calculate large-scale vertical velocity (subsidence)
+        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
+            self.ws = -self.divU * self.h
+        # else:
+        #     in case the air circulation switch is turned on, subsidence is
+        #     calculated from the circulate profile at the initialization and
+        #     in the integrate_mixed_layer routine
+              
+        # calculate compensation to fix the free troposphere in case of subsidence 
+        if(self.sw_fixft):
+            w_th_ft  = self.gammatheta * self.ws
+            w_q_ft   = self.gammaq     * self.ws
+            w_CO2_ft = self.gammaCO2   * self.ws 
+        else:
+            w_th_ft  = 0.
+            w_q_ft   = 0.
+            w_CO2_ft = 0. 
+      
+        # calculate mixed-layer growth due to cloud top radiative divergence
+        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
+       
+        # calculate convective velocity scale w* 
+        if(self.wthetav > 0.):
+            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
+        else:
+            # stable conditions: keep w* at a tiny positive value so later
+            # divisions by wstar stay finite
+            self.wstar  = 1e-6;
+      
+        # Virtual heat entrainment flux 
+        self.wthetave    = -self.beta * self.wthetav 
+        
+        # compute mixed-layer tendencies
+        if(self.sw_shearwe):
+            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
+        else:
+            self.we    = -self.wthetave / self.dthetav
+        # Don't allow boundary layer shrinking if wtheta < 0 
+        if(self.we < 0):
+            self.we = 0.
+
+        # Calculate entrainment fluxes
+        self.wthetae     = -self.we * self.dtheta
+        self.wqe         = -self.we * self.dq
+        self.wCO2e       = -self.we * self.dCO2
+        
+        # preliminary boundary-layer growth: entrainment + subsidence +
+        # radiative growth - cumulus mass flux
+        htend_pre       = self.we + self.ws + self.wf - self.M
+        
+        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+        
+ 
+        #print('thetatend_pre',thetatend_pre)
+        
+        #preliminary boundary-layer top change
+        #htend_pre = self.we + self.ws + self.wf - self.M
+        #preliminary change in temperature jump
+        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
+                          thetatend_pre + w_th_ft
+        
+        # jump that the preliminary tendency would give after a full step
+        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
+        l_entrainment = True
+
+        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
+            # jump already at the 0.1 K lower limit and still shrinking:
+            # disable entrainment for this (sub)timestep
+            l_entrainment = False
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! temperature jump is at the lower limit \
+                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+        elif dtheta_pre < 0.1:
+            # jump would cross the lower limit within this step: shorten the
+            # admissible timestep so it lands exactly on the limit
+            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
+            l_entrainment = True
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          " Warning! Potential temperature jump at mixed- \
+                          layer height would become too low limiting timestep \
+                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+            self.dtmax = min(self.dtmax,dtmax_new)
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "next subtimestep, entrainment will be disabled")
+            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
+
+
+
+        # when entrainment is disabled, we just use the simplified formulation
+        # as in Wouters et al., 2013 (section 2.2.1)
+
+        # l_entrainment is a bool used as 0/1 weight to blend the full and
+        # the simplified (no-entrainment) formulations
+        self.dthetatend = l_entrainment*dthetatend_pre + \
+                        (1.-l_entrainment)*0.
+        self.thetatend = l_entrainment*thetatend_pre + \
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+        self.htend = l_entrainment*htend_pre + \
+                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
+        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
+        #stop
+
+
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+
+
+        # self.qtend = l_entrainment*qtend_pre + \
+        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
+        # self.CO2tend = l_entrainment*CO2tend_pre + \
+        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+
+
+        #     # part of the timestep for which the temperature mixed-layer jump
+        #     # was changing, and for which entrainment took place. For the other
+        #     # part, we don't assume entrainment anymore, and we use the
+        #     # simplified formulation  of Wouters et al., 2013
+
+        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
+        #   
+        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
+        #                      self.dthetatend + w_th_ft) + \
+        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
+        #     self.htend = fac*self.htend + \
+        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
+        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
+        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+
+        # else:
+        #     #self.htend = htend_pre
+        #     self.dthetatend = dthetatend_pre
+        #     self.thetatend = thetatend_pre
+        
+        # tendencies of the humidity and CO2 jumps at the mixed-layer top
+        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
+        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
+     
+        # assume u + du = ug, so ug - u = du
+        if(self.sw_wind):
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
+  
+            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
+            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
+        
+        # tendency of the transition layer thickness
+        if(self.ac > 0 or self.lcl - self.h < 300):
+            # relax dz_h towards the depth between h and the LCL (2 h scale)
+            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
+        else:
+            self.dztend = 0.
+
+   
+    def integrate_mixed_layer(self):
+        # set values previous time step
+        h0      = self.h
+        
+        theta0  = self.theta
+        dtheta0 = self.dtheta
+        q0      = self.q
+        dq0     = self.dq
+        CO20    = self.CO2
+        dCO20   = self.dCO2
+        
+        u0      = self.u
+        du0     = self.du
+        v0      = self.v
+        dv0     = self.dv
+
+        dz0     = self.dz_h
+  
+        # integrate mixed-layer equations
+        
+            
+
+# END -- HW 20170606        
+        self.h        = h0      + self.dtcur * self.htend
+        # print(self.h,self.htend)
+        # stop
+        self.theta    = theta0  + self.dtcur * self.thetatend
+        #print(dtheta0,self.dtcur,self.dthetatend)
+        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
+        self.q        = q0      + self.dtcur * self.qtend
+        self.dq       = dq0     + self.dtcur * self.dqtend
+        self.CO2      = CO20    + self.dtcur * self.CO2tend
+        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
+        self.dz_h     = dz0     + self.dtcur * self.dztend
+            
+        # Limit dz to minimal value
+        dz0 = 50
+        if(self.dz_h < dz0):
+            self.dz_h = dz0 
+  
+        if(self.sw_wind):
+            self.u        = u0      + self.dtcur * self.utend
+            self.du       = du0     + self.dtcur * self.dutend
+            self.v        = v0      + self.dtcur * self.vtend
+            self.dv       = dv0     + self.dtcur * self.dvtend
+
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+
+            for var in ['t','q','u','v']:
+                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
+
+            # take into account advection for the whole profile
+                
+                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
+
+            var = 'z'
+            #print(self.air_ap[var])
+                #     print(self.air_ap['adv'+var])
+
+
+
+
+            #moving the profile vertically according to the vertical wind
+                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
+
+
+            # air_apvarold = pd.Series(np.array(self.air_ap.z))
+            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
+            # stop
+
+
+                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
+                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
+
+            #As t is updated, we also need to recalculate theta (and R)
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+
+            # air_aptheta_old = pd.Series(self.air_ap['theta'])
+            self.air_ap['theta'] = \
+                        self.air_ap.t * \
+                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
+                                         self.dtcur * self.air_ap.w[zidx_first:]
+
+#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
+#            print(self.t, self.dtcur,self.dt,self.htend)
+
+            # # the pressure levels of the profiles are recalculated according to
+            # # there new height (after subsidence)
+            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
+            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
+            #         * self.dtcur *  self.air_ap.w[zidx_first:]
+
+            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
+                    self.dtcur * self.air_ap.wp[zidx_first:]
+
+            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
+        # note that theta and q itself are updatet by class itself
+
+    
+        if self.sw_ap:
+            # Just for model consistency preservation purposes, we set the
+            # theta variables of the mixed-layer to nan values, since the
+            # mixed-layer values should overwritte by the mixed-layer
+            # calculations of class.
+            self.air_ap['theta'][0:3] = np.nan 
+            self.air_ap['p'][0:3] = np.nan 
+            self.air_ap['q'][0:3] = np.nan 
+            self.air_ap['u'][0:3] = np.nan 
+            self.air_ap['v'][0:3] = np.nan 
+            self.air_ap['t'][0:3] = np.nan 
+            self.air_ap['z'][0:3] = np.nan 
+
+            # Update the vertical profiles: 
+            #   - new mixed layer properties( h, theta, q ...)
+            #   - any data points below the new ixed-layer height are removed
+
+            # Three data points at the bottom that describe the mixed-layer
+            # properties
+            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
+                                           # columns as air_ap
+            # air_ap_head['z'].iloc[0] = 2.
+            # air_ap_head['z'].iloc[1] = self.__dict__['h']
+            # air_ap_head['z'].iloc[2] = self.__dict__['h']
+            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
+                        [2.,self.__dict__['h'],self.__dict__['h']]
+            for var in ['theta','q','u','v']:
+
+                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
+                        [self.__dict__[var], \
+                         self.__dict__[var], \
+                         self.__dict__[var] + self.__dict__['d'+var]]
+                
+            #print(self.air_ap)
+
+            # This is the remaining profile considering the remaining
+            # datapoints above the mixed layer height
+            air_ap_tail = self.air_ap.iloc[3:]
+            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
+
+            # print('h',self.h)
+            # # only select samples monotonically increasing with height
+            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            # air_ap_tail = pd.DataFrame()
+            # theta_low = self.theta
+            # z_low =     self.h
+            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            # for ibottom in range(1,len(air_ap_tail_orig)):
+            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
+            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+            # make theta increase strong enough to avoid numerical
+            # instability
+            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            air_ap_tail = pd.DataFrame()
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            theta_low = self.theta
+            z_low =     self.h
+            ibottom = 0
+            itop = 0
+            # print(air_ap_tail_orig)
+            # stop
+
+            # HW: this is the lower limit that we use for gammatheta, which is
+            # there to avoid model crashes. Besides on this limit, the upper
+            # air profile is modified in a way that is still conserves total
+            # quantities of moisture and temperature. The limit is set by trial
+            # and error. The numerics behind the crash should be investigated
+            # so that a cleaner solution can be provided.
+            gammatheta_lower_limit = 0.002
+            while ((itop in range(0,1)) or (itop != ibottom)):
+                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+                if (
+                    #(z_mean > (z_low+0.2)) and \
+                    #(theta_mean > (theta_low+0.02) ) and \
+                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
+                  (itop >= (len(air_ap_tail_orig)-1)) \
+                   :
+
+                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                    ibottom = itop+1
+                    theta_low = air_ap_tail.theta.iloc[-1]
+                    z_low =     air_ap_tail.z.iloc[-1]
+    
+
+                itop +=1
+                # elif  (itop > len(air_ap_tail_orig)-10):
+                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+                #print(itop,ibottom)
+
+            if itop > 1:
+                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! Temperature profile was too steep. \
+                                  Modifying profile: "+ \
+                                  str(itop - 1)+ " measurements were dropped \
+                                  and replaced with its average \
+                                  Modifying profile. \
+                                  mean with next profile point(s).") 
+
+
+            self.air_ap = pd.concat((air_ap_head,\
+                                     air_ap_tail,\
+                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
+                                                                      axis=1)
+
+            if  self.sw_ac:
+                qvalues = \
+                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
+
+                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
+                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
+                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+                self.P_h    = self.Ps - self.rho * self.g * self.h
+                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
+                        [self.Ps,  self.P_h, self.P_h-0.1]
+
+                self.air_ap.t = \
+                            self.air_ap.theta * \
+                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
+
+
+        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
+
+
+
+
+        # else:
+            # in the other case, it is updated at the time the statistics are
+            # calculated 
+
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+
+
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if in_ml.sum() == 0:
+                warnings.warn(" no circulation points in the mixed layer \
+                              found. We just take the bottom one.")
+                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+            for var in ['t','q','u','v']:
+
+                # calculation of the advection variables for the mixed-layer
+                # these will be used for the next timestep
+                # Warning: w is excluded for now.
+
+                self.__dict__['adv'+var] = \
+                        ((self.air_ac['adv'+var+'_x'][in_ml] \
+                         + \
+                         self.air_ac['adv'+var+'_y'][in_ml])* \
+                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                        self.air_ac['delpdgrav'][in_ml].sum()
+
+                # calculation of the advection variables for the profile above
+                # the mixed layer (also for the next timestep)
+                self.air_ap['adv'+var] = \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p,\
+                                              self.air_ac['adv'+var+'_x']) \
+                                    + \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p, \
+                                              self.air_ac['adv'+var+'_y'])
+                # if var == 't':
+                #     print(self.air_ap['adv'+var])
+                #     stop
+
+            # as an approximation, we consider that advection of theta in the
+            # mixed layer is equal to advection of t. This is a sufficient
+            # approximation since theta and t are very similar at the surface
+            # pressure.
+
+            self.__dict__['advtheta'] = self.__dict__['advt']
+
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            # update the vertical wind profile
+            self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                          self.air_ac.p, \
+                                          self.air_ac['wp'])
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+            
+            air_apwold = self.air_ap['w']
+            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+            #print('hello w upd')
+
+            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # # self.wrho = np.interp(self.P_h,\
+            # #                      self.air_ach.p,\
+            # #                      self.air_ach['wrho']) \
+
+
+
+            # Also update the vertical wind at the mixed-layer height
+            # (subsidence)
+            self.ws   = self.air_ap.w[1]
+        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
+
+            ## Finally, we update he 
+            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+            #                        + \
+            #                        self.air_ac['divU_y'][in_ml])* \
+            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+            #            self.air_ac['delpdgrav'][in_ml].sum() 
+            
+
+        if self.sw_ap:
+            for var in ['theta','q','u','v']:
+
+                # update of the slope (gamma) for the different variables, for
+                # the next timestep!
+
+                # there is an warning message that tells about dividing through
+                # zero, which we ignore
+
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                    # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap['gamma'+var] = gammavar
+
+                # Based on the above, update the gamma value at the mixed-layer
+                # top
+                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
+                                                                     self.air_ap.z)[0][-1]]
+
+            
+    def run_radiation(self):
+        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
+        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
+        sinlea = max(sinlea, 0.0001)
+        
+        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
+  
+        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
+  
+        self.Swin  = self.S0 * Tr * sinlea
+        self.Swout = self.alpha * self.S0 * Tr * sinlea
+        
+        
+        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
+        self.Lwout = self.bolz * self.Ts ** 4.
+          
+        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
+        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
+  
+    def run_surface_layer(self):
+        # HW: I had to raise the minimum wind speed to make the simulation with
+        # the non-iterative solution stable (this solution was a wild guess, so I don't
+        # know the exact problem of the instability in case of very low wind
+        # speeds yet)
+        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        # version of 20180730 where there are still some runs crashing. Maybe
+        # an upper limit should be set on the monin-obukhov length instead of
+        # a lower limmit on the wind speed?
+        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        
+        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
+        qsatsurf       = qsat(self.thetasurf, self.Ps)
+        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
+        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
+
+        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
+  
+        zsl       = 0.1 * self.h
+        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
+        
+
+
+        if self.sw_lit:
+            self.Rib  = min(self.Rib, 0.2)
+            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
+            self.zeta  = zsl/self.L
+            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
+            
+        
+            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
+            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
+            
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+        
+     
+            # diagnostic meteorological variables
+            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
+            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
+            
+            # diagnostic meteorological variables
+        else:
+            
+            ## circumventing any iteration with Wouters et al., 2012
+            self.zslz0m = np.max((zsl/self.z0m,10.))
+            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
+            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
+            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
+            self.L = zsl/self.zeta
+            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
+        
+            self.Cm = self.k**2.0/funm/funm
+            self.Cs = self.k**2.0/funm/funh
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+            
+            # extrapolation from mixed layer (instead of from surface) to 2meter
+            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
+            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
+            self.u2m    =                - self.uw     / self.ustar / self.k * funm
+            self.v2m    =                - self.vw     / self.ustar / self.k * funm
+        
+        
+        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
+        self.e2m    = self.q2m * self.Ps / 0.622
+     
+    def ribtol(self, Rib, zsl, z0m, z0h): 
+        if(Rib > 0.):
+            L    = 1.
+            L0   = 2.
+        else:
+            L  = -1.
+            L0 = -2.
+        #print(Rib,zsl,z0m,z0h)
+        
+        while (abs(L - L0) > 0.001):
+            L0      = L
+            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
+            Lstart  = L - 0.001*L
+            Lend    = L + 0.001*L
+            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
+                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+            L       = L - fx / fxdif
+            #print(L)
+            if(abs(L) > 1e12):
+                break
+
+        return L
+      
+    def psim(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psim = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+        return psim
+      
+    def psih(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psih  = 2. * np.log( (1. + x*x) / 2.)
+            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+        return psih
+ 
+    def jarvis_stewart(self):
+        # calculate surface resistances using Jarvis-Stewart model
+        if(self.sw_rad):
+            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
+        else:
+            f1 = 1.
+  
+        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
+            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
+        else:
+            f2 = 1.e8
+ 
+        # Limit f2 in case w2 > wfc, where f2 < 1
+        f2 = max(f2, 1.);
+ 
+        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
+        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
+  
+        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
+
+    def factorial(self,k):
+        factorial = 1
+        for n in range(2,k+1):
+            factorial = factorial * float(n)
+        return factorial;
+
+    def E1(self,x):
+        E1sum = 0
+        for k in range(1,100):
+            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
+        return -0.57721566490153286060 - np.log(x) - E1sum
+ 
    def ags(self):
        """A-gs photosynthesis / stomatal conductance scheme.

        Computes the canopy surface resistance (self.rs) and the surface CO2
        fluxes: plant assimilation (self.wCO2A, negative = uptake), soil
        respiration (self.wCO2R) and their sum (self.wCO2), using the A-gs
        parameter tables indexed by plant type (self.c3c4).

        NOTE(review): relies on the module-level helper `esat` and on `sys`
        being imported at file scope -- confirm both exist in this file.
        """
        # Select index for plant type
        if(self.c3c4 == 'c3'):
            c = 0
        elif(self.c3c4 == 'c4'):
            c = 1
        else:
            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)

        # calculate CO2 compensation concentration (Q10 temperature response)
        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  

        # calculate mesophyll conductance (Q10 response damped at temperature extremes)
        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
        gm            = gm / 1000. # conversion from mm s-1 to m s-1
  
        # calculate CO2 concentration inside the leaf (ci)
        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
        # NOTE(review): only the sqrt term is divided by (2. * gm) here; the
        # quadratic-formula form would divide (-fmin0 + sqrt(...)) as a whole.
        # Verify against the reference A-gs implementation before changing.
        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
  
        # vapour pressure deficit at the skin [kPa] and its reference value
        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
        D0            = (self.f0[c] - fmin) / self.ad[c]
  
        # internal-to-external CO2 fraction, interpolated with the VPD
        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
        ci            = cfrac * (co2abs - CO2comp) + CO2comp
  
        # calculate maximal gross primary production in high light conditions (Ag)
        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
  
        # calculate effect of soil moisture stress on gross assimilation rate
        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
  
        # calculate stress function
        if (self.c_beta == 0):
            # linear soil-moisture stress
            fstr = betaw;
        else:
            # Following Combe et al (2016)
            if (self.c_beta < 0.25):
                P = 6.4 * self.c_beta
            elif (self.c_beta < 0.50):
                P = 7.6 * self.c_beta - 0.3
            else:
                P = 2**(3.66 * self.c_beta + 0.34) - 1
            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
  
        # calculate gross assimilation rate (Am), saturating with ci
        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
        Rdark        = (1. / 9.) * Am
        # photosynthetically active radiation over the vegetated fraction
        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
  
        # calculate  light use efficiency
        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
  
        # calculate gross primary productivity
        # NOTE(review): Ag is computed but never used below; its exponent is
        # also positive where a saturating form would use exp(-alphac * PAR /
        # (Am + Rdark)) -- confirm before relying on this value.
        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
  
        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
  
        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
        a1           = 1. / (1. - self.f0[c])
        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
  
        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
  
        # calculate surface resistance for moisture and carbon dioxide
        self.rs      = 1. / (1.6 * gcco2)
        rsCO2        = 1. / gcco2
  
        # calculate net flux of CO2 into the plant (An)
        An           = -(co2abs - ci) / (self.ra + rsCO2)
  
        # CO2 soil surface flux: respiration reduced by the wet fraction fw
        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
  
        # CO2 flux: convert from mass-based back to kinematic units
        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
        self.wCO2    = self.wCO2A + self.wCO2R
+ 
    def run_land_surface(self):
        """Solve the surface energy balance and the soil tendencies.

        Computes the aerodynamic resistance, dispatches to the selected
        surface-resistance scheme (Jarvis-Stewart or A-gs), solves the skin
        temperature from the energy balance linearized in Ts, splits the
        latent heat flux over three tiles (dry vegetation, wet vegetation,
        bare soil), and sets the tendencies of soil temperature (force-
        restore), soil moisture and the liquid water reservoir.  Finally
        converts H and LE into kinematic fluxes wtheta and wq.
        """
        # compute ra from the effective wind speed (incl. free convection)
        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
        #print('ueff',self.u,self.v,self.wstar)

        if(self.sw_sl):
          self.ra = (self.Cs * ueff)**-1.
        else:
          self.ra = ueff / max(1.e-3, self.ustar)**2.

        #print('ra',self.ra,self.ustar,ueff)

        # first calculate essential thermodynamic variables
        self.esat    = esat(self.theta)
        self.qsat    = qsat(self.theta, self.Ps)
        # slope of the saturation curve, used to linearize the energy balance
        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
        self.dqsatdT = 0.622 * desatdT / self.Ps
        self.e       = self.q * self.Ps / 0.622

        # surface resistance self.rs from the selected canopy scheme
        if(self.ls_type == 'js'): 
            self.jarvis_stewart() 
        elif(self.ls_type == 'ags'):
            self.ags()
        else:
            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)

        # recompute f2 using wg instead of w2 for the bare-soil resistance
        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
        else:
          f2        = 1.e8
        self.rssoil = self.rssoilmin * f2 
 
        # wet-vegetation fraction from the liquid water reservoir
        Wlmx = self.LAI * self.Wmax
        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
        self.cliq = min(1., self.Wl / Wlmx) 
     
        # calculate skin temperature implicitly from the linearized energy
        # balance over the three evaporating tiles plus the ground heat flux
        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)

        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
        #print('Ts',self.rs)

        # saturation values at the skin temperature
        # NOTE(review): esatsurf is not used further in this method.
        esatsurf      = esat(self.Ts)
        self.qsatsurf = qsat(self.Ts, self.Ps)

        # latent heat flux per tile [W m-2]
        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
  
        # depletion of the liquid water reservoir by interception evaporation
        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
  
        self.LE     = self.LEsoil + self.LEveg + self.LEliq
        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
        #print('H',self.ra,self.Ts,self.theta)
        self.G      = self.Lambda * (self.Ts - self.Tsoil)
        # potential and reference evaporation (Penman-Monteith-type forms)
        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
        
        # force-restore soil temperature tendency
        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
  
        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
   
        # top-layer soil moisture tendency (force-restore towards wgeq)
        d1          = 0.1
        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
  
        # calculate kinematic heat fluxes
        self.wtheta   = self.H  / (self.rho * self.cp)
        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
        self.wq       = self.LE / (self.rho * self.Lv)
+ 
+    def integrate_land_surface(self):
+        # integrate soil equations
+        Tsoil0        = self.Tsoil
+        wg0           = self.wg
+        Wl0           = self.Wl
+  
+        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
+        self.wg       = wg0     + self.dtcur * self.wgtend
+        self.Wl       = Wl0     + self.dtcur * self.Wltend
+  
+    # store model output
+    def store(self):
+        t                      = self.t
+        
+        self.out.time[t]          = t * self.dt / 3600. + self.tstart
+
+        # in case we are at the end of the simulation, we store the vertical
+        # profiles to the output
+        
+        # if t == (len(self.out.time) - 1):
+        #     self.out.air_ac = self.air_ac
+        #     self.out.air_ap = self.air_ap
+
+        
+        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
+        #  for key in self.out.__dict__.keys():
+        #      if key in self.__dict__:
+        #          self.out.__dict__[key][t]  = self.__dict__[key]
+        
+        self.out.h[t]          = self.h
+        
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
+        
+        self.out.gammatheta[t] = self.gammatheta
+        self.out.gammau[t]     = self.gammau
+        self.out.gammav[t]     = self.gammav
+        self.out.gammaq[t]     = self.gammaq
+        self.out.theta[t]      = self.theta
+        self.out.thetav[t]     = self.thetav
+        self.out.dtheta[t]     = self.dtheta
+        self.out.dthetav[t]    = self.dthetav
+        self.out.wtheta[t]     = self.wtheta
+        self.out.wthetav[t]    = self.wthetav
+        self.out.wthetae[t]    = self.wthetae
+        self.out.wthetave[t]   = self.wthetave
+        
+        self.out.q[t]          = self.q
+        self.out.dq[t]         = self.dq
+        self.out.wq[t]         = self.wq
+        self.out.wqe[t]        = self.wqe
+        self.out.wqM[t]        = self.wqM
+      
+        self.out.qsat[t]       = self.qsat
+        self.out.e[t]          = self.e
+        self.out.esat[t]       = self.esat
+      
+        fac = (self.rho*self.mco2)/self.mair
+        self.out.CO2[t]        = self.CO2
+        self.out.dCO2[t]       = self.dCO2
+        self.out.wCO2[t]       = self.wCO2  * fac
+        self.out.wCO2e[t]      = self.wCO2e * fac
+        self.out.wCO2R[t]      = self.wCO2R * fac
+        self.out.wCO2A[t]      = self.wCO2A * fac
+
+        self.out.u[t]          = self.u
+        self.out.du[t]         = self.du
+        self.out.uw[t]         = self.uw
+        
+        self.out.v[t]          = self.v
+        self.out.dv[t]         = self.dv
+        self.out.vw[t]         = self.vw
+        
+        self.out.T2m[t]        = self.T2m
+        self.out.q2m[t]        = self.q2m
+        self.out.u2m[t]        = self.u2m
+        self.out.v2m[t]        = self.v2m
+        self.out.e2m[t]        = self.e2m
+        self.out.esat2m[t]     = self.esat2m
+
+
+        self.out.Tsoil[t]      = self.Tsoil
+        self.out.T2[t]         = self.T2
+        self.out.Ts[t]         = self.Ts
+        self.out.wg[t]         = self.wg
+        
+        self.out.thetasurf[t]  = self.thetasurf
+        self.out.thetavsurf[t] = self.thetavsurf
+        self.out.qsurf[t]      = self.qsurf
+        self.out.ustar[t]      = self.ustar
+        self.out.Cm[t]         = self.Cm
+        self.out.Cs[t]         = self.Cs
+        self.out.L[t]          = self.L
+        self.out.Rib[t]        = self.Rib
+  
+        self.out.Swin[t]       = self.Swin
+        self.out.Swout[t]      = self.Swout
+        self.out.Lwin[t]       = self.Lwin
+        self.out.Lwout[t]      = self.Lwout
+        self.out.Q[t]          = self.Q
+  
+        self.out.ra[t]         = self.ra
+        self.out.rs[t]         = self.rs
+        self.out.H[t]          = self.H
+        self.out.LE[t]         = self.LE
+        self.out.LEliq[t]      = self.LEliq
+        self.out.LEveg[t]      = self.LEveg
+        self.out.LEsoil[t]     = self.LEsoil
+        self.out.LEpot[t]      = self.LEpot
+        self.out.LEref[t]      = self.LEref
+        self.out.G[t]          = self.G
+
+        self.out.zlcl[t]       = self.lcl
+        self.out.RH_h[t]       = self.RH_h
+
+        self.out.ac[t]         = self.ac
+        self.out.M[t]          = self.M
+        self.out.dz[t]         = self.dz_h
+        self.out.substeps[t]   = self.substeps
+  
+    # delete class variables to facilitate analysis in ipython
+    def exitmodel(self):
+        del(self.Lv)
+        del(self.cp)
+        del(self.rho)
+        del(self.k)
+        del(self.g)
+        del(self.Rd)
+        del(self.Rv)
+        del(self.bolz)
+        del(self.S0)
+        del(self.rhow)
+  
+        del(self.t)
+        del(self.dt)
+        del(self.tsteps)
+         
+        del(self.h)          
+        del(self.Ps)        
+        del(self.fc)        
+        del(self.ws)
+        del(self.we)
+        
+        del(self.theta)
+        del(self.dtheta)
+        del(self.gammatheta)
+        del(self.advtheta)
+        del(self.beta)
+        del(self.wtheta)
+    
+        del(self.T2m)
+        del(self.q2m)
+        del(self.e2m)
+        del(self.esat2m)
+        del(self.u2m)
+        del(self.v2m)
+        
+        del(self.thetasurf)
+        del(self.qsatsurf)
+        del(self.thetav)
+        del(self.dthetav)
+        del(self.thetavsurf)
+        del(self.qsurf)
+        del(self.wthetav)
+        
+        del(self.q)
+        del(self.qsat)
+        del(self.dqsatdT)
+        del(self.e)
+        del(self.esat)
+        del(self.dq)
+        del(self.gammaq)
+        del(self.advq)
+        del(self.wq)
+        
+        del(self.u)
+        del(self.du)
+        del(self.gammau)
+        del(self.advu)
+        
+        del(self.v)
+        del(self.dv)
+        del(self.gammav)
+        del(self.advv)
+  
+        del(self.htend)
+        del(self.thetatend)
+        del(self.dthetatend)
+        del(self.qtend)
+        del(self.dqtend)
+        del(self.utend)
+        del(self.dutend)
+        del(self.vtend)
+        del(self.dvtend)
+     
+        del(self.Tsoiltend) 
+        del(self.wgtend)  
+        del(self.Wltend) 
+  
+        del(self.ustar)
+        del(self.uw)
+        del(self.vw)
+        del(self.z0m)
+        del(self.z0h)        
+        del(self.Cm)         
+        del(self.Cs)
+        del(self.L)
+        del(self.Rib)
+        del(self.ra)
+  
+        del(self.lat)
+        del(self.lon)
+        del(self.doy)
+        del(self.tstart)
+   
+        del(self.Swin)
+        del(self.Swout)
+        del(self.Lwin)
+        del(self.Lwout)
+        del(self.cc)
+  
+        del(self.wg)
+        del(self.w2)
+        del(self.cveg)
+        del(self.cliq)
+        del(self.Tsoil)
+        del(self.T2)
+        del(self.a)
+        del(self.b)
+        del(self.p)
+        del(self.CGsat)
+  
+        del(self.wsat)
+        del(self.wfc)
+        del(self.wwilt)
+  
+        del(self.C1sat)
+        del(self.C2ref)
+  
+        del(self.LAI)
+        del(self.rs)
+        del(self.rssoil)
+        del(self.rsmin)
+        del(self.rssoilmin)
+        del(self.alpha)
+        del(self.gD)
+  
+        del(self.Ts)
+  
+        del(self.Wmax)
+        del(self.Wl)
+  
+        del(self.Lambda)
+        
+        del(self.Q)
+        del(self.H)
+        del(self.LE)
+        del(self.LEliq)
+        del(self.LEveg)
+        del(self.LEsoil)
+        del(self.LEpot)
+        del(self.LEref)
+        del(self.G)
+  
+        del(self.sw_ls)
+        del(self.sw_rad)
+        del(self.sw_sl)
+        del(self.sw_wind)
+        del(self.sw_shearwe)
+
+# class for storing mixed-layer model output data
class model_output:
    """Time-series storage for mixed-layer model output.

    Every attribute is a numpy array with one entry per model time step;
    model.store() fills slot t at the end of each step.
    """
    def __init__(self, tsteps):
        # tsteps: total number of model time steps to allocate
        self.time          = np.zeros(tsteps)    # time [h] (hours, see store())

        # mixed-layer variables
        self.h          = np.zeros(tsteps)    # ABL height [m]
        
        self.theta      = np.zeros(tsteps)    # mixed-layer potential temperature [K]
        self.gammatheta = np.zeros(tsteps)    # potential temperature lapse rate above h [K m-1]
        self.gammaq     = np.zeros(tsteps)    # specific humidity lapse rate above h [kg kg-1 m-1]
        self.gammau     = np.zeros(tsteps)    # u-wind vertical gradient above h [s-1]
        self.gammav     = np.zeros(tsteps)    # v-wind vertical gradient above h [s-1]
        self.thetav     = np.zeros(tsteps)    # mixed-layer virtual potential temperature [K]
        self.dtheta     = np.zeros(tsteps)    # potential temperature jump at h [K]
        self.dthetav    = np.zeros(tsteps)    # virtual potential temperature jump at h [K]
        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
        
        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
        self.dq         = np.zeros(tsteps)    # specific humidity jump at h [kg kg-1]
        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]

        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]

        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
        self.dCO2       = np.zeros(tsteps)    # CO2 jump at h [ppm]
        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
        
        self.u          = np.zeros(tsteps)    # mixed-layer u-wind speed [m s-1]
        self.du         = np.zeros(tsteps)    # u-wind jump at h [m s-1]
        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
        
        self.v          = np.zeros(tsteps)    # mixed-layer v-wind speed [m s-1]
        self.dv         = np.zeros(tsteps)    # v-wind jump at h [m s-1]
        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]

        # diagnostic meteorological variables
        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]

        # ground variables
        self.Tsoil       = np.zeros(tsteps)   # soil temperature, top layer [K]
        self.T2          = np.zeros(tsteps)   # soil temperature, deeper layer [K]
        self.Ts          = np.zeros(tsteps)   # surface (skin) temperature [K]
        self.wg          = np.zeros(tsteps)   # volumetric water content, top soil layer [m3 m-3]

        # surface-layer variables
        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
        self.L          = np.zeros(tsteps)    # Obukhov length [m]
        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]

        # radiation variables
        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]

        # land surface variables
        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]

        # Mixed-layer top variables
        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]

        # cumulus variables
        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
        
        
        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
+
+# class for storing mixed-layer model input data
+class model_input:
+    def __init__(self):
+
+        # NOTE: the attributes below are deliberately initialized to None
+        # rather than to default values: this avoids silently inheriting
+        # values when copying settings from one class4gl_input instance to
+        # another, and it forces the user to specify all required parameters
+        # explicitly (an error is raised otherwise).
+
+        # general model variables
+        self.runtime    = None  # duration of model run [s]
+        self.dt         = None  # time step [s]
+
+        # mixed-layer variables
+        self.sw_ml      = None  # mixed-layer model switch
+        self.sw_shearwe = None  # Shear growth ABL switch
+        self.sw_fixft   = None  # Fix the free-troposphere switch
+        self.h          = None  # initial ABL height [m]
+        self.Ps         = None  # surface pressure [Pa]
+        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
+        self.fc         = None  # Coriolis parameter [s-1]
+        
+        self.theta      = None  # initial mixed-layer potential temperature [K]
+        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
+
+        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
+
+        self.dtheta     = None  # initial temperature jump at h [K]
+        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = None  # advection of heat [K s-1]
+        self.beta       = None  # entrainment ratio for virtual heat [-]
+        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
+        
+        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
+        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
+
+        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = None  # advection of moisture [kg kg-1 s-1]
+        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
+
+        self.CO2        = None  # initial mixed-layer CO2 (units per model convention, presumably ppm -- TODO confirm)
+        self.dCO2       = None  # initial CO2 jump at h
+        self.gammaCO2   = None  # free atmosphere CO2 lapse rate
+        self.advCO2     = None  # advection of CO2
+        self.wCO2       = None  # surface kinematic CO2 flux
+        
+        self.sw_wind    = None  # prognostic wind switch
+        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
+        self.du         = None  # initial u-wind jump at h [m s-1]
+        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = None  # advection of u-wind [m s-2]
+
+        self.v          = None  # initial mixed-layer v-wind speed [m s-1]
+        self.dv         = None  # initial v-wind jump at h [m s-1]
+        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = None  # advection of v-wind [m s-2]
+
+        # surface layer variables
+        self.sw_sl      = None  # surface layer switch
+        self.ustar      = None  # surface friction velocity [m s-1]
+        self.z0m        = None  # roughness length for momentum [m]
+        self.z0h        = None  # roughness length for scalars [m]
+        self.Cm         = None  # drag coefficient for momentum [-]
+        self.Cs         = None  # drag coefficient for scalars [-]
+        self.L          = None  # Obukhov length [m]
+        self.Rib        = None  # bulk Richardson number [-]
+
+        # radiation parameters
+        self.sw_rad     = None  # radiation switch
+        self.lat        = None  # latitude [deg]
+        self.lon        = None  # longitude [deg]
+        self.doy        = None  # day of the year [-]
+        self.tstart     = None  # time of the day [h UTC]
+        self.cc         = None  # cloud cover fraction [-]
+        self.Q          = None  # net radiation [W m-2] 
+        self.dFz        = None  # cloud top radiative divergence [W m-2] 
+
+        # land surface parameters
+        self.sw_ls      = None  # land surface switch
+        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
+        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = None  # temperature top soil layer [K]
+        self.T2         = None  # temperature deeper soil layer [K]
+        
+        self.a          = None  # Clapp and Hornberger retention curve parameter a
+        self.b          = None  # Clapp and Hornberger retention curve parameter b
+        self.p          = None  # Clapp and Hornberger retention curve parameter p 
+        self.CGsat      = None  # saturated soil conductivity for heat
+        
+        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
+        self.wfc        = None  # volumetric water content field capacity [-]
+        self.wwilt      = None  # volumetric water content wilting point [-]
+        
+        self.C1sat      = None  # soil moisture coefficient (undocumented here -- TODO document meaning/units)
+        self.C2ref      = None  # soil moisture coefficient (undocumented here -- TODO document meaning/units)
+
+        self.c_beta     = None  # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = None  # leaf area index [-]
+        self.gD         = None  # correction factor transpiration for VPD [-]
+        self.rsmin      = None  # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = None  # surface albedo [-]
+        
+        self.Ts         = None  # initial surface temperature [K]
+        
+        self.cveg       = None  # vegetation fraction [-]
+        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
+        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
+        
+        self.Lambda     = None  # thermal diffusivity skin layer [-]
+
+        # A-Gs parameters
+        self.c3c4       = None  # Plant type ('c3' or 'c4')
+
+        # Cumulus parameters
+        self.sw_cu      = None  # Cumulus parameterization switch
+        self.dz_h       = None  # Transition layer thickness [m]
+        
+# BEGIN -- HW 20171027
+        # self.cala       = None      # soil heat conductivity [W/(K*m)]
+        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+# END -- HW 20171027
diff --git a/class4gl/ribtol/Makefile b/class4gl/ribtol/Makefile
new file mode 100644
index 0000000..e23e3e1
--- /dev/null
+++ b/class4gl/ribtol/Makefile
@@ -0,0 +1,8 @@
+ribtol.so : ribtol.o
+	g++ -O3 -shared -Wl -z -def -o ribtol.so -lpython2.6 -lboost_python ribtol.o
+
+ribtol.o : ribtol.cpp
+	g++ -c -O3 -fPIC ribtol.cpp -I/usr/include/python2.6
+
+clean : 
+	rm -rf ribtol.o ribtol.so
diff --git a/class4gl/ribtol/MakefileMac b/class4gl/ribtol/MakefileMac
new file mode 100644
index 0000000..bf34ea8
--- /dev/null
+++ b/class4gl/ribtol/MakefileMac
@@ -0,0 +1,9 @@
+# Note: boost-python needs to be installed: brew install boost-python --with-python3 --without-python
+ribtol.so : ribtol.o
+	clang++ -O3 -shared -o ribtol.so -L/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib -lpython3.6m -L/usr/local/lib -lboost_python3-mt -lpython ribtol.o
+
+ribtol.o : ribtol.cpp
+	clang++ -c -O3 -fPIC ribtol.cpp -I/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/include/python3.6m -I/usr/local/include
+
+clean : 
+	rm -rf ribtol.o ribtol.so
diff --git a/class4gl/ribtol/__init__.py b/class4gl/ribtol/__init__.py
new file mode 100644
index 0000000..a21583b
--- /dev/null
+++ b/class4gl/ribtol/__init__.py
@@ -0,0 +1,7 @@
+# Package init: re-export the main class4gl submodules and define package
+# metadata.
+# NOTE(review): the imported submodules (model, class4gl, interface_multi,
+# data_air, data_global) do not live inside the ribtol/ directory this
+# __init__.py is placed in -- confirm the file is in the intended location.
+from . import model,class4gl,interface_multi,data_air,data_global
+
+__version__ = '0.1.0'
+
+__author__ = 'Hendrik Wouters '
+
+__all__ = []
diff --git a/class4gl/ribtol/ribtol.cpp b/class4gl/ribtol/ribtol.cpp
new file mode 100644
index 0000000..148b0d3
--- /dev/null
+++ b/class4gl/ribtol/ribtol.cpp
@@ -0,0 +1,81 @@
+// fast conversion of bulk Richardson number to Obukhov length
+
+// NOTE(review): header names were lost during extraction (angle-bracket
+// contents stripped); reconstructed from usage below -- confirm.
+#include <boost/python.hpp>
+#include <cmath>
+#include <iostream>
+using namespace std;
+
+// Integrated stability correction function for momentum, psi_m(zeta),
+// where zeta = z/L is the dimensionless stability parameter.
+inline double psim(double zeta)
+{
+  double psim;
+  double x;
+  if(zeta <= 0.)
+  {
+    // Unstable branch: convective fit; the classic Businger-Dyer form is
+    // kept here commented out for reference.
+    //x     = (1. - 16. * zeta) ** (0.25)
+    //psim  = 3.14159265 / 2. - 2. * arctan(x) + log( (1.+x) ** 2. * (1. + x ** 2.) / 8.)
+    x    = pow(1. + pow(3.6 * abs(zeta),2./3.), -0.5);
+    psim = 3. * log( (1. + 1. / x) / 2.);
+  }
+  else
+  {
+    // Stable branch (Beljaars-Holtslag-type expression -- TODO confirm reference).
+    psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35;
+  }
+  return psim;
+}
+    
+// Integrated stability correction function for heat/scalars, psi_h(zeta),
+// with zeta = z/L the dimensionless stability parameter.
+inline double psih(double zeta)
+{
+  double psih;
+  double x;
+  if(zeta <= 0.)
+  {
+    // Unstable branch: convective fit; the classic Businger-Dyer form is
+    // kept here commented out for reference.
+    // x     = (1. - 16. * zeta) ** (0.25)
+    // psih  = 2. * log( (1. + x ** 2.) / 2. )
+    x     = pow(1. + pow(7.9 * abs(zeta), (2./3.)), -0.5);
+    psih  = 3. * log( (1. + 1. / x) / 2.);
+  }
+  else
+  {
+    // Stable branch (Beljaars-Holtslag-type expression -- TODO confirm reference).
+    psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - pow(1. + (2./3.) * zeta, 1.5) - (10./3.) / 0.35 + 1.;
+  }
+  return psih;
+}
+
+
+// Convert a bulk Richardson number to the Obukhov length L [m].
+//
+// Parameters:
+//   Rib : bulk Richardson number [-]
+//   zsl : surface-layer height [m]
+//   z0m : roughness length for momentum [m]
+//   z0h : roughness length for heat/scalars [m]
+//
+// The implicit relation Rib = f(zsl/L) is solved with a Newton iteration in
+// which the derivative is approximated by a central finite difference.
+double ribtol(double Rib, double zsl, double z0m, double z0h)
+{
+  double L, L0;
+  double Lstart, Lend;
+  double fx, fxdif;
+
+  // Start on the correct side of neutral: L > 0 for stable conditions
+  // (Rib > 0), L < 0 for unstable conditions.
+  if(Rib > 0.)
+  {
+    L    = 1.;
+    L0   = 2.;
+  }
+  else
+  {
+    L  = -1.;
+    L0 = -2.;
+  }
+    
+  while (abs(L - L0) > 0.001)
+  {
+    L0      = L;
+    fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / pow(log(zsl / z0m) - psim(zsl / L) + psim(z0m / L), 2.);
+    Lstart  = L - 0.001 * L;
+    Lend    = L + 0.001 * L;
+    fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / pow(log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart), 2.)) - (-zsl / Lend * (log(zsl / z0h) - psih(zsl / Lend) + psih(z0h / Lend)) / pow(log(zsl / z0m) - psim(zsl / Lend) + psim(z0m / Lend), 2.)) ) / (Lstart - Lend);
+    L       = L - fx / fxdif;
+
+    // FIX: bail out when the iteration diverges towards |L| -> infinity
+    // (can happen near neutral conditions); without this guard the loop may
+    // never terminate. Same guard as in the Cython port (ribtol.pyx).
+    if (abs(L) > 1.e15)
+      break;
+  }
+  
+  return L;
+
+}
+
+// Expose ribtol() to Python as importable extension module "ribtol"
+// via Boost.Python.
+BOOST_PYTHON_MODULE(ribtol)
+{
+    using namespace boost::python;
+    def("ribtol", ribtol);
+}
+
diff --git a/class4gl/ribtol/ribtol.pyx b/class4gl/ribtol/ribtol.pyx
new file mode 100644
index 0000000..e11a147
--- /dev/null
+++ b/class4gl/ribtol/ribtol.pyx
@@ -0,0 +1,48 @@
+#cython: boundscheck=False
+#cython: wraparound=False
+
+from libc.math cimport atan, log, exp, fabs
+
+# Integrated stability correction function for momentum, psi_m(zeta),
+# with zeta = z/L the dimensionless stability parameter.
+# NOTE(review): the unstable branch uses the classic Businger-Dyer form,
+# whereas the C++ version (ribtol.cpp) uses a different convective fit --
+# the two implementations are not numerically identical; confirm which is
+# intended.
+cdef double psim(double zeta):
+    cdef double x, psim
+
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psim  = 3.14159265 / 2. - 2. * atan(x) + log((1. + x)**2. * (1. + x**2.) / 8.)
+    else:
+        # stable branch
+        psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+    return psim
+      
+# Integrated stability correction function for heat/scalars, psi_h(zeta).
+# NOTE(review): unlike psim above, the locals x and psih are not declared
+# with "cdef double", so Cython treats them as Python objects -- correct
+# but slower; confirm whether the missing declarations are intentional.
+cdef double psih(double zeta):
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psih  = 2. * log( (1. + x*x) / 2.)
+    else:
+        # stable branch
+        psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+    return psih
+
+def ribtol(double Rib, double zsl, double z0m, double z0h): 
+    """Convert the bulk Richardson number Rib to the Obukhov length L [m].
+
+    zsl is the surface-layer height [m]; z0m and z0h are the roughness
+    lengths for momentum and scalars [m].  The implicit relation
+    Rib = f(zsl/L) is solved with a Newton iteration whose derivative is
+    approximated by a finite difference; iteration stops on convergence
+    (|dL| <= 0.001) or when it diverges beyond |L| > 1e15 (near-neutral
+    conditions).
+    """
+    cdef double L, L0, fx, Lstart, Lend, fxdif
+
+    # start on the correct side of neutral: L > 0 stable, L < 0 unstable
+    if(Rib > 0.):
+        L    = 1.
+        L0   = 2.
+    else:
+        L  = -1.
+        L0 = -2.
+    
+    while (fabs(L - L0) > 0.001):
+        L0      = L
+        fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
+        # finite-difference estimate of dfx/dL over a +/-0.1% interval
+        Lstart  = L - 0.001*L
+        Lend    = L + 0.001*L
+        fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
+                                      (log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
+                  - (-zsl /  Lend   * (log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
+                                      (log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+        L       = L - fx / fxdif
+
+        # bail out if the iteration diverges (near-neutral conditions)
+        if(fabs(L) > 1e15):
+            break
+
+    return L
diff --git a/class4gl/ribtol/ribtol_hw.py b/class4gl/ribtol/ribtol_hw.py
new file mode 100644
index 0000000..1946cc8
--- /dev/null
+++ b/class4gl/ribtol/ribtol_hw.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jan 12 10:46:20 2018
+
+@author: vsc42247
+"""
+
+
+
+# purpose of calc_cm_ch: calculate momentum and thermal turbulent diffusion coefficients of the surface layer with a non-iterative procedure (Wouters et al., 2012)
+
+# input:
+
+# zrib = bulk Richardson number = (g/T)* DT * z/(ua^2)
+#   with:
+#     g = 9.81 m/s2 the gravitational acceleration
+#     z = height (in meters) of the surface layer under consideration 
+#     T = (reference) temperature (in Kelvin) at height z 
+#     DT = (T - T_s) = temperature (in Kelvin) gradient between the surface and height z 
+#     u_a^2 = u^2 +  v^2 is the squared horizontal absolute wind speed 
+# zzz0m = ratio z/z0 between the height z and the momentum roughness length z0m
+# zkbm = ln(z0m/z0h), with z0m, z0h the momentum and thermal roughness length, respectively.
+
+# output: diffusion coefficients (CM and CH) which can be used to determine surface-layer turbulent transport
+# u'w' = - CM ua^2.
+# w'T' = - CH ua DT 
+
+
+# Reference:
+# Wouters, H., De Ridder, K., and Lipzig, N. P. M.: Comprehensive
+# Parametrization of Surface-Layer Transfer Coefficients for Use
+# in Atmospheric Numerical Models, Bound.-Lay. Meteorol., 145,
+# 539–550, doi:10.1007/s10546-012-9744-3, 2012.
+
+import numpy as np
+
+def calc_cm_ch (zeta,zzz0m,zkbm):
+    krm = 0.4
+
+    #ZETA = zeta_hs2(zrib,zzz0m,zkbm)
+    FUNM,FUNH = funcsche(ZETA,zzz0m,zkbm)
+    CM = krm**2.0/FUNM/FUNM
+    CH = krm**2.0/FUNM/FUNH
+
+    # FUNMn,FUNHn = funcsche(0.,zzz0m,zkbm)
+    # CMn = krm**2.0/FUNMn/FUNMn
+    # CHn = krm**2.0/FUNMn/FUNHn
+
+    # print ZETA,FUNM,FUNH
+    # print 'CMCMN',CM/CMn
+    # print 'CHCHN',CH/CHn
+
+    return CM,CH
+
+
+def zeta_hs2(RiB,zzz0m,kBmin1):
+    """Non-iterative estimate of the stability parameter zeta = z/L from the
+    bulk Richardson number RiB (Wouters et al., 2012).
+
+    zzz0m  : ratio z/z0m of surface-layer height to momentum roughness [-]
+    kBmin1 : kB^-1 = ln(z0m/z0h) [-]
+
+    NOTE(review): the fit constants mum/muh (2.59/0.95) differ from the
+    values used in funcsche (2.5/0.9) -- confirm this is intentional.
+    """
+    #print(RiB,zzz0m,kBmin1)
+    # fit constants of the parametrization
+    mum=2.59
+    muh=0.95
+    nu=0.5
+    lam=1.5
+
+    betah = 5.0
+
+    zzz0h = zzz0m*np.exp(kBmin1)
+    zzzs = zzz0m*0.06 # to be changed!! still check line 101 (translated from Dutch)
+
+    L0M = np.log(zzz0m)
+    L0H = np.log(zzz0h)
+    facM = np.log(1.+lam/mum/zzzs)*np.exp(-mum*zzzs)/lam
+    facH = np.log(1.+lam/muh/zzzs)*np.exp(-muh*zzzs)/lam
+    L0Ms = L0M + facM 
+    L0Hs = L0H + facH
+
+    if RiB < 0.:
+        # unstable conditions: empirical fit for zeta(RiB)
+        p = np.log(1.-RiB)
+        Q = -0.486 +0.219*p - 0.0331*p**2-4.93*np.exp(-L0H) - 3.65/L0H +\
+            0.38*p/L0H+ 14.8/L0H/L0H-0.946*p/L0H/L0H-10.0/L0H**3+ \
+            0.392*L0M/L0H-0.084*p*L0M/L0H+0.368*L0M/L0H/L0H
+        # print 'p: ',p
+        # print 'Q: ',Q
+        zeta = (1. + p*Q)* L0Ms**2/L0Hs * RiB
+    else:
+        # stable conditions
+        betam = 4.76+7.03/zzz0m +0.24*zzz0m/zzz0h # to be changed
+        # betam = 5.0 + 1.59*10.**(-5.)*(np.exp(13.0-L0M)-1.0) \
+        #         +0.24*(np.exp(-kBmin1)-1.0) # to be changed!!
+        # print('betam',betam)
+        lL0M = np.log(L0M)
+        S0Ms = 1.-1./zzz0m + (1.+nu/mum/zzzs)*facM
+        S0Hs = 1.-1./zzz0h + (1.+nu/muh/zzzs)*facH
+        # zetat/RiBt: transition point between the two stable sub-regimes
+        zetat = -0.316-0.515*np.exp(-L0H) + 25.8 *np.exp(-2.*L0H) + 4.36/L0H \
+                -6.39/L0H/L0H+0.834*lL0M - 0.0267*lL0M**2
+        # print('zetat',zetat)
+        RiBt = zetat *(L0Hs+ S0Hs*betah*zetat)/(L0Ms+S0Ms*betam*zetat)**2 
+        # print('RiBt',RiBt)
+
+        if (RiB > RiBt):
+            # very stable regime: linear extension beyond the transition point
+            D = (L0Ms+S0Ms*betam*zetat)**3/\
+                (L0Ms*L0Hs+zetat*(2.*S0Hs * betah * L0Ms - S0Ms*betam*L0Hs))
+            zeta = zetat + D*(RiB-RiBt)
+        else:
+            # weakly stable regime
+            r = RiB - S0Hs*betah/(S0Ms*betam)**2
+            B = S0Ms*betam*L0Hs- 2.*S0Hs*betah*L0Ms
+            C = 4.*(S0Ms*betam)**2 * L0Ms *(S0Hs*betah*L0Ms-S0Ms*betam*L0Hs)
+            zeta = - L0Ms / S0Ms/betam - B*C/(4.*(S0Ms*betam)**3 *(B**2+abs(C*r)))
+            if r != 0:
+                zeta = zeta + (B-np.sqrt(B**2+C*r) + B*C*r/(2.*(B**2+abs(C*r))))/(2.*(S0Ms*betam)**3*r)
+    # print('zeta',zeta)
+    return zeta
+
+def funcsche(zeta,zzz0,kBmin1):
+    """Stability functions FUNM, FUNH of the surface layer.
+
+    zeta   : stability parameter z/L [-]
+    zzz0   : ratio z/z0m of surface-layer height to momentum roughness [-]
+    kBmin1 : kB^-1 = ln(z0m/z0h) [-]
+
+    Returns (funm, funh), the integrated momentum and heat profile functions
+    used by calc_cm_ch (CM = k^2/funm^2, CH = k^2/(funm*funh)).
+    """
+    # fit constants of the parametrization
+    mum=2.5
+    muh=0.9
+    nu=0.5
+    lam=1.5
+    
+    p2=3.141592/2.
+    
+    lnzzz0=np.log(zzz0)
+    zzzs=zzz0*0.06
+    zetamcorr=(1.+nu/(mum*zzzs))*zeta
+    zetam0=zeta/zzz0
+    zetahcorr=(1.+nu/(muh*zzzs))*zeta
+    zetah0=zeta/(zzz0*np.exp(kBmin1))
+    
+    if (zeta <= 0.):
+        # unstable branch: Businger-Dyer type psi functions
+        gamma=15.2
+        alfam=0.25
+        xx=(1.-gamma*zeta)**alfam
+        psim=2.*np.log((1.+xx)/2.)+np.log((1.+xx**2.)/2.)-2.*np.arctan(xx)+p2
+        xx0=(1.-gamma*zetam0)**alfam
+        psim0=2.*np.log((1.+xx0)/2.)+np.log((1.+xx0**2.)/2.)-2.*np.arctan(xx0)+p2
+        phimcorr=(1.-gamma*zetamcorr)**(-alfam)
+        
+        alfah=0.5
+        yy=(1.-gamma*zeta)**alfah
+        psih=2.*np.log((1.+yy)/2.)
+        yy0=(1.-gamma*zetah0)**alfah
+        psih0=2.*np.log((1.+yy0)/2.)
+        phihcorr=(1.-gamma*zetahcorr)**(-alfah)
+    else: 
+        # stable branch
+        aa=6.1
+        bb=2.5
+        psim=-aa*np.log(zeta+(1.+zeta**bb)**(1./bb))
+        psim0=-aa*np.log(zetam0+(1.+zetam0**bb)**(1./bb))
+        phimcorr=1.+aa*(zetamcorr+zetamcorr**bb*(1.+zetamcorr**bb)**((1.-bb)/bb))/(zetamcorr+(1.+zetamcorr**bb)**(1./bb))
+        
+        cc=5.3
+        dd=1.1
+        psih=-cc*np.log(zeta+(1.+zeta**dd)**(1./dd))
+        psih0=-cc*np.log(zetah0+(1.+zetah0**dd)**(1./dd))
+        phihcorr=1.+cc*(zetahcorr+zetahcorr**dd*(1.+zetahcorr**dd)**((1.-dd)/dd))/(zetahcorr+(1.+zetahcorr**dd)**(1./dd))
+    
+    # roughness-sublayer correction terms
+    psistrm=phimcorr*(1./lam)*np.log(1.+lam/(mum*zzzs))*np.exp(-mum*zzzs)
+    psistrh=phihcorr*(1./lam)*np.log(1.+lam/(muh*zzzs))*np.exp(-muh*zzzs)
+    
+    funm=lnzzz0-psim+psim0 +psistrm
+    funh=lnzzz0+kBmin1-psih+psih0 +psistrh
+    return funm,funh
+
diff --git a/class4gl/ribtol/setup.py b/class4gl/ribtol/setup.py
new file mode 100644
index 0000000..bfb44db
--- /dev/null
+++ b/class4gl/ribtol/setup.py
@@ -0,0 +1,12 @@
+# build with "python setup.py build_ext --inplace"
+from distutils.core import setup
+from distutils.extension import Extension
+from Cython.Build import cythonize
+import numpy as np
+import os
+
+os.environ["CC"] = "g++-7"
+
+setup(
+    ext_modules = cythonize((Extension("ribtol", sources=["ribtol.pyx"], include_dirs=[np.get_include()], ), ))
+)
diff --git a/dist/class4gl-0.1dev.tar.gz b/dist/class4gl-0.1dev.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..8a1ace0a61ad7780b2a944794b28c087b83b9899
GIT binary patch
literal 74685
zcmV(basceTe{r-Ij+9T1HXi5Hsdb&*bg%q+GN_NnBH+Oj44abGPb#oBLhvr+VED
zfCNC0qMXd0bF0?1JF!8a(P%UpjYgw;5PC_nKMp%xw>R{cyWjl@KizJ(|NQyEcmE6j
z_>cedzvM_xt?D|Cjs>jQLOebTN15kKcIyyN8GSrSren?`QMhd%h3ze{k44_>Oh|
zJ^z3D&nS+jY#1czA`QaC9YnFug6TYpQ%0=NXwW~c1`|IsZ|Wa6CEGOH*@vYcCqXnj
zZgNgizRbPBoj3NA(C>g$X1iwBJ9X1^|_Pt@tX*Zn4cmMwRiuHf-?sezwho67>SFZoVZr_mq
z2fgRd|GobI6+iF&)Ej!K*ZG|<-y_y@yNwU9$d8!hjYjs3GfCs)VmkHW$0PRJhj$--
z`ShXjCYt)4Ijr%#=wy*jqS*Yq<4?UH%uCPt(m!I_(r>)V*ZL9L$`O$Q;dJ6pL1x8|
z?A)6TJINv*c>|xFq|+#wPkfMJAEO{-A4Bh!O{1Y7vexPO$;Xcl`=9LM5fGV4ag~A0
zez}?i34{No6QB7HDWC_{STy2Tury+~KJ#uvU;MgA{2^Nf>4c5L=++Av{6CDKNfIq)
z!(cW}+#Go0BJ|Qx6i;&!{P$Y_Aez6q4)_H9@9^N?`rm)q{NLOJvmm{>`BrM+I{Dw<
zd!EmKe-GyW@cHxZzvchGO#gGG%15%YEqS6H%u=5u>&DwiW@J8`7d1DH5xZJ
zLYCg#uw#bmpWAI>u}31Gm%j05!#KEOpP~i8OxO<-Ua*3{|Hl|N1<*aGKk{~ANR6&P
z|67^+SFe9i(EoeL|NVc<|NpA_Cn@j^Z2*<>f3MerS*Q6wJlN|#Kg9F@;oknW3U`p6Q0
zfNFZ1eFF7=2I{UqW53M;+7eP;{nAf-FCI)7DEDdX+dAs;j{X+K!+`xBgkIlK^!5r8i@5d_P`!UOeE$oLZ
zNRd#xQH(oqFnWacPz1*ZKc;gCY?;#pJHS6)fB22P_GdtzkX>lm0dw~A
z+p7;}fY8ssT(T2(adLU}_Vl;&lS_8-+vUYCAJ1IIKKh6h@?`~SM>IJ=x*?pH(NxAq
z`*)bm1W*fyY~n3_nAU+0%s*rv8vsYGpEH2rp?!-VA&Gc^(%TW6MQIyWmk+8^n$C}Q
zcUP;GJDx4vC?4+$lUcI+BjC|!2=$G61Rfa7Wyx$Y-{CMZDu3Y3SVCl!1
zHw^}f1o8?G)hTef_?!3AXpCcT33}g4_eHNU^heB3ytH-Y9MSPP_R~c?V_o;K*Yo$-
zHVn!2ALgyzbGLu^avus>SFFSO&-dKkp~H6hpL+-Hi$lk05WIVW*Pa(3_WRIW^ad^G
z4PKw))w&FB+w4mc!hgC`{LjQW;zxS;0{8+VqcHN)!+osk&=;t#e}Mw=r8_mgbf@ym
z#QZYhU$*8k=;DM3;(=&n-+~dAvmap`VqExhj{iAkJvT4uQc0he_+jE}wU1dxuM7pE
zZdLEA35vj$cbl}%f!ya^hcM}Odnz(r{_in6&&oy*vJA2B5W#i<5tv4x0Ps?amtaru
zd7cClQF89E9q~614F9ItHd;+ZtEtgyiuC5KwzvDrh*1wn2}fv1BiPQdJ%`hZ7y*DW
zzm6V2Xw%mgYh^>>0BekgDw>1hg*dW5vVN+6)>1(2CnJOC2WjD0yXwG@H~>k78bhLr
z=8$;dHyx7NPc|gc4<8cohM0>EeRK3h!sMI-NAB{e%qj@ZKW~p7cIivr7M2|`J@R|c
z@AVaS$DiBS_wE|1m_c?SslrhOIVzd}B>ap0aLl?2lm~%YtLFl8y+ar1POY}LTV1z*
zkfVYCu_g$<{ocXL{^0=(q>ruoE|-&_eWl*4^?;NTBt@`0?189+Gs^$uG}~n_+#LLt
zipdW2atQTt?1T;5hQ@t&ch7bB?{Jb(aN8o}5I2qqb}azGe|7-@!qu?|B{{4PK}Q^k
zOdWRM?snb111H}DI{3Gw4+;}rvDNDmV$cOvP-g>yD-$yL))O+}vbHejhiz@d7WzJS
zUv5a>341buCr>0W??B*{m2VRt9XLhO6E+6@Ck@u%Hg!61a=dUpr2Vm#_~EF{ze3R*
z^uJ^FA)5K^p?|v=--MtIhR0X{vy_eM*|TSOb;%ZU(nvr*1vsF1qZXBb8a0dt2^?<>J*To(lTdA{LV_u%qEZZ69SuR3
z77Ab&a6iY|Ev2QS4-d%`!13z~>8+q>k4AnBXDd(uL7|$#Z`omi-+o}DhKiL+(^9PR
zix-xpk9g9-`dSqL)r7-O(nJkk0qWk|41)pMknjA*q@~VJ4t)jad3SJX0j8h<4Cqb;
z(S`#-$h(6xR-B+F9ASMtISoOlh4X_2pf^hK^cN44!bhSo0lXRDfR0scOOPXV8nKD}
z@gnqMJmz^*pX`@}mCHW)?9TV+REvle47|`nCk_)Ye&Fl_`gM*D4vbDj(BUweHB)|$
z06GjN9-OB6J=81>;NV<@$ftJz|8*;d45J;)-60@~Lu#q>gp=n~jX$Epp*ZCySd8aK
zvJCqPKia|MEZ}sH$A&nT8!Id4M~0pVh*NwJ6tJc}
zb_;EhC~&%53_zIplGi1-@}afyIB6dkoji&40MsD#fvLQ)2OR8i$Z0SopvY;k!U=gB
zb{`{#myK3HVXG84?R(a-Ahc+a
zS_rxYLKoCe5Q`y^}`$PAcQzT9Wb&SJ73j4r3!NY++YbnFI
zZo-s0dfkZD244^GJ1mHn0>d$RUL_9lX#QuWV;8t3QkS?6<1;rkqWSz$!_=te&=9kQ
z8ZTyqrPAA$dLH2l=@RA_U8ItRArFGIKl)6&!So8nH&D
z91U)X6m@@4r$Z%-F}X(X`T40xORS3E%mDzta3lalcILy{O1$_nA3h^f_|%_9@uN-(
z{~<_etSvs#8G(sL1q$QIVmvbebNQ;RQf)F&GXe3P8q;vT%tnSbiUa@9qW?^Mj~49G
zn@4f*g>Q1#e`9xJP&RaUgSi5)*X=}^mn1W%~-M2#e9R~Q$gA>{;BOJT#7?8#)B)q7wK|;lWEp?hduY84%}&kQo0)QZ4`dVARoAg
z&;8De0_2~dQZ*f6ZdWSLblvK?UC~XWeRe-~RsP#Md|~Ck2v6`iLZ1LoU(t59*h%Mg
zPLD(=UujAgWO=QN5SyoubJ%Qzf&6r@BYxTM=$M`U(npcffB6D1W7mM$K^s5ryL;X}$qTkw6cTu5t-Kn$d)=3RQjb8-ldq>|aV|&M
zToOl^ggwy+`=a+m!mjPtMq;h+y`Cg7{PJh_JyHc6yY9L#UQ*xii?Z$z@M6kOAnbG$
zdxIitdP4(J_ql-7J5WY3M%xG%2>u8*?EsA%y6E8W?=HN;QuD(3#Otbk^r#d3WncCo
zv=lh_t>A2cZa2tr0XH(@GX7fu?z;yAcaA#1RYZjU00MiEnz&z_T-wALsFW(e{(;}w
zYcuf+2RK-y!GNs4EvSwZcJ$RR#hDI!I*9r??EQWBugqq>4Sf!KXK{JbI9%U)L_P=C*YEzn~gh;12mQfP8Y
ztAut$>xOrx$&7`{evmj*m{dy`XD}%RgexQ=E*Ku
z-AqH-n`+g}%YiwA2Y=WJy+=P5=R6Lq04jkqLcH<|pkz5{bNCzR+0iPUu#=z8%OH({
zhf!)lN|tZ2pMwW};}rXTCyk@Id_hzKaTQ2U5FMf{-$IE#o<#|W4Mv4!gDjy}lF>_A
zsyW3heRiJhf$HNi
z2^6+O3s7FF0mW-hy80XpqQ#8P!ysJ8ekTF&pmD{)aOiVK+f&8J-13G?ArUz(ZxfKX
zAZHvl3jAmiU7mE_ez-cjeEXgiah_Xoo)>Y(dyv!*LwW?Km>I}(
zaTKJf!1#jSfF&TcGZYGs;+zas-JGJ@l5%;WV?9w1;R@apRkleD1Es@+Z-&=XLr7jE
zc0fw`R^h^kU0-+xoMdi?qm*xD4-4tuv(_zC)z20>%MO9h(b<$pj8aTIK_v5d5zS$+
z{LO}@Qoh9|Xzqz-DJo|QOAgVcdrW&89>-CzT^Dzql}b?bD6rWxRIq3yraSYcA4UUy
zhO_Kut5FTO>i#0OhT($LZy^Gg+A!=L`ASEfo@>7w#3OPrLyK`K-VJr;mBWBxFP@;s
zO>st)@|=^^@y#Q=aKuveq@kW1iobAH1(}ihV$J^yyy#oMC)`xlFb&+p95bDT^70&E
z6}5k$*9HiXN%43?ZtA?f22&~D;%@X_{=nBxfY6&KJ|kQ1^*hjv3TKnyQX;B#rIdqK
zlnGhjbTU^0tRk!2W>ciYQp$B!;i@$5C(9sC7l1kkUQjekXuMKsECtA2F!O;*(y%%T
z7Y{Ua8*fv4zj_g=2(N){E`54PBW6}o(R)SYr_sFZV(sE)vVJm5SUH2tO$NDo3Ow&Ni9&qMv}U1Yu
zk90F{R^VhTd#c|L9;b82^ZB%JU-RY(*vEe8ElK9(b8X7};g=eKuVN1`L~v&k&?mv*
zZicp!0>E$(uAjp5V1)M@WYEek0l2R!>dFcu{w|h+GPnGS1UChf9r+?#)A!S~8bt|N
z)`Pz+VqT?;tm?onZob-%zJ3#4`cK$|YxGf}{Z;$|EQ>dPMfLQ)3yu%@7RV%
z+qDppB0?3Zp@&pa!%B=H!XI9(k&q!i^{oOvSi~R&MHi~0L8YLA9XfZ6@&!}wGh&e1>sTRrEmf)Zg}c}lrm{?SDa05wUMH`Ik)
zUY^6lGDW-yMs5>ixqE>w$Yi-8$mED^5};(%RK88qXp>*~
zl}mtZb}WETvLmP6OqKyg7pkc=q}GFdSr@i?JyQ>~zlpT12lD+oY`ObHCRdBFly&wA
z4{gS&u|yeItWyTWF%m?8UviinE=rVug$1+#rz{N{NLmDol6`+svhQn{s;eZit&l~@
zR===s_3CN5|GC6Gw=5y@@*5C&SwiIH*AaPHLgZx$k(V10d09f_rJcx2JCTvfTkNN^
z*Ka?tPKUkugb_ZTcMrRV3eSJ(3?h0bg4mFmP8z4@@QXM-}CE6fa?MHZj#jjMzpdSZ#5hNcKMLhb&`C*s7D1ch|We|U7m663^
zMQ2v-W^SXb_^`#F;PJDdr@wXB_^v{{eH&nO2t$D8$V|$Rnc$^_7mjKmv-MTz*r#j0
z$#jWZFgabVU^WOB^z!J<$?NmB0_p0_)k*u)$BS2O_VL53-`bx(y!xG8T%LVouIoA_
zGd3@qvAMc-GiDi89Hj3MHA={}-2BJraBx!7nKb=(q4u0EgmWwXTB~-lF8X`x*YnzCZ4nr9CX|TLuRD
z0m#nZvD-6zt=4)*{|L8m$pzW-w87(%kWqEwFQ{^i%|KMk(5L=ASyT8s-NZ-JQ5|?F
zl3?}3wJP(KE}nIoQkakL`@%yF<>vTa_{XQ&lT-#X2Euyj1tHo`8~R(dC>%=GxJZm|
zGk;~U&HP%OGX0rjVa{6|y7I`{VgJEI>G#LH1IIGEJoLn~PyG4H5xWrA2mcHuen8WvyU5F#rEn^wGvQCiK|?W2%gF~|T;dd_~tp&&$VpgVkD
z0iu}VI?3net!MTQ1xh%u9qinER8#g-lzJBl6C^SpZcO|{xjERy07J8JLXVRvOcUKS
z&7|YvDvIy8lopYw@X2(;{xEnkAEE+)wW2r}gF2u*XeoPRzNIk8b@QGc+NnJA>&Z_a
zcgfGa(I^*Af0REq(?!;$Dzd~&g6&q%?L4^m8014d0@%hsi^Ggz4F|C=LW|}50{01ApLfNx
z8xz~&Q%nn~NRDKrl72ayMA0mKWQ~nBPkh6cv*KFW{*ck5^&kg%k>DU7+{bQ_Cl@c~?@c+e+8~kIK#ewF_1O}O
z!}e}jr8$5TdPuRjTk;I~3ED}zit2`jLOKk6Tq7x)-&QN{-_%a>FW(>M{W>L&p^X=|
znZ{z%S87g&P-`+goE`Q|nC3y1sXM3jG@iN~S5%Dn1bl`G_#;=3YJyX>D5eaSD^2fe
z>rB?^Epii6=Ft|>Km_7Zj*cS4ifZeyw3~-M?j*oHBCw~W*{z?he4zRl)FZEh&v+KF
zfD)f~K+XUe$H9_Ml#qX~LBK;>8}6!@#A^JAHb$pEpl{3EB^SOk6L%(dl)?X}V+k&zFI}bK=Q`&Dt--Z!
zsF<)Hk2jyRqC)${!EFX8_gVE0-4YuiKPQPZBdp`hup~VB0y3h8!HCC>0%2Nyer^;_
z__&{3keFS?(QKTB1rxDu2&fj3Zp8!GN~+;#q|&pp*RcFQf8IKt&ZVzPDw-
z+LrV;Y>UeL-+NPCtvLS(VUvl=6~#YhaDKt4V1`q8TRC*&n+7=I`Zmc)iU%CxIuxZS
zz`y{YQh7ru#-rQBkNG>F#LW}jM79;TjDjJXN5Hx`jU&)qln7gT6r?y>q=3;Hi9A=A
z(X}L8=C_vrh9T%b|6zev`m@6}s$S5;=hu5jp9_bLl8822(saNhqTvCmEz}EcF&LWR
z!}v>))HE~atA#ideOnV?^(}Vx;gxYYVP4B|n~*XDr{s!(%x$RB+058>?cA`d%U2(C
zB+U4MYqU^vjU34?Owb-fTe+gnl--IL)37B^uTw7ik!>T2>*fzUS~S^>2TnrvHg#xP
zDO8D2X0q7ISz7~~Mr7-u=@LR-*hKTYqS8AJ5XY;g2(-fC>+IM5W24&(<+9|f+O8Zk
z49b==V!^~y-HFjQM$<03y6#!7HBBegf~3VTABwEy($}!O<14~r)j|Rk!0k)|Hcj;-
zQI{_U?RbMLqtygH$U|~bEvID3lcfZMGX_J9&N#yBMT~tvk6=m@iv34>9KGj4%88>X
zbmrOw>;QfE!pAmeXbfmMA*}?FRv&6*4n8u7oizX?C_+m3In+&UXFVdIn=Ga+e(_N2
z90xSh1Ogwhl!C+`;+CL}QZd4ZUlcG?r`Bzhrm%YVm(B0+hG3MPi}(weX|mdCyGs~*x{j(nadAx231v*+?^M?^6}=A)7N%h
zB53^VAwi`PKHAEFG|I$)67&sLLA7p~9mrK8=M8t7HxEs;KEM=xUdIPJ>nfCW@tAet
zY_EZ#fykozucV^^a8gc%IbGTo(3IGkWGT*i_;O
zV~y}EFX}x2ojf$R8E%y4qP$kt3vCFfQ^S2+b-k!4GPtJck``Bi0uh@sPvsWM4)i3O)GYhoSH)p5s&R(&T
z*C%g3u#ay)oSp&Tv&%CEz(2ly{}%JpV3${Cum8?|`v_GreT@y$$vI}TxqAEIH9}w~
zuYNx}y?Xo02X=A!%g=Am&ps9!l_g8HDiFH1t*_I|3tk<>c6|t%RrKurg=5w5Q8zCfU+~oaFP=>+
za!G!gkS7oxzPI;z)8oKu{b6A3R=S$g!`Dg+A&h8Vc_Q4{K=;&5#kRK~Zci|9Ue+1+
zB@KHM_TOv_g|)ExI5@Mf9f_2Ix{+&F{qu6y@BvtVBe$NtYH=4~)+ut_ZgB{_$YPt~
zR;K2}55tII8EZN@3dYM10bfh1uxZ%F_+12;oe_u+5T*1LO
zAAA|=r#URIKF4i+BO7e7+cjsqt#5R?+X#-Zl;lvq#{XJ)w7P2
zx2Bk;r$3$B-RfzYIkb9^>b#+~y$HxVs@IPlw)&dZ(#!p(bgB1{)Ha^Ht9m`vc~5HT
z=k%9;nk9s*HPgTJl>yk_g$aoD0qSqVENCt^CA=2z!TL;RL;9AyBC&;&*c8|0I$W0<
z;<~Jj>+s8lpwo(X`^8tSns<>F1J0OUHQSzuYeF
zCec!kyD(Nv@3|Jp?IOHmm%(6ymu)b+v&Hn*7A!ONYGCB~u}nFOFEfKgL~2dsLs$D!
z%i<&&fi$wG-$rP|yc|QCr9rwFLMKCioa4;h3q}KGg`kvE`l28T10{l??LnLdYG0)M
zn+LnC{=U1tcZiQxcv%vNYZ1U~xA&s!Zc|0a%w;#Tk1a39kfyV-ZE#!`MvqnqN;xL~
z$`T|PzB)ifoU$n|6(-6e%o}hq>&F_nP-Pr#2KRg;e0jdH>cnSG*u)qG_9
zbRC@G&tL4!mzT>4r#L*;D1aO=D-{cB)KcTo_1HcPrF@Gm(bwBgV~hq%0e}b5Eak6K
z<~%MiR|&4CdzLW&tA0rr{W@J>(k0ZP*Y$EmHHy6`vAwxpZeujuBs
z*v-78oB3CCGq)%Vud}EodT@)pd`pesfpa4wpYv$-s5@XRqWbf+Qsbzo?3|aYU6wNT
z+3EXFKjUAcU~Gw>nD3H$FQY+?MMICE*ua~618bzN)hY;WWK+s3pkpPJmE=sJ0bfhJ
z(_YDZ<>f|$e(aBI4f1j$7}Mb9)*5+IDF@D5pf4e=+Ig78Z10Ge?IOHNtLwTwJv6(q
zvz)xO&XH1%nZ&#iUIu#HQPdK_|CN2EDCJvf7R&E7Ni_oVCKo!s_1A|Q*=H*IUl0PmgqY8!=j`y7@`X
zSfD^s%C~gyATCw-RtXSSeya9VID~x)?F3QExA0m&SbCwK*I2!s2z
zS||dsQeqvs&-t^rqR7b0$*nfV^eXtgW|8oHD!B`!;l;+~q~)x$wh|v|Dv-4ASZ$uG
zaSam@!OS9`#GIMVeJka}AV$6zfpR?e8_sxCPvk04))cnOg&j*Nd}20B%?T8lZ?W!CM%I&)kADvULE<>l)|2B1}O`~
zeJac#`0BrOTYAqngQNr(<1&U-go3OCuPVx9`H7tjMJ17vvjnYp#XXz5qcDnMH4$~Z
zm{I#(J-WT)m{(_(8B5JNa0=6A)~rDfP^r6PnBs-O=9xI?>S42^DMnVDQ4qz6Ke$VT
zcXJpFjr`1+gM@p%99i7+MV0dKasQtmM|7^F4e+n9yUfs-hq3RXnUY2Tn81%y
zt=}IJs?6jn%6>T39JRF>{LpZx`cM^VT1s
zL_w}dnobq3D4YU_DJDo_y&8Q$3$=$Q@DDR^&=W!FcvP`i1szm~O@cHXpNrvfZR&fsVhskPhrTF$qkCcu_rrLBV+%%XT!t1$T(3JKG`W=&P#LNSdB
zsN~3u@yJH8669+IihajzSUdot?cqyF6g~X;5)2=1ILNYiYV1cDM~z3ds1CFB)$W~`
z@jCX^$Hel51{#g>*akdrVD@SwOsw8ylVFrTm68F>+=eCN{b)214mgSvQO9s{%gOcG
z_Pi^H@?+WCE)ta@c!*AC6?pde+1bs3mcKbCpvP#ZYGv@-`4@ewu)nI6JhBP;tPcJP
z)NeEZ_4Vzfo%h-nrtP-=NWNhamkNZmo$HA5*EaRqE*)Om3@li+(NQpplH^av(n*o4
zs6R5>n6u@}8|f1>OlST>n(MU)rlWvqSZIxPj@3F(B8<2b7XFm!wy3hpZ@<;;`^~2#
z?kGuN-EKi@;rVomPqODBKPG6guskN|gCfNDhrSwMl_)ASQzocPsyq5|k7ILlF%*1@
zh$evgc3(c9vJ-uC1kpyJV>l_Coo1UU1w=w9befJDra{Ho|EkS9gHqUxHyBUp4rn0y
z6`Ns>r?w@s^?Tx4G%FX0#`k>16u+xyKs_Iuxoi`*181g2xj?n8vtosmz_4mew9{x2
z5jvBW6Oa1?q!A+_flZo@HLQQ8=qoC?Q1umyKz3kT+Op7;D
zI_{rZk^tsd%f?m#V0B-To+#HwW7i30GvTd&iLiHH(<1)|Zh-axxY3
zJ4JKUJW7&9ZTJq~9$j#wkQkW`l?8G&VrXWqS`@%K6){JIS%6DeYoaP`uElW1GMZpM
z7Q-*AKnMzbXIJ|4YYdU#=D{4Tsiux)CZ1h}
zc{f_JpSCUb0j4ts%7x`=ow7`};bTF5raUOI?I<+J9^mVd87|-OPhIz*LN4XCUcP+!
zLXER64)A+jrAQMls|heltJUHleh@HT@)*gy@JGBz>~_07hpYQR`UAl&8wfsHIL5`)
zq4v%;56i07o!@BbvuZ39sINh|vOuV2PZI(koDL!50s`OWcp9N6Q{`<2ZWrn_2Q9`C}r4U@+o?ml-4t3b;CAMHy)M~ZTc-BnPa`afK!?G(CWHvlr2DD_;v4)`?h-V
zAoXUjfDaCESCl(vr&K^XQu5+3x3>YH|-BNy0{X8Z>`Z;C81CSrX4IBIM7AcG*MTCPP^aW>W?V5UZz
zDp(=60LUPk&Z7ilo#ERAk32;&
zz01liN%mBf5`eE&`S=?;d0-Y1v1~EJFu2?zo`E8Vkteh;T?Vi`Xjh)YTmn>Lqu4Y-
zEhX>Wc;J>m%w&^-S}jxAsBd5MQQ8is0AiFHv#`2J
zCpV)sTd+#L>rD6<4eqJF)_^-klr@0$j7^RU8N;#$tU@Lz7#bUcf=b~_)@RpcjS4cC
z=WDO_*)F7pKhE93l^k$#h`EX-j$V!Gxi8C5z1n6~%7SZOV=2WEEu1Ja`f~u;4kiBF
z#<#<}d+yqeTV&nF7eAfDzZR+eyP7jp%_&A3P-$w?OfuZ6+$aWXy9dH0voIC1?4_*r
zwhB;+SH6{M0)nQWP{fO34x$SaQKYpJTyAnU&b$QFAQVQ4rE
zrN3gg==+-NKdt+qZ?4{L*dRFX&#m25io$dD>T!zUKq-^%YT?
z4Aj=*zBHMgt+tx4G5dwg@;CF?*H1SqKU1WW?6;9TFuk|1o%vciv$(9ZvzAH&`IE84
zM0O=Z1Lg2D$b{>+fUS(dG=4X=-kNRgPse(h;D^C9m?5u8rc*S@!Ni|=<6Ky7O_Y&r
zL~pI9!35nEXsb1zfH(5H0gZ0f?X6{EkoW7Xi7l-|-;tYCkwP2~G2&4e$-}`=+T#pF
zA18!Tq_aqV}LGT
zX%yj%@%bWqyC7~QJW{it?J(t*2(3XoP1L*iDDZI!kLNL97vfx$1k*V%b-?+5YNjfr5Pcv)
zKf#fNTckjDaN8L6c@ISjx1jXhzT0ye6(Kt`p57jsg0Y;MX#CU~JKKsDZ|tgOKVQ{N
zOWQ*$c1p(+Zi6ZFTne@4z*nlHNhCmjncp=
zx$?n}dmV!in&Q^ctdSN1-l?}iVCHO74_ulS
zmgPzVSwl)&fcX+|>&}6x0u8p4J9L^K_%8cdxIWRno)trqc|(5eNs`4>KZtmUWQbF-vpr~N+
zBwS`+~KcCyEplKkw&arO6*ejRD@UogY<(k^o6?*#$
zGq%wkVYVy{zGbzOZr)=Ka3?c|HcX3|5St4;hhj#-1!ki%No3e&(atD!1lUU
zbD{`emKmkhR!%3oKt^HmysvsjlsTM9c)~=P-5c87?h3d03BD@kf$c#oEQ4r~aDO4x
zd~%y%SDT1Qk&+}`y1KW8VGHW!esS8@dmVVL9}mTsIowe_
z6}J@)L%~aPw$5ufx<~HZa%km!Np7(9mF+|3sZMZ-#RFNkaBRnM>QHi+t;1olN8z-P
z(dNE_HuuGr!~CJWqqV+gqOKamV1#zrQ
z`Z-xAhhNZKMFBD>J9nrU^APqiJLnc&C$Rq?@Vi|sl8+0vE_q#7y7%U@xTu)LvR!78
z&*Y+lDkaUAdFRWD&X;9dT;BPzqI2H7QUAq_HNR3tnqpf<9?gD78Ef;zoR&UIa@*`Q
zv3~Qj*1{=d8>56oAOj-G7q*zG2{PnMpw%P_Gp~e=LVC*_tc4;QwF`MxbX<96%I245
z>FX4w1E(Ysa32{CleAGJEV*GM(}TRAKVW
z9B)()kmuQy=R^0S9|wM7&9mx$am;K7#ob2Q@Fuz8j&BHBSB0zzx*0G3fUy>GCxf-=
z)^cwcL2j4*-A_f%FnJ50r8LeO_z$Tc&w%#8wF8(Fw<6kkArHZX3AZ^~WP%br5w1Sc
z6VTBVKW{Pz;bS;@H-l`T4wr9|nnaGxYzG57Cf|+t`
zwd??_DyxfUcqcdp@*XUVKLX@0KN
zwqK*I($eMY{`#m*slC-NVw?`o6x)I>oN9&ay6?7&JzI)CSDa!=I2b3{LXS@x0Q9kL
z06MC8kgH4w_SMzp*;UCe#LthPtB=N&S**X<#$C#Hg|p|b8R&Xu^ps3M+w;BBZeeN
zyy+Zw&1nSNel!!uZ`dGX-=m;?Bp58i+EQ`5iQ-_)4>-BPp?r7Bo0_d0+xC8gLf2}A
z=as&Q3b~26vO|@Ulqq}IG6y52R67{YqS(KoHFJzEekOtxbxVlwJoZ)_vq+mjox(dN
zp(xpV7f?UbY^yla*G>;V?DXBE5*7A|W0t+zMqaSu@V!LldL^d8Tb3AKWBD`8U_wb<
zmr*dp3>>h+fWZncz7yDkg3!zq`egp9&s~2WTl{H|YpjPgt3fiW`Y4*e^Nr?(LMcxQ
ztZX$?>1A286d`w2XJArxgdY#CpxhOsEj)<|Px<*NG-D@ma)`ZT;wLWq$wvvEVCZ7%
zle%f=0hWpaK?xdwz_`;8tYD9=yhn~8ro>Cp4HP#BIFtZ#=aOn0_G>E6wWOsJZ$L4aTN0=V&S0jIlo(qW`JB8jPWpU
zPE91zGs98D_hyt7Azbj7{-6ckl34(=z7KVEET57anzf3th&rrOLX)jYthQv>AHOpIN*5s%@*KHtyu&z}b~}bhrQj
z6n-q3Lq03E4){#{c+6VX{i}l$YDMhGn!Elh_n^Y8tP_q!5yt^EN?&~lQK@34@LE}j
zcvZq`YMI`wNE0IO&T-FTL8mthaspTh`McIQb6-Pg=$l}HF62ZO$oK3a
z*Z!2pCs&H+subwxpo(u90FXa-YZ-pu;|B1koC%KbNkg5Mh&PS9!+OpRd&YoO(e#R5
zJPB-Zn2QEZ>vMlt3mh)}*e9>qAy>WwJZ&stjAQ?4L#amrk{#3`J&TaEq~lHtPL24!
zrqtHS^(@#W2l$XiId!&cTHO`yd_ogqXyWCg&yM4w1u^B^+;Uu`eyd4;HQP^22wm*_
zAxMsU#TB4jd=wYKy|60Qy~fec^6D4U7;)cpELn@62J3R2VMDu56Jt`-5zC^Y4B;0~
zlVSjzi5mXq6=noT#RcnCf7%w6+h&BSZxDp4id1{+X*F-wvY`O(IhJ`Ckxtj`t(ms8
z5_+Mwa1*a?p(lvn6&P0E|Ecc2pH416y#4U{Na1zng`hMiAi3@r0ftb^P5-3c&!QD2
zlx{S>ZTK%=3G;@w*AiPiwEKEjlg2k>Im!yH2TmQhqN#~m~^LlpNO#JK-Ae>W&Swq?&NI0tfI7m>`P?gZ5lI
z8)!?VMLsE3PH-yywQA0)`eV4#NQu3%5~B&3uW4;^nNxEhJO8BvX5VVWk4>xh8^~tz
zpgU$DrH-ED9%pZ3GpgY=+l;}PE{-08DY1E*#~9~V7{b<*aXeoFWS%X%@)n$pqUxSv
zp+-h~H@iN;fW|V@vx%6BQhvuS90>T$D4b#*
z`ML4!JeOoJ(V{=1)qzu+>eamYx#%00@%Kg?`W4KrF*f4#-^O>Y6b;a&ng!n(rn#BDW5wO^^ciY?u9!YTp;MgF-6F2yX`$_cvN`d?FaoUr7^!LT+{#qzHoJQB
z_9J`s%h|^d&6K@<`}-M`oUyb2_u}mI>g*N!{p9?&Gt{U}5S+at(?p&$y3@$6dfyul|w5XmxThm#sP`dLbPx)Ub9-ZHeMv#^&zMYug>0I
z6fc3fBQBm)3StS*sDBh1JY_~VoYTVX;tA?nrBc`8=xn{HZk72dW)FxB`LeoJ?7mz1
z?%iA^O>1t3)f`%AC6{JFY8Ko>fhhwh+A`k2)0QpvRXe7V0rmb
zV5_dKwpC9<$IdDa2U}l%I@kn+I3^oU%xbtMKEeAY(+k9DBMBqS5J*=qPIgkjnMA2)e}renAL^JlK~&V-C=wpSG719aUx`YbGP
zYoc~li#-nP2)!hvg|Z&7U1r6Nd=tM0H;b#Iik)M>Km)ychzfd=GC%ZrSdCSH!Kb-~
z4Kp>tK!TWuL{dOeuK^}y+$+H
zYjJ=W{G0#LCJ%;g&k_Rh$}?i?XVJjPx{L9>%y_G+u{R5s0kwIwW|`z+mnlza^6n_m
zYOCuWQW_2lyiO>}WFFY%ein9iP)zOfaiwH@|Ja49S8)c#tjZH!Dhb`U*R(5%sBtbj
z?Yb`{h}$UqfsM0$tLg6U$)HP&P<-m*!zIW&eyV0WOvc^s_Sz&q?qw?1#r8IZHs{ZY
z)juc(F%~#lFdM)k5jLrQxA&s^e6Pz^+$)U!>rt9AwAd#Y&JZRo;f_-HXpz159&gov
zGN3o{)O*~oDfgv9&n&SiO(W2&I??UoZW1lgDPxQm@<1c>u#ZM6wA{T7p^5hStp7i2
z>GPH3DS>+Mbxf+*AzBCbS=I~8k_5eMXBakYIiLz7rvhslCH$gE
z>n}-|p|5TuMv9)`HjfOWNAQLg2xD8#0JYQ-IuU-j1tB?bBJ)|ao_%1ETVf9g6tZZX
z@HcLleaq4LT#P3UZqq1Kssp0()o*v2{WftZ*))og`C~ePB_d>-638-x{e_}n93v=h
zQ9HLBg3s0d-TG+2IU6k
z141j1QqrA5ZD_Nd51A)<=-YCpB!6Q5C#CYK!@
zLvkua5P#g^iWl>e=LHedLZqisgXny|>rabFH=`n%%gZdVoj4!=mm;~4NE$yKl^-@j
zGG8y{NLH>K3yW1Fx%?B5luNNbreDF=Mb916j!Awe(*ePugQnvqh4b0mT8
zW*l!%@vEj(-E9_~U7OV55fkT>FIf(=Y$MA?o%Lkfkd+Pdi}@~Ow3toX@E`QQgMS9e
zVB)uo?lu^nb!${NsO~QINdK{JXZE_2|EsnL%R=%W(%7RbIEwUxQB*VslKl9h%fZ=G&*G
zJrvS*3hlLH72(efl!!lB>tKZ2`bf^RNU-O0N-WkGJq*!xoHEh3vY|@LXT9-KZR?=<
z7aaK}Hx~7%ROmG$mFSi138fu(V7MHTq*C4CQf=#S`IkRuxP@M`;fh}QaEZ&r{B`J?
zQ(UD?@(ghLE@uIo=5TH#V;%7|h0Ceu_xhIWw%geE?(zeF>|^GGyZ$0kf?EskUZu|zXS>bSPwPr
z2ZwO3N}DE!*`b~g=P!JJK5%!t?%siu?<*brTha`r8n4&_O?H=G&tL^}?yv*P^iBT!
z>6`F-0`Gnb+u&`iB=U
zgX9KuzLh7`RqftDjp0X%Ai%+3NSInkEz+$n?4#_45oOqd}&m5|B(00|?rODjfTSqyM5&i~$SZ9qs37d4c=V2;ZY~!E&ceMfM3X
zYpC8bv;4t#zkRo3>UV0x{P%5fk>CP#jk0R~)^M3E(@NoZSw|B7D
ze{tA7?DD&E6lM=7MhFi@MtEaOPV$dDLJNN}gM*ug9=d-#&Wkym2YZ8kazG#KnI1R;
zjY#;)*le#AZfZ>l|w)Dba*!)Y@n`2K}
zi~*pv{J8jxltSTaW_hTFwKCO7hLpxaSQpoW&-#r#&LzL|?RB9Vd~dh-#Y?MTgvE6Q
zqD}P?)6&ZNNi?54@;DH~#eigUR&zYnFQ@oopJq5|n2e|8h;$?xRf_8_ll!5NF0c>f
z$J8Fd*KURAQpj;KRm#$=V2{4)6C6$4o5LJWx!mfy6a;K0FK4;IW6F|Fk8Zm0mqtly
zTt4;6Y~$T(FyvR`q3?}YYY-@M5rkQ{Nacs(35Y%U#AYf-U`V
z=@^AD!N69&cq%u@pWTq(-9Q&5R3>F}QOxaTg**HxF27V$*%Wt6_4UumSl?QJ<~y8L
zT)gCz76@+>jdrz~<~;Y@j)KKvj!Euu8IeXnyEzE_fj5ihk4J3gV|MXqMLGawNubxb
zq~f+tlykwOz8|m*A)?*82Tr#lcKKLm)nj#~ve42X$|IAL30F9VJSw&Wd+lsr$JzxF
z^*14r(2k_Si9XQ$6OZbh^Hfh8ARh}2E8q4VMJvIZlB_mp8Bp8Hw<=A+(9jS
zm!BzH6`I%%Bj6-OwsA7>OjYD^xv6mt1aYj|2MU%F5lmW%yf*OSTaYiqAbJ3evT#5)
zh=szC#;2U(IFTyFZw+yrsi>t+jPv{KAyoZpF&`Y-m}9!k#_?p|*{
zUtppHIy~W9!HnhU^HAB$36(oxqe5!l#vzDHewl>hdMXHo796P^
z1$;yI7KSMwN|#}a(GPfj0W1@Hy8otet-avRV41t7{%7v6S@3s8V8WGP!T(vd^EOVy
zW;{$nP1}$RcwBJj+}>922~9mCZ$~5A<@=~h>R^6V%P-Ojc*mZ=Iu`y2RJqN)NEQ0=
z1QLo4nCjLLe3qpHP8b%>ybSK5l9esBqh
z>*QTkP0y~5>2ZdB-z+PbH_m!PeeOkiE{?@9zhee~60huKfjo(}DsSC^!CO2q>
zBJ0sVlBRQHanmr&-pX6N+yte<@y1EhmI@0^W6qOpQKE-}IZBjkJQe$LRgU_??hevX
z@zz-Ni)QWdlg!$qe(_*mVcj0rv~G{I?FvoY`ld%a#gE1Tp1#rAxMKBI4q%~Ex{W}a|p#=P+WY2>%
zeAIimN))>Onk67pB>Z45*F)@SIs6RU+fJ_VXIoh|2*>gS=zOVf-io@BE0S`NZd=@W
zNaToi_f7#F6$LznXotJn=+N%_EznV^lTU6nIvL%KpC_Or-~V{i+o6{~Dx%Tps9JV;
zjR(HfL_2w&L_UhO6S(oDY2eGIpaiPH@&*)6N2jh^w>SoU`DpyOVtrR?m+BepZ7ab^f9GKV?`
z^%%wEcZq{$^~<>YUb!^9cTD7H-5nP>pbD#;8@?qbVwd@PM*m8rbi6)=YU<^VN;X9{
zta8+fAkNRUlP@3Wj>;W73Ru1E8%@h9--+yFEO7rDLrs?J(x$||)(`AbR+>F=v1yKY
z9;NwKYEH$kvh8bU{T|!0%u=r7nDQ)Z%O^s^DTX7~e=$y(xx#+Sp7mjKmp*-6Je6H#=tUQap<=$ZASdxS57s2&$#{*GhC<++<`xFQOUE86Ag>$#)-YV22)U>!ZqEZ;cPWlPDYmRpF;Zzp&u8
zYt5O;+~l&R5?5Q(PqNpBc*UJYu`d#8M2mDT9?&EC>61zA)b5%fUni7!+XG}5BidZ2
zpVc;M(1%?f=0Oks(9mXCxM%Ul-XbapE-o2-CwouNQwGw=)X-5{LU@r4Q+nK$;Sv#j8DqtT4@s*)
z5~QKpL{=}<(`b?fZh@wsiE+$0kNBa2;U)&R8IP{D_znGT?|HA=-J=YcN3b4}RIpVd
zgK;HnKv4oH2<$CUPUOxb<}=aGjX_ySo6Wqb4|A5af)RAWD4O#KI7bzO$+LOjsHFc(
zR#?(tAxWS%Sd>&=+A1$gD&N~G-&&yt84+i
zvVmTefL_@^uc|@&w%UFvY*Af@?N>0%y^ZrNlQYyRO0rlgu51-o){3f(&nnu+cK_a1
z^L?XqP@r(-Espb(U$Lrh8@~ky-v-jpL8>mW*{K86mF@hhXg9k}DuPGX!@>}u&@vHP
zRXGvXsw-PnenYj5c-mX|<`SgBsuq{F7ME2mPHZhsM2ljRg>8~7vT3-mF{!2EVr8kg
zSXHc(r40p}tXbMnuvA>yP*_%>aMhm{c?1eA{BNsyZ>zbm)m+$WE^RfJwwk`Jrf;oD
zFDnh{$qO46HXu#dZxS{Pi@3IwGi*zwtUWu^)7?kBJV(9!$x0pwkpUs
zEVNaH|C*!!s09JbakYUxw}L&lfjzf_{nbW-U#$a+JquZ5i$)RK=pS3?|Hjt&8_R&s
zECA2XY!$=czA*BlMU&TE`r~3LJL%HV3`ElR{GwC=X=Yh^u@L*eYu$wt6~sjj5VR@k+Lwi
z1q}VrPknwqR8Eg@E`%K%M1F&7_+Zs>Y(M
z5Y#M7>7Au)Y$>}O+6rx*V@yIz@yD(mrQ_|Mv%y=OdMA6^&)Y~
zKaK_!A6JP5pOtShQ|H*~cM8hTN4yQu2HI(5sk9+vhVmUg)^Nwo+9Is=q^8&-Y1FKblxVNuqg
z?8CCGf%?I+n59MBv5BrR|6=*{DQ{#!p1V92%U3qUZG{*P!d5z|5VRPPsN$P!
ztKinQ2YGf*39Sn(jApi}h#u`*6UOamSg}S8+_p{1aJlAq?A6apsvF^rm3g@%CYn8PAziR$9A3~OB{
zpVeDL6|oWz@yU9~CqJEwd^7BN`nf7%ay1+rDtfM~SOE`6e|E*)_4?gs9eS>5|7py4(z^979d$eid{IBM{6e3CiYp`x`^9gS+8$x?ZfPY{{?UaSV%T
zFvOVr*LP!fH!ciec)u2|WSSyOg;DI#SL>k{CpLSG#|a}K+b7`OUkBel0UP!4eqA4f
z#dHxaU{8XLsY4L!_GEuGRCkhbxl)em+SBcC#zsqTj^SfON=*_sbP?8SJuJ!1X_N|xeA=huQ#S4NZR{l;Db@h0J%6tY
z`?4Nvn2320ZwA`0zw+vWd|wTcLVl38SPn}Ex?r{Co61d#j*@oe+9no;i`sw%Fom~l
z02iy;vT_8|#u>8g}SU!~o%bW&^y1q$day43VRsT@K(C~_T
zbuB2hW%i;LI6+`RRtpoSRZ*zkZn1@ND;Be1>7+`h+8U>TtA5p5
zu{;*6S$8$Atg9wQ;{08G$jqbltX(HH4<=fknres}9(nH7XI0gZ>o18s6vaB2qd4Fp
zo0fhU4G5-9@?bRHHH=O9FJ~A=NThS(kt2i{x}FvwPM!o2BYSvpQUrK9T`L@iu{UOe
zNQRF|4HQj)$uoZ2GJDlQOP8VQ?|mBUJm#A6Y3tHZK?7qp+bdmv>DX%4R8%3e12XF
zXnj;`1O0V93t+I5A{1rnLh^l{LP=OD7e++|wF}UoL&)?+u51@Re+G8p4NDO+<45|Rs3n^9*F!lW+h=H7Q
zFP?goekWnAV1xs0V~$Wij+HF!TZlVlcO5#e7A|<~&p>$&1;xuYyv0mjTOG3xI40E2
z7DDm>1Mt>Rjqa?BivfjGWm|$qe6sl+SF*a9h9<0Vi@sG@4f2
zEShB4>^^QwO-p|D=ITTR$-^pn;cj-L#h*>$_{u>Wu)o|us{#pq5q39Q5yj+D96C^Tg*NO!4NYFwfH2Q9}<3{efKX
zq!~clB9Gi=)2p;qjj<#&Nbqa-P=)PsWuwt%*LxM0SXG=?#}L5z$%Wp%R|ppr@3teK51!pze_X@Yi*bTI_}9{OWr0AVzB&7
z(iX$$(L!qPQL49(cuXn~Diw(&i^n_B3?W=k*l$;-M$3WiG$Ab@a2%;HrTl7}Qmbj#
z*eQlTe<|yeOhvr;Qacc3kjkvY71JCh#W_=2T4!2Bmo2pF6#5fp3f97KwzLS0jB8Lr
z5o+n%N#}LN#bDmoG71*}KE{oc46*|_FXAdg3Xw^aORwwc9uQ{F+}!({fE@b%90lPz
zsLCy>!exrSV&Z11LRU7lQ~(xC-j(Sao{jxZVZWJET~q{{|rE#>jcZH*XU(fM$W@UTlr#vf^A_=pr0W|*+LvtYr5LU3S
zDv%3mUKYJgv-sREf~6Pwk_!d6p+6T|V+B-$A9>_Y6JI~Z6j{SyG+HEF)KHjhA-Z*q
z40pcLU78XKn&@9-ISXNP9qct}C&i3|D%ewTZ&b1W4;BRhY&MdcxJ?(pC-DW`u4>-v
zY_XrtUcddoIvw`r6XQ|B`p?y43=JQ&W}^$`@{`Wsnn1t)wA*^OJ#{`80L7DNum+Hn
zvF!S9y8vW+8}|7vcJ|?wiR5?x@*jCI)9Jdsp}*V>gWFwM=FT6#NgrMKIXv9|?tkGQ
z|M7qRH~rgs`v;bH`@Oy1VgI|{-rnBfVYk2E-T$uJJAA&s_Z{p0D+nMCp5HB#!G6EL
z|Gcc;c-SAV8zlT>|NdX{v&A~w9bok#Y~Dv~k&Zequ%yv=_Uu`saSB@>DkQMx-oyX@
z=FM3DCF}K%_V$hrddRmg8jbhSa1p{G9SyQ5;w1ZEGF~_|GUh*^ijdH;h(31sRaHts
z`Otf55?gE`P@^Z+P#b*dlQ8^Jw&VC{*ti;SW#NU!|;!mQS_&toq
zW2h?sgwyE^A<4hzkLeen60Dl3m!i6GEL*rg%d(c!kY%@C5)4jB`)~0!$IZXAyktPJ
zO))ljRW4fk`AZ8|McumOjj~CDDQw5y7^$
zW^-ADaXZP50;R(=BNTEGiGul1mT}g`vWJZJK_q4_x${>P$tzDUl+E82sG9HEul&^o
z9#-1#gNMuh@2CM^;EAFA8cnXB+CzVQnnv&1>@v9-dTn+($A9vAc;|#oQH^Qj+#=R^
zKALQmjbm>K6a3PB(L>TuTv+%YU0ZqFApt0*iZJ2+AwF@2ZShY|xg&_CO5rCAY#c9q
znh*fcDYESM4c`2i{b$Y&%}Mj9Ic>I;
z5*~K?sQIqhR`Lxs@1f{!yn8p+Dh~$DBi8KbWuPV(OIK9JjV1brzWu|NXj1Hyj4Sx*
zonG88mVN0?3A$EDhoT~eaF8q(4kyDDNwhoBi-}6~>7DuMo{9-=jPUcnj)Qu1o%Je~1X`
z6F&^2reoTfsjpmK!;$Ws$_k`$41h{|N6m?=3=`!7x^CfUEm^jBV;6>fjlS;;PFtf@*`8aYsk-~-&Aj1I86;*bofPPo4>+so6jFO}P%tk5
zql%+ks3xasFwtfLSF0~(K3}mymsH&VF^IP(g25+>4w<<}V#c@7?~v1>Vv5L5Acj@g
zBQIe^Mzx%hS8EC|p{yEiC#JudHwMTg-!zbKXI#+A{gnd1-}&ey8iO8`T@(c)@@r1D
zQQ+ph;@+2dpAn47<%)lBO+SEW5f6OYlK-iu|6>JtYLO*{IxR&!7eAWLrZ}N_HW0qb
zK)p&IRZ?e*9i|);0%+nkk5t0DkhE07q^Pcxhg~}Dv`ufyyt?MKdAN-^3ZOdHr}x3=
zt(k=xDcIh0mr8y_$mpedVB*e1c+~R3R2?vX_CjE8-ZD|0Fg0ha0)X5PQw31h7gKY?
z?01QX?HaC_nipoT+uMVt?e(6RniD3UG2Y!%9Wl2wCrnIGx-THw95Kt>W;8F%{)-m{
zDs1Mlr1>-Ulu_1qSkl}XyKc98C_sF*zmn$7s7}FpcO}i4vFrA;rW-mmYQBtJx0k_r
zqSuq=%80~zQ89gWd=51qzz#y}=>RwL1JqmqRg!-LH$cq;uyi`tcmZk-fG{C`XRmBR
z>ILh_MZf9?xZm$ruB>9%2+i%Uv%gF?KYnn%2)r8dZ>eC$hN@vAz
z>XVI^hRl%4!{%gJ+5l}#mSqjHEbEeGxt=UbJ6SY0y-b%`2=+`0*kMA)YE+Q
zfQo$yqQ0M==AwrnwrOu^Z|13|`l2c0FLg)LJkbt%?#t%_@e*$|%@M7$@4noV^-G-5
zG(WWdVXm80`J!oVXov2LY)00(qG?`eDmSn7MAMwm_7zQXjU$@ogQl`;T|cypvZ|*T
zxqxLe@vXhZG)FN|gnF{8Cp(L2eqyyC*6?Ef7haE_pN(O1R
zk&l?>9tI2Wz|;#YE@GNL7M;BMV%xE&2G?n-NPUecD
zAz%!z0s(wduUfsvaWEVT3{Hc1AabFyR<6NwxB1%y1q-Z2hf7MJh(v|sN?4y=o^;-R
zxH`Lh`(C48kg6<{Q%|{=vRTLFM0BIYWcim`)CSyCCk_U?+>I_s*aXQMcFttlQWpp73yq1>#Q)EYV^zmjf
zwY4!D7lhf(reaPX!qJf5bRx5rq3;v=Sl5cZa1Vm5EkZ9OZyGY~QbHp${=B8Q5v<_6
z(J^wJwlHi(Jo7^5NL)-13!+PsxQ;+?u9eSynKW>}C=tQ+E)caNb|S(zC^sB=W?4xm
zy6;)VxG2$OssM}*KytWk;loAyJj`>O@dxpkB$wZY`02p4UNm5H7yI#B%}%HD$4rs9
zeE&Y?b#UFP>p*F@#T_N~U!U_;_*ZemhKY+4AleaX7X(=iyn$`Am>cs*2w-ta15J-^
zfj}vU;yhjlj@Tu}`OhX{*F2(s{@E6E5Ub{96h?5SPai}7nAW-8H738MD`nyOMqSQO
zVDYdW=6Yz8OPZPf*M$L0wRFJ*J{Yl9i`)XRm4msL6;a~g#>PMEtZaZ0DTkqP6KAxk
z+%g$*>OXk}c4TB1=Ea}0f`3{8MC8TrpT&=gC?Jt$rbaf9#X4NtmEP7{x|Z@WYh4^=
zKINj#`l(i%`ynR4)@B3AQN&CkxL7nzJNkJhZ+y$otI}=eiZqyrO)yHCSJGC*H$qcZ
zz+dR&LV^8j1?x*DwFY6mq_t53q**rhrnP~xYH)~0V@HV?$
za5xAw!Z1nm(0?el6+XYmMfRYv|Fca{k9*y2-9^jqIP)Eb_fV{38OJJ$?>vfJ=CZdU
zJN(@&TCoZIALv0B@GK=iYv3G@Nt(#%zI_DE6gN^D4f_}o7^DMGZ~lHqZ7-)X9KGfgRsyx|R^!?}ZWrtHr)oK5|de8Z}BD1>7A!D-&t
zii#86rXD%J(I@iZeS$`n;kdC1#oN=!j`%+u(_4Rp+>8xyifrhVYhUGhSDPOhTIIS^
z+p()ps)GqfxAFh8_paS-8(E@gf99`1$m;GES7Z*f=
z651p|6QmyQ?HbqQIZ%5@-GS0fs!Q2W|UyReh=JB^Z`XcU~;#V+Ezg*hGny9J=
z2=6m<4h$h#w5cNgj
z#BaJg;E+-sc^@TPR2mjED>Fw)*CqADlPfmg6`
z`!_OcGRC{*Bh7Qxr-aG+fRBt>l;#$(My#r3VT
z^!P^ObBuxrM3>H`tkaqB7p9kFH9?+^ORG2E0*GYJeCuVA1IGaXoWZk%3{RnJ1SX>x
znSGryKSVzY#&(8Kktd4+!EK9C@8J(L-<0<{7|d`ryOr)kKrnOl0UnM#oxFc=|cB!UN%EGdqqlN_ETT&Ruw)mwyjq+pQ2ML;u_
zp?i=rq7rc=^#@~=(Jyo)9kj^z?04^vPvJxQdqCU1&rMTx(VP=T6hM;AJp~dvxr*o+B{uSv+d6`4uTX6_!9>KXlAlmYD5z?+E-C{t$dl*YnAqo
zWR31I2r8~Lr06@i8L!0&5}@%i?8ED%*Qhx?B`2+75L`S?ZQ;I#%aq7LFWZ7DG3bd6
z%9Nt_zm9%7!_sQ_xbW~xk=$wm+m`U6;PY9W&gHxy
z7swbvU2?^+puEm6w2()XbXG`r@X~FP#L}s=5N=!(ILh$IUgW@$9suv`;*26DA!8sC
zSW+?#0#!DZF+_0caDT@s89L=H}k1xaVM_#={_6PNEb9Zgb0
z60bCCbX=B&s(9`oijJe*4fsHEisCeWQAEKhMK>oEbzOsilgH)^Kpf-9I5uO9DA~mF
z+HyHcKUzF->1-G7Tqx3w7P{hq7b>}A>Z5h0`X`WYLZUk-1f-v01_GBb6wl~tILU!K
zzttq!yhS*69`oDS>Lgr-i%Ehi
zWGI}&I%SU<`Gr_DmD^-=C(`yx@?Nw-QZ7kQja(-T+>)ee{4Tv#lkiNGFA-4-;>LyC
zM(K&9^=-h#C7F;v8)fGr&XON2ad<0RYLv)4w{H}q0Mt0Rssmzvc+q85>bDF4b-vpW
z*(ARzOp1@Y_qjIx?d{R>C|85z_E@%}@`4(JsJy0f;a}SI9eP~e
z(WFF~b9WkDuSc~A`A)mZJVRyMDBBC3PM@mxO+DV@pN6NNWuCLj`DzpyHX%SkCzX_I
zqj(`C+YZ;%9cIz;lwMR-nJUSsmuUfwHxE9qa;T}Uk>sN4*fgjVJw(`fH0}Mr=c*O{
zcx^d0{A2t7h@*ugN66i`U3+VNyUB77(b=u#0VgiHZ#VO6u4Vn2om&E6witTiz$X^>
zDoS|>noSl@9JCnG;PCpTh=fqszV#OZ#?<1u9=
zxIpZsJWW}T4V~r{No6WPA$n;*p_lW+Zlx=n7PcZ_5#yJ3O2}h$4ny4lErk!&E>JB4
z8BGzbJV_0uy!wvh*;&d1`NcGIp5DtC*_gY&4%ew{waZjV8V0bPom{(wVNWVZmmSMsm{QjV&gI
z{cM!|_ziHYU-V=gkWsp!Z1bZ9X{vT$$nYMLq^{n$SS(m(?
z!E23d&*Ez|&W`30OKd3cnc|$s&gqpLG_>`6gjyK3J2DS}J*13Z(Ta30{F(
zu}mXoV!6?zT9==Yk|Gy)epoX~EB3Q1D-{TC
z%SbEV#Au2m2>|mv3JSWp=&NuI+ZUCt83EF5fLSEBX1+-VE1X7aT@I_Hi(<#Qex?IZ
znoH$sG%-YQflr1%YOOdx&8oa`gqmk2SdOdZb7$ZIbwkL1NR7{}mvRz%$AAC35>vla%6~M`qNn)tm(}(j$sA$J8sL5X7PCNR2#3>+c(FfXK!Eq`udH63fat+Ba?*7
z{;K)+WUraEJ?MB{@3Hq}Xc*^KHY<48`!K;iPW1>A)8cfhKkl<;suz(KX9-BhcolrU
z6Hl5RVSZZG)Z3)r1PzjdAM9E=W!e9;nQIUlg>qKawDTt#K!&>3u0^6h5
zUu&jkaFL)8G`#U~78CMI@GPE5Ov(9Ns4KsH!qy6kotoVAa3P|vMU~^;-cIZ6$zzz#
z!_H7FwSheelkp187B=(wEqoH9_R!@;)x|~SPge2LX;O)%*L5kx64~s-L#*C({WG78
zT~=!$(n}oqd$E~gut&CTWT>5CY9?E_!+$YND-dfmh`kP@)kM-)%-acPg^foAhuu7b
zF-+FMd@1dG1qv(#P$}lbPor|`dJ-DsL1x22wr98N&;AA`22dwwD`&D!>-#XmOZ?)9
zs&CJe5QFzqm|OzkS(u)9Lu`R{8hAwh|?f_q)CpKu^rN5^{o0*N?#0uvyzdVn+YN={uf>j{mg#
z;xbVEd$wd1e&h7_Ekmgahzne8%4F(c&YfO
zlIf7a%OQ&kgpQGDNCi9--n$0>!Qpp)Q4g*66V-M8uiyD5$BB!8P2kpnNElTOQVnwQ
zn)Fh|c@Tp3~JA!Y9|-2MqS~XJ#nN!c-vRvIhg}Ys~){8YiFy
zwMU1{Y91{@{aB+Vh8H5rlT7`wgT<{ar9hdzb28#0ee1tq!lPsnEYvOL`cTaYZla{$
zb*+EXx8G;$N7yIIzEhx87qqfa!DQ4(hbINeSVqh6fN41#!I!a`M7|*hZ&5P9Pbixf}w}yU>HD+bB2EZ*GB=3^Kv@ZD!)G7i>WvgqQ2QXj`
zcDR4|-DAJ|w4@dVD<07)yaA7!RbsM$3XClLJ!*D5ni`wivV+gS-zo)ArYLQ
z#xe|?8n`2WD^uqB5{`QLb+l1`%xEUgXP5f&^t7p=;qvmd*e!S3NWq^Icv#=_;Kp$j
zY`QpU<~kc=9YULGSuItTgn`(qg&MiO768vOz-Y|0GuTjq1&+kgyV2GfcTvnsauEr=
zT=rG4X~U!si(CvXv3$hAJ)5DiB;EDTMJ5Ka2VyoXr-vmSKo{urIKtQpNi3i+Hye;g
zH#>Jork3r{OI8;T0JW42ti7*DDPWAV`7SE__1OnlKus%Vv8q70rGWBU+sG*q&kDLm
zBn9P6VQ5hPYde^hay)85FySr(++l?GFc}iiGY6w2^KD`sO}jqVnv^!dv6ZBz^2+Mg
z74+gz8IhIb2m?c>$7kUX`#~90eK%_qQGr!)jLA}kl}H
z&qi}x_4=7mL=Ou&yI8Rp0h`a-p8lXaG&V;%I>UJM`=$ZOns*BAcY0m8P#c>t^#+$9
z&Y~uBQ<)xP4SBWBHcdH-SiVum9$l
zZKrM->r+miK5cow+4BCX?7bfK`)9P-upSGIItFh_G|C`I(iAPMKZG)-dm!RX!d8oQHrS4Zvum;B)$+s*GXBIUW(9WZ?4x(8Oog721
z&@xl7>Xe1kTA~P)4&NHGBpwua26bGpq&t4sHQ$uQAW1n0jLRwTfMJ670m`@H3=VzI
zz`_B7u4@KGqU2$wAjlfnZVg=6PEYT(A!ddXrV`N!?
zhI3oAtx65NIUKWOsun2fVQo*gO0dUT@W0C-&!nMY;x(10umaGta|*jjrb9B~m-FWB
z8M~u#*diGw+;5Z{HcMjU0m1QrsCY;?3iQ5V2#1iyby5}KwAsmdFuxTIXL)YTW(FIA
zmX;Ypx2$Xr(&7+)YcRw`u2^P76MS(I7Mt4(iv!rdE4G{CKz4!@s@=;&KnxVz9c9VM
z$3jp&+`6sWTT7He?+o3o!pm(1RNVc~^|7`L^=ja=w-uj^(A~9-{cQy*26S81t+SO|
z$AxVK+gR*G^nDT8n;IsGB83?-T>!k4s*r~eUfEQm4ns&MTJu+NG*Qyrp@+ySNK8I;
zxV(KCEMMN4;VkpA7p{TKm+$Yv;n3ReYul#sSY&xGwVixgtsV8%W^eD%+E<@k!4p?!
zapuBWhkVnJfl0_x2X$}Z0*NSgpGQO}f)sZJq;$zTIFDx0`qsotjPTgVYoC?(ZV3ZY
zrtL1WDRR*ZBhP%>ano~V>&`vNk7NgW^tp%YyIAxozUm;dL3g>
zmyYI47-9U|2;J5e7opSf@QRbiFdxx^u~F_UC5G?p
z8Xe^BxOmD&r)o(R7Y0{01*S{ao{Et&&#I2av>puyO1_G)g0-j%Fi6IA*zR@;f}Q&e
z8#HCycAA7KwR+Jf9;nJVUW|jaqtZ!FrAMw$5oCmR+-1%AK*YHldO}fAqT(LM%Ui(_
zi4-)zbg2qna~MzEsIsNiHV87M3^NsvO;Q5}V9upbGw+?uogA<=O~w^vW45}C6?LG0
zX!75)CKN-148}>h_VGfG`<+E67kMR(3cyuBK{$f(816$dg0Dyikefr&t7VcARkm1O
zvI`|ueF%mzg5K=`dV5A$TmiyiJqVbgrK>7eb7BTy!cArYGC;pV>$3{tO;(G5l-kPR
zjXI=@&gXE2>5In!26ep-IkqcWJXlV$S-6(WVaf)34t|?bNH%hoURW+s~egp8<`N(C#87=in4aVFT6oV-gHA~!FIBCE(-lmM=g6sZi2aek7mkB*)EX~8G-#5;ab*o>N)=vR;IUGy=+W&tC)?+_G+$qh(1U2&6@Qt
z+5qtL|C9SaJKZc+Q9{N`$|Il@+9puOWwVB(7Y0XE_!ZqU
zn|0+&83FJhY^hI$+(s@ifuMGaFn}Xk$qY@VY-Gc+Szw{fl4D=eN$i#qsS#}M8J_7i
zH!)r?qeabi>xk=UeWzu|FbRL(eG0SDuUXlx+e&_Z>Q!@h**`5dDp$OuDW&6OG-W~T
zuV>HFX)iN6)TedVL#VcFrexBT)@u%Hd&l9=y5g1|#Je4E%J6>(nV;b-t1B|~XFE@p
zK{7{^h?JQg+U1W!MOdRVD2tB?ISNp0HwwNcoRLtHF^S-o=Kz83cPI)R$7~a}SU?jM
zP+1lsG*8aVRD@dwrlrnnu?TyOk2B1We;IF9RSTk$C8+|;?8yOeA72A{5@MWQWO5Xi
zuwk&_WMeQkn}!*bc4CzMI&M
zEA`1mZ>_?sYuY%=NmSy|#OY9{YIU7jp8zFXZI`UyKWveLs4KmT>=oa`;rpj-SvE
z@&ln4>ob+IKiR#f@Ij`<^k5?(QK2jN@uktS8bt+?C(VW^O$GPVyeL(5Yo;qP
zJe68ShUPYka;RMW>_NV2W!?+vj8$?l^U~~_{Tbw9J8
z?Ox1cfFkw@2NdQ<6nQCL%x*{MrQgSx_+EHJIR~;QVn&Sdg;OV9qET=kSUO(fE(8qoB=zKR(&&(W;Q(~aa7GErL3e<&
zTR@@x-HL0%3O=useuj+5N$&0fGaoX8B8y-~&(w!fJq~C$h@D`P`G_zy{AwhFtJ%{J
z=%qLPiT)+WzUkRB3JZ%lz3|KD&YV9k;iDpmgpH}>$mI`HJK>k{n0{jXM+*5Wg00%u
z)A2S?n8J;`3r}AYg6r^RG>%~;!x1ak}OTG=qI;XVJuo3
zj*c7pl)d@syPD`_Xl6r^x@O}Kaa`$qK;ApM`GNGX9JED7?-s1zBo^j<{F_EL6imYzU5Aygw+mCux$yOm*d8O%2*Gcb6mteO42v1bS}^Waa9KQ(xB
zYmI?>F=oyxT(3e*gom}!fZxJxF%f}B&jS=2!)XU?E~6W`+2N^%t63dl$Cj^c3ayHf
zr#L<)FZ}eOKG156K!jlDc=(gdBcB6_ra4@NYK8-M&42;UjBT*Xm%(h>)_wUX8W3PK
z-OOhEDpT-q>NMS(-?5XW3GY@B^CiNx-O3_}k>0(NX?8+VKj_#^qjqpaEa`#-c%%ok
zI`HYk@>D}g-UV}5EIn>)#s?TrB75gdgq81~%3$2Ti&;av
z^iaQ+;WmcVv${pG#sy!d7i?52Hym9UW&Ad05ii=(G|Lb_vPD;E928iT7<_1h5ma66
zR)?XbIl^v`hUHSD>?*)Gx-qBeX-~s7RGVO$Ef_ezQER-=xgZU9i(aZl(p0L=H~%>0s%@vNpr<8hZ0n8giV
z#Md0a7Cll0@hyM}t^&ATXqDDTi&v9W%
zotrYlNht)_>}%xd9N}PBiItD?gZA!e5`2l0YyE&8_HG3_@nQ#b;Iz6EI&!?W;!`p5
zYy-jR=vdJ(!K5?^rIL}PViw0AQK_e0Qb@wG#sKA$P3DwJl9(i#i_E#@L-ZAvJi#Tj
zaO}5?I`#F(etn^WzD>~0twsYs7(^s)$sSWEw&1_j&gK8`jBCjF6AhP6MEw=0Ph
zrV`azoS@DYc4BpE5B70{v(@3DB!97v-)l4)JB^UAkgPYn^+iT?r9ejU52dNdi+~Oe
zeRt=;*4yTgP)Ouj+`dq@0;AQ`moRr(H7JWupTzPOdBbr>#A!
zTe)~nCLxQvMBM32X9A2sFjJ!|<6Qw(s?vF>`KVf)E7NFM4rL8PX|rdYy!h!|`}xVK
zXC1#kYM;D0JAQxiS|moRfGoW8M7SA6F-3QE=2QeXlTZ@Yt0*C-y;ypM;C((`eXI`m
zedmiK?_9Vw8j_=xQ@)lV)gdl!nU9pZ$?66cuUXEaychXa3CN@IS_n1yCNojzIl*Ly
ztWaW2xX|5A0~#)I$$30c!}*fKE$QJx5{XiNI*#YT8Tu_J^3<9oM>JSnO7v>5Uh^|MV(-8wd<`o8CNttuy)-W#x8#sA-EA_uh!@S{w0qpSZ$C+;!savJu6^!D+i~-&J)|
zu7MvU@!q~<$VnO6W@lvqz!TN?yY4+?A0^;}8&5P-1%7*1dDMQ8glzjD=W!tbo~VA`
zt{TBpSE(5QU)`l`{rxJ!Xh95WZTW#M90bI@yqyy`-*GBgHX<$k?!D8MOOYLAX^PXY
zUTGhd9jtY9kk%b~h8t4Fl1Dl?K_m^?Pk981yasDk{v+gYO=ItZX3o^?-W_JNWgt5wwCQcyVZV*h@IR
z?QPpBN62v8os9HE^{2HpC9z`|%rzYvTBcn^@dj_k%{bSNlldUhVS^NA1OK$`owOBUK1?l~o~#yoaGoL=q3)+4Z%)
zJcV0NdpRHUc|eu6&v`9-If5wM?Y?|fh3W$>=Rfe$yz}~cz(sZU1ywp{UuUH~fU&v3
z5|S*(MIKVLsV_Hnx7~c4>J!dinQ%=mjmq>HD!^{_s6g78o`aJrt98p6H0nxuPbkQ!Sb
z{4R60lbYz>>Y+cRT4;+p=19iN7Da?RLyKr
z$^0%N}xL_f6DJ?cUJZ^?ylUqlTxRu3pJHE
zbtj{>CS%#riY#zpg3(4caVV63zif4-3yX>s@lUTH#>;E@?N__X3aVwb(rIgKy~c_S
zhIPa^m6Dwl$Nm0JrlzXEte%WycQtA4f-Nnb5>KTf$gcDk!atUyH!1e(GR8A-abdkU
zdHwn{8y|ELrH(Y+Ezq{So(IWCUSle{_XOHnh07VbRfoa&GQ*W)JOx(PGq}uhsG@3k
zJf2MW5Md;9Sz-(H5juRjhcD8H+r}9G#LQqlku-)8b10j0uwKLq+;`j}G1BlY-*31wI{Re+`-_WhLA|5fKZTs
zRgpCLwM06(1HBVX>z)B9{uu}|G*pPsKZ&R7FehjrY94RJMxkH#h_)BsySJxXM4I>K7(jj!m>7st%@~Cm^E~39BQ1x4{fnA7+IK%
z15Ok}@~%QuBmf((&8~I;2_p%aUS;B0BY)s1TrDDA^dQd{_>nRgnQ69YEUS?&LLJCX
zM)0a4YmT8lvVlmuooTcvgL;4w6&DkXv{3>SN0zS5tp|1B3{pu`Z
zO0@}%LTrPUb$a&x=*^2`&pLZ~a%w$)dwlxFK7%vtw`1%5@zE=|;ha3Pemi>g>+z{)
zot#;3j*p+8T4!&qpN*&SN$s6nR8v*3t9dj-Q>KynSQ6d;j+5lUK*5g-VyV
zz;Y(IL&oUWGXA)L6@{CcSRxK-T1-C)PnQ!6qYty+c4)W=JF)bo#SZ|ELifkK0m
z&v^T|j3=KDocGqAb!Ht{G*hObPB!P5au@fz^x@-3OCk>dXT8&Qg;FJ+764)i++*Z>
z6s>U?!qRIgi4`0EGHT-JtAQyl;3j4Qzn_>*&TFa)I8k
z;2Yd3*M(^?ZZjCg{L)4s=>kmak;im?4K;Ql&gD`jqZ=6p3?)Smuih^?iO79hyDV*v
z*5X4qmv1c{V9{#&yU=oL$F^H_I*aR0w2WV~&@LEllj4(%xFwFX$=^MGL4lk^6e2CS
zCjK>>&Ak5})yl#Ok#i#E7g6Fk3fD`hMyXsCf6bFO?|wbY>07CAFZIflwK3I2u%J{K
z)TCC)fO?M@CW`D3uQ=8rT}MgZ+|^2ptK6tn-$bR$pQlV8Q6(X3rtyz
z^(3&4$v=9E70}Z#iP_^&2DHIjGN&D;2tf^f5JoXx+ObtrY-Zuih+fofRCseA6~n}7=Z*|n7A?wF7~HIOkgyRD(GiOGXjCK6iqr2E
zqRAmGI+Y}vZ((UWG<+sNhAUl-mX(O#cWpR*(lFj{<+O|Sox*c_cMxW_49oxc28j2X
zL-4{T5bf=k#gi`l+49N_*rmbin8uZ_NF(sV&f38e+dO?vI$Q(Mui=&z&dVGYRFo`$
zSup^hc_+bhuM&~k6YxtY40w@{H}5h7O+-nYESp3Cn}DaGMnPQB&w!KlCk3on_+f3Ztg;Q|1Gi
z#B}*3ovz$e2*>Nw)E^GZho}V{tWA^SWSd*-hD#K|{5D>=57#}!tGaiHjovvkBxH6`
ziCqm0kk^5kVoS;;z(c1*=E!Xmfm~iBc$76LnyWC2oCv;La>W!i
zEC&o3T)&k6qF(BkoSklTwHVU=z~;NxZjg+B3~BigkZ>aMl2x%k(;+ehKCQEEb8~Ps
znNT_dAhR?)JzOm^9PcDd;D*vh;ZLK6Y(71DP5X>?wR=aTADW&RI)Os>ZGC_;U!e=w
za&-@|-C_rb3(Cy`YF9gly
zR(8)+*MJ6Wt_^^cunfoLEZC8T7^cIyd+g
z>`fBlLBy@6$dWQ;7A-=@RkECt25qvL&u{fcSw|u|s8rR22};3l`j*|Yp1nFcJ$>@x
z6)aR_9{*jYNP?Nem{L8|CjNb4o6-ZkiPtCdB*40eK*Hb6;tCziA6~zL;5<
zbTPBaq{^KmCG%W%5#FqeWlfV}m|8!r@79?xUIGLHVLF9YTobY-z%VOx3mNEfuoU|<
znyyDM?PJu-VW_T2u*8dMn!M-qV|aZA9Lo4(AWV=1!MH$#0!O6m>1o-A5NbFPX`^Yg
z$ndazP0v86zB)mx0aGoXN8nw;3eOn6pB>Hz64o$#{3-u%IRX}mzj8JU{24AL%l^|2
z{D~N!jTZHj`q*ft*pK0`!F+W~k3QD?FZ03MRW9WLhN9*FF2^N#Bw5)f~>psvfij*f;!?(0_#42#PC=~=)aHw-x{@(3~
zO4JZP;OcHxUz=GH(8Hwi{PL@_5j^QLq_w-}U}FaY?%tmBJ{e7dgJ(w*VIEJ29<}=)J2Hx$Xmy_XvRBhMs7X%Y=QbyvIck9RJ(H
z!+rkYO0=Qf{^66y{?h}e`^mXo~`3wvXMBAXICL{
z0aB&7lH$;KD)QstiIP|jtZz46;JoRw_>EZmCQai2HzfJ((3MsAUKAwbkTT>@(v1}y
z4Bv=(MKaJAwrw!!)TcI0h=<)zyQ&nUI`nqXLj|-)#reB3-Mpu8X7dY-0J!STqf&Bd
z>FbJrah2kl!rhLQ1!@9TIdWMooV%)8@IKcivG2J1G$MG=AUQpsT{pt
z*)-MxG3pgl1|-&CuFI$q>z;KLtuP)YkjOQ&ty=r?Dn9r{uZx}S88CxJ4w7Mp-&@KG
zsU!H)tCu}koyod=u|W%n;yC1sZ+aG7_(Rmp%_B-fw!O}*hZl}5bu=cJ{%Ii-98r#F
zD@2A1VKoFy#mg6C!J{ox3wH2kg_Z);4!w(TWJev_RjAVHmg9vRo_ASf79H4^;jam2`ZoC5%3fMF>
zl0j};a<3^(;;OQERl$VInQJIEW=*KovzoX&-Td>hcrkW!U6J!2sZ)wb{76XTXOl|i
z=2Fzg4|Nu0>-Zd7$4^(Sg!)z-f&spvagsl1s1OVJN`A!`IMg&gG)&p!eXYJp^tp_a
zBs!nT)vxD37Vf!|XgY-w;
zH~?;qRB
zNL+G}#lrwq;UZyS65FD(tOqv0j3pTW6%Q?OguQ>szkylFs9Yo*b02ra&y>PmU&8tB
zI1X3)0qcDaf2{Q2hd=hMt`FCMuFK}F(fS91ryz~R;BOyRb=PlGHFvMuQFDtgN1_n^
z_2s3n4WJKURpWUAAIOHKC!C?e8YRF$W{l+hJ%cpOj
z;NRvW9&Zmjm0ur1Stu6UQsr7{l5vHRTx1@j$(O#!P=+Ao08cwonDH<-VADOHaHziN
zBT9Z_+fBdv9{;eN5XeYFFNz>V(cb~DU`+^H&M$(yAk41Ab>TL&T*u4wV#D*#1?ye6
zu@zUVkL^o&g8VJuv9F+LKsW>byLb_YY&E#p`?
zCs$>#2xhklEU|c@<(VagWUAS3(S!s44IjTav_rk+N=C(L3CRN#cphHAL-~_~a)-lA
zKH*4&#N@jBQeq&M&Wmtuoo>$CNJB7Dw$SUhl&fgJQlzKwu}U2&kSsl7)b_;=MM1(^
zMFIwqrd1&&N)tTB+FXeWVwTLF0!#16`$@Sgt6MBfx0T-k*e%^DXL
zc|KSdt7ww3fCx~vn$SSV`yg5W!eAZk|FoAZ8+lKU#Bxw)DLf!30SDQP0t0vfAJBN3
zfX14laXLk~5q2dlOil$w6G~A^=XF+`TW6Pw%+;5zk(G_;iH$;ARw=@?pO
zVi_tT#WKq3gIehZR3M4frxc_-JhkY8b|+L|Ha+D>v6MiDOYsk_ka+AGb_CXIE89b8
zEQM;rAp=28VTr2qwOCjzH?(Z7L}hK^a*+}$oU6%ZzRVPRUbNs0wWNaRlR}l17~v{{
z&4|_uHLST2Xp*JE!zA9UFu|)IlGRjOT7Cy$0anpAtgRb6S`IA@Ak40?_Krw!iV1g!
zVc@uo!sJ!>;tj*7B*Caj5XOk%`1GtJ*z6*&
zugKn;TK=YrS+;$-hHEQKKk!BcUu@$x^o43YMSV)`m1$4)49ZxS
zazN9F#z~~e*wffztd32qkw%(2@=u$vvwQUM#S^0`a;}e1-9Adzx3iEpj@xJ$EaArS
z5`BL#c3Zk2vTW4kVkpc8bz^?^X5#r`jX+jiFtr?c6urfGw`J@Y>dTRFn0lG*a@3i2qy$w!fnFLHXG9C`>g#IQ6Q~`O2Dv0yvdoL
zh=*|CqrxRyK!f#oE(Y-eUSFoydn?y1OsiTzBu(RoTsf70909q@pWW9S5oHGpbK%
zE7z<@Ua34&4qElZBwJFeXZ>?;Pni*qf8on7Up&De=pOOcNMF;liA`d1Tfo(OnuTQ0
zOAiFFd^8;uO+n8A(bB=o4VkDh^w%+)D8;bN7VEw}BgZb;D{T;qEnXIzY;9}vvUa2C
zNG`XIWcA=J3xg*kGh70T7K_jfeB71b=2nmWa_6yMZXE{Wg2bf`O^L;r7X8bge}AAq
zd2MV16F&*B4rbB$fxPYb%i9NO1Eme$e*5Gv{~Le&+yD3f(x-Fs_m22GeEi*Cx{n_}
z{;tz~`sDEGUpn1yzx(#-U#!mWF#sEsJy_OXu9ESS!^0=vm6f}g98R`%68_T9|0aKQ
zbN8ifA>C;_xh6ci@S}8YL;)4ZeHBH9{Dtkj8;#LO+ywDFwQN{Oe#geg!3NIs6+e9$
zE+(t!qxAtu0p6^Cxa0@d@cVyW%!6o#_R~Ldz1fT^4TgUbi+{!b&!&C4S@6~FUZkLU
z|35u^{Ouv_|8Ji@?tBO3I)~lE?;iiT|9_7^vGIjSAV5FthIqz|Iy_n|<)bv_DLkF5
z<1OPnr4YD`9B$xL6u26;
zhy=AKtO*E&)LW%*uWw__Vj~9!`6c-sn`>0e$3K#IA-_j)I=q{65(g*gB*0bbGh>;I
zqvfrS9-wLabNbcb_w!)AoW(dJKzwKMDlPRYT7<#sG`wK9hiCY59xSC~oaJ^&=FL+u
z($_s%MEM+Jt6S-dG8it2zv-_d6jipnIoIsEt`%nA3hy7V!uth(0cuKc=r{XV#
zyC^zs*&`C_E#i1V&LnoKT;f*AJ;Wg&5uv-lXk?_c5#KS77%~ri8~(Kg0$3IUnF(hj
zs_WweR?8@_af?$U`}Tpo&w@!ux|j$LW~%Agou>eQKiN#D(M==0>y3opQ@n&3k$|NZ
zRa~GhV&OH8!(hedKt-+yvh8IxLPpafd>L9kpVOt+uC{q7)v
z$G7LJXmSx|H2eML{Py6`f8rmur*V?B4?ErO&JWI`#lZ^RSnU;3Liiv#b34R%3r(Qf
ziS@Um*RR;036XT{=6;H7QyZpc0o(m1j6Pg)5p;9kZdyND_I|Oe_}VbY`?e(#wzPit
zzB)(mU;NA~(ABzNH!_it@WQDgQSvo{{qAc5O
zq)xs9dsH1nHX5=iIw{F|Ngb5P+(UXU60EW-K8KJ>C{*Ag^V$hOxK^P+=2i
zpz~5|k3*WEQA0Ks1L3t!!cepJY7%20Qx5;d5jt2v3rxmlzDQ)Ep(~8|vq}Q8sx}e@9==(FYul
zWrNzR_2!KXe2ifX3@+=4nFxPFath-bPW2Q8Y)wN!x|B5d5UK)1lx-aA8>Ba4L^*8Q
z4dHNOYBPnR=w(_DK~u=RzRLMpoC8=)1PJhiNZHkad~GtAqk`0CqoHa2M1-X&+F>LE
zo31Rd5^Z_!$Z>{j`(zKb>?`ZT1*WsnK%0wud>>i3|a<2vE@J<#5?)n(h4z)Uc#u^`~2d
zrrfG_U36&n+OqU}F#uwdi0oqAvhr8sLba;GBJiic-J|;w%nE;jxpEaRWf28Slx5?3
z4gr-bBB96&7<@Gr0r;{Ae1#(UQh+1ghq4FR)iH52I3BS!ZXN))P!9lIFcknT0f90u
z5R?iCIA*peTSOyW<5Ck}0VLZku0A5AKz(U(m3y2rBfdELx1y!gg=L#cV-`^n*3dXz
z3LaRC9vJ2%U(!&jq<6Bc-g--wWTeugOypD8V}!#R-Qcbh3U@3eP;yCclRV*kdA!N{
zwDi1HI1Sh1%ajfX3o^xRB%Nrn=o`ieD`}~H`yf3+P%G+>lPg0+Udeh$54z9f4C(z@n%`lV57Vpu8MK&*gjbvrFrkm
ztgsUKSVxiwAfu=V?9`E`TYn-_g!uT6qt4R*z`s#vWDDsL@G2y^aPz){2B5^wcXFe;
zbKB)D8({fKyecwqCfx3bKg%V5Lr#O^+SrRYMFbA=4Ott)O>G{)%0$USp+|#JXISVJ
ziXKXOrbmaRP`d?O$$p~P108oXgH}a;#R{IivWu1w7vZ81`bfgkORrbA#ft%@(G@3x
zB>(|PQ^+0Ki8teLx!wYX4b<{CBdthfa$lYJ1WOo>iSy4d=8F;E!1>O_ThX&+K1WW4
z!#M@*l3YzVEeBLCE(>>%
zbGemMs7j*pt4w=YwIK_)8!ga#u4wKktF28j91*1)U%3wDUQwy;s5x;QlkP+!D-H`*
zp|Hq87XQKOSZ)oqN)a3MQI01PkXO>EtKg}sOiZZd6*KuVM*R$)fWX8Eby$3D965OY
z^|7E`&v?*RBPLlw%TfgJKw$v+^p?truCf#S_+fhFew}f82z6Q-Ael*q=Bb$t%AbnV
z2U4Wg-|=$n+&`A?)iP*bTh<-)uc-*2TpcarUBSr4I(ZK>jk|2tH&*&_TSsb$rKvZ)5PWtFIA`Nh4Ivn{Kx
zdN$u!ZVBE)McYc3-oxoKTjiR})*bYit%uNJwzlaoTVGFuSy!!TcvhesVpB&iE|-ti}9l7%Ha#^8Ohb~9-1H{X1-+i;Y<@`D5ceukyvpk1F2
z?dIgoyXbpcJ8FAdb#%S??8l_&ZSAJ!)e*~Scw6_?@3ww_?QVAR+(T8G0x8w*wrc8a
zTYsut|5UmDsdD{O<@%?}^-q=Sjw)AO7LhpmmcRqYyFlILX7RCa!+y6tOz9i0`{}Hw
zN>>3w*STf_D@|ISMOLD-K<-3UJ)|Jo)1m`R7lckO8(~EKpf|{q6Ry2R(R^{e;+(

!sF?se_b~}W)#fz8g4F>ynU1K zfw~Bh*!XB3HK&>Qw7zzuC z7WK;zyk_}vwoUR}PVt~}acAie)y`!yT%IA1AsEpjtkuf<&7Q^LNV#WWy5=OIP>Z>S zy(ge4h*bbnN||7Zj6UHA%9WBhku5GqTvW(CbJkCB$41y3U`orE>{};k`zYM33Us1& z)^2hvIH;nHsNM8^-!{{#TtAJ(h{R5mwz7IktRJkB*cR2J->Wr7zz~EhjuxrJcuQg! ztX25094^zc$njj2Jv58-BrAj^o7`NQ~s&J!wU^jK&{x zTV`m)tz|{2$&P^|DLrvXXiOq|eW?aXro8GkDtUo41XgJ&QZEXT51Ve4QH&fDAhdVY zWtov8L&ZWviV>p%vVysx8066+)1&Dsyj()Jx}9}YkbSEV?r{4M9xy6`GorPQ$-LYMFYZLd|Y7e>H?zQDA{FHazaChFq zY1~v6(W69OS#ve2g*15ChPyldEwZPRC(@51g@Rm(Yy%_Tw)v%Z$Y#L4YgMnzf1PoF z&W0Nd1hhSFv=A_p2}Q@EgRm{zszGTrPw6{NSc%tl>HH$bBA#(yiSi**W%C;e%1h9d zuaDf4yJgvMcQW8Lut@C=qS!wro&3q$+7OC{3%gBFHT0ArEB7|v!dHpd)#_8wd1Z{G&@`Om%#wjiY$6nFXuSVlxj{(RjqcIr{?+o_XV@8qYz;<4ENlzcU=x z0iCxNprJFgR%7T46DQTx1iTIax<}s0;G(W+MP0*;x~}Eb8`*NwRf*}74MS6IXff!u zza1K^Et+oQ?lW-*yNje!uK(u{h_ zLUBz?uh#7nHPzyX1*#pv@UOy&UH8DR#iZAp*RorlTP63AI?bGB(MO>a+sKGPsRe}< ztmMl`8V1KnJ*ap$d14#7UbLWV8uMdSayPv5kySKVozSJovG+9@nyb3WS$37|jfO|~ zWYp>ru+9eQm)Y^FLcnFV4c!W|QaYDIb*QCnk&X?dnoib}RY*x%JhT!JJe1gR{4%dv zmV8!Ua$K3NIdhN`uT)=CgrzvH73Tu6@GszqTAtsgHcYaHzh1$qf|?G;#sdbXwp|<; z=)(>WTKdF==hG- zwakzPpluDTuQZocbkPJS2{8?4viJB~nb9&`Hx&8u&zQg4;{VDp&%aOn@28Ku-KTo| z-|lyv&XYgm|Nf5gf7MvO92@pLc_O31{uBnAbu`_~PJvukMX1e%2X3VSX=lMXm7Eph z*JidM-s3;TvPhNbwlTWD>2?=Nvdop7I={r{(=4juu*&dl>*hgu~ww_aNJ+sh`CL zk6-j`Slik=qh$iUd484)wL?#(PmlgeIyhiBUduk$n-I3#q_@D=q7Qbq4yoAdw{Ol~ z;-n4i{~&Gs&*3@z`#M;`zemdzeZPf&|6{YDf6+?uXmbJoo`&cn_jZh`gE#RN{QEo{ zXX0G~|L*7v&yMD{YzW8ydB$%~-~RgM`N^9Xrw7>u&&r1ed7VZEd7Y2ukmW$tpJQ2z zVu8HYU;KRX>iD!65epF)@uM=FMj}2|W(WFOnt?X`%k_NLbj`pA(ynh61~dHP;2Rd2 z;kPs!7gZSQK+Kb0v5*u{c?08gB&X2=_1UY0W1x7qVX&f1zUU<}3fA+~`=Pu&qcT}N z2T=zs+z)Uq9$+Kr^eHmo!a0af)MJZE7_%X=_sGw03TAEG**DU`gt|SDrH9+%%~8d| z?5InC<++HM3%|0ppgqqPMY`HbS4OTk%4CV`*o;?hDajliBau?IuU~i^4lpLFdR7w~ z^-=E|uJWrgTTmv*J}wQ%zC2VU=pREkL!qbROLnDlG&sPVQ4VOb_Rtl_$OWxvRxz#c zWJ0-Ffx{&G51MRQxoP48FJfTfR`KTIlD#)9j$cQ4-6Q%3*Ou&n1zgd>l2B2@kuJ1G ze&GA^%YL_^n@?$d?cHUmJ@A~}W8Sv)c!#-F5(2^q{E)@Mp;2&MGo2^^lPZ6=Q49;l 
zrzP3=+Jy)_1ko@ZbYLp_Tqh0dBwF<`ozI~Cq&IXu<;YVvn_lv-z4p_gyWjOnAa%RF z_V+`#^!3wTyZd=UnAU}?!E;vSF zvkTGr??`-8d#Y@te>4tsy6qj-L7#hlW*o<*nCc5#l+$#^k&xD}x|0pI#_P^h8E{X0b#X^qGFD zk?=XTOaz=SjA68}LNeKfVFZ5g)WrW%rR(q<7TAi^YpkC0gLY+0{XO|g)e=Sm^%DS$`u+a+)l2+*ee$30>G}I-XDD0z<;|IGVfmM5NAMdy zj&1(=n>{q|{kLz9N6+5A`t|i2CHuhidg`bS0VuN(2)o{OG+AFJ{lRy`y>8bdd%4u~ zh9mzx)q3ADHbG`_<&7lavcGEnJ=tq|$;W7kfk66$j@R`bdrwT-fmSxgxZHf0E}vX+ zP#=bqqy8{l?&?jyc|D|1-W|O^dVRX_qlc(Yg2(jN5fetg61j< zTxlX3Ed*$jwoaM9iZ4heO7O;4WqALuqn~gwJ6PtR+v#+C_dxlv-}SfPi2!U5|251V z8UsUHz5pxeCVOQ}fORHypzt`q*^<;S2joe9h}fv1!9k-K+XXpl_EtgTqgi~7 z#z?SoqKO*L+_%my;Wm?m;XFYB>{YP3Es7o{H(XK8wU7=!QEe2s!XEuM%lR*uN}v{- z*~~EzKr}KQ=4ZolUDsrNBJIsY^M;s0Igzw2i^9PxE0smKLda=vXu-%TfJ$!&mpNED zh!*wjak)KRal2rV!u36MU~ecsz9X$PO#_(5=<7*085F z?B$yEa?R42lxwt#7%4@O(mg5P;sQx>$W;+^z1*XYe59jlc=%X8M54#v&c|)K(Dt?N#UFjg$ zcf|ceyA!x&*P~WyP%#fa!ft?5d3h@j@M#pzCJDv=qrd@T7oB6=_+VmXNlnggt!J-} zPERfEZihyh@gmIp549fd53--RLtm??&anO>on3p06Ki|v=

Tg$Jzy)FuJ?$_$) z%2r8|P|2+A+@L%a${SVWb`RR<)wWiDd*D_-RW^ugy1GH_M}QHoN0h7W`;Ol(0gBh< z0?>o*UpxLD{9Zi4R-z4z{O~ezcV@<+~RTw6|uH{-z z*hve8eu~Ub$43$S70KoOMt8%bi+E7;NwjdpAl-N3u5zHqaiV}$HE%GO(Zxa#87r8? zOB5wZQc}wuAn<@9iNIyHxf5={Ahfz}emVwBWz>_j5(PiYTo+iEX~JWWt+1i0sgX}h z>uf6?O_kBxsSjzbT{@AiObsphxMC}`Fef!)i0hV@U9wrw%1gbk$rX#O8(kJT*aY{E0&hjIj!1@bk$myb-~4)dJc7QZX0lm7b0IGbCC)KRDblqo_TfzfxkPYx zOx6+Vd)UvHyn!kGT|RkTL_v2bf9E(tqkW{BXjuh>^E}Kz22klX!P0^g#CUMdQZ}%q>=ab-HvvF z%)R}^_DWQmY%Uq%u;L7m=C(}x=~%hmuE=ZY&5nUuXVgLv$zZwvX;r#is)3ws_s<9V(4aldGHl+ zubz?_KsJ(w(s9Mis=b~X#1(o?!4uHpa8-PtY0M@Mu2vr>2 zRXL0+6-LFiu;ySK4%>Gcrb&%)D#ETef|EK!SOmVQIWlzyw+P)l?q!U9CKNksltjwN z71Up99$^~m($K6!Qx*hC!HC*65Th?Z|3|BkC-BiP6_L9$}t$_O` z_Awvuy~YUs8Cv=Op!m-i06##$zpyQsJxB=X%J|RS$KO49T!{btwA1}F{`2qg*ZSsQ zldLFNTe!HgmbdH6c=5Q=vf6uX;Mp-sK+oE&r|rMulSZ@IY&-+5WzCUR&o-g;Ccd() z?|@J4J?-{7k1fpp)@c0CU;~g=J?mw-n5?3Y)`xh*x1e{M)iTB~5^$JYMUyZQGJ+Kp zh!#Ruaj{B0;e+)&PL{}-p`v>dq2eKvUo`N7!Pa&2Xq>uPk-(wR_%#VG1PvplsIs!H zEM~QQl$DdaLKc$0*@->}PexNnV`7SckbLU3tTBxj+<4U1Ip8IA5VMw9BJ4Dku~1gT zw*xLs5Bq9;;qafm<}`ZvEeY3&c&)Up_dqmHg~sp{1J>##QV1tmJnU%AO72t>gzZ`% zLW)C$LZFF|os;*xZym|}N%&BJyN+hF)b2vL6Q))daFvpxZ7B`0r&#fg=+L5?RYW>GD zO$$}Iz8yg!%z7t-oTBuw|2+`AZ{^_9)Uz!q#v2d=t5z?@VNq(S6M?Z|Ioj8jsipuD zGKQ%{O|+)O#cs)1fH;F54;(_5yN6)&7*aTB7-$v!tSPP_R#moO(b^5Ei3|(72-nY^ z|NMkv(2t)q3~}j8g3RB>8?w6v_I3@p(h^ld&o6>;JPRfP#h)Kfs>0Xb&}9yv-K>h? 
z=_zil_@3ORkHgh!kp{EB4k#l<`f?UuTo5H_v^WUanzFEp$Ntaq8BDEwb`Cef@w0d_ zjV>In)3+ZvK{BQU7fGGm7>^uEx>K_(MfTpM!aPinthxz0S~KlFkK>Pe3}2YqRF5X8 zkJv1NM!}|`HHF4B)_gNtM@x|cDe^GDAYTmH`WWAqEly=|y%TguY2kgR3-{lZclijp zc$UCUg&72J7nl>B&5$?(3#nqUZRjFuVLe_xoQZD{^UPicxQdW7AQNNQ9OFym7#2Rl zrf1Z^H9!q89wN{M3Ulx0Be-X1a!y=*G`aH6(BzwV{WCUrj8Gif?Edkax5sbJJj!?l z$3WqY>j6f&SYHvg1*6sODnn!pJiHQ9gZba^78bxMmKoaM+Q*>^EVktcEGkb&QYvqp z08l>{YYd5gfWf@Am2gc=RRV0-iieLBKgg+Bu%1JB6ECFY4{jYU56eBoQ#%*@R^12L zk7DextXz5cF%4MmNuT5r=_vZ}C=q$~$muyf&;k7Q3N2j_eL>x!S{t+G&ZSV0qhHUQ zy{;^GKx`KlzcVKF!<~xQ7SJt;9CnWJYC@iry}f^8K>>aM--lna z1te2NVP7*$Q$&T?c&n_(Onlov!j7IJ@k=n$0Woqo+8Fp%W|+ za*6?zLQuiV*}DXi8BR(ju$7M$P(G6_gI`_Ml(tHvH`bfmRuKx*LJWy2J6z~iu@b7Z zfX2f~hjYISCnIs*QXZdh%pB`xA>I}NKtE-}$^?~kQfAYFV#NYnIaZj}Voh#Je=T0| zOlGe#5mdfpIzA08guNi1ybQ$;WuLZ)k@f@`Bco}V1vZr#QXP-4o|v2L8Z(DbmCSH( zvqUa5JHV)puWVQV4fCIoa>_R8TzMZkGW|#IDh*68Yl}RiZrN!dz7@zR9VFF!fjDcj zt;evO9G-6dnksItJ_WSsY5eKT^YEWM7nE{`cv)3K6jO7-m@}7N;Iu^9N>u;acWm63TDV+-df^Z<2LefMH!sp4Imu%(t{P1d&GPJ3574&t&|=B1uc5& zuE)M>Xyi;>F|j4&i)TQ3lTVv4{2Wevb#OG>ddFer;hVm~O1NlT;pFGuRipRSUnJUe zr&(JKVUfjhpwE0eJ2ZjO&+-t=?z$3x(K7N+MxWkNmTJ23j13)RAchrjfkhv0RzBT=#AsaqBf&qI2?$_Gi)gDp=hPrj$pM zCn0+zNzgwts6BQhQv51<_D1p)f6_vSj7H+XqSa!?!ZjL=+C-!LMr01A97)30QB$L?){#1r{G)C0 z_~ZhsI|9TH`O7EUmf+?wq+yWoMeY5pQP+Buhtw!xbW1NR9>oG<}Jb7PH)YZ6OnK%QGEZ1ku8D z2-rB`w#=VmUZ;VHMsEg*@9$6#$p?}mH%c_=-^(z*jC&(#6P z3x|*W3I+`KMkY2VgUsxr-n94O$9~Pj$8KV6KE!NJ9xhL?rf*lALFvwkbfeb%odk{C zY7_yKw-)fy0j2?D=YYhmF;8-=6?&(FFfI5A>LKPBELchqX>n3AG|BVJHw8Y1Vq5_M z$OIR8R496hV}3>9Qp#$%2Zu%LxOs3>)fVO9bjshAwbnl5=1hy3GlrzMS+x1%9*gMR z*_$6EdTz;{Qt1!V+&q(!mM8+K%-yhcr7}lYoPOfCwa(=h4*IFNas?K9oG%^RWToAb zsA%Z{lNU|41mRs;Y3y{vlyL3+zSV8yGrN=}_tCi8cUV0w?OUhedad%f?QYJ~tF;Tl z)^xK%qw&mR|-Sbvf|mqx|oH*TzP80cy)aAx_6C92k{TOwk;1J)!_G6kNs{S67aoF zHmm7qZ$z(0^u}L|d;DB6HqP2SynmakI9{9D#OJ0qe|mr4Hh+3wY!9Fs+B*LEzHJ@< zTx<)VW^LVX_EN})*yv&DL>`(a@_y4;)_8tK4@+kBeiLeLU7y>-;@sYEX3dT2lYCg5 z)>M(d~Q#Hwwmd+_u7qi1I)Z{M79Q&*2^H8qu1Uc5Rw?S1ad 
zo3d;5TfwRaLOVYW7V+|S#5VaOOo~pU2!~tO(Awqi(wZ$t@H&BOc&%wGjv&?r;OfL2 zzdt&8bJjacn!EJ?>b0Tb@%xk42j{n=t6()+j1Q9atUG#)uAH;m5o=uN*`9oeUBd@W z6AE!O``2S}*xY$GOR+dKU~!no;?RJ_c>GXUtjFU#67WdF0e%}7+H`DNcU@@HvA)hS zsMpa9*6-b*UM~?}_)sCge*65%lb?FytMFo1e7Wku^a{aFuQb~H?D+lhDNICtGMR#< zs{HTh^lxvvlklS5eK|S{VffoepEf}o8a_E49Z#MdjZQb`!1?VV2IJWzScSoevs8C4 z)qVQ>^!10{c~k!0Wno~|gP)Jyj_}Lq`8#Y%fKxh5E%?v6Kq8tC)&heowsN=%rw`Z) zfh)AKeAv-i7F!4&wuQhjQZF8MK3?ea(R^|8`t@n=)zQf^x(R2^oz5_gTL3WpFbaodYGlYoqu}# z<~j7m&r^^`1(H%YYJ$ntdUW$p#|=Xpeaokd__-&4v5hky|q& zpB{8XKItR!Ngt6i3~nD3gIgVg+wB-^9&`*g`WS5LjKStX$6%w6!KTg_Ts`O*T>a`f!85&{7omr?x9cf=eZ3E`Ni%5h_wfqf!KK9~8k` z9l_f>At>=nmJfEJSf+Tv*DXuudfD8KX*IWbEnAV7hih#xuGQnR`TF$3@w?t~#r#VB z?ovJecXZYD4@YEkyc~7E|L%KYA`Tv3t`1hg^@xGNy*WG}j+qNa>}b$TQTejn=$KXL z!mf0)h(2x5bZHu{FuWfb0FXJq5JOt$Az6(jm|*)V6VS+IV@UzJ8?y@{8h}wb-WdreYJ%QVd?td;}^${=Mn1=D1H`Z8K6kU z){+hDCRc2CVk?#g(PAXls5GrtmN0Z|y3q8sqO4J6D~tqnEq+n9FT&=1VAO$J?N1l` zO*YM9%P|rAPPUa(gNSA_#W{8(m`s58tnLb#t&zGQ>$6ry!A@9wGj;|OJ>6QSsQV?~ z49!iUZ*$5M8JdzdNU0}=v6Z~y$Y>3un)jr8Js0@2q{H_#4{Au2?=(RB z*X1f6hm#G5ha)nAYdGD}sgIT7mnYgz=U;Wi8wt3=9ui%kMleXw#e}H}XW+%(lQ3E5 z={BlAF>g#(-o?e4tXKNjWbe@`A^SBnLc*5H;ow^d1-}n-G%2^dWuJ8Yoq~|j#z9AT zSTJ3oAXN*E5D#Qar7MJD*B3I z$z`w%UCL^9NEzD@e?hAVBiHR&t@U->PHi>d0Hov4s1?tKM<%*}C&dw_sVW^`F_9T8 z#ZF6(CA@FmS*G6993&!W2p2LVrKaIa*IW@txdbvK2MA|qin^Q#rL?^-TG_YJV~Kup zU3fqy@Akf7Rc7NRG1SiSQp+FxjQ9qqaMrwXPzYyc@|Utb4fH@%81CD?_4kGKHVYr^ z!(X04ns%9>8PrQT$ZlEJzUgOUTkH>jgs!3yuNWi1N*hBz9bjYjK(1c*AU2;L45UE4 zvmGvJc-9Yr(w7R8pCWWR`ckS}(=19&?@)O_)btM49kt{9CDM zB{? 
z9^Kk_*4fO&IjFPIFtISo>0UTa#04~4_r^2MlPgKAfBtaIJ12F>-U_Bu*uf;Z5O;v< z;FgnqF!LW+hZA`NC^&pFTu#(UY6;*Q$y5VASuBMQP;F!nOK76;zOenkr==s?$B<7# z-E143Xwsbs1<4@AHlNcoyd}^wus;CiKf(VmFNgf>Ub2#u!K3{p(o%2#!5tc zsX$wrFPm|NMS;2Lt#rE0v$x2~AtTt+Y?kb@T$4_m_{(5@=|>4l;G7hU@Mdr<-e(vN z*4?%o(wn=0F|;J(mOk=!X58-1z!e#f6t8|6#@hiAbD{uB>)s)AWe$nLtO?P~b3qiV z5}0m-q@oNUF)Tv@cao37$tYW)lJ*RhuZ5g~C7CvB_bW@}li$`K>>*oRbBTL35ihni zfhF$Igg&M?O-B?%6o!We*db{`-`Roz3mq_N@|Mv^3%dCk@zCnO?_?qo+r9s$DE4^g z?O`HCr9gsfvLYmDJJ8!+$M1ail&CFy?LI}?3ttWokr2a|$DJ<~u&}Lu{V9}0xD4J- zr(=o$8uFJNTw-GL$H1N(1#x^d9$#nHFj{K-j$PWRP(ZEENnk4 zzieQHKX>MKm8)O6{ji|idi*`z8&1ul2N*{jmRGcNfI@XW5@$(PQQMUc==)G%z^_cY1IdoI4 zG==PoVXd_>7aF?7&cpR(46N_7S+Has=gWAtI1g7B;mWhR-~aVH*X;Vm5f#&%lRC!K z<2TSJ3>=R@-@z?m9WN|Y8GrBkW{_{DtTuSOc>f1#G5sR9h}lsS(FdtR_O6%bOB*XkftV_ zZ~#QpZDzUvfJB%4Rk)l5d{@O+healr`XVWV_))^)AcV(e;5aALBitz5 zwcgfSDwW%}N8DW5Z5396dSQj1z`u}{uf1P&p)%fCGhoGQ>r>0!Ut{2<##>^L7y|EpyxMhu%k6o=RZ*c%hFTF+dP1jza$=RhhwFYo^ z5|QlI*K(1GzKR8o4kL*Ki%BT5SVZe-tE8!d-T%IHpmeC|;FtypazV;jPLbDS$Pi3x z8gXzU3eAel25}le^072=5FJ*GW^v(foyTo=d^!osplj=r%T&ajxJCZt#QKQ7>upKKG@~E>eO7xmJ z1A24JA7J~&kQY{Z$6vAD1feC3%wb<&jxXzU2Ba0!A-^0%jhd$4zIp@a7$&xipm%nK zCY?ndA#kDRI6)N)Mk=pV?brf2U6Y={tcyf&O&V4xU>Y^mOgP$2K&odPs&&Bz&&Pf_ zZlLNBsIz6Sa!Q{)fNQM&`6~Ri6aEDI6h9Yrs>peyNz9EU{h?76?J2`p2BSvRw~r}p zple5ba1@R^jn(YA`i&KS>YnI`$=y02+V&@@Fr{UAs_nv#(zyowodoiRD118EyI^vv zlfIv933NIQ`Xe!xSUvaTSph?t9w*ysGSR8{g+3hi-JT2%6|`hKQk!ma2y=KjSXY|Htj&XVUn4pA= zt-REqY(k73XGA*AzrJ|$`X%gkIq zFN;{$PWY6!-bmn7fT$KS|1IT?P6Guxns-dQQ!P~nr1TSsl!F!`rKa)tlg=)puM;qbnWM zQ%tCtHC^2|jTy~g6>B$O#3 z1)wqE&f5~`<66+?J!dF_&H)rb4QoLWfrn1uuCUa1VFI9CCqTjjBM&7nwUGx2`5M>6 z_p~xziNyM~)-$0k(-Hqt-eB2@UFa{B3veDZ`o`FStC!#~b{l%Lz+a0v?%Nt*1`at+ zAsDh1y(6R-dm&`KgA}9$1VSJL+nk&T&|}9qr$84$oIuAM z!*_38W955^2<^Ncngjn_P-v-UiikYGY;^i^k_~VX+|YfPj0t1$%r{H0E$?N_Jx9{Z zU>Ra6Yww+}1A`3qgu>vr?j~u!z7gRrBg_?C?s}1tXtdfNRJ1CzIfs0C9_DsfC)o#$ zPMox18(o90yGsN;0dEhyTuGkEijKnw8<=dCL1KS9kMHhj5qf7a^lyEFcSx3t@s=3Th` 
zu~YL>Xr(bP&5dP5fe1XcjUbdAW3h=ZM!{IIcR-?J@MNC)$-VC{^S3s{X1|`Gs$}r| zhKeD%vh5Uot50OR>D!+t>1O&r9T`hVdCFyWD4Y5G5%xmfcVN51^Oim$XW~C78cLpV zlS*;m36Ih~SyQ;^Jog{$1~)Ks9np9%{@i=u3&sa8>61BIMe7e38vdG@UWnM?z5-F* zFmjJQcX)b?mEigK_#}Hi5uVSo=ab%9`h0#mWY03_V@KMzo0+%L>|6ihG;M2e+Do5@ z$HMbj_I#Z7r{|uv>D%glMI(o2ee1atzEONU9;DAig3g}%!t;5Cx53GoFqV@t$n;s! zH*c=rw|>^Q-moV;3+)Q{?1}xkIB(v!^&*;;=JaCl>J)1a2!hyS-yNLeBR&sLvghML z_I%RIo==DTnb2NZ_n6=|1<4vx^euQd(8e1>5PcpDvu8S~^tpE`Jokm?L1VnVyYK1s z?fC41knR~iUmOp~^Ep1B4EyQx+37KPzMx1sV~V)?%HK9n#do8-LvKxe0#@y@d$hOvC!6;@GQO`3eN*!eserr zptq{Nz3a^bi5zmA2@S`59@!&%=4%$X!>RE8RFF5a=M(lvqrOdjN#D}i!sF0AN!Qx3 zd!9a@y8ZO|+&#_Krm!F{^6ON$Zy-+Jm>N9fqUUD)Imw=nh4(|@eP4JM>o({ci1Au| zI}!Pn(AGfFw=?1WNgKR%jn{WXDo;Rxvc!w1edRLMFX6 zO~3BHhse+gyIo3yFx|--i8m{LG4;nNSg`A{H|hOYxE`CoJe*#!&D0{Y8@#exB<3{D zh?Ak4{)-3^9*^DLNk*m@!ZZvoGBQ0$mvq1HrXoHslLMGO%aQG`*Aa$Fr8j&BfU_Te zKEJr+_~kcereuJdFbP&+K!a5JN2ju(V!9MlXQ)HMnFfB!0LTJ8?PY%wxwYRHzg}FN zh<~2;gu*G*CSy10;R=;|wXE~)1^LDPHj20IUR7`1y(-_jyE!m)6)p_jW=;&^HJ%@ zpabI(t?^GfmmTT4!l1VC0@E@~{TWy^W})vbeca=2Hn@{dH;EDYi5VLUZ#_5A8WJS9 zs}sqhyn#cre|_-_23R22&;{qd zCYRxm$g{c)ysfmSu|1CW5wc)>EkbL_h$`+==+98GV~wY&06_>&-Yub-7@+Af=yL-6fbiJAv9TZK^~hk{{sVxEU& zwjvnzSF>ofwPplhJYi`$V#0nTwILD8i0YM1+4npQcQTm=vt%-XsQc_X9qt&`+n=wQ z;(V-VCtUjXq(&7$S;f$sl8F$~tQ?w1-qHHbgXAt@6waLka?rg!un*0n@5N7~MgGZt zOyr@_acAryl+PV|G&RWqhEhLZ?=W}6yL%d+9^`2oT-wMh8hD+4Vf>xR()1lhrne!? zi8whldu&WrYXWPmn2Qba-a13b#g8}&C$4i%nD3k$C+izT z@4>hK{q5bib3zUWM?xt`<|`|MX@NLTO3CF=)1tH&GDzBy2lvlGk8Q!g* zq_oEyAY=@8W>ww4?v2U+W$jGB8L)X<-x(@Y<%tTiV8$buf*SQ*g09iHjFfIeL0p4pM1)|3P7V}El`L_pa2>z%jl$3R&L&rRL+gCo*yqg zObssWW+`zsgc4%&aG9H8Cktq2W7JkDnWj=+7bFCcy<$R&{j%3xxlFUv>eX!FjqlwW zF9<4)*Wj)uS?k4>P8!WG_3kQ6+>%h#m;rP@IR|*0b_%N))8#$NQohv#t+$}Xi7@5? 
zlkscoT1(jrDP(xhyE;yQ5YuFklF}IS)T@?BV^=Z&Qe?h5O9r`A%q>Q zeZz_g+QKkMeA~EP2V^@Uf8~0S&k^;F@e=w36r1+yei6(TMi7G(CE3BCP);aFXii-r z_=OB#K;8sjPC9o^hHJt>B#q>!{KiN_HqZt$#x#cIFF89NkYE0K6)~+TE^i#80hSg# zdmNO7<~uKbM+YOeR+L$3e<*oC4VdsW_i9ARaO%&aYOzp{ z{eLm?PHt>sYNKvWt5%uLC{NqP6h^YnWyy?s$c2z8o{2JXR%aG>Js9t#arAJeTk+ahyEjwXxnc9nsbn1xo>eVl=|MGmgoshwi zF+7ixus<2X$spX)Kg_DLm%H`7H93IsD%Go!dGOq=Ad_sG0-)yDc110WpYOS?r8Gs8 z6hEFwbz9wligVNuzpODJ6Y+E-7fc?JVh19{4tAngPIQCMTUYRv=Ey|r(OfO3W(AsS zce^67q^j2_oa?Dx(}rt?KkS{jbwwGMq{RoW&>d|}@LsjQyI{3|-8bIdbnZ39FfZOU zUPC&Qcys4xIZontC^u{|H2HGAA?LskL9#XCnMa6==5U!tI4qI z)j}05Z_x568uNTh@O2$+ZWl|R&`9VnZwZyuYtJChRe^EtNq@GdBUJCvPEeUP!H*L{ zM}pPu^jHe|4}>84xtt_6rnAZ&#-e+F4GEPJTzb8JUKzvN`c5b)*TR@&b|IBme##-$ z5&ae4d12@peSho1h|52ESyXtXY~_O`)DlA5y2npkvkA{b@EcQmLxMojKC;j&e`3dl{j)Fj)3`>kO+Nb1(M3lbACy7#>DD5^~b-L zgO|_a=q4e1uKyf`sOOx@pOgKC_JzIR?eV`S$Os%7%7N~$4*%vr2o~1YY>C`#mMGrE z9Mm(yR>W`+nF4_`gqOMH*x7n_A%BMUIu#T0_c&Ve=O9Y)yO}bV;k52H`AEAN z&w|y~B@~{d?cd>F9r~R}Kd?!YDRE~}7_HM%Z-ON`^}h9Q;fj?%{{cSUd8^J07%9vd z>gJ|ovmi2-C3WAjdM<`E!hd?M?I;QjPVhVY1HYB!pqP8sqnAFFrOr<9+93W?6zXHQ zNC5|a$qQv~&D*u_FRMlgH%M@lJ-1gk%99?w{QdAQlH%TS{+?I7D_cw}LF#R8rhaOW6M)JOl{-L*U=f2n7?mRnWF&<_emQkrF$I z1A;T>T{Pc>kX+N78^3I(TL?ytOv0E^7Q%T$y7;YV`gebpF!xi47e67z0wR*2_p}2n zZsI?z=tDG(9m;!vg^VBztF!iR{A9M6tbI5y#v{BKkiL!SSu-BN1s4Z-7@R*ar6iuH z9P=K34al$AjQomM@aKIHLLLUH3vfU$;h>1*FpG?X2R3Ic_{|<192m2;Pc{O=otBgl z!U6Y#pD{%|RCr4-+{Sp|;LMhc&|e3Db|G5S>mPsq^(yZO$%=n^k8?M$455 z>I#*}8-M+Qq9=ciB17>ddq$Q`PyGR0O+^*-0&c5D)J+mtpa`Of|4U;YiTTQ=n;@K1 z=gS~Yc$au-!@Ko@<$t6QfBN;RgbJEnl_%0&`^jd#%n*;rrnE(m^x|{?XBY5HCN6#M z2$6<-s^Yj^1F?2;eyACS2qk5I5SaOL>3ogSsgo3dxf!L?lrKPPOu)N1W({G}Ru1@~ z`TRfN$AvA^6x@xwVzTFb-hsIIt`K#+l_P37@rD=L8-dAUIeq5w&}4mkB;>?g~7 zd-db%mm>bL)f;dtj;Z}n_T`##S3I5xH==SS?xS@$2i7~5Uz?KGBC7)_rxJ9-Lf7WY zD=Z4Ux4tk%Nm@R`rDAv#865faww>^z4m(-(T-E&aI~q>)Du4FP%}wwyGRY2QI<29U z_NLOx5Q*+J*$o58+w7z)GB$ANJo7a5&0(p^17LFr2&~bK0EOeIBN~30UQU5b>BSHk 
z;iC4AGY-k5z-MtpG*@M8upJvY1d0p+x_Ly!aaOLHYms!IFzn4(~jiV?%Pl!9%SW(wtz9*1=mdadA)9L3>5BHYJbFUYNxm(I3sO3j-hT zWd=U!ngZ<#2waGpYjZ)+PIzGyzfb z4#<|NW|O{$zD~WkAZOBVBLlR_7XBfgI_M>mC}!S9b3#suvG6{CH6Ak$1n5Py^%Dm) zi+|t53A+Ax#(Q!QhHnC}J8%6iclHGjjpEr?Hdm74FZPt1;m!XrJN0Ub{N&dYNJ{~1 zvYI8zF!SV_)p)@vbtQ}vbVidqnP5-m=;885sAJUu%GPg@I0Ew8VFd0kH+P`;@U2=X zMV%qpqpnS{i}J_v5^5}^12%IDr_KIMe$Ur_DCHuk%!IENDc(%_6ZU^c2A4@&f#H?nHZT5qzZrClSNiy6=ny}LP+qhxC%zCn8p$nO(V+>unpZ3rZjhr*UV1ldw3xnw=CK-c%OxGrOvM-!?Jc= zWaKmo2Z(uMd~Y1laP~dh_Y6v0)V^k@=CTv9n939=e`0W}As(64z7e;?+ z;YXX02brXJbI1O+4^1O;&w&uc6v6)7v|H+i=8(9s@jB-#T-Y?3%Wk%)Q=?qo{fG*U zC@wA|t_MA^a|D6<4Tk}!u!NM=lP;K9*3rF`W~}CzaX~|hv6)!%sMMM$m%j+5;CCBm z4au-4p%{j&`8ryy2(AtsR;!w{ss>&el1d+a&?8IH7qvlC26Dm;UO9_j`qt9ulWoXO zaaTVXSFhe)U1m`?2^HN%A22H-yCQGK;11!1aRkQ`b_T(~-8HL3-untgQ{B)ToI%Vg zTb(GkgD3Z%p_&DVOTz5ct~vX+2e zZPQz0f|C@5N-Y9RLIB4k(oEV_^i4wgWll)ZJvo3({Wbn6Mx+FWfCYh4zzqTmP_Kxz zR_NR$SRJNZ8f6d8wGQmMlEErxMUEOAoWN|aQ1g)Xf1k5g7DD9tJseGa%qh!LJ zZhz!c*=;|W-JPbhD5aOlo)bMG-w!IH7wEXWSp&t8{vGvM1^k_DmW39fz5q@*jb&EG zB1#@nB5%({t!5FDCp}uoylrHnFYZH%qj)_Vr@=xB3ooc{y zgqAhV(!r!%xolwAEu!i$10+|t>rZbEmK%UQyA_G$o$Mk|NZkbMIAOB{lhlr+E0}EF z6((QYYBmZy?`IrWQJZ^|iWUVQZlNsOw180}f@tM0OJ4lx zy?ZJLAM{Qj)T_y(umE1*PXdv;`qOLS^@svGd|X6PUEOYolCPFs5Je!0h^oa?w&6T3 zwnQhR-EdarQdPhl*)uCJmjaSfRyU34$a@4@_d9wsehzL&%UQb3aIWPTag+uMDcqun z(^7B)-vG55%3dq(QCcAo-cA+3hYjPdby~w$aRQ1A& zB10pmH%d-on_k-xC!gIBs zZZq7bu>OVc`Q*EHw{R!2i|N|4n3n}5s*@gB=tb<69ou! 
zh?5nI#!s*ZK%q?QFS%t$y|pnFF+LG6GW&6z$6~9KxQ5Rn|H*kR3Ija232qP4@$s;) z%G|$LYVF^w{dh5fa9LUGHVdX;#MZ$igt)m>=U_T50^(#d-l}$nInl4Jm_GJP7=Pvs zurnf3U63~ogC)Jb;KX6H@*V0Q4_Z~ndkEru^tzwPILUf}I@FmS>!5dNT1||V)&&p{ ztSe01+nFaQA0NO!B^Hx!K~Z#Hc?g3$OUTH*G&GX^Iui$GvDEc(G=wH?EJ zcXVWmsx>H7P=2K`Ktpee;X}jhq)`a@c^}Mg{VX!*A2vxM@(UI(G%LT_t`K(sG*Op> z0(U~QrkRCOdzsy6WzIAnILmO0NqQ40yX_xK;bdb9ZgFm0@~~O^7WpR*KKdi8f9zZc z6W~3t=wvXT!ZoUCvgGflIR3a(JR!wd(^PC##_NKw_1VM2dd zfBUp1y39%4_h*2x!@h%k9O3Vxm*R_AvdIF5;rwQI7(7UHod#UQo_>?BFCPMcLFoiB z)G0K%ig5WFEV2+5rZ4eAmI}o#i=Zfm#;X8`k$B0Z_+cTijtG)|W*(IIJXEje73Ii8GA|>EDeoyH@DmB;=`g;;aD@6u58}@d_fb^xVOz zbI+f9=YqJ*2#?Vnj$!Jp%}zDR!CUK^fEt`Q;=RU3c_@ z8mIEQPH%y|Ha{ub%-IeC8O>C})HxA}uA0XNZGmG?B(L@yJGCa&{hbevW)!kaIfK zn}ZsdxFr}C2owY_G~ONOw6TqTzf~>C_ly*c=^3RZCXt071NS~gbe{ZsU{dy-rme{Z zkIj8XZBjl~Y-t)A@1Yd`#~lB?MmuO%a8ims`0H?sSy4sd+*<~B>=JSo4m)#PBK2wT z8Xo*w&|lmyqL6Aa05)QqK0pBM3KXQL#TF#k>-CWe;h_~J)<|&B_RL#;@M4#ocF1fk zm;NlV%vCUZH$i^|!nly_Mav1iv}ILiM1-Asvv=f)1GtM&leHxO+-%E`;F!Ju?1{V( zqO!B|ix4#^mIwV#@jOM}45s^OgyX?E@=6C&vkG(u$EmC;JyBW$JaUHpnY0;18yYPM z+LhLZq0T~rHGj^$JB6;=l4UbR=aGhE8SXfiA4!`H$02r8Iu1TiRDLy^l-^z@g*M!5 zR%o)xs%dF7oZ*hc;q%gDD7lVG<&3F#jjYkk^as1o3~#r=#IP#rd1*M7;f`a$X=yeN zv774ULbWB0rlfbg`;@R|8_Wo;p`MUt0~zi(5S))jgAh7tmt%Y_Q@I_KqUj&SV#Q-X z?(c$G3DtY#cm6fg=H=uw{J_+C@K63jGL0UJvLSxJt#ratW{eEXt5S^M50xP)Am&dr z`Ak4ED~lZ)h^%287zfTxv`kR{NG8P43g1O<4z9gr3|AILRLgv0*)G2OBWg8^7hP$8 zQvQZscS4Wo>F?r{cF7R?)IjO3-=~y@y?|hHbx9+-mS;>&$Y(t+x*~Tu=@IJ8ss5C0 zy>Hac`Dx>JqD^wSXw=9F3}$fVV2XM?QED~P9BbH6zi~tFn>W&H)X3Xce`q%QXSpVZ z?}Fd(K7c7NNocFGD@X$ zol(KdbsuxvplJ>xHtB-Hxrh;hqr@9Jb-Q5 zaMoWM{@p6sQrjNHpu?*-5ZR0Qv=Jzs;&3q@hycDgPTn!r&$Q1P=qRFB1c&2{egB;$ zHeeS@(;ixeVotKOL&5;;(o~7T7sm3BsqBXs?&ncd3 ziI$mcBbzBbs!NZ}g=TrWDejCx+i{Iwmt@0CHcPTIhg&fb7~vm&cDGA}2JSB0wUPyB zKn2x0PFw})KowFncg;sxiVPB-T##PCnwaM>I}vBuK~KOa@1$^1C9AC(Nsiw^sx!{^ z(jB0yv)Zz98AD@2>t&}blSwHYIlnGD%i!gz0n&Qh`A14>LT&L`XL$9zx*A<=M%aFI zU(AU5)gw|>m90s2vC^U}wWx%UEzR;|lq#)D^CMZ7En4c^ZKk&AK31Ib!WLFF+d9-z 
zwTER@_q=~8sBZA(QeD4Hb^XVox`F*AGL)e%i~Q_nc2J(#L9?0V0eVE?Oc2b#&L$>2 zY|yvi7fB1pUo0&Q%d{}mpanxD2JxOSI{2aD&wBgQuRp$g{o|kBPOe`3;mxbJS1+#K z3Qmp+Ut@Ile0k->i{PeooA~7w^~&VcPLHjZS2pW`%PHQ4AUs)h_Xh5Pva1iE6uuto&UIq)xE zzh!gbO=A`_dS4^XPlItprroynqD%%Xzbon2J!BWPEa)h%WAf@QSkk}k>s~HCFpn&2 zA=TlkXP)R(f_cQ@U007~YuBIGWxkX7%){F;nXLGTK{tx;`i-_ctnP1 z6g)4%so^&Cy*o@%kbvVumR^?Y5LTI0bX%kWolwyj_>m`GAGhiEz|f;-M$S(-IhnCT zlCfHqdv_JBEa-bNNKJb8Lme>Uvty)`rk2rt<_SZEpLuX|e1IU+c<5-x(5$-UZuD*A+sujkafrqK;;8nKF3@C6a``R|iSm`kAB)D1zS8*9 z)41s8R7K*N*CW$3NR|e|>>_vz|1I*qn)V7-KuC-rj;A)SM1Z+-N{jw8LdemRAf%Ja zHIc&zFRcpZq)f@d|Ikctshh@w@Tp`GVEU6y?9Y+=fdLt>sDXe8clnkH2Kgh;hLB_j zy?5PC<7=~zab=btQfLlK@VO^P@67G${>to#eMB2qUcFy_1Q!dv-U$U+@qZ%jMHvW6*oRWN@x*%vdG zr>KA=i?uKsC(``VzlTv{`g7;mGjDwxiz#=yEb-sEZ|3uTk%H`|DO6FfO1^HdNin?g zNJ?MR%0hGp=4zMfxUy*X)T1tSNE0sO(KK%&D=XaBc*J-1j0Zx!pd`Q50d(dx@<;uW4* zNaP+%c@Lgl$|CK~qUFp>tZOz5hxd6*$;xKCiW(4J{(@AKWGbvZuo|4p^w@cQd3n?) zl4<2c^FHp%UQu4~9!_H=;e_GM7QaJF8qNjsw>q-OrYOGZZ*Jy+WDX3gu+D(r7k z1#5T@qYTf*&(Ab{Z8wYN2^nOwpu&(;5y4&%sPNw~53vXT8`q3hubUN!NE_^F zHCR7@UE1n0nmvIt)!c`a;DLW*<@cq^GfGQ~txoB}Q6X--NBJj+3|8N3o^y zCfTMX+3s0x%NPlpUOG-E>j&TXjIOJ-=@iB8(EngkR}TVFAsDlsurerel4^us7G#z# zAxqjYq&RrxXDPvoXt>SGBb-3?5JlmM5(G}GsK7E)U?nPIGq#@eT)&RMt!wh z0^cn&y16k6s3-(I;n7ZJcQrv{vWpnvH!>@5GXjfTUpkj#2T!M~DD>#Yxg=79>98wX zvpe@g0IVrC6j@J8e_BM=t1glesV-0u{`_u{to;G`;f*eET83t;8@8%Q=2LHtD-4c|ta^^HZK zQReDi;c^-Q%-NlEUzUSf;l#jb)WGT_f2tEt#eU&sPX8*L3-THyC<%MzB--GaZx5l& z3JpYSALHoduWe@Ew$g8Dv^Ap?ZY|em`B6W`R3V3THD;vIRMY0H%%`px8ztK}N}ppQs}TfS4_uyV5%{zG zv)wDS+34eb;Q=Qu-*w*)@OU)X51B;(I-eWq1- zS5o72swY`HKa>7EH%?O2V5&8{+i14iPxqrt^q|LQnmp-G^|@U$keJI#);vl59<2Vw zIq2s4wj;hB^!n%cwr=l=QFwx9FwYzs#bXD|C_7uU6f#Tvu!|lfX+%4+;TowjyIA(& zQKXp`a*`mf%i(=OSqOgK6EY+u1qK`U{l-HV919}VUfA-o!LnGk*Xx(dGMl=$TG0S_ z{zJ0%z(*0H^kT?5c;nxtp5j0WpnyrBG##^44u`H<561T+-W}WcJ-qmi4@FvQ+q@Ry z7OaMY-0ZVspBC3?Xrkr1#|#T~0J@Lv-i=-mLx{ zSpBdOtJlfC*_6Yp^Q1Oq?5doVZBAZjEP}i3Bg1_4Ussogc?i48T zK80+qDw=3+ybB<$VWX2jCJ;okRp@{4!wKE`Etc#M@8DN+>r@VLApxs|4R;HCm@!%c 
zH48ftC*XaAusHi;luzoKV+qG@2!>`(<*Z#MHqg6RBWjh&sf~^lf3)JMM=nYei0-5fP?CW|F*SS*_e3fenSh zN8c*!X*Ig;qK8jmJ55_4<8Xgq58y#<6(LnfEJ~*zbrrfrA7oc4VYfUu)R4f{wnlnj z(XmFhKTZnsGlF003UE0P>f)t}Oqk0tvhnvRI5oRnxSR93MbO{EFCvyv$ZCBRNBK>467{+4tu(S`)fP@2dXos7H{b+`GtTU8tSuNyKu+P=g3;ZT-kFrFo@aC%1~^33Dyg{Da7A5?t>Hw7^kES28xOa zICA#VyNGyks43jB#{EXF*rW>EUd^ICQ&0+5tRnJ76BoX4%zn@9-I%s!1v~b zsGBV;3&$L~LsY-k!};mQ5*$Gy&2z)nRBy3Igpmd_V8LUzceIKQHeAR zYRDO?A?*r?LO>^i#G|Yg%1MR4`{Tk_EvTqPMWt=ub;NJB?ettl*YYrW?m+GwpmmK? zMgn=Wt^Uy>HP<$&QnJ(PSFSM78v0NwOtbnxZf3d5t5#tu^#Rq{VA~(`@@uXEpqy11 zgvuQKeP{IC6Lx}`hTiO*S+kxmR9uK0qgD~B`DVB}qnjJJj%(T1!E%0mKzhyVdq@dMZ+551`k^nvrXTS*Th_>K#92R@x1|%RNTe0Etd-?HZb{@SGm?~a5t^uk1*e0wbtyzpAlE^3Q?HO>``0Ek)`QrKa+{Pq ziLA;Tl@Bpd$cvk?rr)Y#@0UGpF_{SqvbyqwIj#umAoImJlsPS##?kk1cKWT(4M)|l zoEv599=7?Tdnly=oyHH1gI}+HIH(R*m2!=NglotHWJt-(4JADyMCpYFJYO1Vuh?YB zhb4m|#bv|ZX`stEG%CB+c)I1NZn_Q@c55h7s@(w|J#a^xt$XDz zD6+>Y31V0Z;#P0~`xwk0=(IqMEDKMAnSp&`>V+Xer~6SBdP?^5oiT8q0YT8zK0qsn z14Ylcn-+WHl!7NT?_W`7`#6h8Lo~`7Yo-|CL0~Fqs%qH-+)nutA#CFTJ{R)}$MxW8WE3aen?H7y;QZ>~%gffqeH_C(L#>a!(fp=+O>ib>;-PQbzd zIqhC*(=Ocb`TS^C;Skk7*(dcDH@450FYENQIc--MciWMDRW~1L73g4wEsFJ1Fm$Q# zwEOaqua48=r@b!^fz>=62ny6a>2g zUdm+79c>!=eq*JH!es$wM` z9cVSgRwGbY?P`3g6udqkCv@TCrNHaEajo%zRzOlPuK%+X0ja32egD*|+SV%3$53f1=KR96 zzp6AJQKgyRC5mK}=>Z=@3HX?bz(-I9eiDVC{4V4dJr9qsL{%6>2EDx=OVo*=ZJ?TdW{gU=+{~FSNHx20fuiDS{$Kgj;aeiqk%tuyKzW=JE{1T}6%hyC^d(N=p zOuMs=^8Hu+*;!tJPCyR(%c1%wJt z0LGR;O_PFrk&_=9tJ-8lWHiA7s1o*q1puUng|tawYszgskBEf|rAr$u$RSD5P|>Ks z(sD+LwsZf|u;YKWxDU$2 znWb{A;s14JiznX$-tWl&4?Y_{{(eUue(?G5;`h7q;nG-Y*r~d!qEHmC{8?}l%#6k6 zE|>?&RuGa@cE3R}?gG$#ebD!!Ne60`v(R9_=&xGef=3h>WqSRFM%j=)qA9@Eix+-^ku*jjYV-_BgT} zkJ}^M0&~-YEU>z&Vnx=c2R!8?<>KzlTYm6jcfR)Si;RZ;&!J&OFN=LW`17kb zZ+;SF@vf09f*7Nd#s;!P^1@5VFCUW?kSzd04TO=0cgro<64)L$!Rz!!f>k}p?tV-6 zh9tuB-`EBG(ZC0(R>v%hSYo;NiZa5i828coorh6pjHy5KHnDHKHf9(u!bsNM>>Z8+ zu}ft>FiyCn48&!HXso;z6r1`B??Vu6))4U;v2wFnVqhFd5xEKH1j`WdkRaN)Uj(xS z1$<8$AV7s)VHYq8gU|`uK0>CLrIA2m*LWKN42%!!!9)hj02{@5n1$Y&9B@cG1gk=; 
z0EHmSpCuwN!-64@dGHJjGiH-P!Ms52w1b$|Ac-c7=!AsrX&%jT8ck{FEr|UA&`O6H zv59MQZXVXv=P3>5G}K%QOxO z|DL@OC|eAcl1BFvUuFxuP3Z1YQ1gRTUs&fXIvN3qoFN<5I-!BH@wH3;ERUY_gGTnI z1!%eu6b$WhVb%c=(UMa95SE({+Rj+PwjBD|VW-TF79)Xs4{2 zh4?ALYbr(hF(ZC>bfI%%{M#-+&Qf#*OG*|jP${cDE|wRE%gRVfh~t?>%fvBgTqo$J zlkaT$@NgFq_T?B`{5PFO{EI$50DExC@3*-)?!j`NRwmCvParp1e%=ux*@VS+Tl$V+ zF^}0@d3Lwwq&9%pQkO%3sGKYQ3zZO!_5dGRJwCKjnLp$6M`7z zkrueDd&1_60jcKsI9S<7Iuugq`qJr*9sE~t%cW+tBS99&rA6eL)h1yPnxZLLa?ggR zJtD103*bSI6@b{pxzNNXj*+I_7Q)h&m+s&omyZBU+GJWFr(I8)RN!DWEGXD0?L1%* zu-ep4CNl!u{Jgj6jRRDc%*Oo}2bKGK8fyu7L_iNT)~ZDWusZXTKNAzF$MQjUo} zIi{_Ry0w3^_T$9_*vo`0TYn9~)G(gcNa$@o)smCp+7Z-C4i;bf_rL+Ap|lE%4bmRxvd9Vx8VA^#tgN<)Sokq?rJe=R9Bw*9uUKuam z{POC>%l|h1{Nk^#P*9#lMEIGd+16-`Pu`K|h{*ILz$A1W9Fe5I`9>BL3MF>1SkQw6 zkvmpoE+I8gKJ1x#ScULt#fuv0WL;mL2u#_OWw762GK!(YWiwGb3qU|pCml;G%jayd zS-!)jjb>CJhqG&USVFv=yTjnXgvkRGfS|-gO^!>a6an5bvrK&n9L1uwIlfefX;m#X zmiJwX0sL0mM_JX9c&v-8y6$u;1K-zCq&)gBkVjQzIXOvTk#$s@sR?RCz+b+EtgMmN z_)@n9`jII1ExWTPc$JxpqZRk9TzScY$RMjj^SK!8`}Irbpm#C{7n%u;e2RF4KM8af zOd*7(*Q!{yO2mTj^63yujWzp2EEByR39$sah}cGW&Af^~9d<=e=KbMVfZ7Dp0Vhlxl)xd=!TT=P;Z5>(}z&zOI8W^v)CSk-Er)V?wrvFo~+9h43m zM5b)W@E2>qxF;K7bPv)Dk(Q#Rk7`W?%zH1U=i|9C-69ksk*#gT*0e#Lg{)CC1w%TD?XZA&a!SFLVhgfs$#%hBZ06N@V8S8~t-PSo}0Fq5|7) zz_=_Ams7}-DttbQxdP9MMSDyTC&7}O9ohQji_?KaAh(n$0n?);uY+cxJF-DwcfgcH z`WkK(wfC_nbU*d3VAU-i!%=p%@w(m}RJCB%-DRH^dACYrcqF>hqqRGAJgS-DQUO3_$t+u5SRNIL0j4yRW5sq)DWR$x%`&&J_6S(hA-|YyRLN}g%{)cD zbz5tX8jiNnM{XXF+kT$XjG{LG=#Aj>v;uu1+mFOjR%M8(HtoobGUbsJ^yNwsxhv&c zdo}9_t}AxaKB$%)We%wYL8%pz8yN~FA@r&(gRIH?NCR-%xD|vpX+kh*cP1`8k zps#IB9aA{tqdA;apTh+7(6wJX051$vy2z@TW36Ujv@M#+AxW#j8f%i%AofAcfv_$$ zU6b>mt))XN&Dph8!Y`+z0tYTvEtKnz?Xio!QSn?#@@NQbMsD5mihL}R>wbEo%Qwk) zh=xs~ouj6S)|B1dD{Xi8P2JrmpzlsYEsn(G2@_m=YiT$~+G^BRpr~tH{bXFddV6)r zJ%6X5-iAZ;oE*T(xttuP6ElY6ju(;x`5YA6=nnw?w)YjFn~b<_7Hh{Zf~!A_)%98- zlr`yOg(?Oe>$2b!q^Z@G|7F>A>n)Y~I#r{Xt)nmm_h@dhzz`f&hMCmt>}0v|w5L!< zeP3s!&@*+&%o%ueuQz6JK~S##E;2q1-FHEoZPc}#cDNpNt>NX9Oj$u4Z{_sydI+jh 
z%bCq3emJ2qpSY@|E~Zi~7pTBq;!P*< z=H@1#YT$Y~`QHo)WVse-gKp>W~der_V3fs+^<1@!`BfVTKhO? zZ@2os4i$9JWZP}z?C@9)4_s=JJ0;Yf(6l@f5W8}(ikbsulLq!F_cix>(Dr?3=l`%b zQ7dQRIjI@gKAk4^?l{rfd7{%~eHH1)X|z7}DqMWNQUya?R-w7$RwG$tcd9LlHap8U zRo%s;<+|cQoxVfLf)QuKlWsk*d}{CEh=>E99Nj(PL;<6$7tiOgEJJ z9^Z!1)CQ!PW zTB24u2!T~$sG4%y1TfJ$dYzdqyVNDz*-x^baot0WgpM*w*-lWpEG6Ks zZsTZYzpK0{l?OZbL#qjBdp{`9y%VY!f2CDshrw6DZEzr}Gp~;7v*5L|Uw2xYx#|c? zQ{|?kWz^SM?r5mY@q%-c4;?sqkZk6@L$@`%9gt{w%b+0Z*V6#f-J#Txp$$4U`jK{u zylSAAD*;YBmxGKQ1<04WOkdwN+uT0e$VU6A2zVdnT1`EozAmwnzcex4RsxdQaP_5w zIwMaDZBkP|msYH}qq*>F_r2x9Uk6##kW{wj*LJA48+OZe-5TyF52VmgZct)Y7EqxO z{-G9dH9ES+ji1aG+zKbL2G-%A+rnN~x39vc4_0o$Bh6aP-IreC{Sg|nt~I?I#}pEUR;xXlDy@zB?2&9DPhtnDR-P0Troxhz=*{DQ>FI5)5q)GW=40J>86!qhjRvRvI;Zw7>j%u|R>@&h`Oizsw{xT{ zo-nJy*)_VC_P$6Bv?U#ht8W$QrtUkucQVnaneS)o*hDpl-n?c~Ts~^rKbJ|zkL71q zbr!BZMYnJQB@0IT0AyTtbj~wIr@hl-t)t^(J2$p*Xl#A@YP7DkQ(ZLidd$2K^p&7Z z{0*D=7%H|Xx-&6_6b)#NPK2>ccuN)QGF=txfR>JZr?gRR{Ru1f-+oWA0i%Tm(1OX4 zX%c3Ymu05;6a7adKzH!1?C$1Kx$B9#;xsKDr}IZ?+x1ou#of|82-@@X^IxTjiZp%J}*D218X~mBmt&UE8DHkbt(l`&dHhe zOEcuOw0Ek`?IX{p%85fR?(EHTL!I}tiwPRIT^Jf<3{PNk$uxQZPXjDNa0#=N5OT51 zH}oBmK25t=OMY#&P=H(GvoE3VudJTi7rRH~^7*!L=YD}&yY-9w{O;?|*PpLHUw^*- reEs?Q^Y!QJ&)1)?KVN^o{(SxU`t$YY>(AGpC;s{W_jD|%0PqC><$U(y literal 0 HcmV?d00001 diff --git a/dist/class4gl-0.1dev/PKG-INFO b/dist/class4gl-0.1dev/PKG-INFO new file mode 100644 index 0000000..1cecd19 --- /dev/null +++ b/dist/class4gl-0.1dev/PKG-INFO @@ -0,0 +1,14 @@ +Metadata-Version: 1.0 +Name: class4gl +Version: 0.1dev +Summary: UNKNOWN +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: GPLv3 licence +Description: # class4gl + Chemistry Land-surface Atmosphere Soil Slab model (CLASS) | Python version + + This is the extension of class to be able to be used with global balloon soundings. 
+ +Platform: UNKNOWN diff --git a/dist/class4gl-0.1dev/bin/__init__.py b/dist/class4gl-0.1dev/bin/__init__.py new file mode 100644 index 0000000..a21583b --- /dev/null +++ b/dist/class4gl-0.1dev/bin/__init__.py @@ -0,0 +1,7 @@ +from . import model,class4gl,interface_multi,data_air,data_global + +__version__ = '0.1.0' + +__author__ = 'Hendrik Wouters ' + +__all__ = [] diff --git a/dist/class4gl-0.1dev/lib/__init__.py b/dist/class4gl-0.1dev/lib/__init__.py new file mode 100644 index 0000000..a21583b --- /dev/null +++ b/dist/class4gl-0.1dev/lib/__init__.py @@ -0,0 +1,7 @@ +from . import model,class4gl,interface_multi,data_air,data_global + +__version__ = '0.1.0' + +__author__ = 'Hendrik Wouters ' + +__all__ = [] diff --git a/dist/class4gl-0.1dev/lib/class4gl.py b/dist/class4gl-0.1dev/lib/class4gl.py new file mode 100644 index 0000000..7baaa51 --- /dev/null +++ b/dist/class4gl-0.1dev/lib/class4gl.py @@ -0,0 +1,1611 @@ +# -*- coding: utf-8 -*- + +""" + +Created on Mon Jan 29 12:33:51 2018 + +Module file for class4gl, which extents the class-model to be able to take +global air profiles as input. It exists of: + +CLASSES: + - an input object, namely class4gl_input. It includes: + - a function to read Wyoming sounding data from a yyoming stream object + - a function to read global data from a globaldata library object + - the model object: class4gl + - .... 
+ +DEPENDENCIES: + - xarray + - numpy + - data_global + - Pysolar + - yaml + +@author: Hendrik Wouters + +""" + + + +""" Setup of envirnoment """ + +# Standard modules of the stand class-boundary-layer model +from model import model +from model import model_output as class4gl_output +from model import model_input +from model import qsat +#from data_soundings import wyoming +import Pysolar +import yaml +import logging +import warnings +import pytz + +#formatter = logging.Formatter() +logging.basicConfig(format='%(asctime)s - \ + %(name)s - \ + %(levelname)s - \ + %(message)s') + + +# Generic Python Packages +import numpy as np +import datetime as dt +import pandas as pd +import xarray as xr +import io +#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio +from data_global import data_global +grav = 9.81 + +# this is just a generic input object +class generic_input(object): + def __init__(self): + self.init = True + + +# all units from all variables in CLASS(4GL) should be defined here! +units = { + 'h':'m', + 'theta':'K', + 'q':'kg/kg', + 'cc': '-', + 'cveg': '-', + 'wg': 'm3 m-3', + 'w2': 'm3 m-3', + #'wg': 'kg/kg', + 'Tsoil': 'K', + 'T2': 'K', + 'z0m': 'm', + 'alpha': '-', + 'LAI': '-', + 'dhdt':'m/h', + 'dthetadt':'K/h', + 'dqdt':'kg/kg/h', + 'BR': '-', + 'EF': '-', +} + +class class4gl_input(object): +# this was the way it was defined previously. 
+#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c')) + + def __init__(self,set_pars_defaults=True,debug_level=None): + + + """ set up logger (see: https://docs.python.org/2/howto/logging.html) + """ + + print('hello') + self.logger = logging.getLogger('class4gl_input') + print(self.logger) + if debug_level is not None: + self.logger.setLevel(debug_level) + + # # create logger + # self.logger = logging.getLogger('class4gl_input') + # self.logger.setLevel(debug_level) + + # # create console handler and set level to debug + # ch = logging.StreamHandler() + # ch.setLevel(debug_level) + + # # create formatter + # formatter = logging.Formatter('%(asctime)s - \ + # %(name)s - \ + # %(levelname)s - \ + # %(message)s') + # add formatter to ch + # ch.setFormatter(formatter) + + # # add ch to logger + # self.logger.addHandler(ch) + + # """ end set up logger """ + + + + # these are the standard model input single-value parameters for class + self.pars = model_input() + + # diagnostic parameters of the initial profile + self.diag = dict() + + # In this variable, we keep track of the different parameters from where it originates from. + self.sources = {} + + if set_pars_defaults: + self.set_pars_defaults() + + def set_pars_defaults(self): + + """ + Create empty model_input and set up case + """ + defaults = dict( + dt = 60. , # time step [s] + runtime = 6*3600 , # total run time [s] + + # mixed-layer input + sw_ml = True , # mixed-layer model switch + sw_shearwe = False , # shear growth mixed-layer switch + sw_fixft = False , # Fix the free-troposphere switch + h = 200. , # initial ABL height [m] + Ps = 101300., # surface pressure [Pa] + divU = 0. , # horizontal large-scale divergence of wind [s-1] + #fc = 1.e-4 , # Coriolis parameter [m s-1] + + theta = 288. , # initial mixed-layer potential temperature [K] + dtheta = 1. 
, # initial temperature jump at h [K] + gammatheta = 0.006 , # free atmosphere potential temperature lapse rate [K m-1] + advtheta = 0. , # advection of heat [K s-1] + beta = 0.2 , # entrainment ratio for virtual heat [-] + wtheta = 0.1 , # surface kinematic heat flux [K m s-1] + + q = 0.008 , # initial mixed-layer specific humidity [kg kg-1] + dq = -0.001 , # initial specific humidity jump at h [kg kg-1] + gammaq = 0. , # free atmosphere specific humidity lapse rate [kg kg-1 m-1] + advq = 0. , # advection of moisture [kg kg-1 s-1] + wq = 0.1e-3 , # surface kinematic moisture flux [kg kg-1 m s-1] + + CO2 = 422. , # initial mixed-layer CO2 [ppm] + dCO2 = -44. , # initial CO2 jump at h [ppm] + gammaCO2 = 0. , # free atmosphere CO2 lapse rate [ppm m-1] + advCO2 = 0. , # advection of CO2 [ppm s-1] + wCO2 = 0. , # surface kinematic CO2 flux [ppm m s-1] + sw_wind = True , # prognostic wind switch + u = 0. , # initial mixed-layer u-wind speed [m s-1] + du = 0. , # initial u-wind jump at h [m s-1] + gammau = 0. , # free atmosphere u-wind speed lapse rate [s-1] + advu = 0. , # advection of u-wind [m s-2] + v = 0.0 , # initial mixed-layer u-wind speed [m s-1] + dv = 0.0 , # initial u-wind jump at h [m s-1] + gammav = 0. , # free atmosphere v-wind speed lapse rate [s-1] + advv = 0. , # advection of v-wind [m s-2] + sw_sl = True , # surface layer switch + ustar = 0.3 , # surface friction velocity [m s-1] + z0m = 0.02 , # roughness length for momentum [m] + z0h = 0.02* 0.1 , # roughness length for scalars [m] + sw_rad = True , # radiation switch + lat = 51.97 , # latitude [deg] + lon = -4.93 , # longitude [deg] + doy = 268. , # day of the year [-] + tstart = 6.8 , # time of the day [h UTC] + cc = 0.0 , # cloud cover fraction [-] + Q = 400. , # net radiation [W m-2] + dFz = 0. 
, # cloud top radiative divergence [W m-2] + ls_type = 'js' , # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs) + wg = 0.21 , # volumetric water content top soil layer [m3 m-3] + w2 = 0.21 , # volumetric water content deeper soil layer [m3 m-3] + cveg = 0.85 , # vegetation fraction [-] + Tsoil = 295. , # temperature top soil layer [K] + Ts = 295. , # initial surface temperature [K] + T2 = 296. , # temperature deeper soil layer [K] + a = 0.219 , # Clapp and Hornberger retention curve parameter a + b = 4.90 , # Clapp and Hornberger retention curve parameter b + p = 4. , # Clapp and Hornberger retention curve parameter c + CGsat = 3.56e-6, # saturated soil conductivity for heat + wsat = 0.472 , # saturated volumetric water content ECMWF config [-] + wfc = 0.323 , # volumetric water content field capacity [-] + wwilt = 0.171 , # volumetric water content wilting point [-] + C1sat = 0.132 , + C2ref = 1.8 , + LAI = 2. , # leaf area index [-] + gD = 0.0 , # correction factor transpiration for VPD [-] + rsmin = 110. , # minimum resistance transpiration [s m-1] + rssoilmin = 50. , # minimun resistance soil evaporation [s m-1] + alpha = 0.25 , # surface albedo [-] + Wmax = 0.0012 , # thickness of water layer on wet vegetation [m] + Wl = 0.0000 , # equivalent water layer depth for wet vegetation [m] + Lambda = 5.9 , # thermal diffusivity skin layer [-] + c3c4 = 'c3' , # Plant type ('c3' or 'c4') + sw_cu = False , # Cumulus parameterization switch + dz_h = 150. 
, # Transition layer thickness [m] + cala = None , # soil heat conductivity [W/(K*m)] + crhoc = None , # soil heat capacity [J/K*m**3] + sw_ls = True , + sw_ap = True , # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input + sw_ac = None , # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields as input from eg., ERA-INTERIM + sw_lit = False, + ) + pars = model_input() + for key in defaults: + pars.__dict__[key] = defaults[key] + + self.update(source='defaults',pars=pars) + + def clear(self): + """ this procudure clears the class4gl_input """ + + for key in list(self.__dict__.keys()): + del(self.__dict__[key]) + self.__init__() + + def dump(self,file): + """ this procedure dumps the class4gl_input object into a yaml file + + Input: + - self.__dict__ (internal): the dictionary from which we read + Output: + - file: All the parameters in self.__init__() are written to + the yaml file, including pars, air_ap, sources etc. 
+ """ + file.write('---\n') + index = file.tell() + file.write('# CLASS4GL input; format version: 0.1\n') + + # write out the position of the current record + yaml.dump({'index':index}, file, default_flow_style=False) + + # we do not include the none values + for key,data in self.__dict__.items(): + #if ((type(data) == model_input) or (type(class4gl_input): + if key == 'pars': + + pars = {'pars' : self.__dict__['pars'].__dict__} + parsout = {} + for key in pars.keys(): + if pars[key] is not None: + parsout[key] = pars[key] + + yaml.dump(parsout, file, default_flow_style=False) + elif type(data) == dict: + if key == 'sources': + # in case of sources, we want to have a + # condensed list format as well, so we leave out + # 'default_flow_style=False' + yaml.dump({key : data}, file) + else: + yaml.dump({key : data}, file, + default_flow_style=False) + elif type(data) == pd.DataFrame: + # in case of dataframes (for profiles), we want to have a + # condensed list format as well, so we leave out + # 'default_flow_style=False' + yaml.dump({key: data.to_dict(orient='list')},file) + + # # these are trials to get it into a more human-readable + # fixed-width format, but it is too complex + #stream = yaml.dump({key : False},width=100, default_flow_style=False) + #file.write(stream) + + # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here + #file.write(key+': !!str |\n') + #file.write(str(data)+'\n') + + def load_yaml_dict(self,yaml_dict,reset=True): + """ this procedure loads class4gl_input data from a dictionary obtained from yaml + + Input: + - yaml_dict: the dictionary from which we read + - reset: reset data before reading + Output: + - All the parameters in self, eg., (pars, air_ap, sources etc.,). 
+ """ + + if reset: + for key in list(self.__dict__.keys()): + del(self.__dict__[key]) + self.__init__() + + for key,data in yaml_dict.items(): + if key == 'pars': + self.__dict__[key] = model_input() + self.__dict__[key].__dict__ = data + elif key in ['air_ap','air_balloon','air_ac','air_ach']: + self.__dict__[key] = pd.DataFrame(data) + elif key == 'sources': + self.__dict__[key] = data + elif key == 'diag': + self.__dict__[key] = data + else: + warnings.warn("Key '"+key+"' may not be implemented.") + self.__dict__[key] = data + + def update(self,source,**kwargs): + """ this procedure is to make updates of input parameters and tracking + of their source more convenient. It implements the assignment of + parameter source/sensitivity experiment IDs ('eg., + 'defaults', 'sounding balloon', any satellite information, climate + models, sensitivity tests etc.). These are all stored in a convenient + way with as class4gl_input.sources. This way, the user can always consult with + from where parameters data originates from. + + Input: + - source: name of the underlying dataset + - **kwargs: a dictionary of data input, for which the key values + refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and + the values is a again a dictionary/dataframe of datakeys/columns + ('wg','PRES','datetime', ...) and datavalues (either single values, + profiles ...), eg., + + pars = {'wg': 0.007 , 'w2', 0.005} + pars = {pd.Dataframe('PRES': [1005.,9523,...] , 'THTA': [295., + 300.,...]} + + Output: + - self.__dict__[datatype] : object to which the parameters are + assigned. They can be consulted with + self.pars, self.profiles, etc. + + - self.sources[source] : It supplements the overview overview of + data sources can be consulted with + self.sources. The structure is as follows: + as: + self.sources = { + 'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...], + 'GLEAM' : ['pars:wg','pars:w2', ...], + ... 
+ } + + """ + + #print(source,kwargs) + + for key,data in kwargs.items(): + + #print(key) + # if the key is not in class4gl_input object, then just add it. In + # that case, the update procedures below will just overwrite it + if key not in self.__dict__: + self.__dict__[key] = data + + + + + #... we do an additional check to see whether there is a type + # match. I not then raise a key error + if (type(data) != type(self.__dict__[key]) \ + # we allow dict input for model_input pars + and not ((key == 'pars') and (type(data) == dict) and \ + (type(self.__dict__[key]) == model_input))): + + raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object') + + + # This variable keeps track of the added data that is supplemented + # by the current source. We add this to class4gl_input.sources + datakeys = [] + + #... and we update the class4gl_input data, and this depends on the + # data type + + if type(self.__dict__[key]) == pd.DataFrame: + # If the data type is a dataframe, then we update the columns + for column in list(data.columns): + #print(column) + self.__dict__[key][column] = data[column] + datakeys.append(column) + + + elif type(self.__dict__[key]) == model_input: + # if the data type is a model_input, then we update its internal + # dictionary of parameters + if type(data) == model_input: + self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \ + **data.__dict__} + datakeys = list(data.__dict__.keys()) + elif type(data) == dict: + self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \ + **data} + datakeys = list(data.keys()) + else: + raise TypeError('input key '+key+' is not of the same type\ + as the one in the class4gl_object') + + elif type(self.__dict__[key]) == dict: + # if the data type is a dictionary, we update the + # dictionary + self.__dict__[key] = {self.__dict__[key] , data} + datakeys = list(data.keys()) + + + # if source entry is not existing yet, we add it + if source not in 
self.sources.keys(): + self.sources[source] = [] + + + # self.logger.debug('updating section "'+\ + # key+' ('+' '.join(datakeys)+')'\ + # '" from source \ + # "'+source+'"') + + # Update the source dictionary: add the provided data keys to the + # specified source list + for datakey in datakeys: + # At first, remove the occurences of the keys in the other + # source lists + for sourcekey,sourcelist in self.sources.items(): + if key+':'+datakey in sourcelist: + self.sources[sourcekey].remove(key+':'+datakey) + # Afterwards, add it to the current source list + self.sources[source].append(key+':'+datakey) + + + # # in case the datatype is a class4gl_input_pars, we update its keys + # # according to **kwargs dictionary + # if type(self.__dict__[datatype]) == class4gl_input_pars: + # # add the data parameters to the datatype object dictionary of the + # # datatype + # self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ , + # **kwargs} + # # in case, the datatype reflects a dataframe, we update the columns according + # # to the *args list + # elif type(self.__dict__[datatype]) == pd.DataFrame: + # for dataframe in args: + # for column in list(dataframe.columns): + # self.__dict__[datatype][column] = dataframe[column] + + + def get_profile(self,IOBJ, *args, **argv): + # if type(IOBJ) == wyoming: + self.get_profile_wyoming(IOBJ,*args,**argv) + # else: + # raise TypeError('Type '+str(type(IOBJ))+' is not supported') + + def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): + """ + Purpose: + This procedure assigns wyoming air profiles and parameters to the class4gl_input object. + + Input: + 1. wy_strm = wyoming html (beautifulsoup) stream object. The + function will take the profile at the stream's current + position. + 2. air_ap_mode: which air profile do we take? + - b : best + - l : according to lower limit for the mixed-layer height + estimate + - u : according to upper limit for the mixed-layer height + estimate + + + Output: + 1. 
all single-value parameters are stored in the + class4gl_input.pars object + 2. the souding profiles are stored in the in the + class4gl_input.air_balloon dataframe + 3. modified sounding profiles for which the mixed layer height + is fitted + 4. ... + + """ + + + # Raise an error in case the input stream is not the correct object + # if type(wy_strm) is not wyoming: + # raise TypeError('Not a wyoming type input stream') + + # Let's tell the class_input object that it is a Wyoming fit type + self.air_ap_type = 'wyoming' + # ... and which mode of fitting we apply + self.air_ap_mode = air_ap_mode + + """ Temporary variables used for output """ + # single value parameters derived from the sounding profile + dpars = dict() + # profile values + air_balloon = pd.DataFrame() + # fitted profile values + air_ap = pd.DataFrame() + + string = wy_strm.current.find_next('pre').text + string = string.split('\n')[:-1] + string = '\n'.join(string) + + columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV'] + air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1] + #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4]) + + #string = soup.pre.next_sibling.next_sibling + + string = wy_strm.current.find_next('pre').find_next('pre').text + + # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)). 
+ dpars = {**dpars, + **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict() + } + + # we get weird output when it's a numpy Timestamp, so we convert it to + # pd.datetime type + + dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M")) + dpars['STNID'] = dpars['Station number'] + + # altitude above ground level + air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation'] + # absolute humidity in g/kg + air_balloon['q']= (air_balloon.MIXR/1000.) \ + / \ + (air_balloon.MIXR/1000.+1.) + # convert wind speed from knots to m/s + air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT + angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees. + + air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x) + air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x) + + + + cp = 1005. # specific heat of dry air [J kg-1 K-1] + Rd = 287. # gas constant for dry air [J kg-1 K-1] + Rv = 461.5 # gas constant for moist air [J kg-1 K-1] + + air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q) + air_balloon['p'] = air_balloon.PRES*100. + + + # Therefore, determine the sounding that are valid for 'any' column + is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) + #is_valid = (air_balloon.z >= 0) + # # this is an alternative pipe/numpy method + # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0] + valid_indices = air_balloon.index[is_valid].values + print(valid_indices) + + dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]] + + air_balloon['t'] = air_balloon['TEMP']+273.15 + air_balloon['theta'] = (air_balloon.t) * \ + (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) + air_balloon['thetav'] = air_balloon['theta']*(1. 
+ 0.61 * air_balloon['q']) + + if len(valid_indices) > 0: + #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile + dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) + + dpars['h_b'] = np.max((dpars['h'],10.)) + dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height + dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height + dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height + + # the final mixed-layer height that will be used by class. We round it + # to 1 decimal so that we get a clean yaml output format + dpars['h'] = np.round(dpars['h_'+air_ap_mode],1) + else: + dpars['h_u'] =np.nan + dpars['h_l'] =np.nan + dpars['h_e'] =np.nan + dpars['h'] =np.nan + + + if np.isnan(dpars['h']): + dpars['Ps'] = np.nan + + + + + if ~np.isnan(dpars['h']): + # determine mixed-layer properties (moisture, potential temperature...) from profile + + # ... and those of the mixed layer + is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) + valid_indices_below_h = air_balloon.index[is_valid_below_h].values + if len(valid_indices) > 1: + if len(valid_indices_below_h) >= 3.: + ml_mean = air_balloon[is_valid_below_h].mean() + else: + ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() + elif len(valid_indices) == 1: + ml_mean = (air_balloon.iloc[0:1]).mean() + else: + temp = pd.DataFrame(air_balloon) + temp.iloc[0] = np.nan + ml_mean = temp + + dpars['theta']= ml_mean.theta + dpars['q'] = ml_mean.q + dpars['u'] = ml_mean.u + dpars['v'] = ml_mean.v + else: + dpars['theta'] = np.nan + dpars['q'] = np.nan + dpars['u'] = np.nan + dpars['v'] = np.nan + + + + + # First 3 data points of the mixed-layer fit. 
We create a empty head + # first + air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns) + # All other data points above the mixed-layer fit + air_ap_tail = air_balloon[air_balloon.z > dpars['h']] + + #calculate mixed-layer jump ( this should be larger than 0.1) + + air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']])) + air_ap_head['HGHT'] = air_ap_head['z'] \ + + \ + np.round(dpars[ 'Station elevation'],1) + + # make a row object for defining the jump + jump = air_ap_head.iloc[0] * np.nan + + if air_ap_tail.shape[0] > 1: + + # we originally used THTA, but that has another definition than the + # variable theta that we need which should be the temperature that + # one would have if brought to surface (NOT reference) pressure. + for column in ['theta','q','u','v']: + + # initialize the profile head with the mixed-layer values + air_ap_head[column] = ml_mean[column] + # calculate jump values at mixed-layer height, which will be + # added to the third datapoint of the profile head + jump[column] = (air_ap_tail[column].iloc[1]\ + -\ + air_ap_tail[column].iloc[0])\ + /\ + (air_ap_tail.z.iloc[1]\ + - air_ap_tail.z.iloc[0])\ + *\ + (dpars['h']- air_ap_tail.z.iloc[0])\ + +\ + air_ap_tail[column].iloc[0]\ + -\ + ml_mean[column] + if column == 'theta': + # for potential temperature, we need to set a lower limit to + # avoid the model to crash + jump.theta = np.max((0.1,jump.theta)) + + air_ap_head[column][2] += jump[column] + + air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) + + + + # make theta increase strong enough to avoid numerical + # instability + air_ap_tail_orig = pd.DataFrame(air_ap_tail) + air_ap_tail = pd.DataFrame() + #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) + #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) + theta_low = dpars['theta'] + z_low = dpars['h'] + ibottom = 0 + for itop in range(0,len(air_ap_tail_orig)): + theta_mean = 
air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean() + z_mean = air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean() + if ( + (z_mean > (z_low+10.)) and \ + (theta_mean > (theta_low+0.2) ) and \ + (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)): + + air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True) + ibottom = itop+1 + theta_low = air_ap_tail.theta.iloc[-1] + z_low = air_ap_tail.z.iloc[-1] + # elif (itop > len(air_ap_tail_orig)-10): + # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True) + + + + + + air_ap = \ + pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1) + + # we copy the pressure at ground level from balloon sounding. The + # pressure at mixed-layer height will be determined internally by class + #print(air_ap['PRES'].iloc[0]) + + rho = 1.2 # density of air [kg m-3] + g = 9.81 # gravity acceleration [m s-2] + + air_ap['p'].iloc[0] =dpars['Ps'] + air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h']) + air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1) + + + dpars['lat'] = dpars['Station latitude'] + dpars['latitude'] = dpars['lat'] + + # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich) + dpars['lon'] = 0. + # this is the real longitude that will be used to extract ground data + dpars['longitude'] = dpars['Station longitude'] + + dpars['ldatetime'] = dpars['datetime'] \ + + \ + dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.)) + dpars['doy'] = dpars['datetime'].timetuple().tm_yday + dpars['SolarAltitude'] = \ + Pysolar.GetAltitude(\ + dpars['latitude'],\ + dpars['longitude'],\ + dpars['datetime']\ + ) + dpars['SolarAzimuth'] = Pysolar.GetAzimuth(\ + dpars['latitude'],\ + dpars['longitude'],\ + dpars['datetime']\ + ) + dpars['lSunrise'], dpars['lSunset'] \ + = Pysolar.util.GetSunriseSunset(dpars['latitude'], + 0., + dpars['ldatetime'],0.) 
+ dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise']) + dpars['lSunset'] = pytz.utc.localize(dpars['lSunset']) + # This is the nearest datetime when the sun is up (for class) + dpars['ldatetime_daylight'] = \ + np.min(\ + (np.max(\ + (dpars['ldatetime'],\ + dpars['lSunrise'])\ + ),\ + dpars['lSunset']\ + )\ + ) + # apply the same time shift for UTC datetime + dpars['datetime_daylight'] = dpars['datetime'] \ + +\ + (dpars['ldatetime_daylight']\ + -\ + dpars['ldatetime']) + + dpars['doy'] = dpars['datetime'].timetuple().tm_yday + + # We set the starting time to the local sun time, since the model + # thinks we are always at the meridian (lon=0). This way the solar + # radiation is calculated correctly. + dpars['tstart'] = dpars['ldatetime_daylight'].hour \ + + \ + dpars['ldatetime_daylight'].minute/60.\ + + \ + dpars['ldatetime_daylight'].second/3600. + + + # convert numpy types to native python data types. This provides + # cleaner data IO with yaml: + for key,value in dpars.items(): + if type(value).__module__ == 'numpy': + dpars[key] = dpars[key].item() + + # # we make a pars object that is similar to the destination object + # pars = model_input() + # for key,value in dpars.items(): + # pars.__dict__[key] = value + + + # we round the columns to a specified decimal, so that we get a clean + # output format for yaml + decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\ + 'DRCT':2 ,'SKNT':2, 'theta':4, 'THTE':2, 'THTV':2,\ + 'z':2, 'q':5, 'WSPD':2, 'u':4, 'v':4} +# + for column,decimal in decimals.items(): + air_balloon[column] = air_balloon[column].round(decimal) + air_ap[column] = air_ap[column].round(decimal) + + self.update(source='wyoming',\ + # pars=pars, + pars=dpars,\ + air_balloon=air_balloon,\ + air_ap=air_ap) + + + def get_global_input(self, globaldata,only_keys=None,exclude_keys=None): + + """ + Purpose: This sets copies the parameters from the global datasets into the self (or similar object) + according to the position (lat lon) and the 
class datetime and timespan + globaldata should be a globaldata multifile object + + Input: + - globaldata: this is the library object + - only_keys: only extract specified keys + - exclude_keys: do not inherit specified keys + """ + classdatetime = np.datetime64(self.pars.datetime_daylight) + classdatetime_stop = np.datetime64(self.pars.datetime_daylight \ + + \ + dt.timedelta(seconds=self.pars.runtime)\ + ) + + + # # list of variables that we get from global ground data + # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', + # 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', + # 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', + # 'texture', 'itex', 'isoil', 'BR', + # 'b', 'cveg', + # 'C1sat', + # 'C2ref', 'p', 'a', + # ] #globaldata.datasets.keys(): + + # # these are the required class4gl 3d atmospheric input which is not provided by the soundings + # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p'] + + + if type(globaldata) is not data_global: + raise TypeError("Wrong type of input library") + + # by default, we get all dataset keys + keys = list(globaldata.datasets.keys()) + + # We add LAI manually, because it is not listed in the datasets and + #they its retreival is hard coded below based on LAIpixel and cveg + if ('LAIpixel' in keys) and ('cveg' in keys): + keys.append('LAI') + + # # In case there is surface pressure, we also calculate the half-level + # # and full-level pressure fields + # if ('sp' in keys): + # keys.append('pfull') + # keys.append('phalf') + + # If specified, we only take the keys that are in only_keys + if only_keys is not None: + for key in keys: + if key not in only_keys: + keys.remove(key) + + # If specified, we take out keys that are in exclude keys + if exclude_keys is not None: + for key in keys: + if key in exclude_keys: + keys.remove(key) + + # we set everything to nan first in the pars section (non-profile 
parameters + # without lev argument), so that we can check afterwards whether the + # data is well-fetched or not. + for key in keys: + if not ((key in globaldata.datasets) and \ + (globaldata.datasets[key].page is not None) and \ + ('lev' in globaldata.datasets[key].page[key].dims)): + self.update(source='globaldata',pars={key:np.nan}) + # # we do not check profile input for now. We assume it is + # # available + #else: + # self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])})) + + self.logger.debug('getting keys "'+', '.join(keys)+'\ + from global data') + + for key in keys: + # If we find it, then we obtain the variables + if ((key in globaldata.datasets) and \ + (globaldata.datasets[key].page is not None)): + + # check first whether the dataset has a height coordinate (3d space) + if 'lev' in globaldata.datasets[key].page[key].dims: + + # first, we browse to the correct file that has the current time + if 'time' in list(globaldata.datasets[key].page[key].dims): + globaldata.datasets[key].browse_page(time=classdatetime) + + + if (globaldata.datasets[key].page is not None): + # find longitude and latitude coordinates + ilats = (np.abs(globaldata.datasets[key].page.lat - + self.pars.latitude) < 0.5) + ilons = (np.abs(globaldata.datasets[key].page.lon - + self.pars.longitude) < 0.5) + + # if we have a time dimension, then we look up the required timesteps during the class simulation + if 'time' in list(globaldata.datasets[key].page[key].dims): + itimes = ((globaldata.datasets[key].page.time >= \ + classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop)) + + # In case we didn't find any correct time, we take the + # closest one. + if np.sum(itimes) == 0.: + + + classdatetimemean = \ + np.datetime64(self.pars.datetime_daylight + \ + dt.timedelta(seconds=int(self.pars.runtime/2.) 
+ )) + + dstimes = globaldata.datasets[key].page.time + time = dstimes.sel(time=classdatetimemean,method='nearest') + itimes = (globaldata.datasets[key].page.time == + time) + + else: + # we don't have a time coordinate so it doesn't matter + # what itimes is + itimes = 0 + + #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value). + + # over which dimensions we take a mean: + dims = globaldata.datasets[key].page[key].dims + namesmean = list(dims) + namesmean.remove('lev') + idxmean = [dims.index(namemean) for namemean in namesmean] + + value = \ + globaldata.datasets[key].page[key].isel(time=itimes, + lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1. + + # Ideally, source should be equal to the datakey of globaldata.library + # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) + # but therefore the globaldata class requires a revision to make this work + self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) + + else: + # this procedure is for reading the ground fields (2d space). + # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again. 
+ + + if 'time' in list(globaldata.datasets[key].page[key].dims): + + # first, we browse to the correct file + #print(key) + globaldata.datasets[key].browse_page(time=classdatetime) + + if globaldata.datasets[key].page is not None: + DIST = \ + np.abs((globaldata.datasets[key].page.variables['lat'].values\ + - self.pars.latitude)) + ilat = np.where((DIST) == np.min(DIST))[0][0] + DIST = \ + np.abs((globaldata.datasets[key].page.variables['lon'].values\ + - self.pars.longitude)) + ilon = np.where((DIST) == np.min(DIST))[0][0] + + DIST = \ + np.abs((globaldata.datasets[key].page.variables['lat'].values\ + - (self.pars.latitude + 0.5))) + ilatmax = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]: + ilatmax = ilat + + DIST = \ + np.abs((globaldata.datasets[key].page.variables['lon'].values\ + - (self.pars.longitude + 0.5))) + ilonmax = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]: + ilonmax = ilon + + DIST = \ + np.abs((globaldata.datasets[key].page.lat.values\ + - (self.pars.latitude - 0.5))) + ilatmin = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]: + ilatmin = ilat + DIST = \ + np.abs((globaldata.datasets[key].page.lon.values\ + - (self.pars.longitude - 0.5))) + ilonmin = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]: + ilonmin = ilon + + if ilatmin < ilatmax: + ilatrange = range(ilatmin,ilatmax+1) + else: + ilatrange = range(ilatmax,ilatmin+1) + + if ilonmin < ilonmax: + ilonrange = range(ilonmin,ilonmax+1) + else: + ilonrange = range(ilonmax,ilonmin+1) + + if 'time' in 
list(globaldata.datasets[key].page[key].dims): + DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime)) + + idatetime = np.where((DIST) == np.min(DIST))[0][0] + #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime) + if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ): + idatetime += 1 + + classdatetimeend = np.datetime64(\ + self.pars.datetime +\ + dt.timedelta(seconds=self.pars.runtime)\ + ) + DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend)) + idatetimeend = np.where((DIST) == np.min(DIST))[0][0] + #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend) + if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)): + idatetimeend -= 1 + idatetime = np.min((idatetime,idatetimeend)) + #for gleam, we take the previous day values + if key in ['wg', 'w2']: + idatetime = idatetime - 1 + idatetimeend = idatetimeend - 1 + + # in case of soil temperature, we take the exact + # timing (which is the morning) + if key in ['Tsoil','T2']: + idatetimeend = idatetime + + idts = range(idatetime,idatetimeend+1) + + count = 0 + self.__dict__[key] = 0. + value = 0. + for iilat in ilatrange: + for iilon in ilonrange: + for iidts in idts: + value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values) + count += 1 + value = value/count + self.update(source='globaldata',pars={key:value.item()}) + + else: + + count = 0 + value = 0. 
+ for iilat in ilatrange: + for iilon in ilonrange: + value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values) + count += 1 + value = value/count + + self.update(source='globaldata',pars={key:value.item()}) + + if ('LAIpixel' in keys) and ('cveg' in keys): + self.logger.debug('also update LAI based on LAIpixel and cveg') + # I suppose LAI pixel is already determined in the previous + # procedure. Anyway... + key = 'LAIpixel' + + if globaldata.datasets[key].page is not None: + # first, we browse to the correct file that has the current time + if 'time' in list(globaldata.datasets[key].page[key].dims): + globaldata.datasets[key].browse_page(time=classdatetime) + + DIST = \ + np.abs((globaldata.datasets[key].page.lat.values\ + - self.pars.latitude)) + ilat = np.where((DIST) == np.min(DIST))[0][0] + DIST = \ + np.abs((globaldata.datasets[key].page.lon.values\ + - self.pars.longitude)) + ilon = np.where((DIST) == np.min(DIST))[0][0] + + + DIST = \ + np.abs((globaldata.datasets[key].page.lat.values\ + - (self.pars.latitude + 0.5))) + ilatmax = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]: + ilatmax = ilat + + DIST = \ + np.abs((globaldata.datasets[key].page.lon.values \ + - (self.pars.longitude + 0.5))) + ilonmax = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]: + ilonmax = ilon + + DIST = \ + np.abs((globaldata.datasets[key].page.lat.values\ + - (self.pars.latitude - 0.5))) + ilatmin = np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]: + ilatmin = ilat + DIST = \ + np.abs((globaldata.datasets[key].page.lon.values\ + - (self.pars.longitude - 0.5))) + ilonmin = 
np.where((DIST) == np.min(DIST))[0][0] + if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]: + ilonmin = ilon + DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime)) + idatetime = np.where((DIST) == np.min(DIST))[0][0] + + + if ilatmin < ilatmax: + ilatrange = range(ilatmin,ilatmax+1) + else: + ilatrange = range(ilatmax,ilatmin+1) + + if ilonmin < ilonmax: + ilonrange = range(ilonmin,ilonmax+1) + else: + ilonrange = range(ilonmax,ilonmin+1) + + #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape) + LAIpixel = 0. + count = 0 + for iilat in [ilat]: #ilatrange + for iilon in [ilon]: #ilonrange + LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values + + + # if np.isnan(tarray[idatetime]): + # print("interpolating GIMMS LAIpixel nan value") + # + # mask = np.isnan(tarray) + # + # #replace each nan value with a interpolated value + # if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]: + # tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask]) + # + # else: + # print("Warning. Could not interpolate GIMMS LAIpixel nan value") + + # tarray *= np.nan + + count += 1 + #tarray_res += tarray + LAIpixel = LAIpixel/count + + count = 0 + #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values + + self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) + #print('LAIpixel:',self.__dict__['LAIpixel']) + #print('cveg:',self.__dict__['cveg']) + + # finally, we rescale the LAI according to the vegetation + # fraction + value = 0. + if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)): + value =self.pars.LAIpixel/self.pars.cveg + else: + # in case of small vegetation fraction, we take just a standard + # LAI value. It doesn't have a big influence anyway for + # small vegetation + value = 2. 
+ #print('LAI:',self.__dict__['LAI']) + self.update(source='globaldata',pars={'LAI':value}) + + + # in case we have 'sp', we also calculate the 3d pressure fields at + # full level and half level + if ('sp' in keys) and ('sp' in self.pars.__dict__): + pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0) + + phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values) + + + # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE + # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR + # # # CALCULATING THE ADVECTION PROFILES + # # hydrostatic thickness of each model layer + delpdgrav = -(phalf[:-1] - phalf[1:])/grav + # # dz = rhodz/(R * T / pfull) + + + # # subsidence multiplied by density. We calculate the subsidence of + # # the in class itself + # wrho = np.zeros_like(phalf) + # wrho[-1] = 0. + + # for ihlev in range(0,wrho.shape[0]-1): + # # subsidence multiplied by density is the integral of + # # divergences multiplied by the layer thicknessies + # wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \ + # self.air_ac['divU_y'][ihlev:]) * \ + # delpdgrav[ihlev:]).sum() + + + + self.update(source='globaldata',\ + air_ac=pd.DataFrame({'p':list(pfull)})) + self.update(source='globaldata',\ + air_ach=pd.DataFrame({'p':list(phalf)})) + self.update(source='globaldata',\ + air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)})) + # self.update(source='globaldata',\ + # air_ach=pd.DataFrame({'wrho':list(wrho)})) + + def check_source(self,source,check_only_sections=None): + """ this procedure checks whether data of a specified source is valid. 
+ + INPUT: + source: the data source we want to check + check_only_sections: a string or list with sections to be checked + OUTPUT: + returns True or False + """ + + # we set source ok to false as soon as we find a invalid input + source_ok = True + + # convert to a single-item list in case of a string + check_only_sections_def = (([check_only_sections]) if \ + type(check_only_sections) is str else \ + check_only_sections) + + if source not in self.sources.keys(): + self.logger.info('Source '+source+' does not exist') + source_ok = False + + for sectiondatakey in self.sources[source]: + section,datakey = sectiondatakey.split(':') + if ((check_only_sections_def is None) or \ + (section in check_only_sections_def)): + checkdatakeys = [] + if type(self.__dict__[section]) is pd.DataFrame: + checkdata = self.__dict__[section] + elif type(self.__dict__[section]) is model_input: + checkdata = self.__dict__[section].__dict__ + + if (datakey not in checkdata): + # self.logger.info('Expected key '+datakey+\ + # ' is not in parameter input') + source_ok = False + elif (checkdata[datakey] is None) or \ + (pd.isnull(checkdata[datakey]) is True): + + # self.logger.info('Key value of "'+datakey+\ + # '" is invalid: ('+ \ + # str(self.__dict__[section].__dict__[datakey])+')') + source_ok = False + + return source_ok + + def check_source_globaldata(self): + """ this procedure checks whether all global parameter data is + available, according to the keys in the self.sources""" + + source_globaldata_ok = True + + #self.get_values_air_input() + + # and now we can get the surface values + #class_settings = class4gl_input() + #class_settings.set_air_input(input_atm) + + # we only allow non-polar stations + if not (self.pars.lat <= 60.): + source_globaldata_ok = False + self.logger.info('cveg is invalid: ('+str(self.pars.cveg)+')') + + # check lat and lon + if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)): + source_globaldata_ok = False + self.logger.info('lat is invalid: 
('+str(self.pars.lat)+')') + self.logger.info('or lon is invalid: ('+str(self.pars.lon)+')') + else: + # we only check the ground parameter data (pars section). The + # profile data (air_ap section) are supposed to be valid in any + # case. + source_ok = self.check_source(source='globaldata',\ + check_only_sections=['air_ac',\ + 'air_ap',\ + 'pars']) + if not source_ok: + source_globaldata_ok = False + + # Additional check: we exclude desert-like + if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)): + source_globaldata_ok = False + self.logger.info('cveg is invalid: ('+str(self.pars.cveg)+')') + if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)): + source_globaldata_ok = False + self.logger.info('LAI is invalid: ('+str(self.pars.LAI)+')') + elif self.pars.cveg < 0.02: + self.logger.info('cveg is too low: ('+str(self.pars.cveg)+')') + source_globaldata_ok = False + + return source_globaldata_ok + + +class c4gli_iterator(): + """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially + + for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator + """ + def __init__(self,file): + # take file as IO stream + self.file = file + self.yaml_generator = yaml.load_all(file) + self.current_dict = {} + self.current_class4gl_input = class4gl_input() + separator = self.file.readline() # this is just dummy + self.header = file.readline() + if self.header != '# CLASS4GL record; format version: 0.1\n': + raise NotImplementedError("Wrong format version: '"+self.header+"'") + def __iter__(self): + return self + def __next__(self): + self.current_dict = self.yaml_generator.__next__() + self.current_class4gl_input.load_yaml_dict(self.current_dict) + return self.current_class4gl_input + + + +#get_cape and lift_parcel are adapted from the SkewT package + +class gl_dia(object): + def get_lifted_index(self,timestep=-1): + self.LI = 
get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.) + +#from SkewT +#def get_lcl(startp,startt,startdp,nsteps=101): +# from numpy import interp +# #-------------------------------------------------------------------- +# # Lift a parcel dry adiabatically from startp to LCL. +# # Init temp is startt in K, Init dew point is stwrtdp, +# # pressure levels are in Pa +# #-------------------------------------------------------------------- +# +# assert startdp<=startt +# +# if startdp==startt: +# return np.array([startp]),np.array([startt]),np.array([startdp]), +# +# # Pres=linspace(startp,60000.,nsteps) +# Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps) +# +# # Lift the dry parcel +# T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) +# # Mixing ratio isopleth +# starte=VaporPressure(startdp) +# startw=MixRatio(starte,startp) +# e=Pres*startw/(.622+startw) +# T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK +# +# # Solve for the intersection of these lines (LCL). +# # interp requires the x argument (argument 2) +# # to be ascending in order! +# P_lcl=interp(0.,T_iso-T_dry,Pres) +# T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1]) +# +# # # presdry=linspace(startp,P_lcl) +# # presdry=logspace(log10(startp),log10(P_lcl),nsteps) +# +# # tempdry=interp(presdry,Pres[::-1],T_dry[::-1]) +# # tempiso=interp(presdry,Pres[::-1],T_iso[::-1]) +# +# return P_lcl,T_lcl + + + +def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25): + """ Calculate mixed-layer height from temperature and wind speed profile + + Input: + HAGL: height coordinates [m] + THTV: virtual potential temperature profile [K] + WSPD: wind speed profile [m/s] + + Output: + BLH: best-guess mixed-layer height + BLHu: upper limit of mixed-layer height + BLHl: lower limit of mixed-layer height + + """ + + #initialize error BLH + BLHe = 0. 
+ eps = 2.#security limit + iTHTV_0 = np.where(~np.isnan(THTV))[0] + if len(iTHTV_0) > 0: + iTHTV_0 = iTHTV_0[0] + THTV_0 = THTV[iTHTV_0] + else: + THTV_0 = np.nan + + RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2. + + + #RiB = 9.81/THTV_0 * ( THTV[i-1] + (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2 + #RiB - RiBc = 0 + + #best guess of BLH + + #print("RiB: ",RiB) + #print("RiBc: ",RiBc) + + + + BLHi = np.where(RiB > RiBc)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + #print("BLHi: ",BLHi) + BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + + # possible error is calculated as the difference height levels used for the interpolation + BLHu = np.max([BLH,HAGL[BLHi]-eps]) + BLHl = np.min([BLH,HAGL[BLHi-1]+eps]) + # calculate an alternative BLH based on another critical Richardson number (RiBce): + BLHi =np.where(RiB > RiBce)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + + BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + BLHu = np.max([BLHu,HAGL[BLHi]-eps]) + BLHl = np.min([BLHl,HAGL[BLHi-1]+eps]) + + BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)]) + BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)]) + + else: + BLH,BLHu,BLHl = np.nan, np.nan,np.nan + + else: + BLH,BLHu,BLHl = np.nan, np.nan,np.nan + + return BLH,BLHu,BLHl + + + +#from class +def get_lcl(startp,startt,startqv): + # Find lifting condensation level iteratively + lcl = 20. + RHlcl = 0.5 + + itmax = 30 + it = 0 + while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0: + iTHTV_0 = iTHTV_0[0] + THTV_0 = THTV[iTHTV_0] + else: + THTV_0 = np.nan + RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2. 
+ + + #RiB = 9.81/THTV_0 * ( THTV[i-1] + (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2 + #RiB - RiBc = 0 + + #best guess of BLH + + #print("RiB: ",RiB) + #print("RiBc: ",RiBc) + + + + BLHi = np.where(RiB > RiBc)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + #print("BLHi: ",BLHi) + BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + + # possible error is calculated as the difference height levels used for the interpolation + BLHu = np.max([BLH,HAGL[BLHi]-eps]) + BLHd = np.min([BLH,HAGL[BLHi-1]+eps]) + # calculate an alternative BLH based on another critical Richardson number (RiBce): + BLHi =np.where(RiB > RiBce)[0] + if len(BLHi ) > 0: + BLHi = BLHi[0] + + BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] + BLHu = np.max([BLHu,HAGL[BLHi]-eps]) + BLHd = np.min([BLHd,HAGL[BLHi-1]+eps]) + + BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)]) + BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)]) + + else: + BLH,BLHu,BLHd = np.nan, np.nan,np.nan + + else: + BLH,BLHu,BLHd = np.nan, np.nan,np.nan + + return BLH,BLHu,BLHd + +def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)): + STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds()) + return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)] + + +#from os import listdir +#from os.path import isfile #,join +import glob + + +class wyoming(object): + def __init__(self): + self.status = 'init' + self.found = False + self.DT = None + self.current = None + #self.mode = 'b' + self.profile_type = 'wyoming' + self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] + self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/" + + def set_STNM(self,STNM): + self.__init__() + self.STNM = STNM + self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html") + self.FILES = [os.path.realpath(FILE) for FILE in self.FILES] + self.current = None + self.found = False + 
self.FILES.sort() + + def find_first(self,year=None,get_atm=False): + self.found = False + + # check first file/year or specified year + if year == None: + self.iFN = 0 + self.FN = self.FILES[self.iFN] + else: + self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html") + self.iFN = self.FILES.index(self.FN) + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)" + + # go through other files and find first sounding when year is not specified + self.iFN=self.iFN+1 + while keepsearching: + self.FN = self.FILES[self.iFN] + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + self.iFN=self.iFN+1 + keepsearching = (self.current is None) and (self.iFN < len(self.FILES)) + self.found = (self.current is not None) + + self.status = 'fetch' + if self.found: + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + + if self.found and get_atm: + self.get_values_air_input() + + + def find(self,DT,get_atm=False): + + self.found = False + keepsearching = True + #print(DT) + # we open a new file only when it's needed. Otherwise we just scroll to the right sounding. 
+ if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)): + self.DT = DT + self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html") + self.iFN = self.FILES.index(self.FN) + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + + keepsearching = (self.current is not None) + while keepsearching: + DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + if DTcurrent == DT: + self.found = True + keepsearching = False + if get_atm: + self.get_values_air_input() + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + elif DTcurrent > DT: + keepsearching = False + self.current = None + else: + self.current = self.current.find_next('h2') + if self.current is None: + keepsearching = False + self.found = (self.current is not None) + self.status = 'fetch' + + def find_next(self,get_atm=False): + self.found = False + self.DT = None + if self.current is None: + self.find_first() + else: + self.current = self.current.find_next('h2') + self.found = (self.current is not None) + keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES))) + while keepsearching: + self.iFN=self.iFN+1 + self.FN = self.FILES[self.iFN] + self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") + self.current = self.sounding_series.find('h2') + + self.found = (self.current is not None) + keepsearching = ((self.current is None) and (self.iFN < len(self.FILES))) + if self.found: + self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) + if self.found and get_atm: + 
self.get_values_air_input() + + + + def get_values_air_input(self,latitude=None,longitude=None): + + # for iDT,DT in enumerate(DTS): + + #websource = urllib.request.urlopen(webpage) + #soup = BeautifulSoup(open(webpage), "html.parser") + + + #workaround for ...last line has
 which results in stringlike first column
+        string = self.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = self.current.find_next('pre').find_next('pre').text
+
+        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
+        #PARAMS.insert(0,'date',DT)
+
+        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
+        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
+        
+        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
+        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
+        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
+        HAGL = HGHT - np.float(PARAMS['Station elevation'])
+        ONE_COLUMN.insert(0,'HAGL',HAGL)
+
+        
+        
+        
+        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
+        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
+        ONE_COLUMN.insert(0,'QABS',QABS)
+        
+        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
+
+        #mixed layer potential temperature
+        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
+
+        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
+        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
+        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
+
+        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
+        BLHV = np.max((BLHV,10.))
+        BLHVu = np.max((BLHVu,10.))
+        BLHVd = np.max((BLHVd,10.))
+        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
+
+        #security values for mixed-layer jump values dthetav, dtheta and dq
+        
+        # fit new profiles taking the above-estimated mixed-layer height
+        ONE_COLUMNNEW = []
+        for BLH in [BLHV,BLHVu,BLHVd]:
+            ONE_COLUMNNEW.append(pd.DataFrame())
+            
+            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
+            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
+            
+            listHAGLNEW = list(HAGLNEW)
+            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
+                
+                # get index of lowest valid observation. This seems to vary
+                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
+                if len(idxvalid) > 0:
+                    #print('idxvalid',idxvalid)
+                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
+                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
+                    else:
+                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
+                else:
+                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
+                    #print(col,meanabl)
+               
+                
+                # if col == 'PRES':
+                #     meanabl =  
+            
+                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
+                #THTVM = np.nanmean(THTV[HAGL <= BLH])
+                #print("new_pro_h",new_pro_h)
+                # calculate jump ath the top of the mixed layer
+                if col in ['THTA','THTV',]:
+                    #for moisture
+                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
+                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
+                    if len(listHAGLNEW) > 4:
+                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
+                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
+                        dtheta = np.max((0.1,dtheta_pre))
+                        #meanabl = meanabl - (dtheta - dtheta_pre)
+                        #print('dtheta_pre',dtheta_pre)
+                        #print('dtheta',dtheta)
+                        #print('meanabl',meanabl)
+                        #stop
+                        
+                    else:
+                        dtheta = np.nan
+                else:
+                    if len(listHAGLNEW) > 4:
+                        #for moisture (it can have both negative and positive slope)
+                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
+                    else:
+                        dtheta = np.nan
+                #print('dtheta',dtheta)
+                
+                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
+            
+                
+                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
+                
+            #QABSM = np.nanmean(QABS[HAGL <= BLH])
+            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
+            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
+            
+        # we just make a copy of the fields, so that it can be read correctly by CLASS 
+        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
+            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
+            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
+            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+            
+            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
+        
+            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
+            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
+
+
+        # assign fields adopted by CLASS
+        if self.mode == 'o': #original 
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'b':
+            PARAMS.insert(0,'h',   np.float(BLHV))
+        elif self.mode == 'u':
+            PARAMS.insert(0,'h',   BLHVu)
+        elif self.mode == 'd':
+            PARAMS.insert(0,'h',   BLHVd)
+        else:
+            PARAMS.insert(0,'h',   BLHV)
+            
+
+        try:
+            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
+            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
+        except:
+            print("could not convert latitude coordinate")
+            PARAMS.insert(0,'latitude', np.nan)
+            PARAMS.insert(0,'lat', np.nan)
+        try:
+            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
+            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwhich) 
+            PARAMS.insert(0,'lon', 0.)
+        except:
+            print("could not convert longitude coordinate")
+            PARAMS.insert(0,'longitude', np.nan)
+            PARAMS.insert(0,'lon', 0.)
+
+        if latitude is not None:
+            print('overwriting latitude with specified value')
+            PARAMS['latitude'] = np.float(latitude)
+            PARAMS['lat'] = np.float(latitude)
+        if longitude is not None:
+            print('overwriting longitude with specified value')
+            PARAMS['longitude'] = np.float(longitude)
+        try:
+            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwhich hence taking lon=0)
+            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
+            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            # This is the nearest datetime when sun is up (for class)
+            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
+            # apply the same time shift for UTC datetime
+            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
+            
+        except:
+            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
+            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
+            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
+            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
+
+        
+
+        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
+        # as we are forcing lon equal to zero this is is expressed in local suntime
+        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
+
+           
+        ONE_COLUMNb = ONE_COLUMNNEW[0]
+        ONE_COLUMNu = ONE_COLUMNNEW[1]
+        ONE_COLUMNd = ONE_COLUMNNEW[2]
+        
+
+        THTVM = np.nanmean(THTV[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
+        
+        QABSM = np.nanmean(QABS[HAGL <= BLHV])
+        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
+        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
+        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
+
+        BLHVe = abs(BLHV - BLHVu)
+        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
+
+        #PARAMS.insert(0,'dq',0.)
+        
+        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
+        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
+        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
+        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
+        
+        if self.mode == 'o': #original 
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
+        elif self.mode == 'b': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNb
+            BLCOLUMN = ONE_COLUMNb
+        elif self.mode == 'u': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNu
+            BLCOLUMN = ONE_COLUMNu
+        elif self.mode == 'd': # best BLH
+            USE_ONECOLUMN = ONE_COLUMNd
+            BLCOLUMN = ONE_COLUMNd
+        else:
+            USE_ONECOLUMN = ONE_COLUMN
+            BLCOLUMN = ONE_COLUMNb
+
+        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
+        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
+        # print(BLCOLUMN['HAGL'][lt6000])
+        # print(BLCOLUMN['HAGL'][lt2500])
+        # 
+        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
+
+        #print(BLCOLUMN['HAGL'][lt2500])
+        PARAMS.insert(0,'OK',
+                      ((BLHVe < 200.) and 
+                       ( len(np.where(lt6000)[0]) > 5) and
+                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
+                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
+                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
+                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
+                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
+                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
+                      )
+                     )
+
+        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
+        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
+        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
+        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
+        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
+        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
+        
+        
+        PARAMS = PARAMS.T
+
+        
+        self.PARAMS = PARAMS
+        self.ONE_COLUMN = USE_ONECOLUMN
+        # if self.mode == 'o': #original 
+        #     self.ONE_COLUMN = ONE_COLUMN
+        # elif self.mode == 'b': # best BLH
+        #     self.ONE_COLUMN = ONE_COLUMNb
+        # elif self.mode == 'u':# upper BLH
+        #     self.ONE_COLUMN = ONE_COLUMNu
+        # elif self.mode == 'd': # lower BLH
+        #     self.ONE_COLUMN=ONE_COLUMNd
+        # else:
+        #     self.ONE_COLUMN = ONE_COLUMN
+
diff --git a/dist/class4gl-0.1dev/lib/data_global.py b/dist/class4gl-0.1dev/lib/data_global.py
new file mode 100644
index 0000000..9c3d9b5
--- /dev/null
+++ b/dist/class4gl-0.1dev/lib/data_global.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: Hendrik Wouters
+
+Purpose: provides class routines for ground and atmosphere conditions used for
+the CLASS mixed-layer model
+
+Usage:
+    from data_global import data_global
+    from class4gl import class4gl_input
+    from data_soundings import wyoming
+
+    # create a data_global object and load initial data pages
+    globaldata = data_global()
+    globaldata.load_datasets()
+    # create a class4gl_input object
+    c4gli = class4gl_input()
+    # Initialize it with profile data. We need to do this first. Actually this
+    # will set the coordinate parameters (datetime, latitude, longitude) in
+    # class4gl_input.pars.__dict__, which is required to read point data from
+    # the data_global object.
+
+    # open a Wyoming stream for a specific station
+    wy_strm = wyoming(STNM=91376)
+    # load the first profile
+    wy_strm.find_first()
+    # load the profile data into the class4gl_input object
+    c4gli.get_profile_wyoming(wy_strm)
+    
+    # and finally, read the global input data for this profile
+    c4gli.get_global_input(globaldata)
+
+
+"""
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+#import pynacolada as pcd
+import pandas as pd
+import xarray as xr
+import os
+import glob
+import sys
+import errno
+import warnings
+import logging
+
+
+#formatter = logging.Formatter()
+logging.basicConfig(format='%(asctime)s - \
+                               %(name)s - \
+                               %(levelname)s - \
+                               %(message)s')
+
class book(object):
    """A dataset spread over multiple files ("pages").

    Serves a similar purpose as xarray's open_mfdataset, but only one file
    (the current 'page') is opened at a time, which saves precious memory.
    Pages are assumed to be stacked along a single concatenation dimension
    (typically 'time').
    """

    def __init__(self, fn, concat_dim=None, debug_level=None):
        """Expand the glob pattern *fn* into a sorted page list and open page 0.

        Parameters
        ----------
        fn : str
            glob pattern matching the files that make up the dataset
        concat_dim : str, optional
            dimension along which the pages are stacked; defaults to the
            first dimension of the first page
        debug_level : optional
            logging level for this instance's logger

        Raises
        ------
        FileNotFoundError
            when the pattern matches no file at all
        """
        self.logger = logging.getLogger('book')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        # filenames are expanded as a list and sorted by filename
        self.pages = glob.glob(fn)
        self.pages.sort()
        # an empty list means that no file matches fn: raise an error
        if len(self.pages) == 0:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)

        self.ipage = -1
        self.page = None
        self.renames = {}  # renaming applied each time a file is opened
        self.set_page(0)

        # we consider that the outer dimension is the one we concatenate
        self.concat_dim = concat_dim
        if self.concat_dim is None:
            self.concat_dim = list(self.page.dims.keys())[0]

    def sel(self, *args, **kwargs):
        """Wrap the xarray sel-command: first switch to the page that covers
        the requested coordinate along the concatenation dimension, then
        perform the selection on that page.

        Fix: the original signature was missing ``self`` and returned
        ``page.sel(...)`` with ``page`` undefined (NameError on every call).
        """
        for dim in kwargs.keys():
            if dim == self.concat_dim:
                self.browse_page(**{dim: kwargs[dim]})
        return self.page.sel(*args, **kwargs)

    def set_renames(self, renames):
        """Install a new variable-renaming table and re-apply it to the
        currently opened page (if any)."""
        # first convert back to the original names, and afterwards apply the
        # update of the renames
        reverse_renames = dict((v, k) for k, v in self.renames.items())
        self.renames = renames
        if self.page is not None:
            self.page = self.page.rename(reverse_renames)
            self.page = self.page.rename(self.renames)

    def set_page(self, ipage, page=None):
        """Make page *ipage* the current one.

        - the page is not reopened when we are already at the right one
        - the standard renamings are applied (latitude -> lat,
          longitude -> lon, level -> lev, plus ``self.renames``)
        - the dataset is squeezed
        - ``ipage == -1`` (with ``page=None``) closes the current page
        """
        if ((ipage != self.ipage) or (page is not None)):

            if self.page is not None:
                self.page.close()

            self.ipage = ipage
            if page is not None:
                self.page = page
            else:
                if self.ipage == -1:
                    self.page = None
                else:
                    self.logger.info("Switching to page "+str(self.ipage)+': '\
                                     +self.pages[self.ipage])
                    self.page = xr.open_dataset(self.pages[self.ipage])

            # do some final corrections to the dataset to make them uniform
            if self.page is not None:
                if 'latitude' in self.page.dims:
                    self.page = self.page.rename({'latitude': 'lat',
                                                  'longitude': 'lon'})
                if 'level' in self.page.dims:
                    self.page = self.page.rename({'level': 'lev'})

                self.page = self.page.rename(self.renames)
                self.page = self.page.squeeze(drop=True)

    def browse_page(self, rewind=2, **args):
        """Find and open the page whose coordinate range covers the
        coordinates requested as keyword arguments (at the moment, this is
        only tested with files stacked along the time dimension).

        The scan starts ``rewind`` pages before the current one and wraps
        around the full page list; when no page matches, the book is set to
        page -1.
        """
        if self.ipage == -1:
            self.set_page(0)

        found = False
        iipage = 0
        startipage = self.ipage - rewind
        while (iipage < len(self.pages)) and not found:
            ipage = (iipage + startipage) % len(self.pages)

            # a page only matches when EVERY requested coordinate falls inside
            # its range. Fix: initialized once per page, before the dim loop;
            # the original reset it inside the loop (so only the last dim's
            # verdict counted) and raised NameError when called without args.
            this_file = True
            for dim in args.keys():
                # page coordinates are cached in a directly-readable
                # dictionary, so that we don't need to load them every time
                if 'dims' not in self.__dict__:
                    self.dims = {}
                if dim not in self.dims.keys():
                    self.dims[dim] = [None]*len(self.pages)

                if self.dims[dim][ipage] is None:
                    self.logger.info('Loading coordinates of dimension "'+dim+\
                                     '" of page "' +str(ipage)+'".')
                    self.set_page(ipage)
                    self.dims[dim][ipage] = self.page[dim].values

                # determine the coordinate range of the current page: half a
                # grid spacing beyond the first and last coordinate values
                mindim = self.dims[dim][ipage][0] - (self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
                maxdim = self.dims[dim][ipage][-1] + (self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.

                if not ((args[dim] >= mindim) and (args[dim] < maxdim)):
                    this_file = False

            if this_file:
                found = True
                self.set_page(ipage)
            else:
                iipage += 1

        if not found:
            self.logger.info("Page not found. Setting to page -1")
            self.set_page(-1)

        if self.ipage != -1:
            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
        else:
            self.logger.debug("I'm now at page "+ str(self.ipage))
+
+
+class data_global(object):
+    def __init__(self,sources= {
+        # # old gleam
+        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
+        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
+        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
+        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
+        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
+        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
+        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
+        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
+        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
+        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
+        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
+        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
+        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
+        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
+        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
+        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
+        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
+        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
+        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
+        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
+        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
+        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
+        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
+        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
+        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
+        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
+        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
+        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
+        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
+        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
+        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
+        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
+        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
+        },debug_level=None):
+        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
+        self.sources = sources
+        self.datarefs = {}
+        self.datasets = {}
+        self.datetime = dt.datetime(1981,1,1)
+
+        self.logger = logging.getLogger('data_global')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+        self.debug_level = debug_level
+
+        warnings.warn('omitting pressure field p and advection')
+
+    def in_library(self,fn):
+        if fn not in self.library.keys():
+            return False
+        else:
+            print("Warning: "+fn+" is already in the library.")
+            return True
+
+    def add_to_library(self,fn):
+        if not self.in_library(fn):
+            print("opening: "+fn)
+            self.library[fn] = \
+                book(fn,concat_dim='time',debug_level=self.debug_level)
+
+            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
+            #if 'latitude' in self.library[fn].variables:
+            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+
+    # default procedure for loading datasets into the globaldata library
    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
        """Open *input_fn* as a book and register its variable(s).

        Parameters
        ----------
        input_fn : str
            glob pattern of the dataset file(s); added to the library.
        varssource : str or list of str, optional
            variable name(s) as found in the file. If None, auto-discovery
            is attempted (see NOTE below).
        varsdest : str or list of str, optional
            destination name(s) under which the variables are registered in
            self.datasets/self.datarefs; defaults to varssource. When a
            destination name differs from the source name, a separate book
            with a renaming table is created under a composite library key.
        """
        # accept a single name as well as a list of names
        if type(varssource) is str:
            varssource = [varssource]
        if type(varsdest) is str:
            varsdest = [varsdest]

        self.add_to_library(input_fn)

        if varssource is None:
            # NOTE(review): self.sources[input_fn] is a path *string* in
            # load_datasets, so .variables here looks broken; this
            # auto-discovery branch is presumably never exercised because
            # callers always pass varssource — confirm before relying on it.
            varssource = []
            for var in self.sources[input_fn].variables:
                avoid = \
                ['lat','lon','latitude','longitude','time','lev','level']
                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
                    varssource.append(var)

        if varsdest is None:
            varsdest = varssource

        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
        for ivar,vardest in enumerate(varsdest):
            varsource = varssource[ivar]
            print('setting '+vardest+' as '+varsource+' from '+input_fn)

            if vardest in self.datarefs.keys():
                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
            #self.add_to_library(fn,varsource,vardest)
            if vardest != varsource:
                # destination differs from source: open a dedicated book with
                # a renaming table, keyed by "input_fn.varsource.vardest"
                libkey = input_fn+'.'+varsource+'.'+vardest
                if libkey not in self.library.keys():
                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
                    self.library[libkey] = book(input_fn,\
                                                debug_level=self.debug_level)
                    self.library[libkey].set_renames({varsource: vardest})

                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
            else:
                # same name: point directly to the plain book for input_fn
                self.datarefs[vardest] = input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]

            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
            #     print('Warning: '+ vardest "not in " + input_fn)
+
+
+
+    def load_datasets(self,sources = None,recalc=0):
+
+        if sources is None:
+            sources = self.sources
+        for key in sources.keys():
+            #datakey,vardest,*args = key.split(':')
+            datakey,vardest = key.split(':')
+            #print(datakey)
+
+            fnvarsource = sources[key].split(':')
+            if len(fnvarsource) > 2:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource,fnargs = fnvarsource
+                fnargs = [fnargs]
+            elif len(fnvarsource) > 1:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource = fnvarsource
+                fnargs = []
+            else:
+                fn = sources[key]
+                varsource = vardest
+            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
+
+    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
+            # the default way of loading a 2d dataset
+            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
+                self.load_dataset_default(fn,varsource,vardest)
+            elif datakey == 'IGBPDIS':
+                if vardest == 'alpha':
+                    ltypes = ['W','B','H','TC']
+                    for ltype in ltypes:
+                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
+                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
+
+
+                    # landfr = {}
+                    # for ltype in ['W','B','H','TC']:
+                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
+
+
+
+                    keytemp = 'alpha'
+                    fnkeytemp = fn+':IGBPDIS:alpha'
+                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
+                        self.library[fnkeytemp]  = book(fnkeytemp,
+                                                        debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+                    else:
+                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
+                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
+                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
+                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
+                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
+                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
+                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
+
+                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+
+                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
+                        for ltype in ltypes:
+                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
+
+                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
+                        print('writing file to: '+fnkeytemp)
+                        os.system('rm '+fnkeytemp)
+                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
+                        self.library[fnkeytemp].close()
+
+
+                        self.library[fnkeytemp]  = \
+                            book(fnkeytemp,debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+
+
+                else:
+                    self.load_dataset_default(fn,varsource,vardest)
+
+
+            elif datakey == 'GLAS':
+                self.load_dataset_default(fn,varsource,vardest)
+                if vardest == 'z0m':
+                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
+                elif vardest == 'z0h':
+                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
+            elif datakey == 'DSMW':
+
+
+                # Procedure of the thermal properties:
+                # 1. determine soil texture from DSMW/10.
+                # 2. soil type with look-up table (according to DWD/EXTPAR)
+                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
+                #    with parameter look-up table from Noilhan and Planton (1989).
+                #    Note: The look-up table is inspired on DWD/COSMO
+
+                # to do: implement inheritance, so that the preliminary output of DSMW or any other dataset can be calculated first
+
+
+
+                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
+                self.load_dataset_default(fn,'DSMW')
+                print('calculating texture')
+                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
+                TEMP  = {}
+                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
+                TEMP3 = {}
+                for SPKEY in SPKEYS:
+
+
+                    keytemp = SPKEY+'_values'
+                    fnoutkeytemp = fnout+':DSMW:'+keytemp
+                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                    else:
+                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
+                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
+                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+                        # for faster computation, we need to get it to memory out of Dask.
+                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
+                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
+
+                # yes, I know I only check the last file.
+                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
+                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
+                        print('idx',idx,SPKEY)
+                        SEL = (TEMP2 == idx)
+                    #     print(idx,len(TEMP3))
+                        for SPKEY in SPKEYS:
+                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
+
+                    for SPKEY in SPKEYS:
+                        keytemp = SPKEY+'_values'
+                        fnoutkeytemp = fnout+':DSMW:'+keytemp
+                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
+                        os.system('rm '+fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].close()
+
+
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                keytemp = 'texture'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+                else:
+                    self.library[fn+':DSMW:texture'] = xr.Dataset()
+                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
+                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
+                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
+                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+
+                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
+
+                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
+                    zundef[zundef < 0] = np.nan
+                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
+                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
+
+                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+
+
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+                print('calculating texture type')
+
+
+
+                keytemp = 'itex'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+                else:
+                    self.library[fnoutkeytemp] = xr.Dataset()
+                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+                    X = self.datasets['texture'].page['texture'].values*100
+                    X[pd.isnull(X)] = -9
+
+
+                    self.datasets[keytemp][keytemp].values = X
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
+                    self.datasets['itex'].close()
+
+
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+
+                keytemp = 'isoil'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                isoil_reprocessed = False
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+                else:
+                    isoil_reprocessed = True
+                    print('calculating soil type')
+                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    ITEX = self.datasets['itex'].page['itex'].values
+                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
+                    LOOKUP = [
+                              [-10 ,9],# ocean
+                              [0 ,7],# fine textured, clay (soil type 7)
+                              [20,6],# medium to fine textured, loamy clay (soil type 6)
+                              [40,5],# medium textured, loam (soil type 5)
+                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                              [80,3],# coarse textured, sand (soil type 3)
+                              [100,9],# coarse textured, sand (soil type 3)
+                            ]
+                    for iitex,iisoil in LOOKUP:
+                        ISOIL[ITEX > iitex] = iisoil
+                        print('iitex,iisoil',iitex,iisoil)
+
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    LOOKUP = [
+                              [9001, 1 ], # ice, glacier (soil type 1)
+                              [9002, 2 ], # rock, lithosols (soil type 2)
+                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                              [9,    9 ], # undefined (ocean)
+                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                              [9000, 9 ], # undefined (inland lake)
+                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                            ]
+                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
+
+                    CODE_VALUES[ITEX == 901200] = 9012
+                    for icode,iisoil in LOOKUP:
+                        ISOIL[CODE_VALUES == icode] = iisoil
+
+                    self.datasets['isoil']['isoil'].values = ISOIL
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+                    print('saved inbetween file to: '+fnoutkeytemp)
+
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                #adopted from data_soil.f90 (COSMO5.0)
+                SP_LOOKUP = {
+                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
+                  # (by index)                                           loam                    loam                                water      ice
+                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+                  # Important note: For peat, the unknown values below are set equal to that of loam
+                  # supplement Noilhan and Planton 1989 soil texture parameters for the force-restore method.
+                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
+                  #error in table 2 of NP89: values need to be multiplied by e-6
+                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
+
+                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
+                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
+                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
+                }
+
+
+                # isoil_reprocessed = False
+                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+
+                #     self.library[fn+':DSMW:isoil'] = \
+                #             book(fnoutkeytemp,debug_level=self.debug_level)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+                # else:
+                #     isoil_reprocessed = True
+                #     print('calculating soil type')
+                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+
+
+
+                # this should become cleaner in future but let's hard code it for now.
+                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
+                print('calculating soil parameter')
+                DATATEMPSPKEY = {}
+                if (recalc < 1) and (isoil_reprocessed == False): 
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        keytemp = SPKEY
+                        fnoutkeytemp=fnout+':DSMW:'+keytemp
+                        self.library[fn+':DSMW:'+SPKEY] =\
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
+                        self.datarefs[SPKEY] =fnoutkeytemp
+                else:
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+
+                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
+                    ISOIL = self.datasets['isoil'].page['isoil'].values
+                    print(np.where(ISOIL>0.))
+                    for i in range(11):
+                        SELECT = (ISOIL == i)
+                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
+
+                        os.system('rm '+fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].close()
+                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
+
+                        self.library[fn+':DSMW:'+SPKEY] = \
+                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+
+
+            else:
+                self.load_dataset_default(fn,varsource,vardest)
+
+
+
+
+
+
+#
+#                 # only print the last parameter value in the plot
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'cala'
+#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'crhoc'
+#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#     key = "CERES"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         CERES_start_date = dt.datetime(2000,3,1)
+#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+#
+#         var = 'cc'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#         print(class_settings.lat,class_settings.lon)
+#
+#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
+#
+#         input_nc.close()
+#
+
+
+#     key = "GIMMS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
+#         print("Reading Leaf Area Index from "+input_fn)
+#         var = 'LAI'
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+#
+#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+#
+#         if np.isnan(tarray[idatetime]):
+#             print("interpolating GIMMS cveg nan value")
+#
+#             mask = np.isnan(tarray)
+#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+#             else:
+#                 print("Warning. Could not interpolate GIMMS cveg nan value")
+#
+#         class_settings.__dict__[var] = tarray[idatetime]
+#
+#         input_nc.close()
+#
+#     key = "IGBPDIS_ALPHA"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         var = 'alpha'
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+#         print("Reading albedo from "+input_fn)
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#
+#         landfr = {}
+#         for ltype in ['W','B','H','TC']:
+#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+#
+#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+#
+#         alpha=0.
+#         for ltype in landfr.keys():
+#             alpha += landfr[ltype]*aweights[ltype]
+#
+#
+#         class_settings.__dict__[var] = alpha
+#         input_nc.close()
+#
+#
+#     key = "ERAINT_ST"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         print("Reading soil temperature from "+input_fn)
+#
+#         var = 'Tsoil'
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         var = 'T2'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+#
+#
+#         input_nc.close()
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #var = 'T2'
+#     #valold = class_settings.__dict__[var]
+#     #
+#     #class_settings.__dict__[var] = 305.
+#     #class_settings.__dict__['Tsoil'] = 302.
+#     #valnew = class_settings.__dict__[var]
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #
+#     #var = 'Lambda'
+#     #valold = class_settings.__dict__[var]
+#
+#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
+#     ## I need to ask Chiel.
+#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+#     #
+#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
+#     #class_settings.__dict__[var] = valnew
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     key = "GLAS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+#         print("Reading canopy height for determining roughness length from "+input_fn)
+#         var = 'z0m'
+#
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+#
+#         lowerlimit = 0.01
+#         if testval < lowerlimit:
+#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+#             class_settings.__dict__[var] = lowerlimit
+#         else:
+#             class_settings.__dict__[var] = testval
+#
+#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+#
+#
+#         input_nc.close()
+
+
+
+
+
diff --git a/dist/class4gl-0.1dev/lib/interface_functions.py b/dist/class4gl-0.1dev/lib/interface_functions.py
new file mode 100644
index 0000000..3e483f3
--- /dev/null
+++ b/dist/class4gl-0.1dev/lib/interface_functions.py
@@ -0,0 +1,506 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+#from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+
class records_iterator(object):
    """Iterator over the rows of a records DataFrame.

    Yields (index_label, row) pairs like DataFrame.iterrows(), and also
    supports stepping backwards via __prev__.

    NOTE(review): an identically-named class is re-defined later in this
    module; that later definition shadows this one at import time.
    """

    def __init__(self, records):
        # records: pandas DataFrame whose rows we iterate over
        self.records = records
        # position of the row last returned; -1 means "before the start"
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        """Advance the position by `jump` rows and return (index_label, row).

        Raises StopIteration once the position runs past the last row.
        """
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration

        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bugfix: the original called self.__next__(self, jump=-1), passing
        # `self` twice, which raised TypeError on every call.
        return self.__next__(jump=-1)
+
+
+#'_afternoon.yaml'
def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
    """Read a single record from an open class4gl station yaml file.

    The record occupies bytes [index_start, index_end) of *yaml_file*.  The
    snippet is written to a temporary buffer file, converted to JSON with an
    external Ruby one-liner (presumably much faster than PyYAML for these
    files — TODO confirm), loaded with the json module, and finally wrapped
    in a class4gl object.

    INPUT:
        yaml_file:   open file object of the station yaml file
        index_start: byte offset where the record starts
        index_end:   byte offset where the record ends
        mode:        'mod' -> return a class4gl model-output object
                     'ini' -> return a class4gl_input object
    OUTPUT:
        a class4gl instance (mode 'mod') or class4gl_input instance
        (mode 'ini'); any other mode falls through and returns None,
        leaving the temporary .buffer.json file behind.
    """
    filename = yaml_file.name
    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
    #yaml_file = open(filename)

    #print('going to next observation',filename)
    yaml_file.seek(index_start)

    # sanitize tokens the Ruby yaml->json step cannot digest: inf/nan become
    # the 9e19 sentinel (yaml '.nan' thus becomes '.9e19'), and the document
    # separator '---' is dropped so the snippet is a single document
    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')

    # temporary yaml buffer file; the byte offset in the name keeps parallel
    # workers on the same station file from colliding
    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
    filebuffer.write(buf)
    filebuffer.close()
    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
    
    # NOTE(review): hard-coded cluster path to the Ruby interpreter, and the
    # shell command is built by string concatenation (no quoting of
    # filename) — fragile if paths ever contain spaces or shell metacharacters.
    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '

    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
    print(command)
    os.system(command)
    jsonstream = open(filename+'.buffer.json.'+str(index_start))
    record_dict = json.load(jsonstream)
    jsonstream.close()
    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))


    if mode =='mod':
        # model output record: hand the raw dict to class4gl
        modelout = class4gl()
        modelout.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))

        return modelout
    elif mode == 'ini':

 
        # datetimes are incorrectly converted to strings. We need to convert them
        # again to datetimes
        for key,value in record_dict['pars'].items():
            # we don't want the key with columns that have none values
            if value is not None: 
                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
               # elif (type(value) == str):
                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")

            # restore the nan sentinel written above (yaml '.nan' round-trips
            # to the float 0.9e19 or the string '.9e19')
            if (value == 0.9e19) or (value == '.9e19'):
                record_dict['pars'][key] = np.nan
        for key in record_dict.keys():
            #print(key)
            if key in ['air_ap','air_balloon',]:
                #NNprint('check')
                # profile tables: restore nan sentinels element-wise
                for datakey,datavalue in record_dict[key].items():
                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]

        #os.system('rm '+filename+'.buffer.json.'+str(index_start))

        c4gli = class4gl_input()
        print(c4gli.logger,'hello')
        c4gli.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))
        return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
+
class stations(object):
    """Catalog of sounding stations found in a class4gl data directory.

    The station table (STNID, latitude, longitude, filename) is cached in
    <path>/stations_list.csv and only rebuilt when the cache is missing or
    refetch_stations is True.
    """
    def __init__(self,path,suffix='ini',refetch_stations=False):

        self.path = path

        # csv cache of the station table
        self.file = self.path+'/stations_list.csv'
        if (os.path.isfile(self.file)) and (not refetch_stations):
            self.table = pd.read_csv(self.file)
        else:
            self.table = self.get_stations(suffix=suffix)
            self.table.to_csv(self.file)
        
        self.table = self.table.set_index('STNID')
        #print(self.table)

    def get_stations(self,suffix):
        """Scan the yaml files under self.path and build the station table.

        The first yaml document of each per-station file supplies
        STNID/latitude/longitude in its 'pars' section.
        """
        # multi-chunk naming (<STNID>_0_<suffix>.yaml) first, then the old
        # single-chunk naming (<STNID>_<suffix>.yaml)
        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
        if len(stations_list_files) == 0:
            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
        stations_list_files.sort()
        print(stations_list_files)
        if len(stations_list_files) == 0:
            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
        stations_list = []
        for stations_list_file in stations_list_files:
            thisfile = open(stations_list_file,'r')
            # NOTE(review): yaml.load_all without an explicit Loader is
            # deprecated since PyYAML 5.1 and unsafe on untrusted input;
            # consider yaml.safe_load_all if these files carry no custom tags.
            yamlgen = yaml.load_all(thisfile)
            try:
                first_record  = yamlgen.__next__()
            except Exception:
                # bugfix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. A file whose first document
                # cannot be read is simply skipped.
                first_record = None
            if first_record is not None:
                stations_list.append({})
                for column in ['STNID','latitude','longitude']:
                    #print(first_record['pars'].keys())
                    stations_list[-1][column] = first_record['pars'][column]
                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
            yamlgen.close()
            thisfile.close()
    
        print(stations_list)
        return pd.DataFrame(stations_list)
+
class stations_iterator(object):
    """Bidirectional iterator over the rows of a stations table.

    Yields (STNID, row) pairs from stations.table and supports random access
    by row number (set_row) or by station id (set_STNID).
    """
    def __init__(self,stations):
        # `stations` exposes a .table DataFrame indexed by STNID
        self.stations = stations
        # position of the station last returned; -1 means "before the start"
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self,jump=1):
        """Move `jump` rows and return (STNID, row); StopIteration at either end."""
        self.ix = self.ix + jump
        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
            raise StopIteration
        # note: the original applied np.mod here, but after the bounds check
        # above the position is already in range, so the wrap was a no-op.
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_row(self,row):
        """Jump to an absolute row number and return (STNID, row)."""
        self.ix = row
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_STNID(self,STNID):
        """Jump to the station with the given STNID and return (STNID, row).

        Raises IndexError when the STNID is not present in the table.
        """
        self.ix = np.where((self.stations.table.index == STNID))[0][0]
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def __prev__(self):
        # bugfix: the original called self.__next__(self, jump=-1), passing
        # `self` twice, which raised TypeError on every call.
        return self.__next__(jump=-1)

    def close(self):
        # bugfix: the original defined close() without `self`, so calling it
        # on an instance raised TypeError (and `self` was undefined inside).
        del self.ix
+
class records_iterator(object):
    """Iterator over the rows of a records DataFrame, with wrap-around.

    Yields (index_label, row) pairs; negative positions wrap to the end of
    the table via np.mod (so __prev__ from the first row yields the last).

    NOTE(review): this re-definition shadows the identically-named class
    defined earlier in this module; consider keeping only one of the two.
    """

    def __init__(self,records):
        # records: pandas DataFrame whose rows we iterate over
        self.records = records
        # position of the row last returned; -1 means "before the start"
        self.ix = -1

    def __iter__(self):
        return self

    def __next__(self,jump=1):
        """Move `jump` rows and return (index_label, row)."""
        self.ix = self.ix + jump
        if self.ix >= len(self.records.index):
            raise StopIteration
        # wrap negative positions (e.g. after __prev__ at the first row)
        self.ix = np.mod(self.ix,len(self.records))
        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bugfix: the original called self.__next__(self, jump=-1), passing
        # `self` twice, which raised TypeError on every call.
        return self.__next__(jump=-1)
+
+
+# #'_afternoon.yaml'
+# def get_record_yaml(yaml_file,index_start,index_end):
+#     filename = yaml_file.name
+#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
+#     #yaml_file = open(filename)
+# 
+#     #print('going to next observation',filename)
+#     yaml_file.seek(index_start)
+# 
+#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
+# 
+#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+#     filebuffer.write(buf)
+#     filebuffer.close()
+#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
+#     
+#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+# 
+#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
+#     print(command)
+#     os.system(command)
+#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
+#     record_dict = json.load(jsonstream)
+#     jsonstream.close()
+#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+#  
+#     # datetimes are incorrectly converted to strings. We need to convert them
+#     # again to datetimes
+#     for key,value in record_dict['pars'].items():
+#         # we don't want the key with columns that have none values
+#         if value is not None: 
+#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
+#            # elif (type(value) == str):
+#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+#                 
+#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
+# 
+#         if (value == 0.9e19) or (value == '.9e19'):
+#             record_dict['pars'][key] = np.nan
+#     for key in record_dict.keys():
+#         print(key)
+#         if key in ['air_ap','air_balloon',]:
+#             print('check')
+#             for datakey,datavalue in record_dict[key].items():
+#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
+# 
+#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
+# 
+#     c4gli = class4gl_input()
+#     c4gli.load_yaml_dict(record_dict)
+#     return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
    """Collect the record index tables for a set of stations.

    For every station, the yaml output files are scanned and an index table
    is built: one row per record, holding the scalar 'pars' values plus the
    byte offsets (index_start/index_end) of the record inside the yaml file.
    The table is cached as a .pkl next to each yaml file and reused when it
    is newer than the yaml.

    INPUT:
        stations:  DataFrame of stations indexed by STNID
        path_yaml: directory holding the <STNID>[_<chunk>]_<subset>.yaml files
        getchunk:  'all' to scan every chunk, or one specific chunk number
        subset:    filename suffix ('morning', 'afternoon', 'ini', 'mod', ...)
        refetch_records: rebuild the .pkl caches even when up to date
    OUTPUT:
        one DataFrame with a (STNID, chunk, index) MultiIndex
    """

    records = pd.DataFrame()
    for STNID,station in stations.iterrows():
        dictfnchunks = []
        # bugfix: was `getchunk is 'all'` — identity comparison against a
        # string literal, which is implementation-dependent and raises a
        # SyntaxWarning on modern CPython. Use equality instead.
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually for
            # original profile pairs)
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn,chunk=chunk))

            # otherwise, we use the new multi-chunk filename format
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1

        else:
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
            dictfnchunks.append(dict(fn=fn,chunk=getchunk))
            
        if len(dictfnchunks) > 0:
            for dictfnchunk in dictfnchunks:
                yamlfilename = dictfnchunk['fn']
                chunk = dictfnchunk['chunk']
                print(chunk)

                # the cached index table lives next to the yaml file
                pklfilename = yamlfilename.replace('.yaml','.pkl')

                # decide whether the cache can be reused
                generate_pkl = False
                if not os.path.isfile(pklfilename): 
                    print('pkl file does not exist. I generate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                elif not (os.path.getmtime(yamlfilename) <  \
                    os.path.getmtime(pklfilename)):
                    print('pkl file older than yaml file, so I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True

                if refetch_records:
                    print('refetch_records flag is True. I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                if not generate_pkl:
                    records = pd.concat([records,pd.read_pickle(pklfilename)])
                else:
                    with open(yamlfilename) as yaml_file:

                        dictout = {}

                        # skip ahead to the first record separator '---'
                        next_record_found = False
                        end_of_file = False
                        while (not next_record_found) and (not end_of_file):
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        next_tell = yaml_file.tell()
                        
                        while not end_of_file:

                            print(' next record:',next_tell)
                            current_tell = next_tell
                            next_record_found = False
                            yaml_file.seek(current_tell)
                            # copy the record into a temporary yaml buffer,
                            # sanitizing inf/nan for the Ruby converter
                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                            linebuffer = ''
                            while ( (not next_record_found) and (not end_of_file)):
                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
                                linebuffer = yaml_file.readline()
                                next_record_found = (linebuffer == '---\n')
                                end_of_file = (linebuffer == '')
                            filebuffer.close()
                            
                            next_tell = yaml_file.tell()
                            # byte span of this record within the yaml file
                            index_start = current_tell
                            index_end = next_tell

                            # convert yaml -> json with an external Ruby
                            # one-liner. NOTE(review): hard-coded cluster path
                            # and shell command built by concatenation.
                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                            print(command)
                            
                            os.system(command)
                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
                            record = json.load(jsonstream)
                            dictouttemp = {}
                            for key,value in record['pars'].items():
                                # we don't want the key with columns that have none values
                                if value is not None: 
                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
                                   if (type(value) in regular_numeric_types):
                                        dictouttemp[key] = value
                                   # (duplicated key names in the original list deduplicated)
                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','ldatetime_daylight']:
                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                            recordindex = record['index']
                            dictouttemp['chunk'] = chunk
                            dictouttemp['index_start'] = index_start
                            dictouttemp['index_end'] = index_end
                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
                            for key,value in dictouttemp.items():
                                if key not in dictout.keys():
                                    dictout[key] = {}
                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                            print(' obs record registered')
                            jsonstream.close()
                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
                    records_station = pd.DataFrame.from_dict(dictout)
                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                    print('writing table file ('+pklfilename+') for station '\
                          +str(STNID))
                    records_station.to_pickle(pklfilename)
                    records = pd.concat([records,records_station])
    return records
+
def stdrel(mod,obs,columns):
    """Per-station standardized deviation of model values against observations.

    For each column, computes (per STNID group):

        (mean_mod - mean_obs) / std_obs  +  (mod - mean_mod) / std_obs

    i.e. a standardized per-station bias term plus a standardized anomaly
    term, both scaled by the observed standard deviation of that station.
    """
    result = pd.DataFrame(columns=columns)
    for col in columns:
        mod_grouped = mod.groupby('STNID')[col]
        obs_grouped = obs.groupby('STNID')[col]
        obs_spread = obs_grouped.transform('std')
        mod_mean = mod_grouped.transform('mean')
        station_bias = (mod_mean - obs_grouped.transform('mean')) / obs_spread
        anomaly = (mod[col] - mod_mean) / obs_spread
        result[col] = station_bias + anomaly
    return result
+
def pct(obs,columns):
    """Percentile rank (in (0, 1]) of every value within its own column.

    Returns a DataFrame with the same index as *obs*, holding for each of
    *columns* the fractional rank of each value (pandas Series.rank with
    pct=True; ties get their average rank).
    """
    ranked = pd.DataFrame(columns=columns)
    for col in columns:
        ranked[col] = obs[col].rank(pct=True)
    return ranked
+
def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
    """Hourly tendencies d<key>/dt between morning obs and afternoon model.

    For every key, returns (mod_afternoon - obs_morning) divided by the
    elapsed local time between the morning and afternoon soundings, in
    units of <key> per hour.

    NOTE(review): uses .dt.seconds (the seconds-of-day component of the
    timedelta), not .dt.total_seconds() — assumes the two soundings are
    less than one day apart; confirm upstream.
    """
    stats = pd.DataFrame()
    elapsed_hours = (obs_afternoon.ldatetime - obs_morning.ldatetime).dt.seconds / 3600.
    for key in keys:
        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key]) / elapsed_hours
    return stats
+
diff --git a/dist/class4gl-0.1dev/lib/interface_multi.py b/dist/class4gl-0.1dev/lib/interface_multi.py
new file mode 100644
index 0000000..83148e5
--- /dev/null
+++ b/dist/class4gl-0.1dev/lib/interface_multi.py
@@ -0,0 +1,2061 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+# from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
# Segmented-colormap specification in the matplotlib LinearSegmentedColormap
# format: for each channel a sequence of (position, value_below, value_above)
# anchors with position in [0, 1]. The anchors below ramp from pure red at 0
# through grey (0.7, 0.7, 0.7) at 0.5 to pure blue at 1 — used for the
# statistics view.
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
+
+
+os.system('module load Ruby')
+
+class c4gl_interface_soundings(object):
    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
        """ creates an interactive interface for analysing class4gl experiments

        INPUT:
            path_exp : path of the experiment output
            path_obs : path of the observations 
            globaldata: global data that is being shown on the map
            refetch_records: rebuild the per-station record tables (.pkl)
                             from the yaml output even when cached
            refetch_stations: do we need to build the list of the stations again?
            inputkeys: model-input variables offered in the statistics and
                       worldmap panels
        OUTPUT:
            the procedure returns an interface object with interactive plots

        """
        
        # set the ground
        self.globaldata = globaldata

        self.path_exp = path_exp
        self.path_obs = path_obs
        # per-station yaml files in the old single-chunk layout
        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')

        # # get the list of stations
        # stationsfile = self.path_exp+'/stations_list.csv'
        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
        #     stations = pd.read_csv(stationsfile)
        # else:
        #     stations = get_stations(self.path_exp)
        #     stations.to_csv(stationsfile)

        # stations = stations.set_index('STNID')

        # the interface is organised in three "frames": a statistics panel,
        # a world map, and a profile viewer
        self.frames = {}

        self.frames['stats'] = {}
        self.frames['worldmap'] = {}
                
        self.frames['profiles'] = {}
        self.frames['profiles'] = {}  # NOTE(review): duplicate of the previous line (no effect)
        self.frames['profiles']['DT'] = None
        self.frames['profiles']['STNID'] = None

        #self.frames['worldmap']['stationsfile'] = stationsfile
        self.frames['worldmap']['stations'] = stations(self.path_exp, \
                                                       suffix='ini',\
                                                       refetch_stations=refetch_stations)

        # Initially, the stats frame inherets the values/iterators of
        # worldmap
        for key in self.frames['worldmap'].keys():
            self.frames['stats'][key] = self.frames['worldmap'][key]

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_ini'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='ini',\
                                           refetch_records=refetch_records
                                           )
        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_mod'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='mod',\
                                           refetch_records=refetch_records
                                           )

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_obs_afternoon'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_obs,\
                                           subset='afternoon',\
                                           refetch_records=refetch_records
                                           )

        # align the model records 1:1 with the initial records (assumes both
        # were generated from the same runs, in the same order — TODO confirm)
        self.frames['stats']['records_all_stations_mod'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        self.frames['stats']['records_all_stations_ini']['dates'] = \
            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)


        # select, per (station, date) of an initial profile, the matching
        # afternoon observation, then re-align on the ini index
        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index

        self.frames['stats']['records_all_stations_obs_afternoon'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]

        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        # diagnostics shown in the statistics panel
        self.frames['stats']['viewkeys'] = ['h','theta','q']
        print('Calculating table statistics')
        # modelled and observed afternoon tendencies relative to morning obs
        self.frames['stats']['records_all_stations_mod_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_mod'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )
        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )

        self.frames['stats']['inputkeys'] = inputkeys
        
        # self.frames['stats']['inputkeys'] = \
        #     [ key for key in \
        #       self.globaldata.datasets.keys() \
        #       if key in \
        #       list(self.frames['stats']['records_all_stations_obs'].columns)]


        # get units from the class4gl units database
        self.units = dict(units)
        # for those that don't have a definition yet, we just ask a question
        # mark
        for var in self.frames['stats']['inputkeys']:
            self.units[var] = '?'

        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
        # percentile ranks of the model-input variables across all records
        self.frames['stats']['records_all_stations_ini_pct'] = \
                  pct(self.frames['stats']['records_all_stations_ini'], \
                      columns = self.frames['stats']['inputkeys'])

        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
        #               columns = [ 'd'+key+'dt' for key in \
        #                           self.frames['stats']['viewkeys']], \
        #              )

        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
        #               obs = self.frames['stats']['records_all_stations_ini'], \
        #               columns = self.frames['stats']['viewkeys'], \
        #              )
        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
        
        print('filtering pathological data')
        # some observational sounding still seem problematic, which needs to be
        # investigated. In the meantime, we filter them
        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))

        # we filter ALL data frames!!!
        for key in self.frames['stats'].keys():
            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
               (self.frames['stats'][key].index.names == indextype):
                self.frames['stats'][key] = self.frames['stats'][key][valid]
        print(str(len(valid) - np.sum(valid))+' soundings are filtered')

        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index


        print("filtering stations from interface that have no records")
        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                    == STNID).sum() == 0):
                print("dropping", STNID)
                self.frames['worldmap']['stations'].table = \
                        self.frames['worldmap']['stations'].table.drop(STNID)
                    
        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
        
        # TO TEST: should be removed, since it's is also done just below
        self.frames['stats']['stations_iterator'] = \
            self.frames['worldmap']['stations_iterator'] 

        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
        # NOTE(review): self-assignment — this line has no effect
        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
        self.next_station()

        # self.goto_datetime_worldmap(
        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
        #     'after')
+    def sel_station(self,STNID=None,rownumber=None):
+
+        if (STNID is not None) and (rownumber is not None):
+            raise ValueError('Please provide either STNID or rownumber, not both.')
+
+        if (STNID is None) and (rownumber is None):
+            raise ValueError('Please provide either STNID or rownumber.')
+            
+        if STNID is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
+            print(
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+            )
+            self.update_station()
+        elif rownumber is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
+            self.update_station()
+
+
+
+    def next_station(self,event=None,jump=1):
+        with suppress(StopIteration):
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+                = self.frames['worldmap']['stations_iterator'].__next__(jump)
+            # self.frames['worldmap']['stations_iterator'].close()
+            # del(self.frames['worldmap']['stations_iterator'])
+            # self.frames['worldmap']['stations_iterator'] = \
+            #                 selfself.frames['worldmap']['stations'].iterrows()
+            # self.frames['worldmap']['STNID'],\
+            # self.frames['worldmap']['current_station'] \
+            #     = self.frames['worldmap']['stations_iterator'].__next__()
+
+        self.update_station()
+
+    def prev_station(self,event=None):
+        self.next_station(jump = -1,event=event)
    def update_station(self):
        """Synchronize all interface frames to the worldmap's selected station.

        Mirrors the station pointers into the stats frame, rebuilds the
        per-station record tables, (re)opens the station's yaml data files,
        resets both record iterators (stats and profiles), and finally
        refreshes the current record via update_record().
        """
        # mirror the worldmap's station selection into the stats frame
        for key in ['STNID','current_station','stations_iterator']: 
            self.frames['stats'][key] = self.frames['worldmap'][key] 



        # generate index of the current station
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts out as a copy of the stats frame
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        # close any previously opened station files before opening the new ones
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently

        self.update_record()
+
+    def next_record(self,event=None,jump=1):
+        with suppress(StopIteration):
+            (self.frames['profiles']['STNID'] , \
+            self.frames['profiles']['current_record_chunk'] , \
+            self.frames['profiles']['current_record_index']) , \
+            self.frames['profiles']['current_record_mod'] = \
+                      self.frames['profiles']['records_iterator'].__next__(jump)
+        # except (StopIteration):
+        #     self.frames['profiles']['records_iterator'].close()
+        #     del( self.frames['profiles']['records_iterator'])
+        #     self.frames['profiles']['records_iterator'] = \
+        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
+        #     (self.frames['profiles']['STNID'] , \
+        #     self.frames['profiles']['current_record_index']) , \
+        #     self.frames['profiles']['current_record_mod'] = \
+        #                     self.frames['profiles']['records_iterator'].__next__()
+
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        self.update_record()
+
+    def prev_record(self,event=None):
+        self.next_record(jump=-1,event=event)
+
    def update_record(self):
        """Refresh all per-record views for the currently selected record.

        Looks up the current (STNID, chunk, index) key in the ini /
        obs_afternoon / stats / percentile tables, reads the matching yaml
        records from the already-opened station files, and redraws the plot
        interface (worldmap only when the current dataset has a time axis).
        """
        # look up the current record in the per-station value tables,
        # keyed by the (STNID, chunk, index) multi-index
        self.frames['profiles']['current_record_ini'] =  \
            self.frames['profiles']['records_current_station_ini'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'],\
                  self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon'] =  \
            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'] , \
                  self.frames['profiles']['current_record_index'])]

        # the stats/percentile tables are indexed over all stations
        self.frames['profiles']['current_record_mod_stats'] = \
                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
                    self.frames['profiles']['STNID'], \
                    self.frames['profiles']['current_record_chunk'], \
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_ini_pct'] = \
                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]

        # keep the stats frame in lockstep with the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        # frame
        # note that the current station, record is the same as the stats frame for initialization

        # select first 
        #self.frames['profiles']['current_record_index'], \
        #self.frames['profiles']['record_yaml_mod'] = \
        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
        #                   self.frames['stats']['current_record_index'])
        # read the model record between its byte offsets in the open mod file
        self.frames['profiles']['record_yaml_mod'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_mod'], \
               self.frames['profiles']['current_record_mod'].index_start,
               self.frames['profiles']['current_record_mod'].index_end,
               mode='mod')
                                
        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_ini'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_ini'], \
               record_ini.index_start,
               record_ini.index_end,
                mode='ini')

        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_obs_afternoon'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_afternoon'], \
               record_afternoon.index_start,
               record_afternoon.index_end,
                mode='ini')


        key = self.frames['worldmap']['inputkey']
        # only redraw the map if the current world map has a time
        # dimension
        if 'time' in self.globaldata.datasets[key].page[key].dims:
            self.goto_datetime_worldmap(
                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                'after')
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap',
                                                  'profiles'])
        else:
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap_stations',
                                                  'profiles'])
+
+    def abline(self,slope, intercept,axis):
+        """Plot a line from slope and intercept"""
+        #axis = plt.gca()
+        x_vals = np.array(axis.get_xlim())
+        y_vals = intercept + slope * x_vals
+        axis.plot(x_vals, y_vals, 'k--')
+
    def plot(self):
        """Build the full matplotlib interface: per-variable stats panels,
        a world map with colorbar and station overlay, navigation buttons,
        and the profile/output axes; then draw everything once.

        Side effects: stores fig/axes/btns/tbox on self and registers the
        pick and hover callbacks.
        """
        import pylab as pl
        from matplotlib.widgets import Button
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        '''
        Definition of the axes for the sounding table stats
        '''
        
        fig = pl.figure(figsize=(14,9))
        axes = {} #axes
        btns = {} #buttons

        # one stats subplot (left column) per output statistic
        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
            label = 'stats_'+str(key)
            axes[label] = fig.add_subplot(\
                            len(self.frames['stats']['viewkeys']),\
                            5,\
                            5*ikey+1,label=label)
            # Actually, the axes should be a part of the frame!
            #self.frames['stats']['axes'] = axes[

            # pointer to the axes' point data
            axes[label].data = {}

            # pointer to the axes' color fields
            axes[label].fields = {}


        fig.tight_layout()
        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)

        label ='stats_colorbar'
        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
        axes[label].fields = {}

        # red-to-blue colormap used to colour stations by percentile
        from matplotlib.colors import LinearSegmentedColormap
        cdictpres = {'blue': (\
                           (0.,    0.,  0.),
                           (0.25,  0.25, 0.25),
                           (0.5,  .70, 0.70),
                           (0.75, 1.0, 1.0),
                           (1,     1.,  1.),
                           ),
               'green': (\
                           (0. ,   0., 0.0),
                           (0.25,  0.50, 0.50),
                           (0.5,  .70, 0.70),
                           (0.75,  0.50, 0.50),
                           (1  ,    0,  0.),
                           ),
               'red':  (\
                          (0 ,  1.0, 1.0),
                          (0.25 ,  1.0, 1.0),
                           (0.5,  .70, 0.70),
                          (0.75 , 0.25, 0.25),
                          (1,    0., 0.),
                          )}
        
        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


        label = 'times'
               
        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}


        label = 'worldmap'
               
        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}
        axes[label].lat = None
        axes[label].lon = None

        label = 'worldmap_colorbar'
        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
        axes[label].fields = {}

        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
        label = 'worldmap_stations'
        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
        axes[label].data = {}

        fig.canvas.mpl_connect('pick_event', self.on_pick)
        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)


        """ buttons definitions """
        
        label = 'bprev_dataset'
        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous dataset')
        btns[label].on_clicked(self.prev_dataset)

        label = 'bnext_dataset'
        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Next dataset')
        btns[label].on_clicked(self.next_dataset)

        label = 'bprev_datetime'
        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous datetime')
        btns[label].on_clicked(self.prev_datetime)

        label = 'bnext_datetime'
        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Next datetime')
        btns[label].on_clicked(self.next_datetime)


        label = 'bprev_station'
        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous station')
        btns[label].on_clicked(self.prev_station)

        label = 'bnext_station'
        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Next station')
        btns[label].on_clicked(self.next_station)

        label = 'bprev_record'
        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous record')
        btns[label].on_clicked(self.prev_record)

        label = 'bnext_record'
        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Next record')
        btns[label].on_clicked(self.next_record)


        # self.nstatsview = nstatsview
        # self.statsviewcmap = statsviewcmap
        self.fig = fig
        self.axes = axes
        self.btns = btns
        self.tbox = {}
        # self.hover_active = False

        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
        #                                transform=plt.gcf().transFigure)

        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
                                          transform=plt.gcf().transFigure)

        # profile axes (right) and model-output timeseries axes (centre)
        label = 'air_ap:theta'
        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)

        label = 'air_ap:q'
        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)

        label = 'out:h'
        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)

        label = 'out:theta'
        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)

        label = 'out:q'
        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)

        label = 'SEB'
        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)


        self.hover_active = False
        self.fig = fig
        self.fig.show()
        self.fig.canvas.draw()
        self.refresh_plot_interface()
+
+
+    # def scan_stations(self):
+    #     blabla
+        
+
+
+    # def get_records(current_file):
+    #     records = pd.DataFrame()
+
+    #     # initial position
+    #     next_record_found = False
+    #     while(not next_record_found):
+    #         next_record_found = (current_file.readline() == '---\n')
+    #     next_tell = current_file.tell() 
+    #     end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #     while not end_of_file:
+    #         current_tell = next_tell
+    #         next_record_found = False
+    #         current_file.seek(current_tell)
+    #         while ( (not next_record_found) and (not end_of_file)):
+    #             current_line = current_file.readline()
+    #             next_record_found = (currentline == '---\n')
+    #             end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #         # we store the position of the next record
+    #         next_tell = current_file.tell() 
+    #         
+    #         # we get the current record. Unfortunately we need to reset the
+    #         # yaml record generator first.
+    #         current_yamlgen.close()
+    #         current_yamlgen = yaml.load_all(current_file)
+    #         current_file.seek(current_tell)
+    #         current_record_mod = current_yamlgen.__next__()
+    #     current_yamlgen.close()
+
+    #     return records
+
+       #      next_record_found = False
+       #      while(not record):
+       #          next_record_found = (self.current_file.readline() == '---\n')
+       #      self.current_tell0 = self.current_file.tell() 
+
+       #  
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell0 = self.current_file.tell() 
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell1 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell0)
+       #  self.r0 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell1)
+       #  next_record_found = False
+       #  while ( (not next_record_found) and (not end_of_file):
+       #      current_line = self.current_file.readline()
+       #      next_record_found = (currentline == '---\n')
+       #      end_of_file = (currentline == '') # an empty line means we are at the end
+
+       #  self.current_tell2 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell1)
+       #  self.r1 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell2)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell3 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell2)
+       #  self.r2 = self.current_yamlgen.__next__()
+
+       #  # go to position of next record in file
+       #  self.current_file.seek(self.current_tell3)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell4 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell3)
+       #  self.r3 = self.current_yamlgen.__next__()
+ 
+       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
+
+    def goto_datetime_worldmap(self,DT,shift=None):
+        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                self.frames['worldmap']['iDT'] += 1
+            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                self.frames['worldmap']['iDT'] -= 1 
+            # for gleam, we take the values of the previous day
+            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                self.frames['worldmap']['iDT'] -= 2 
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+        #else:
+        #    self.frames['worldmap'].pop('DT')
+
+    def next_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def prev_datetime(self,event=None):
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def next_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+    def prev_dataset(self,event=None):
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+
+
+    def sel_dataset(self,inputkey):
+        self.frames['worldmap']['inputkey'] = inputkey
+        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
+        self.goto_datetime_worldmap(
+            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+            'after')# get nearest datetime of the current dataset to the profile
+        if "fig" in self.__dict__.keys():
+            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+       
+    # def prev_station(self,event=None):
+    #     self.istation = (self.istation - 1) % self.stations.shape[0]
+    #     self.update_station()
+
+
+
+
+    #def update_datetime(self):
+    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
+    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
+    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
+    #        print(self.worldmapfocus['DT'])
+    #        self.refresh_plot_interface(only='worldmap')
+
+    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
+
+        #print('r1')
+        for argkey in args.keys():
+            self.__dict__[arg] = args[argkey]
+
+        axes = self.axes
+        tbox = self.tbox
+        frames = self.frames
+        fig = self.fig
+ 
+        if (only is None) or ('worldmap' in only):
+            globaldata = self.globaldata
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+            else:
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+            keystotranspose = ['lat','lon']
+            for key in dict(datasetxr.dims).keys():
+                if key not in keystotranspose:
+                    keystotranspose.append(key)
+
+            datasetxr = datasetxr.transpose(*keystotranspose)
+            datasetxr = datasetxr.sortby('lat',ascending=False)
+
+            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+            lonleft = lonleft - 360.
+            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+            label = 'worldmap'
+            axes[label].clear()
+            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+
+        if (only is None) or ('worldmap' in only):
+            #if 'axmap' not in self.__dict__ :
+            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
+            #else:
+
+            #stations = self.stations
+
+
+            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
+            #     resolution = 'l', 
+            # area_thresh = 0.1,
+            #     llcrnrlon=-180., llcrnrlat=-90.0,
+            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
+            # 
+            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
+            # self.gmap.drawcountries(color='white',linewidth=0.3)
+            # #self.gmap.fillcontinents(color = 'gray')
+            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
+            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
+            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
+            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # #self.ax5.shadedrelief()
+
+           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
+
+
+            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+
+            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
+            if 'lev' in field.dims:
+                field = field.isel(lev=-1)
+
+            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+            axes[label].axis('off')
+
+            from matplotlib import cm
+            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+            
+            
+            title=frames['worldmap']['inputkey']
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            axes[label].set_title(title)
+
+            label ='worldmap_colorbar'
+            axes[label].clear()
+            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+
+
+            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+            # x,y = self.gmap(lons,lats)
+            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+
+        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+
+            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            store_xlim = {}
+            store_ylim = {}
+            for ikey, key in enumerate(statskeys_out):
+                if (only is not None) and ('stats_lightupdate' in only):
+                    store_xlim[key] = axes['stats_'+key].get_xlim()
+                    store_ylim[key] = axes['stats_'+key].get_ylim()
+                self.axes['stats_'+key].clear()    
+
+            label = 'times'
+            self.axes[label].clear()
+
+            key = 'dthetadt'
+            x = self.frames['stats']['records_all_stations_ini']['datetime']
+            #print(x)
+            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            #print(y)
+            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            #print(z)
+
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            self.axes[label].data[label] = self.axes[label].scatter(x.values,
+                                                                    y.values,
+                                                                    c=z.values,
+                                                                    cmap=self.statsviewcmap,
+                                                                    s=2,
+                                                                    vmin=0.,
+                                                                    vmax=1.,
+                                                                    alpha=alpha_cloud_pixels)
+
+            
+            x = self.frames['stats']['records_current_station_ini']['datetime']
+            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+
+            x = self.frames['profiles']['records_current_station_ini']['datetime']
+            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
+            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
+
+            for ikey, key in enumerate(statskeys_out):
+
+                # show data of all stations
+                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                qvalmax = x.quantile(0.999)
+                qvalmin = x.quantile(0.001)
+                print('applying extra filter over extreme values for plotting stats')
+                selx = (x >= qvalmin) & (x < qvalmax)
+                sely = (x >= qvalmin) & (x < qvalmax)
+                x = x[selx & sely]
+                y = y[selx & sely]
+                z = z[selx & sely]
+                self.axes['stats_'+key].data['stats_'+key] = \
+                       self.axes['stats_'+key].scatter(x,y, c=z,\
+                                cmap=self.statsviewcmap,\
+                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
+
+                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
+
+                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
+                y = self.frames['stats']['current_record_mod_stats'][key]
+                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
+                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
+                    axes['stats_'+key].annotate(text, \
+                                               xy=(x,y),\
+                                               xytext=(0.05,0.05),\
+                                               textcoords='axes fraction',\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               color='white',\
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                # self.axes['stats_'+key].data[key+'_current_record'] = \
+                #        self.axes['stats_'+key].scatter(x,y, c=z,\
+                #                 cmap=self.statsviewcmap,\
+                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
+
+                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
+                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
+                # # highlight data for curent station
+                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+
+                if ikey == len(statskeys_out)-1:
+                    self.axes['stats_'+key].set_xlabel('external')
+                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
+                axes['stats_'+key].set_ylabel('model')
+
+
+                if (only is not None) and ('stats_lightupdate' in only):
+                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
+                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
+                else:
+                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
+                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
+                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
+                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
+                self.abline(1,0,axis=self.axes['stats_'+key])
+
+        if (only is None) or ('stats_colorbar' in only):
+            label ='stats_colorbar'
+            axes[label].clear()
+            import matplotlib as mpl
+            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
+            self.axes[label].fields[label] = \
+             mpl.colorbar.ColorbarBase(self.axes[label],\
+                        orientation='horizontal',\
+                        label="percentile of "+self.frames['worldmap']['inputkey'],
+                        alpha=1.,
+                                cmap=self.statsviewcmap,\
+                                       norm=norm
+                         )
+
+        #print('r1')
+        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
+            #print('r2')
+            label = 'worldmap_stations'
+            axes[label].clear()
+            
+            stations = self.frames['worldmap']['stations'].table
+            globaldata = self.globaldata
+            
+            key = label
+
+            #print('r3')
+            if (stations is not None):
+                xlist = []
+                ylist = []
+                #print('r4')
+                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
+            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    xlist.append(x)
+                    ylist.append(y)
+                #picker is needed to make it clickable (pick_event)
+                axes[label].data[label] = axes[label].scatter(xlist,ylist,
+                                                              c='r', s=15,
+                                                              picker = 15,
+                                                              label=key,
+                                                              edgecolor='k',
+                                                              linewidth=0.8)
+
+            # cb.set_label('Wilting point [kg kg-3]')
+                #print('r5')
+
+                
+            #     xseries = []
+            #     yseries = []
+            #     for iSTN,STN in stations.iterrows():
+            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
+            #         xseries.append(x)                    
+            #         yseries.append(y)
+            #         
+            #         
+            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
+                    
+                if ('current_station' in frames['worldmap']):
+                    #print('r5')
+                    STN = frames['stats']['current_station']
+                    STNID = frames['stats']['STNID']
+                    #print('r5')
+
+                    x,y = len(axes['worldmap'].lon)* \
+                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
+                          len(axes['worldmap'].lat)* \
+                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    #print('r6')
+                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
+                    #                          == \
+                    #                          self.frames['worldmap']['STNID'])\
+                    #                         & \
+                    #                         (self.seltablestats['DT'] \
+            #                          == self.axes['statsview0'].focus['DT']) \
+                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
+                    #print('r7')
+                    text = 'STNID: '+ format(STNID,'10.0f') + \
+                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
+                            ', LON: '+format(STN['longitude'],'3.3f')+ \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+
+                            #+', VAL: '+format(VAL,'.3e')
+
+                    axes[label].scatter(x,y, c='r', s=30,\
+                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
+                    #print('r8')
+            
+                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
+                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
+                    #colorstation = max((min((1.,colorstation)),0.))
+                    colorstation =0.2
+                    from matplotlib import cm
+                    axes[label].annotate(text,
+                                         xy=(x,y),
+                                         xytext=(0.05,0.05),
+                                         textcoords='axes fraction', 
+                                         bbox=dict(boxstyle="round",
+                                         fc = cm.viridis(colorstation)),
+                                         arrowprops=dict(arrowstyle="->",
+                                                         linewidth=1.1),
+                                         color='white' if colorstation < 0.5 else 'black')
+                    #print('r9')
+
+                    # #pos = sc.get_offsets()[ind["ind"][0]]
+                    # 
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label].data[label+'statannotate'].set_text(text)
+                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
+                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
+            #print('r9')
+            axes[label].axis('off')
+            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
+            axes[label].set_ylim((len(axes['worldmap'].lat),0))
+            #print('r10')
+
+        if (only is None) or ('profiles' in only): 
+            #print('r11')
+
+            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
+            # # self.update_station(goto_first_sounding=False)
+            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
+            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
+            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
+            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
+
+            label = 'air_ap:theta'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # +\
+                # ' -> '+ \
+                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+            
+            
+            
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+            #print('r12')
+
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
+            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
+            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            #print('r13')
+            # 
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r14')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+
+            #print('r15')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+                          
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r16')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r17')
+            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            print(hmax)
+            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            if valid_mod:
+
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="mod "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+
+            #print('r18')
+            axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('theta [K]')
+
+            label = 'air_ap:q'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+
+            #print('r19')
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            if valid_mod:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            else:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            # 
+            #print('r20')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r21')
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            #print('r23')
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r24')
+            if valid_mod:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="fit ")#+\
+                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #+'LT')
+            #print('r25')
+            #axes[label].legend()
+
+            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            #axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('q [kg/kg]')
+
+            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+
+            # #pl.subplots_adjust(right=0.6)
+
+            # label = 'q_pro'
+            # axes[label].clear()
+
+            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
+            # 
+            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
+            # 
+            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
+
+            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
+            # #pl.subplots_adjust(right=0.6)
+            # axes[label].set_xlabel('specific humidity [kg/kg]')
+ 
+
+            #print('r26')
+            time = self.frames['profiles']['record_yaml_mod'].out.time
+            for ilabel,label in enumerate(['h','theta','q']):
+                axes["out:"+label].clear()
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].set_ylabel(label)
+                if ilabel == 2:
+                    axes["out:"+label].set_xlabel('local sun time [h]')
+                
+            #print('r27')
+            label = 'SEB'
+            axes[label].clear()
+            
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
+            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
+            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
+                
+            #print('r28')
+            
+            axes[label].legend()
+            
+            #         for ax in self.fig_timeseries_axes:
+#             ax.clear()
+#         
+#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
+#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
+#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
+#         #print(self.morning_sounding.c4gl.out.Swin)
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
+#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
+#         self.fig_timeseries_axes[3].legend()
+#         self.fig.canvas.draw()
+            
+
+
+
+
+
+
+        #self.ready()
+        #print('r29')
+        fig.canvas.draw()
+        #fig.show()
+
+        self.axes = axes
+        self.tbox = tbox
+        self.fig = fig
+
+    def on_pick(self,event):
+        """Mouse-pick callback of the interactive figure.
+
+        Depending on which artist was clicked (worldmap station markers or a
+        stats scatter panel), commit the station/record currently highlighted
+        by hovering to the 'profiles' frame and refresh the plots.
+
+        Parameters
+        ----------
+        event : matplotlib PickEvent
+            The pick event; event.artist.get_label() identifies the panel
+            (see keys_to_axes below).
+        """
+        #print("HELLO")
+        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
+        #self.axes['theta_pro'].clear()
+        #self.axes['q_pro'].clear()
+        
+
+        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
+        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
+        keys_to_axes = {}
+        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
+
+        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
+        keys_to_axes['worldmap'] = 'worldmap'
+        
+        axes = self.axes
+        #nstatsview = self.nstatsview
+        #statsviewcmap = self.statsviewcmap
+        stations = self.frames['worldmap']['stations'].table
+
+
+        #print("p1")
+        current = event
+        artist = event.artist
+        
+        selkey = artist.get_label()
+        
+        #print(keys_to_axes)
+        
+        label = keys_to_axes[selkey]
+        #print("HELLO",selkey,label)
+
+        # # Get to know in which axes we are
+        # label = None
+        # for axeskey in axes.keys():
+        #     if event.inaxes == axes[axeskey]:
+        #         label = axeskey
+        #         
+
+        # cont, pos = None, None
+        
+        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
+        ind = event.ind
+        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
+        # read the picked scatter data from the first collection of the axes
+        d = axes[label].collections[0]
+        #d.set_offset_position('data')
+        xy = d.get_offsets()
+        x, y =  xy[:,0],xy[:,1]
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+
+        #print("p2")
+        if len(ind) > 0:
+            #print("p3")
+            # data coordinates of the first picked point
+            pos = x[ind[0]], y[ind[0]]
+
+            #if label[:-1] == 'statsview':
+            #    #seltablestatsstdrel = self.seltablestatsstdrel
+            #    #seltablestatspct = self.seltablestatspct
+
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    
+            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
+            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+            #    
+            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
+            #el
+            if (label == 'worldmap') or (label == 'worldmap_stations'):
+                self.hover_active = False
+                if (self.frames['worldmap']['STNID'] !=
+                    self.frames['profiles']['STNID']):
+                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
+                # so we just need to perform update_station
+                    self.update_station()
+            elif (label[:5] == 'stats'):
+
+                self.hover_active = False
+                # only act when the hovered record differs from the one shown
+                # in the profiles panel
+                if (self.frames['stats']['STNID'] !=
+                self.frames['profiles']['STNID']) or \
+                   (self.frames['stats']['current_record_chunk'] != 
+                    self.frames['profiles']['current_record_chunk']) or \
+                   (self.frames['stats']['current_record_index'] != 
+                    self.frames['profiles']['current_record_index']):
+
+
+
+                    for key in ['STNID','current_station','stations_iterator']: 
+                        self.frames['worldmap'][key] = self.frames['stats'][key] 
+
+                    for key in self.frames['stats'].keys():
+                        self.frames['profiles'][key] = self.frames['stats'][key]
+
+                    # re-open the yaml files belonging to the newly selected
+                    # station/chunk (closing any previously opened handles)
+                    STNID = self.frames['profiles']['STNID']
+                    chunk = self.frames['profiles']['current_record_chunk']
+                    if 'current_station_file_ini' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_ini'].close()
+                    self.frames['profiles']['current_station_file_ini'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+                    if 'current_station_file_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_mod'].close()
+                    self.frames['profiles']['current_station_file_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_afternoon'].close()
+                    self.frames['profiles']['current_station_file_afternoon'] = \
+                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+                    # go to hovered record of current station
+                    # NOTE(review): records_iterator appears to be a
+                    # module-level helper defined elsewhere in this file.
+                    self.frames['profiles']['records_iterator'] = \
+                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # ... and go to the record of the profile window (last one that
+                    # was picked by the user)
+                    found = False
+                    EOF = False
+                    while (not found) and (not EOF):
+                        try:
+                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
+                            #print("hello*")
+                            #print(self.frames['profiles']['current_record_index'])
+                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
+                               (index == self.frames['profiles']['current_record_index']) and \
+                               (STNID == self.frames['profiles']['STNID']):
+                                #print('found!')
+                                found = True
+                        except StopIteration:
+                            EOF = True
+                    if found:
+                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_chunk'] = chunk
+                        self.frames['stats']['current_record_index'] = index
+                    # # for the profiles we make a distinct record iterator, so that the
+                    # # stats iterator can move independently
+                    # self.frames['profiles']['records_iterator'] = \
+                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # (self.frames['profiles']['STNID'] , \
+                    # self.frames['profiles']['current_record_index']) , \
+                    # self.frames['profiles']['current_record_mod'] = \
+                    #                 self.frames['profiles']['records_iterator'].__next__()
+
+
+                    # for the profiles we make a distinct record iterator, so that the
+                    # stats iterator can move independently
+
+                    self.update_record()
+
+
+
+    def on_plot_hover(self,event):
+        """Mouse-motion callback: live preview of the station/record under the cursor.
+
+        When the cursor is over a stats scatter point or a worldmap pixel, the
+        'stats' and 'worldmap' frames are temporarily moved to the hovered
+        station/record and the plots refreshed (hover_active = True). When the
+        cursor hovers over nothing, the frames are restored to the record shown
+        in the profiles panel (hover_active = False).
+
+        Parameters
+        ----------
+        event : matplotlib MouseEvent
+            Motion event; event.inaxes identifies the axes under the cursor.
+        """
+        axes = self.axes
+        #print('h1')
+
+        # Get to know in which axes we are
+        label = None
+        for axeskey in axes.keys():
+            if event.inaxes == axes[axeskey]:
+                label = axeskey
+                
+        #print('h2')
+
+        cont, pos = None, None
+        #print (label)
+        
+        if label is not None:
+            if  ('data' in axes[label].__dict__.keys()) and \
+                (label in axes[label].data.keys()) and \
+                (axes[label].data[label] is not None):
+                
+                #print('h3')
+                cont, ind =  axes[label].data[label].contains(event)
+                selkey = axes[label].data[label].get_label()
+                if len(ind["ind"]) > 0:
+                    #print('h4')
+                    # data coordinates of the first point under the cursor
+                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
+                    #print('pos',pos,selkey)
+
+
+                    #if label[:-1] == 'statsview':
+                    #    seltablestatsstdrel = self.seltablestatsstdrel
+                    #    seltablestatspct = self.seltablestatspct
+
+                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
+                    #    self.hover_active = True
+                    #    
+                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
+                    #    
+                    #el
+                    #print(label[:5])
+                    # --- hovering over a stats scatter panel -------------------
+                    if (label[:5] == 'stats') or (label == 'times'):
+                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
+                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
+                        
+
+                        # match the hovered (obs, mod) point back to its
+                        # (STNID, chunk, index) via the stats tables
+                        if label[:5] == 'stats':
+                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            (self.frames['stats']['STNID'] ,
+                             self.frames['stats']['current_record_chunk'], 
+                             self.frames['stats']['current_record_index']) = \
+                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                        # elif label[:5] == 'stats':
+                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
+                        #     (self.frames['stats']['STNID'] ,
+                        #      self.frames['stats']['current_record_chunk'], 
+                        #      self.frames['stats']['current_record_index']) = \
+                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+
+
+                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+                        
+                        # # TO TEST: should be removed, since it's is also done just below
+                        # self.frames['stats']['stations_iterator'] = \
+                        #     self.frames['worldmap']['stations_iterator'] 
+                
+                
+                        # self.goto_datetime_worldmap(
+                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
+                        #     'after')
+
+
+                        # scrolling to the right station
+                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                        EOF = False
+                        found = False
+                        while (not found and not EOF):
+                            if (STNID == self.frames['stats']['STNID']):
+                                   found = True 
+                            if not found:
+                                try:
+                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                                except (StopIteration):
+                                    EOF = True
+                        if found:
+                        #    self.frames['stats']['STNID'] = STNID
+                            self.frames['stats']['current_station'] =  station
+
+                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
+                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
+
+
+                        # generate index of the current station
+                        self.frames['stats']['records_current_station_index'] = \
+                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                             == self.frames['stats']['STNID'])
+
+
+                        tab_suffixes = \
+                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            self.frames['stats']['records_current_station'+tab_suffix] = \
+                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+
+                        # go to hovered record of current station
+                        self.frames['stats']['records_iterator'] = \
+                                        records_iterator(self.frames['stats']['records_current_station_mod'])
+
+
+                        # ... and go to the record of the profile window (last one that
+                        # was picked by the user)
+                        found = False
+                        EOF = False
+                        while (not found) and (not EOF):
+                            try:
+                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                                #print("hello*")
+                                #print(self.frames['profiles']['current_record_index'])
+                                if (index == self.frames['stats']['current_record_index']) and \
+                                   (chunk == self.frames['stats']['current_record_chunk']) and \
+                                   (STNID == self.frames['stats']['STNID']):
+                                    #print('found!')
+                                    found = True
+                            except StopIteration:
+                                EOF = True
+                        if found:
+                            #print('h5')
+                            self.frames['stats']['current_record_mod'] = record
+                            self.frames['stats']['current_record_chunk'] = chunk
+                            self.frames['stats']['current_record_index'] = index
+
+                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
+                        tab_suffixes = \
+                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            #print(tab_suffix)
+                            #print(self.frames['stats']['records_current_station'+tab_suffix])
+                            self.frames['stats']['current_record'+tab_suffix] =  \
+                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                      (self.frames['stats']['STNID'] , \
+                                       self.frames['stats']['current_record_chunk'] , \
+                                       self.frames['stats']['current_record_index'])]
+
+
+                        self.hover_active = True
+                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                        # print('h13')
+                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
+                        #     self.goto_datetime_worldmap(
+                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                        #         'after')
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap',
+                        #                                           'profiles'])
+                        # else:
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap_stations',
+                        #                                           'profiles'])
+
+
+
+                    # --- hovering over the worldmap ---------------------------
+                    elif label in ['worldmap_stations','worldmap']:
+                        #print('h5')
+
+                        if (self.axes['worldmap'].lat is not None) and \
+                           (self.axes['worldmap'].lon is not None):
+
+
+                            #self.loading()
+                            self.fig.canvas.draw()
+                            self.fig.show()
+
+
+                            # get position of 
+                            # convert hovered pixel position to lat/lon by
+                            # linear mapping over the worldmap axis extents
+                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
+                                                                 self.axes['worldmap'].lat[0]) + \
+                                           self.axes['worldmap'].lat[0],4)
+                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
+                                                                 self.axes['worldmap'].lon[0]) + \
+                                           self.axes['worldmap'].lon[0],4)
+                        
+                            stations = self.frames['worldmap']['stations'].table
+                            #print('h7')
+                        
+                            #reset stations iterator:
+                            # if 'stations_iterator' in self.frames['worldmap'].keys():
+                            #     self.frames['worldmap']['stations_iterator'].close()
+                            #     del(self.frames['worldmap']['stations_iterator'])
+                            # if 'stations_iterator' in self.frames['stats'].keys():
+                            #     self.frames['stats']['stations_iterator'].close()
+                            #     del(self.frames['stats']['stations_iterator'])
+                            self.frames['worldmap']['stations_iterator'] =\
+                               stations_iterator(self.frames['worldmap']['stations'])
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                            EOF = False
+                            found = False
+                            while (not found and not EOF):
+                                #print('h8',station.latitude,latmap)
+                                #print('h8',station.longitude,lonmap)
+                                if (round(station.latitude,3) == round(latmap,3)) and \
+                                    (round(station.longitude,3) == round(lonmap,3)):
+                                       found = True 
+                                if not found:
+                                    try:
+                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                                    except (StopIteration):
+                                        EOF = True
+                            if found:
+                                self.frames['worldmap']['STNID'] = STNID
+                                self.frames['worldmap']['current_station'] = \
+                                        station
+                        
+                            self.frames['stats']['stations_iterator'] = \
+                                self.frames['worldmap']['stations_iterator'] 
+                            #print('h8')
+                            # inherit station position for the stats frame...
+                            for key in self.frames['worldmap'].keys():
+                                self.frames['stats'][key] = self.frames['worldmap'][key]
+                                
+                            ## fetch records of current station...
+                            #self.frames['stats']['records_current_station_mod'] =\
+                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                            # ... and their indices
+                            self.frames['stats']['records_current_station_index'] = \
+                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                                     == \
+                                     self.frames['stats']['current_station'].name)
+
+
+                            tab_suffixes = \
+                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['records_current_station'+tab_suffix] = \
+                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+                            # ... create a record iterator ...
+                            #self.frames['stats']['records_iterator'].close()
+                            del(self.frames['stats']['records_iterator'])
+                            self.frames['stats']['records_iterator'] = \
+                                self.frames['stats']['records_current_station_mod'].iterrows()
+
+
+
+                        
+                            #print('h9')
+                            # ... and go to to the first record of the current station
+                            (self.frames['stats']['STNID'] , \
+                             self.frames['stats']['current_record_chunk'] , \
+                             self.frames['stats']['current_record_index']) , \
+                            self.frames['stats']['current_record_mod'] = \
+                                self.frames['stats']['records_iterator'].__next__()
+                        
+
+
+
+                            #print('h10')
+                            # cash the current record
+                            tab_suffixes = \
+                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['current_record'+tab_suffix] =  \
+                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                          (self.frames['stats']['STNID'] , \
+                                           self.frames['stats']['current_record_chunk'] , \
+                                           self.frames['stats']['current_record_index'])]
+
+                            #print('h11')
+                            
+                            self.hover_active = True
+                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                            #print('h13')
+
+                        
+
+            #if (stations is not None):
+            #    for iSTN,STN in stations.iterrows():
+            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
+            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
+
+        # self.fig.show()
+ 
+        # we are hovering on nothing, so we are going back to the position of
+        # the profile sounding
+        if pos is None:
+            if self.hover_active == True:
+                #print('h1*')
+                
+                #self.loading()
+                # to do: reset stations iterators
+
+                # get station and record index from the current profile
+                for key in ['STNID', 'current_station']:
+                    self.frames['stats'][key] = self.frames['profiles'][key]
+
+                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
+                self.frames['stats']['current_station'] = \
+                        self.frames['profiles']['current_station']
+                #print('h3a*')
+                self.frames['stats']['records_current_station_mod'] = \
+                        self.frames['profiles']['records_current_station_mod']
+                #print('h3b*')
+
+                # the next lines recreate the records iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+
+                # reset stations iterator...
+                #self.frames['stats']['records_iterator'].close()
+                del(self.frames['stats']['records_iterator'])
+                self.frames['stats']['records_iterator'] = \
+                    self.frames['stats']['records_current_station_mod'].iterrows()
+                #print('h4*')
+
+                # ... and go to the record of the profile window (last one that
+                # was picked by the user)
+                found = False
+                EOF = False
+                while (not found) and (not EOF):
+                    try:
+                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                        #print("hello*")
+                        #print(self.frames['profiles']['current_record_index'])
+                        #print(self.frames['profiles']['STNID'])
+                        #print(STNID,index)
+                        if (index == self.frames['profiles']['current_record_index']) and \
+                            (chunk == self.frames['profiles']['current_record_chunk']) and \
+                            (STNID == self.frames['profiles']['STNID']):
+                            #print('found!')
+                            found = True
+                    except StopIteration:
+                        EOF = True
+                if found:
+                    #print('h5*')
+                    self.frames['stats']['current_record_mod'] = record
+                    self.frames['stats']['current_record_chunk'] = chunk
+                    self.frames['stats']['current_record_index'] = index
+
+                #print('h6*')
+
+
+
+                # # fetch records of current station...
+                # self.frames['stats']['records_current_station_mod'] =\
+                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                # ... and their indices
+                self.frames['stats']['records_current_station_index'] = \
+                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                         == \
+                         self.frames['stats']['current_station'].name)
+
+
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['records_current_station'+tab_suffix] = \
+                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+                
+
+                # cash the records of the current stations
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['current_record'+tab_suffix] =  \
+                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                              (self.frames['stats']['STNID'] , \
+                               self.frames['stats']['current_record_chunk'] , \
+                               self.frames['stats']['current_record_index'])]
+
+
+                # the next lines recreate the stations iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+                #print('h7*')
+
+                # reset the stations iterators
+                for framekey in ['stats','worldmap']:
+                    ##print(framekey)
+                    if 'stations_iterator' in self.frames[framekey]:
+                        #self.frames[framekey]['stations_iterator'].close()
+                        del(self.frames[framekey]['stations_iterator'])
+
+                self.frames['worldmap']['current_station'] = \
+                        self.frames['profiles']['current_station']
+
+                #recreate the stations iterator for the worldmap...
+                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+
+                # ... and go the position of the profile
+                #print('h8*')
+                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                EOF = False
+                found = False
+                while (not found and not EOF):
+                    if STNID == self.frames['profiles']['STNID'] :
+                        found = True 
+                    if not found:
+                        try:
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                        except (StopIteration):
+                            EOF = True
+                if found:
+                    self.frames['worldmap']['current_station'] = station
+                    self.frames['worldmap']['STNID'] = STNID
+                #print('h9*')
+                self.frames['stats']['stations_iterator'] = \
+                    self.frames['worldmap']['stations_iterator'] 
+
+                # the stats window now inherits the current station from the
+                # worldmap
+                for key in ['STNID','current_station','stations_iterator']: 
+                    self.frames['stats'][key] = self.frames['worldmap'][key] 
+                #print('h10*')
+
+                # # we now only need inherit station position and go to first record
+                # for key in self.frames['worldmap'].keys():
+                #     self.frames['stats'][key] = self.frames['worldmap'][key]
+
+                # self.frames['stats']['records_current_station'] =\
+                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
+
+                # #print(self.frames['stats']['records_current_station'])
+                # self.frames['stats']['records_iterator'] = \
+                #                 self.frames['stats']['records_current_station'].iterrows()
+                # (self.frames['stats']['STNID'] , \
+                # self.frames['stats']['current_record_index']) , \
+                # self.frames['stats']['current_record_mod'] = \
+                #                 self.frames['stats']['records_iterator'].__next__()
+                
+
+
+
+
+
+
+                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
+                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
+                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
+                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+                self.hover_active = False
+                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
+    # def loading(self):
+    #     self.tbox['loading'].set_text('Loading...')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+    #     sleep(0.1)
+    # def ready(self):
+    #     self.tbox['loading'].set_text('Ready')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+
+
+
diff --git a/dist/class4gl-0.1dev/lib/model.py b/dist/class4gl-0.1dev/lib/model.py
new file mode 100644
index 0000000..8760411
--- /dev/null
+++ b/dist/class4gl-0.1dev/lib/model.py
@@ -0,0 +1,2214 @@
+# 
+# CLASS
+# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
+# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
+# Copyright (c) 2011-2015 Chiel van Heerwaarden
+# Copyright (c) 2011-2015 Bart van Stratum
+# Copyright (c) 2011-2015 Kees van den Dries
+# 
+# This file is part of CLASS
+# 
+# CLASS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# 
+# CLASS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with CLASS.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import copy as cp
+import numpy as np
+import sys
+import warnings
+import pandas as pd
+from ribtol_hw import zeta_hs2 , funcsche
+import logging
+#from SkewT.thermodynamics import Density
+#import ribtol
+
+grav = 9.81  # gravitational acceleration [m s-2]
+def esat(T):
+    """Return saturation vapour pressure [Pa] for temperature T [K] (Tetens-type formula)."""
+    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
+
+def qsat(T,p):
+    """Return saturation specific humidity [kg kg-1] at temperature T [K] and pressure p [Pa]."""
+    return 0.622 * esat(T) / p
+
+
+def ribtol(Rib, zsl, z0m, z0h): 
+    """Solve the Obukhov length L [m] from the bulk Richardson number.
+
+    Iterates Newton-Raphson style on the implicit surface-layer relation
+    Rib = zsl/L * [ln(zsl/z0h) - psih(zsl/L) + psih(z0h/L)] /
+                  [ln(zsl/z0m) - psim(zsl/L) + psim(z0m/L)]**2,
+    using a finite-difference approximation of the derivative.
+
+    Parameters:
+        Rib : bulk Richardson number [-]
+        zsl : surface-layer reference height [m]
+        z0m : roughness length for momentum [m]
+        z0h : roughness length for scalars [m]
+
+    Returns:
+        Obukhov length L [m]; the iteration bails out once |L| > 1e12
+        (near-neutral conditions where L diverges).
+    """
+    Rib = np.float64(Rib)
+    zsl = np.float64(zsl)
+    z0m = np.float64(z0m)
+    z0h = np.float64(z0h)
+    #print(Rib,zsl,z0m,z0h)
+    # first guess: stable (L > 0) for Rib > 0, unstable (L < 0) otherwise;
+    # L0 starts offset from L so the convergence test below does not trigger early
+    if(Rib > 0.):
+        L    = 1.
+        L0   = 2.
+    else:
+        L  = -1.
+        L0 = -2.
+    #print(Rib,zsl,z0m,z0h)
+    # iterate until successive estimates of L agree to within 0.001 m
+    while (abs(L - L0) > 0.001):
+        L0      = L
+        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
+        # finite-difference derivative of fx with respect to L over a +/-0.1% interval
+        Lstart  = L - 0.001*L
+        Lend    = L + 0.001*L
+        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
+                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
+                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
+                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+        L       = L - fx / fxdif
+        #print(L,fx/fxdif)
+        if(abs(L) > 1e12):
+            break
+
+    return L
+  
+def psim(zeta):
+    """Integrated stability function for momentum at zeta = z/L [-].
+
+    Unstable branch (zeta <= 0) uses the classical Businger-Dyer form;
+    the stable branch uses an exponential formulation (NOTE(review):
+    looks like Beljaars & Holtslag 1991 — confirm attribution).
+    """
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+        #psim = 3. * np.log( (1. + 1. / x) / 2.)
+    else:
+        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+    return psim
+  
+def psih(zeta):
+    """Integrated stability function for scalars (heat) at zeta = z/L [-].
+
+    Unstable branch (zeta <= 0) uses the classical Businger-Dyer form;
+    the stable branch uses an exponential formulation (NOTE(review):
+    looks like Beljaars & Holtslag 1991 — confirm attribution).
+    """
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psih  = 2. * np.log( (1. + x*x) / 2.)
+        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+    else:
+        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+    return psih
+ 
+class model:
+    def __init__(self, model_input = None,debug_level=None):
+
+        """Initialize the model from class4gl-style or classic CLASS input.
+
+        Parameters:
+            model_input: input object. If it exposes a 'pars' attribute it is
+                treated as class4gl input: pars are deep-copied into
+                self.input and the profile sections (air_ac, air_ap) are
+                attached to it as well. Otherwise the whole object is
+                deep-copied as classic CLASS input.
+            debug_level: optional logging level for the 'model' logger
+                (see https://docs.python.org/2/howto/logging.html).
+        """
+
+        self.logger = logging.getLogger('model')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        """ initialize the different components of the model """ 
+
+        if model_input is not None:
+            # class4gl style input
+            if 'pars' in model_input.__dict__.keys():
+
+                # we make a reference to the full input first, so we can dump it
+                # afterwards
+                self.input_c4gl = model_input
+
+                # we copy the regular parameters first. We keep the classical input
+                # format as self.input so that we don't have to change the entire
+                # model code.
+                self.input = cp.deepcopy(model_input.pars)
+
+                # we copy other sections we are interested in, such as profile
+                # data, and store it also under input
+
+                # I know we mess up a bit the structure of the class4gl_input, but
+                # we will make it clean again at the time of dumping data
+
+                # So here, we copy the profile data into self.input
+                # 1. Air circulation data 
+                if 'sw_ac' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ac']:
+                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
+                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
+
+                    # correct pressure of levels according to surface pressure
+                    # error (so that interpolation is done in a consistent way)
+
+                    # p_e is the surface-pressure mismatch between the observed
+                    # Ps and the gridded sp; it is added to each level pressure
+                    # and reduced by each layer's relative hydrostatic thickness.
+                    # NOTE(review): the loop walks air_ac.index in reverse —
+                    # confirm this matches the level ordering of air_ac
+                    # (top-down vs bottom-up).
+                    p_e = self.input.Ps - self.input.sp
+                    for irow in self.input.air_ac.index[::-1]:
+                       self.input.air_ac.p.iloc[irow] =\
+                        self.input.air_ac.p.iloc[irow] + p_e
+                       p_e = p_e -\
+                       (self.input.air_ac.p.iloc[irow]+p_e)/\
+                        self.input.air_ac.p.iloc[irow] *\
+                        self.input.air_ac.delpdgrav.iloc[irow]*grav
+
+
+
+                # 2. Air profile data (fitted vertical profiles, e.g. soundings)
+                if 'sw_ap' in self.input.__dict__.keys() \
+                   and self.input.__dict__['sw_ap']:
+                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
+
+            # standard class input
+            else:
+                self.input = cp.deepcopy(model_input)
+
+    def load_yaml_dict(self,yaml_dict):
+        """Restore model state from a dictionary loaded from YAML.
+
+        'pars' entries become attributes on self; profile tables
+        (air_ap, air_balloon, air_ac, air_ach) become DataFrames; the 'out'
+        section is rebuilt into a model_output object (self.out).
+
+        NOTE(review): if yaml_dict contains no 'out' key, dictouttemp is
+        never bound and the len(dictouttemp['h']) line below raises
+        NameError — confirm 'out' is always present in dumped files.
+        """
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                for keydata,value in data.items():
+                    self.__dict__[keydata] = value
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            #elif key == 'sources':
+            #    self.__dict__[key] = data
+            elif key == 'out':
+                # lets convert it to a list of dictionaries
+                dictouttemp = pd.DataFrame(data).to_dict('list')
+            else: 
+                 # NOTE(review): message text reads "is be implemented";
+                 # presumably "is not implemented" was intended.
+                 warnings.warn("Key '"+key+"' is be implemented.")
+            #     self.__dict__[key] = data
+
+
+        # rebuild the output time series: one array per output variable
+        self.tsteps = len(dictouttemp['h'])
+        self.out = model_output(self.tsteps)
+        for keydictouttemp in dictouttemp.keys():
+            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+
+
+  
+    def run(self):
+        """Run the full simulation: initialize, integrate in time, clean up.
+
+        NOTE(review): loop termination relies on timestep() advancing
+        self.t (timestep() is defined elsewhere in this file) — otherwise
+        this while-loop never ends.
+        """
+        # initialize model variables
+        self.init()
+  
+        # time integrate model 
+        #for self.t in range(self.tsteps):
+        while self.t < self.tsteps:
+          
+            # time integrate components
+            self.timestep()
+  
+        # delete unnecessary variables from memory
+        self.exitmodel()
+    
+    def init(self):
+        # assign variables from input data
+        # initialize constants
+        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
+        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        self.rho        = 1.2                   # density of air [kg m-3]
+        self.k          = 0.4                   # Von Karman constant [-]
+        self.g          = 9.81                  # gravity acceleration [m s-2]
+        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+        self.bolz       = 5.67e-8               # Bolzman constant [-]
+        self.rhow       = 1000.                 # density of water [kg m-3]
+        self.S0         = 1368.                 # solar constant [W m-2]
+
+        # A-Gs constants and settings
+        # Plant type:       -C3-     -C4-
+        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
+        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
+        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
+        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
+        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
+        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
+        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
+        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
+        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
+        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
+        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
+
+        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
+        self.mair       =  28.9;                # molecular weight air [g mol -1]
+        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
+
+        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
+        self.wmax       =  0.55;                # upper reference value soil water [-]
+        self.wmin       =  0.005;               # lower reference value soil water [-]
+        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
+        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
+
+        # Read switches
+        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
+        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
+        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
+        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
+        self.sw_sl      = self.input.sw_sl      # surface layer switch
+        self.sw_rad     = self.input.sw_rad     # radiation switch
+        self.sw_ls      = self.input.sw_ls      # land surface switch
+        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
+        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
+
+        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
+        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
+        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+  
+        # initialize mixed-layer
+        self.h          = self.input.h          # initial ABL height [m]
+        self.Ps         = self.input.Ps         # surface pressure [Pa]
+        self.sp         = self.input.sp         # This is also surface pressure
+                                                #but derived from the global data [Pa]
+        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
+        self.ws         = None                  # large-scale vertical velocity [m s-1]
+        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
+        self.we         = -1.                   # entrainment velocity [m s-1]
+       
+         # Temperature 
+        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
+        
+        
+        self.substep    = False
+        self.substeps   = 0
+
+
+
+        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
+        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
+        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
+        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
+        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
+ 
+        self.wstar      = 0.                    # convective velocity scale [m s-1]
+ 
+        # 2m diagnostic variables 
+        self.T2m        = None                  # 2m temperature [K]
+        self.q2m        = None                  # 2m specific humidity [kg kg-1]
+        self.e2m        = None                  # 2m vapor pressure [Pa]
+        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
+        self.u2m        = None                  # 2m u-wind [m s-1]
+        self.v2m        = None                  # 2m v-wind [m s-1]
+ 
+        # Surface variables 
+        self.thetasurf  = self.input.theta      # surface potential temperature [K]
+        self.thetavsurf = None                  # surface virtual potential temperature [K]
+        self.qsurf      = None                  # surface specific humidity [g kg-1]
+
+        # Mixed-layer top variables
+        self.P_h        = None                  # Mixed-layer top pressure [pa]
+        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
+        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
+        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
+        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
+        self.dz_h       = None                  # Transition layer thickness [-]
+        self.lcl        = None                  # Lifting condensation level [m]
+
+        # Virtual temperatures and fluxes
+        self.thetav     = None                  # initial mixed-layer potential temperature [K]
+        self.dthetav    = None                  # initial virtual temperature jump at h [K]
+        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
+        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
+       
+        
+        
+        
+        
+        
+        # Moisture 
+        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
+
+        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
+        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
+        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
+  
+        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
+        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
+        self.e          = None                  # mixed-layer vapor pressure [Pa]
+        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
+        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
+      
+        
+        
+        # CO2
+        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
+        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
+        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
+        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
+        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
+        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
+        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
+        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
+        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
+       
+        # Wind 
+        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
+        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
+        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = self.input.advu       # advection of u-wind [m s-2]
+        
+        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
+        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
+        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = self.input.advv       # advection of v-wind [m s-2]
+         
+  # BEGIN -- HW 20170606
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        if self.sw_ac:
+        # this is the data frame with the grided profile on the L60 grid
+        # (subsidence, and advection) 
+            self.air_ac      = self.input.air_ac  # full level air circulation
+                                                  # forcing
+            # self.air_ach     = self.input.air_ach # half level air circulation
+            #                                       # forcing
+            
+
+        if self.sw_ap:
+        # this is the data frame with the fitted profile (including HAGL,
+        # THTA,WSPD, SNDU,WNDV PRES ...)
+            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
+
+            # just for legacy reasons...
+            if 'z' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
+            if 'p' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
+
+            indexh = np.where(self.air_ap.z.values == self.h)
+            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
+                raise ValueError("Error input profile consistency: mixed- \
+                                 layer height needs to be equal to the second \
+                                 and third \
+                                 level of the vertical profile input!")
+            # initialize q from its profile when available
+            p_old = self.Ps
+            p_new = self.air_ap.p[indexh[0][0]]
+            
+            if ((p_old is not None) & (p_old != p_new)):
+                print("Warning: Ps input was provided ("+str(p_old)+\
+                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+                    +str(p_new)+"Pa).")
+                                    
+            self.Ps = p_new
+            # these variables/namings are more convenient to work with in the code
+            # we will update the original variables afterwards
+            #self.air_ap['q'] = self.air_ap.QABS/1000.
+
+            self.air_ap = \
+                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
+            # we require the temperature fields, since we need to consider
+            # advection
+            # if self.sw_ac:
+            #     #self.air_ap['theta'] = self.air_ap['t'] *
+
+            #     # we consider self.sp in case of air-circulation input (for
+            #     # consistence)
+            #     self.air_ap['t'] = \
+            #                 self.air_ap.theta *  \
+            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
+            # else:
+            # we consider self.Ps in case of balloon input only 
+            self.air_ap = self.air_ap.assign(t = lambda x: \
+                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
+
+            #self.air_ap['theta'] = self.air_ap.THTA
+            if 'u' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
+            if 'v' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
+
+            for var in ['theta','q','u','v']:
+
+                
+                if self.air_ap[var][1] != self.air_ap[var][0]:
+                    raise ValueError("Error input profile consistency: two \
+                                     lowest profile levels for "+var+" should \
+                                     be equal.")
+                
+                # initialize the value from its profile when available
+                value_old = self.__dict__[var]
+                value_new = self.air_ap[var][indexh[0][0]]
+                
+                if ((value_old is not None) & (value_old != value_new)):
+                    warnings.warn("Warning:  input was provided \
+                                     ("+str(value_old)+ "kg kg-1), \
+                                     but it is now overwritten by the first \
+                                     level (index 0) of air_ap]var\ which is \
+                                     different (" +str(value_new)+"K).")
+                                        
+                self.__dict__[var] = value_new
+
+                # make a profile of the stratification 
+                # please note that the stratification between z_pro[i] and
+                # z_pro[i+1] is given by air_ap.GTHT[i]
+
+                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
+                # np.gradient(self.z_pro)
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
+
+
+                self.__dict__['gamma'+var] = \
+                    self.air_ap['gamma'+var][np.where(self.h >= \
+                                                     self.air_ap.z)[0][-1]]
+
+
+
+        # the variable p_pro is just for diagnosis of lifted index
+            
+            
+
+            # input Ph is wrong, so we correct it according to hydrostatic equation
+            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+
+            #if self.sw_ac:
+                # note that we use sp as surface pressure, which is determined
+                # from era-interim instead of the observations. This is to
+                # avoid possible failure of the interpolation routine
+                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
+                #                          + \
+                #                          list(self.air_ap.p[3:]))
+
+            # else:
+                # in the other case, it is updated at the time of calculting
+                # the statistics 
+
+# END -- HW 20170606      
+        #print(self.air_ap)
+
+        if self.sw_ac and not self.sw_ap:
+            raise ValueError("air circulation switch only possible when air \
+                             profiles are given")
+        
+        if self.sw_ac:
+
+            # # # we comment this out, because subsidence is calculated
+            # according to advection
+            # #interpolate subsidence towards the air_ap height coordinate
+            # self.air_ap['w'] = np.interp(self.air_ap.p,\
+            #                               self.air_ac.p,\
+            #                               self.air_ac.w) 
+            # #subsidence at the mixed-layer top
+            # self.w = self.air_ap.w[1]
+        
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+                # in case we didn't find any points, we just take the lowest one.
+                # actually, this can happen if ERA-INTERIM pressure levels are
+                # inconsistent with 
+                if in_ml.sum() == 0:
+                    warnings.warn(" no circulation points in the mixed layer \
+                                  found. We just take the bottom one.")
+                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+
+                for var in ['t','q','u','v']:
+    
+                   # calculation of the advection variables for the mixed layer
+                   # we weight by the hydrostatic thickness of each layer and
+                   # divide by the total thickness
+                   self.__dict__['adv'+var] = \
+                            ((self.air_ac['adv'+var+'_x'][in_ml] \
+                             + \
+                             self.air_ac['adv'+var+'_y'][in_ml])* \
+                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                            self.air_ac['delpdgrav'][in_ml].sum()
+
+                   # calculation of the advection variables for the profile above
+                   # (lowest 3 values are not used by class)
+                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
+                   self.air_ap['adv'+var] = \
+                           np.interp(self.air_ap.p,\
+                                     self.air_ac.p,\
+                                     self.air_ac['adv'+var+'_x']) \
+                           + \
+                           np.interp(self.air_ap.p, \
+                                       self.air_ac.p, \
+                                       self.air_ac['adv'+var+'_y'])
+
+                # as an approximation, we consider that advection of theta in the
+                # mixed layer is equal to advection of t. This is a sufficient
+                # approximation since theta and t are very similar at the surface
+                # pressure.
+                self.__dict__['advtheta'] = self.__dict__['advt']
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # self.wrho = np.interp(self.P_h,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) 
+            # self.ws   = self.air_ap.w.iloc[1]
+
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                self.air_ap = self.air_ap.assign(wp = 0.)
+                self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                              self.air_ac.p, \
+                                              self.air_ac['wp'])
+                self.air_ap = self.air_ap.assign(R = 0.)
+                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                     self.Rv*self.air_ap.q)
+                self.air_ap = self.air_ap.assign(rho = 0.)
+                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+                
+                self.air_ap = self.air_ap.assign(w = 0.)
+                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+                #print('hello w ini')
+
+                # Note: in case of sw_ac is False, we update it from prescribed
+                # divergence
+                self.ws   = self.air_ap.w[1]
+
+                # self.ws   = self.wrho/self.rho
+                # self.ws   = self.wrho/(self.P_h/ \
+                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
+                #                         self.theta) # this should be T!!!
+
+                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+                #                         + \
+                #                         self.air_ac['divU_y'][in_ml])* \
+                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                #             self.air_ac['delpdgrav'][in_ml].sum() \
+        
+
+        # Tendencies 
+        self.htend      = None                  # tendency of CBL [m s-1]
+        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
+        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
+        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
+        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
+        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
+        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
+        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
+        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
+        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
+        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
+        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
+  
+        # initialize surface layer
+        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
+        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
+        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
+        self.z0m        = self.input.z0m        # roughness length for momentum [m]
+        self.z0h        = self.input.z0h        # roughness length for scalars [m]
+        self.Cm         = 1e12                  # drag coefficient for momentum [-]
+        self.Cs         = 1e12                  # drag coefficient for scalars [-]
+        self.L          = None                  # Obukhov length [m]
+        self.Rib        = None                  # bulk Richardson number [-]
+        self.ra         = None                  # aerodynamic resistance [s m-1]
+  
+        # initialize radiation
+        self.lat        = self.input.lat        # latitude [deg]
+        #self.fc         = self.input.fc         # coriolis parameter [s-1]
+        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
+        self.lon        = self.input.lon        # longitude [deg]
+        self.doy        = self.input.doy        # day of the year [-]
+        self.tstart     = self.input.tstart     # time of the day [-]
+        self.cc         = self.input.cc         # cloud cover fraction [-]
+        self.Swin       = None                  # incoming short wave radiation [W m-2]
+        self.Swout      = None                  # outgoing short wave radiation [W m-2]
+        self.Lwin       = None                  # incoming long wave radiation [W m-2]
+        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
+        self.Q          = self.input.Q          # net radiation [W m-2]
+        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
+  
+        # initialize land surface
+        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
+        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
+        self.T2         = self.input.T2         # temperature deeper soil layer [K]
+                           
+        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
+        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
+        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
+        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
+                           
+        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
+        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
+        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
+                           
+        self.C1sat      = self.input.C1sat      
+        self.C2ref      = self.input.C2ref      
+
+        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = self.input.LAI        # leaf area index [-]
+        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
+        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = self.input.alpha      # surface albedo [-]
+  
+        self.rs         = 1.e6                  # resistance transpiration [s m-1]
+        self.rssoil     = 1.e6                  # resistance soil [s m-1]
+                           
+        self.Ts         = self.input.Ts         # surface temperature [K]
+                           
+        self.cveg       = self.input.cveg       # vegetation fraction [-]
+        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
+        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
+        self.cliq       = None                  # wet fraction [-]
+                          
+        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
+  
+        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
+        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
+        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
+  
+        self.H          = None                  # sensible heat flux [W m-2]
+        self.LE         = None                  # evapotranspiration [W m-2]
+        self.LEliq      = None                  # open water evaporation [W m-2]
+        self.LEveg      = None                  # transpiration [W m-2]
+        self.LEsoil     = None                  # soil evaporation [W m-2]
+        self.LEpot      = None                  # potential evaporation [W m-2]
+        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
+        self.G          = None                  # ground heat flux [W m-2]
+
+        # initialize A-Gs surface scheme
+        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
+
+        # initialize cumulus parameterization
+        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
+        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
+        self.ac         = 0.                    # Cloud core fraction [-]
+        self.M          = 0.                    # Cloud core mass flux [m s-1] 
+        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
+  
+        # initialize time variables
+        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
+        self.dt     = self.input.dt
+        self.dtcur      = self.dt
+        self.firsttime = True
+        self.t      = 0
+ 
+        # Some sanity checks for valid input
+        if (self.c_beta is None): 
+            self.c_beta = 0                     # Zero curvature; linear response
+        assert(self.c_beta >= 0 or self.c_beta <= 1)
+
+        # initialize output
+        self.out = model_output(self.tsteps)
+ 
+        self.statistics()
+  
+        # calculate initial diagnostic variables
+        if(self.sw_rad):
+            self.run_radiation()
+ 
+        if(self.sw_sl):
+            for i in range(10): 
+                self.run_surface_layer()
+  
+        if(self.sw_ls):
+            self.run_land_surface()
+
+        if(self.sw_cu):
+            self.run_mixed_layer()
+            self.run_cumulus()
+        
+        if(self.sw_ml):
+            self.run_mixed_layer()
+
+    def timestep(self):
+        """Advance the model by one (sub)timestep.
+
+        Sequence: diagnose statistics, run the enabled component models
+        (radiation, surface layer, land surface, cumulus, mixed layer) to
+        obtain tendencies, limit the admissible step length ``dtmax`` so the
+        mixed-layer top cannot overshoot the first free-troposphere profile
+        point, store output, and time-integrate.  When ``dtcur`` exceeds
+        ``dtmax`` the step is split: the remainder is carried over to the
+        next call via ``dtnext``/``self.dtcur`` and ``self.t`` is only
+        incremented once the full ``self.dt`` has been consumed.
+        """
+
+        # no timestep restriction yet; the profile check below may shrink it
+        self.dtmax = +np.inf
+        self.logger.debug('before stats') 
+        self.statistics()
+
+        # run radiation model
+        self.logger.debug('before rad') 
+        if(self.sw_rad):
+            self.run_radiation()
+  
+        # run surface layer model
+        if(self.sw_sl):
+            self.logger.debug('before surface layer') 
+            self.run_surface_layer()
+        
+        # run land surface model
+        if(self.sw_ls):
+            self.logger.debug('before land surface') 
+            self.run_land_surface()
+ 
+        # run cumulus parameterization
+        if(self.sw_cu):
+            self.logger.debug('before cumulus') 
+            self.run_cumulus()
+   
+        self.logger.debug('before mixed layer') 
+        # run mixed-layer model
+        if(self.sw_ml):
+            self.run_mixed_layer()
+        self.logger.debug('after mixed layer') 
+ 
+        #get first profile data point above mixed layer
+        # Limit the timestep so that h does not grow past the first upper-air
+        # profile level within a single (sub)step.
+        if self.sw_ap:
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                # here we correct for the fact that the upper profile also
+                # shifts in the vertical.
+
+                # relative growth rate of h with respect to the (subsiding)
+                # profile level above it
+                diffhtend = self.htend - self.air_ap.w[zidx_first]
+                if diffhtend > 0:
+                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            else:
+                if self.htend > 0:
+                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            #print(self.h,zidx_first,self.ws,self.air_ap.z)
+
+        
+        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
+        self.logger.debug('before store') 
+        # a substep is needed when the remaining step exceeds the limit
+        self.substep =  (self.dtcur > self.dtmax)
+        if self.substep:
+            # dtnext: time left over after this limited substep
+            dtnext = self.dtcur - self.dtmax
+            self.dtcur = self.dtmax
+
+        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
+
+        # HW: this will be done multiple times in case of a substep is needed
+        # store output before time integration
+        # output is stored only once per full timestep (on the first substep)
+        if self.firsttime:
+            self.store()
+  
+        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
+        # time integrate land surface model
+        if(self.sw_ls):
+            self.integrate_land_surface()
+        self.logger.debug('before integrate mixed layer') 
+        # time integrate mixed-layer model
+        if(self.sw_ml):
+            self.integrate_mixed_layer() 
+        self.logger.debug('after integrate mixed layer') 
+        if self.substep:
+            # continue the same model timestep with the remaining time
+            self.dtcur = dtnext
+            self.firsttime = False
+            self.substeps += 1
+        else:
+            # full timestep consumed: reset substep bookkeeping, advance t
+            self.dtcur = self.dt
+            self.t += 1 
+            self.firsttime = True
+            self.substeps = 0
+        self.logger.debug('going to next step')
+        
+        
+  
+    def statistics(self):
+        """Diagnose derived quantities from the current mixed-layer state.
+
+        Computes virtual potential temperature (thetav), the virtual heat
+        flux (wthetav), the virtual temperature jump (dthetav), and the
+        mixed-layer-top pressure/temperature/relative humidity, then starts
+        an iterative lifting-condensation-level (LCL) search.
+
+        NOTE(review): this routine appears truncated in the patch.  The
+        while-condition below is garbled (``and it 0):``) and the body of
+        the LCL iteration -- plus, presumably, the beginning of a separate
+        ``run_cumulus`` method (the ``q2_h``/``CO22_h`` variance and cloud
+        mass-flux code that follows belongs to a cumulus parameterization,
+        not to an LCL loop) -- seems to be missing.  TODO: restore the
+        missing lines from the upstream CLASS model source; do not trust
+        this block as-is.
+        """
+        # Calculate virtual temperatures 
+        self.thetav   = self.theta  + 0.61 * self.theta * self.q
+        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
+        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
+        # Mixed-layer top properties
+        # hydrostatic pressure at the mixed-layer top
+        self.P_h    = self.Ps - self.rho * self.g * self.h
+        # else:
+            # in the other case, it is updated at the time that the profile is
+            # updated (and at the initialization
+
+        # dry-adiabatic temperature at the mixed-layer top
+        self.T_h    = self.theta - self.g/self.cp * self.h
+
+        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
+        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
+
+        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
+
+        # Find lifting condensation level iteratively
+        if(self.t == 0):
+            # first step: start the search from the mixed-layer height
+            self.lcl = self.h
+            RHlcl = 0.5
+        else:
+            RHlcl = 0.9998 
+
+        itmax = 30
+        it = 0
+        # NOTE(review): garbled line -- likely originally "and it < itmax);
+        # the loop body updating self.lcl/RHlcl is missing here.
+        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0):
+            # mixed-layer-top humidity variance (cumulus scheme quantities)
+            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
+            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
+        else:
+            self.q2_h   = 0.
+            self.CO22_h = 0.
+
+        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
+        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
+        self.M      = self.ac * self.wstar
+        self.wqM    = self.M * self.q2_h**0.5
+
+        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
+        if(self.dCO2 < 0):
+            self.wCO2M  = self.M * self.CO22_h**0.5
+        else:
+            self.wCO2M  = 0.
+
+    def run_mixed_layer(self):
+        """Compute the bulk mixed-layer (CBL) tendencies.
+
+        Diagnoses surface momentum fluxes (when no surface-layer scheme is
+        active), large-scale subsidence ws, the convective velocity scale
+        wstar, the entrainment velocity we and the entrainment fluxes, then
+        derives tendencies for h, theta, q, CO2, their capping-inversion
+        jumps, the wind components (if sw_wind) and the transition-layer
+        thickness dz_h.
+
+        Safeguard: if the potential-temperature jump dtheta would fall
+        below 0.1 K, entrainment is switched off for this (sub)timestep and
+        the simplified formulation of Wouters et al. (2013, section 2.2.1)
+        is used instead; self.dtmax may be reduced to limit the step.
+        """
+        if(not self.sw_sl):
+            # decompose ustar along the wind components
+            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
+            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
+
+
+
+        # calculate large-scale vertical velocity (subsidence)
+        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
+            self.ws = -self.divU * self.h
+        # else:
+        #     in case the air circulation switch is turned on, subsidence is
+        #     calculated from the circulate profile at the initialization and
+        #     in the integrate_mixed_layer routine
+              
+        # calculate compensation to fix the free troposphere in case of subsidence 
+        if(self.sw_fixft):
+            w_th_ft  = self.gammatheta * self.ws
+            w_q_ft   = self.gammaq     * self.ws
+            w_CO2_ft = self.gammaCO2   * self.ws 
+        else:
+            w_th_ft  = 0.
+            w_q_ft   = 0.
+            w_CO2_ft = 0. 
+      
+        # calculate mixed-layer growth due to cloud top radiative divergence
+        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
+       
+        # calculate convective velocity scale w* 
+        if(self.wthetav > 0.):
+            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
+        else:
+            # stable conditions: keep wstar tiny but non-zero to avoid
+            # divisions by zero elsewhere
+            self.wstar  = 1e-6;
+      
+        # Virtual heat entrainment flux 
+        self.wthetave    = -self.beta * self.wthetav 
+        
+        # compute mixed-layer tendencies
+        if(self.sw_shearwe):
+            # entrainment velocity including a shear (ustar) contribution
+            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
+        else:
+            self.we    = -self.wthetave / self.dthetav
+        # Don't allow boundary layer shrinking if wtheta < 0 
+        if(self.we < 0):
+            self.we = 0.
+
+        # Calculate entrainment fluxes
+        self.wthetae     = -self.we * self.dtheta
+        self.wqe         = -self.we * self.dq
+        self.wCO2e       = -self.we * self.dCO2
+        
+        # preliminary tendencies, assuming entrainment stays active
+        htend_pre       = self.we + self.ws + self.wf - self.M
+        
+        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+        
+ 
+        #print('thetatend_pre',thetatend_pre)
+        
+        #preliminary boundary-layer top chenage
+        #htend_pre = self.we + self.ws + self.wf - self.M
+        #preliminary change in temperature jump
+        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
+                          thetatend_pre + w_th_ft
+        
+        # jump that would result after a full step with these tendencies
+        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
+        l_entrainment = True
+
+        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
+            # jump already at the lower bound and shrinking: switch off
+            # entrainment for this (sub)timestep
+            l_entrainment = False
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! temperature jump is at the lower limit \
+                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+        elif dtheta_pre < 0.1:
+            # jump would cross the lower bound within this step: shorten the
+            # step (via dtmax) so it lands exactly on the bound
+            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
+            l_entrainment = True
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          " Warning! Potential temperature jump at mixed- \
+                          layer height would become too low limiting timestep \
+                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+            self.dtmax = min(self.dtmax,dtmax_new)
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "next subtimestep, entrainment will be disabled")
+            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
+
+
+
+        # when entrainment is disabled, we just use the simplified formulation
+        # as in Wouters et al., 2013 (section 2.2.1)
+
+        # l_entrainment acts as a 0/1 blend between the entraining and the
+        # entrainment-free (simplified) tendency formulations
+        self.dthetatend = l_entrainment*dthetatend_pre + \
+                        (1.-l_entrainment)*0.
+        self.thetatend = l_entrainment*thetatend_pre + \
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+        self.htend = l_entrainment*htend_pre + \
+                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
+        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
+        #stop
+
+
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+
+
+        # self.qtend = l_entrainment*qtend_pre + \
+        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
+        # self.CO2tend = l_entrainment*CO2tend_pre + \
+        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+
+
+        #     # part of the timestep for which the temperature mixed-layer jump
+        #     # was changing, and for which entrainment took place. For the other
+        #     # part, we don't assume entrainment anymore, and we use the
+        #     # simplified formulation  of Wouters et al., 2013
+
+        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
+        #   
+        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
+        #                      self.dthetatend + w_th_ft) + \
+        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
+        #     self.htend = fac*self.htend + \
+        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
+        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
+        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+
+        # else:
+        #     #self.htend = htend_pre
+        #     self.dthetatend = dthetatend_pre
+        #     self.thetatend = thetatend_pre
+        
+        # jump tendencies for humidity and CO2 (entrainment blended as above)
+        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
+        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
+     
+        # assume u + du = ug, so ug - u = du
+        if(self.sw_wind):
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
+  
+            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
+            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
+        
+        # tendency of the transition layer thickness
+        # relax dz_h towards (lcl - h) on a 2-hour timescale when clouds are
+        # present or the LCL is close to the mixed-layer top
+        if(self.ac > 0 or self.lcl - self.h < 300):
+            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
+        else:
+            self.dztend = 0.
+
+   
+    def integrate_mixed_layer(self):
+        # set values previous time step
+        h0      = self.h
+        
+        theta0  = self.theta
+        dtheta0 = self.dtheta
+        q0      = self.q
+        dq0     = self.dq
+        CO20    = self.CO2
+        dCO20   = self.dCO2
+        
+        u0      = self.u
+        du0     = self.du
+        v0      = self.v
+        dv0     = self.dv
+
+        dz0     = self.dz_h
+  
+        # integrate mixed-layer equations
+        
+            
+
+# END -- HW 20170606        
+        self.h        = h0      + self.dtcur * self.htend
+        # print(self.h,self.htend)
+        # stop
+        self.theta    = theta0  + self.dtcur * self.thetatend
+        #print(dtheta0,self.dtcur,self.dthetatend)
+        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
+        self.q        = q0      + self.dtcur * self.qtend
+        self.dq       = dq0     + self.dtcur * self.dqtend
+        self.CO2      = CO20    + self.dtcur * self.CO2tend
+        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
+        self.dz_h     = dz0     + self.dtcur * self.dztend
+            
+        # Limit dz to minimal value
+        dz0 = 50
+        if(self.dz_h < dz0):
+            self.dz_h = dz0 
+  
+        if(self.sw_wind):
+            self.u        = u0      + self.dtcur * self.utend
+            self.du       = du0     + self.dtcur * self.dutend
+            self.v        = v0      + self.dtcur * self.vtend
+            self.dv       = dv0     + self.dtcur * self.dvtend
+
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+
+            for var in ['t','q','u','v']:
+                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
+
+            # take into account advection for the whole profile
+                
+                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
+
+            var = 'z'
+            #print(self.air_ap[var])
+                #     print(self.air_ap['adv'+var])
+
+
+
+
+            #moving the profile vertically according to the vertical wind
+                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
+
+
+            # air_apvarold = pd.Series(np.array(self.air_ap.z))
+            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
+            # stop
+
+
+                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
+                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
+
+            #As t is updated, we also need to recalculate theta (and R)
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+
+            # air_aptheta_old = pd.Series(self.air_ap['theta'])
+            self.air_ap['theta'] = \
+                        self.air_ap.t * \
+                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
+                                         self.dtcur * self.air_ap.w[zidx_first:]
+
+#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
+#            print(self.t, self.dtcur,self.dt,self.htend)
+
+            # # the pressure levels of the profiles are recalculated according to
+            # # there new height (after subsidence)
+            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
+            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
+            #         * self.dtcur *  self.air_ap.w[zidx_first:]
+
+            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
+                    self.dtcur * self.air_ap.wp[zidx_first:]
+
+            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
+        # note that theta and q itself are updatet by class itself
+
+    
+        if self.sw_ap:
+            # Just for model consistency preservation purposes, we set the
+            # theta variables of the mixed-layer to nan values, since the
+            # mixed-layer values should overwritte by the mixed-layer
+            # calculations of class.
+            self.air_ap['theta'][0:3] = np.nan 
+            self.air_ap['p'][0:3] = np.nan 
+            self.air_ap['q'][0:3] = np.nan 
+            self.air_ap['u'][0:3] = np.nan 
+            self.air_ap['v'][0:3] = np.nan 
+            self.air_ap['t'][0:3] = np.nan 
+            self.air_ap['z'][0:3] = np.nan 
+
+            # Update the vertical profiles: 
+            #   - new mixed layer properties( h, theta, q ...)
+            #   - any data points below the new ixed-layer height are removed
+
+            # Three data points at the bottom that describe the mixed-layer
+            # properties
+            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
+                                           # columns as air_ap
+            # air_ap_head['z'].iloc[0] = 2.
+            # air_ap_head['z'].iloc[1] = self.__dict__['h']
+            # air_ap_head['z'].iloc[2] = self.__dict__['h']
+            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
+                        [2.,self.__dict__['h'],self.__dict__['h']]
+            for var in ['theta','q','u','v']:
+
+                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
+                        [self.__dict__[var], \
+                         self.__dict__[var], \
+                         self.__dict__[var] + self.__dict__['d'+var]]
+                
+            #print(self.air_ap)
+
+            # This is the remaining profile considering the remaining
+            # datapoints above the mixed layer height
+            air_ap_tail = self.air_ap.iloc[3:]
+            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
+
+            # print('h',self.h)
+            # # only select samples monotonically increasing with height
+            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            # air_ap_tail = pd.DataFrame()
+            # theta_low = self.theta
+            # z_low =     self.h
+            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            # for ibottom in range(1,len(air_ap_tail_orig)):
+            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
+            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+            # make theta increase strong enough to avoid numerical
+            # instability
+            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+            air_ap_tail = pd.DataFrame()
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+            theta_low = self.theta
+            z_low =     self.h
+            ibottom = 0
+            itop = 0
+            # print(air_ap_tail_orig)
+            # stop
+
+            # HW: this is the lower limit that we use for gammatheta, which is
+            # there to avoid model crashes. Besides on this limit, the upper
+            # air profile is modified in a way that is still conserves total
+            # quantities of moisture and temperature. The limit is set by trial
+            # and error. The numerics behind the crash should be investigated
+            # so that a cleaner solution can be provided.
+            gammatheta_lower_limit = 0.002
+            while ((itop in range(0,1)) or (itop != ibottom)):
+                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+                if (
+                    #(z_mean > (z_low+0.2)) and \
+                    #(theta_mean > (theta_low+0.02) ) and \
+                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
+                  (itop >= (len(air_ap_tail_orig)-1)) \
+                   :
+
+                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                    ibottom = itop+1
+                    theta_low = air_ap_tail.theta.iloc[-1]
+                    z_low =     air_ap_tail.z.iloc[-1]
+    
+
+                itop +=1
+                # elif  (itop > len(air_ap_tail_orig)-10):
+                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+                #print(itop,ibottom)
+
+            if itop > 1:
+                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! Temperature profile was too steep. \
+                                  Modifying profile: "+ \
+                                  str(itop - 1)+ " measurements were dropped \
+                                  and replaced with its average \
+                                  Modifying profile. \
+                                  mean with next profile point(s).") 
+
+
+            self.air_ap = pd.concat((air_ap_head,\
+                                     air_ap_tail,\
+                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
+                                                                      axis=1)
+
+            if  self.sw_ac:
+                qvalues = \
+                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
+
+                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
+                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
+                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+                self.P_h    = self.Ps - self.rho * self.g * self.h
+                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
+                        [self.Ps,  self.P_h, self.P_h-0.1]
+
+                self.air_ap.t = \
+                            self.air_ap.theta * \
+                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
+
+
+        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
+
+
+
+
+        # else:
+            # in the other case, it is updated at the time the statistics are
+            # calculated 
+
+        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+
+
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if in_ml.sum() == 0:
+                warnings.warn(" no circulation points in the mixed layer \
+                              found. We just take the bottom one.")
+                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+            for var in ['t','q','u','v']:
+
+                # calculation of the advection variables for the mixed-layer
+                # these will be used for the next timestep
+                # Warning: w is excluded for now.
+
+                self.__dict__['adv'+var] = \
+                        ((self.air_ac['adv'+var+'_x'][in_ml] \
+                         + \
+                         self.air_ac['adv'+var+'_y'][in_ml])* \
+                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                        self.air_ac['delpdgrav'][in_ml].sum()
+
+                # calculation of the advection variables for the profile above
+                # the mixed layer (also for the next timestep)
+                self.air_ap['adv'+var] = \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p,\
+                                              self.air_ac['adv'+var+'_x']) \
+                                    + \
+                                    np.interp(self.air_ap.p,\
+                                              self.air_ac.p, \
+                                              self.air_ac['adv'+var+'_y'])
+                # if var == 't':
+                #     print(self.air_ap['adv'+var])
+                #     stop
+
+            # as an approximation, we consider that advection of theta in the
+            # mixed layer is equal to advection of t. This is a sufficient
+            # approximation since theta and t are very similar at the surface
+            # pressure.
+
+            self.__dict__['advtheta'] = self.__dict__['advt']
+
+        if (self.sw_ac is not None) and ('w' in self.sw_ac):
+            # update the vertical wind profile
+            self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                          self.air_ac.p, \
+                                          self.air_ac['wp'])
+            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                 self.Rv*self.air_ap.q)
+            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+            
+            air_apwold = self.air_ap['w']
+            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+            #print('hello w upd')
+
+            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # # self.wrho = np.interp(self.P_h,\
+            # #                      self.air_ach.p,\
+            # #                      self.air_ach['wrho']) \
+
+
+
+            # Also update the vertical wind at the mixed-layer height
+            # (subsidence)
+            self.ws   = self.air_ap.w[1]
+        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
+
+            ## Finally, we update he 
+            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+            #                        + \
+            #                        self.air_ac['divU_y'][in_ml])* \
+            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+            #            self.air_ac['delpdgrav'][in_ml].sum() 
+            
+
+        if self.sw_ap:
+            for var in ['theta','q','u','v']:
+
+                # update of the slope (gamma) for the different variables, for
+                # the next timestep!
+
+                # there is an warning message that tells about dividing through
+                # zero, which we ignore
+
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                    # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap['gamma'+var] = gammavar
+
+                # Based on the above, update the gamma value at the mixed-layer
+                # top
+                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
+                                                                     self.air_ap.z)[0][-1]]
+
+            
+    def run_radiation(self):
+        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
+        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
+        sinlea = max(sinlea, 0.0001)
+        
+        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
+  
+        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
+  
+        self.Swin  = self.S0 * Tr * sinlea
+        self.Swout = self.alpha * self.S0 * Tr * sinlea
+        
+        
+        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
+        self.Lwout = self.bolz * self.Ts ** 4.
+          
+        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
+        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
+  
+    def run_surface_layer(self):
+        # HW: I had to raise the minimum wind speed to make the simulation with
+        # the non-iterative solution stable (this solution was a wild guess, so I don't
+        # know the exact problem of the instability in case of very low wind
+        # speeds yet)
+        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        # version of 20180730 where there are still some runs crashing. Maybe
+        # an upper limit should be set on the monin-obukhov length instead of
+        # a lower limmit on the wind speed?
+        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        
+        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
+        qsatsurf       = qsat(self.thetasurf, self.Ps)
+        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
+        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
+
+        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
+  
+        zsl       = 0.1 * self.h
+        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
+        
+
+
+        if self.sw_lit:
+            self.Rib  = min(self.Rib, 0.2)
+            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
+            self.zeta  = zsl/self.L
+            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
+            
+        
+            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
+            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
+            
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+        
+     
+            # diagnostic meteorological variables
+            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
+            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
+            
+            # diagnostic meteorological variables
+        else:
+            
+            ## circumventing any iteration with Wouters et al., 2012
+            self.zslz0m = np.max((zsl/self.z0m,10.))
+            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
+            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
+            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
+            self.L = zsl/self.zeta
+            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
+        
+            self.Cm = self.k**2.0/funm/funm
+            self.Cs = self.k**2.0/funm/funh
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+            
+            # extrapolation from mixed layer (instead of from surface) to 2meter
+            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
+            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
+            self.u2m    =                - self.uw     / self.ustar / self.k * funm
+            self.v2m    =                - self.vw     / self.ustar / self.k * funm
+        
+        
+        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
+        self.e2m    = self.q2m * self.Ps / 0.622
+     
+    def ribtol(self, Rib, zsl, z0m, z0h): 
+        if(Rib > 0.):
+            L    = 1.
+            L0   = 2.
+        else:
+            L  = -1.
+            L0 = -2.
+        #print(Rib,zsl,z0m,z0h)
+        
+        while (abs(L - L0) > 0.001):
+            L0      = L
+            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
+            Lstart  = L - 0.001*L
+            Lend    = L + 0.001*L
+            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
+                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+            L       = L - fx / fxdif
+            #print(L)
+            if(abs(L) > 1e12):
+                break
+
+        return L
+      
+    def psim(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psim = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+        return psim
+      
+    def psih(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psih  = 2. * np.log( (1. + x*x) / 2.)
+            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+        return psih
+ 
+    def jarvis_stewart(self):
+        # calculate surface resistances using Jarvis-Stewart model
+        if(self.sw_rad):
+            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
+        else:
+            f1 = 1.
+  
+        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
+            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
+        else:
+            f2 = 1.e8
+ 
+        # Limit f2 in case w2 > wfc, where f2 < 1
+        f2 = max(f2, 1.);
+ 
+        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
+        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
+  
+        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
+
+    def factorial(self,k):
+        factorial = 1
+        for n in range(2,k+1):
+            factorial = factorial * float(n)
+        return factorial;
+
+    def E1(self,x):
+        E1sum = 0
+        for k in range(1,100):
+            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
+        return -0.57721566490153286060 - np.log(x) - E1sum
+ 
    def ags(self):
        """A-gs plant physiology scheme.

        Computes the canopy surface resistance (self.rs) and the kinematic
        CO2 fluxes: assimilation (self.wCO2A), soil respiration
        (self.wCO2R) and their sum (self.wCO2).  Plant-type dependent
        constants are indexed with c (0 = C3, 1 = C4).  Relies on esat()
        defined elsewhere in this module and on self.E1 for the
        leaf-to-canopy upscaling integral.
        """
        # Select index for plant type
        if(self.c3c4 == 'c3'):
            c = 0
        elif(self.c3c4 == 'c4'):
            c = 1
        else:
            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)

        # calculate CO2 compensation concentration (Q10 temperature response)
        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  

        # calculate mesophyll conductance (Q10 response damped at temperature extremes)
        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
        gm            = gm / 1000. # conversion from mm s-1 to m s-1
  
        # calculate CO2 concentration inside the leaf (ci)
        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
        # NOTE(review): in this quadratic-formula solution only the sqrt term
        # is divided by (2.*gm), not the -fmin0 term — verify against the
        # reference A-gs formulation.
        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
  
        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
        D0            = (self.f0[c] - fmin) / self.ad[c]
  
        # internal/ambient CO2 fraction, interpolated with the vapour deficit
        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
        ci            = cfrac * (co2abs - CO2comp) + CO2comp
  
        # calculate maximal gross primary production in high light conditions (Ag)
        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
  
        # calculate effect of soil moisture stress on gross assimilation rate
        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
  
        # calculate stress function (linear, or curved following Combe et al. 2016)
        if (self.c_beta == 0):
            fstr = betaw;
        else:
            # Following Combe et al (2016)
            if (self.c_beta < 0.25):
                P = 6.4 * self.c_beta
            elif (self.c_beta < 0.50):
                P = 7.6 * self.c_beta - 0.3
            else:
                P = 2**(3.66 * self.c_beta + 0.34) - 1
            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
  
        # calculate gross assimilation rate (Am)
        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
        Rdark        = (1. / 9.) * Am
        # photosynthetically active radiation over the vegetated fraction
        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
  
        # calculate  light use efficiency
        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
  
        # calculate gross primary productivity
        # NOTE(review): Ag is computed here but not used further in this method.
        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
  
        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
  
        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
        a1           = 1. / (1. - self.f0[c])
        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
  
        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
  
        # calculate surface resistance for moisture and carbon dioxide
        # (factor 1.6: ratio of diffusivities of water vapour and CO2)
        self.rs      = 1. / (1.6 * gcco2)
        rsCO2        = 1. / gcco2
  
        # calculate net flux of CO2 into the plant (An)
        An           = -(co2abs - ci) / (self.ra + rsCO2)
  
        # CO2 soil surface flux (respiration with a soil-moisture correction
        # fw and an Arrhenius-type temperature response)
        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
  
        # CO2 flux: convert mgCO2 m-2 s-1 to kinematic (ppm m s-1) units
        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
        self.wCO2    = self.wCO2A + self.wCO2R
+ 
    def run_land_surface(self):
        """Solve the surface energy balance and soil prognostic tendencies.

        Computes the aerodynamic resistance, delegates the canopy resistance
        to jarvis_stewart() or ags() depending on ls_type, solves the skin
        temperature Ts implicitly from the linearized energy balance, and
        derives the turbulent fluxes (H, LE and its liquid/vegetation/soil
        components), the ground heat flux G, and the force-restore
        tendencies for Tsoil, wg and Wl.  Relies on esat()/qsat() defined
        elsewhere in this module.
        """
        # compute ra
        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
        #print('ueff',self.u,self.v,self.wstar)

        # aerodynamic resistance: from the drag coefficient when the
        # surface-layer scheme is active, otherwise from u*
        if(self.sw_sl):
          self.ra = (self.Cs * ueff)**-1.
        else:
          self.ra = ueff / max(1.e-3, self.ustar)**2.

        #print('ra',self.ra,self.ustar,ueff)

        # first calculate essential thermodynamic variables
        self.esat    = esat(self.theta)
        self.qsat    = qsat(self.theta, self.Ps)
        # slope of the saturation curve (Tetens-type formula)
        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
        self.dqsatdT = 0.622 * desatdT / self.Ps
        self.e       = self.q * self.Ps / 0.622

        # canopy resistance from the selected land-surface scheme
        if(self.ls_type == 'js'): 
            self.jarvis_stewart() 
        elif(self.ls_type == 'ags'):
            self.ags()
        else:
            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)

        # recompute f2 using wg instead of w2
        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
        else:
          f2        = 1.e8
        self.rssoil = self.rssoilmin * f2 
 
        # wet (liquid-water covered) vegetation fraction, capped at 1
        Wlmx = self.LAI * self.Wmax
        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
        self.cliq = min(1., self.Wl / Wlmx) 
     
        # calculate skin temperature implictly: the energy balance is
        # linearized around theta via dqsatdT, then solved for Ts in one step
        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)

        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
        #print('Ts',self.rs)

        esatsurf      = esat(self.Ts)
        self.qsatsurf = qsat(self.Ts, self.Ps)

        # latent heat flux components: dry vegetation (transpiration), wet
        # vegetation (interception evaporation) and bare soil
        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
  
        # interception-reservoir depletion by wet-vegetation evaporation
        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
  
        self.LE     = self.LEsoil + self.LEveg + self.LEliq
        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
        #print('H',self.ra,self.Ts,self.theta)
        self.G      = self.Lambda * (self.Ts - self.Tsoil)
        # Penman-Monteith-type potential and reference evaporation
        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
        
        # force-restore soil heat coefficient, scaled with soil moisture
        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
  
        # soil temperature tendency: forcing by G, restore towards T2
        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
   
        # top-soil moisture tendency (force-restore, layer depth d1)
        d1          = 0.1
        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
  
        # calculate kinematic heat fluxes
        self.wtheta   = self.H  / (self.rho * self.cp)
        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
        self.wq       = self.LE / (self.rho * self.Lv)
+ 
+    def integrate_land_surface(self):
+        # integrate soil equations
+        Tsoil0        = self.Tsoil
+        wg0           = self.wg
+        Wl0           = self.Wl
+  
+        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
+        self.wg       = wg0     + self.dtcur * self.wgtend
+        self.Wl       = Wl0     + self.dtcur * self.Wltend
+  
+    # store model output
+    def store(self):
+        t                      = self.t
+        
+        self.out.time[t]          = t * self.dt / 3600. + self.tstart
+
+        # in case we are at the end of the simulation, we store the vertical
+        # profiles to the output
+        
+        # if t == (len(self.out.time) - 1):
+        #     self.out.air_ac = self.air_ac
+        #     self.out.air_ap = self.air_ap
+
+        
+        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
+        #  for key in self.out.__dict__.keys():
+        #      if key in self.__dict__:
+        #          self.out.__dict__[key][t]  = self.__dict__[key]
+        
+        self.out.h[t]          = self.h
+        
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
+        
+        self.out.gammatheta[t] = self.gammatheta
+        self.out.gammau[t]     = self.gammau
+        self.out.gammav[t]     = self.gammav
+        self.out.gammaq[t]     = self.gammaq
+        self.out.theta[t]      = self.theta
+        self.out.thetav[t]     = self.thetav
+        self.out.dtheta[t]     = self.dtheta
+        self.out.dthetav[t]    = self.dthetav
+        self.out.wtheta[t]     = self.wtheta
+        self.out.wthetav[t]    = self.wthetav
+        self.out.wthetae[t]    = self.wthetae
+        self.out.wthetave[t]   = self.wthetave
+        
+        self.out.q[t]          = self.q
+        self.out.dq[t]         = self.dq
+        self.out.wq[t]         = self.wq
+        self.out.wqe[t]        = self.wqe
+        self.out.wqM[t]        = self.wqM
+      
+        self.out.qsat[t]       = self.qsat
+        self.out.e[t]          = self.e
+        self.out.esat[t]       = self.esat
+      
+        fac = (self.rho*self.mco2)/self.mair
+        self.out.CO2[t]        = self.CO2
+        self.out.dCO2[t]       = self.dCO2
+        self.out.wCO2[t]       = self.wCO2  * fac
+        self.out.wCO2e[t]      = self.wCO2e * fac
+        self.out.wCO2R[t]      = self.wCO2R * fac
+        self.out.wCO2A[t]      = self.wCO2A * fac
+
+        self.out.u[t]          = self.u
+        self.out.du[t]         = self.du
+        self.out.uw[t]         = self.uw
+        
+        self.out.v[t]          = self.v
+        self.out.dv[t]         = self.dv
+        self.out.vw[t]         = self.vw
+        
+        self.out.T2m[t]        = self.T2m
+        self.out.q2m[t]        = self.q2m
+        self.out.u2m[t]        = self.u2m
+        self.out.v2m[t]        = self.v2m
+        self.out.e2m[t]        = self.e2m
+        self.out.esat2m[t]     = self.esat2m
+
+
+        self.out.Tsoil[t]      = self.Tsoil
+        self.out.T2[t]         = self.T2
+        self.out.Ts[t]         = self.Ts
+        self.out.wg[t]         = self.wg
+        
+        self.out.thetasurf[t]  = self.thetasurf
+        self.out.thetavsurf[t] = self.thetavsurf
+        self.out.qsurf[t]      = self.qsurf
+        self.out.ustar[t]      = self.ustar
+        self.out.Cm[t]         = self.Cm
+        self.out.Cs[t]         = self.Cs
+        self.out.L[t]          = self.L
+        self.out.Rib[t]        = self.Rib
+  
+        self.out.Swin[t]       = self.Swin
+        self.out.Swout[t]      = self.Swout
+        self.out.Lwin[t]       = self.Lwin
+        self.out.Lwout[t]      = self.Lwout
+        self.out.Q[t]          = self.Q
+  
+        self.out.ra[t]         = self.ra
+        self.out.rs[t]         = self.rs
+        self.out.H[t]          = self.H
+        self.out.LE[t]         = self.LE
+        self.out.LEliq[t]      = self.LEliq
+        self.out.LEveg[t]      = self.LEveg
+        self.out.LEsoil[t]     = self.LEsoil
+        self.out.LEpot[t]      = self.LEpot
+        self.out.LEref[t]      = self.LEref
+        self.out.G[t]          = self.G
+
+        self.out.zlcl[t]       = self.lcl
+        self.out.RH_h[t]       = self.RH_h
+
+        self.out.ac[t]         = self.ac
+        self.out.M[t]          = self.M
+        self.out.dz[t]         = self.dz_h
+        self.out.substeps[t]   = self.substeps
+  
+    # delete class variables to facilitate analysis in ipython
+    def exitmodel(self):
+        del(self.Lv)
+        del(self.cp)
+        del(self.rho)
+        del(self.k)
+        del(self.g)
+        del(self.Rd)
+        del(self.Rv)
+        del(self.bolz)
+        del(self.S0)
+        del(self.rhow)
+  
+        del(self.t)
+        del(self.dt)
+        del(self.tsteps)
+         
+        del(self.h)          
+        del(self.Ps)        
+        del(self.fc)        
+        del(self.ws)
+        del(self.we)
+        
+        del(self.theta)
+        del(self.dtheta)
+        del(self.gammatheta)
+        del(self.advtheta)
+        del(self.beta)
+        del(self.wtheta)
+    
+        del(self.T2m)
+        del(self.q2m)
+        del(self.e2m)
+        del(self.esat2m)
+        del(self.u2m)
+        del(self.v2m)
+        
+        del(self.thetasurf)
+        del(self.qsatsurf)
+        del(self.thetav)
+        del(self.dthetav)
+        del(self.thetavsurf)
+        del(self.qsurf)
+        del(self.wthetav)
+        
+        del(self.q)
+        del(self.qsat)
+        del(self.dqsatdT)
+        del(self.e)
+        del(self.esat)
+        del(self.dq)
+        del(self.gammaq)
+        del(self.advq)
+        del(self.wq)
+        
+        del(self.u)
+        del(self.du)
+        del(self.gammau)
+        del(self.advu)
+        
+        del(self.v)
+        del(self.dv)
+        del(self.gammav)
+        del(self.advv)
+  
+        del(self.htend)
+        del(self.thetatend)
+        del(self.dthetatend)
+        del(self.qtend)
+        del(self.dqtend)
+        del(self.utend)
+        del(self.dutend)
+        del(self.vtend)
+        del(self.dvtend)
+     
+        del(self.Tsoiltend) 
+        del(self.wgtend)  
+        del(self.Wltend) 
+  
+        del(self.ustar)
+        del(self.uw)
+        del(self.vw)
+        del(self.z0m)
+        del(self.z0h)        
+        del(self.Cm)         
+        del(self.Cs)
+        del(self.L)
+        del(self.Rib)
+        del(self.ra)
+  
+        del(self.lat)
+        del(self.lon)
+        del(self.doy)
+        del(self.tstart)
+   
+        del(self.Swin)
+        del(self.Swout)
+        del(self.Lwin)
+        del(self.Lwout)
+        del(self.cc)
+  
+        del(self.wg)
+        del(self.w2)
+        del(self.cveg)
+        del(self.cliq)
+        del(self.Tsoil)
+        del(self.T2)
+        del(self.a)
+        del(self.b)
+        del(self.p)
+        del(self.CGsat)
+  
+        del(self.wsat)
+        del(self.wfc)
+        del(self.wwilt)
+  
+        del(self.C1sat)
+        del(self.C2ref)
+  
+        del(self.LAI)
+        del(self.rs)
+        del(self.rssoil)
+        del(self.rsmin)
+        del(self.rssoilmin)
+        del(self.alpha)
+        del(self.gD)
+  
+        del(self.Ts)
+  
+        del(self.Wmax)
+        del(self.Wl)
+  
+        del(self.Lambda)
+        
+        del(self.Q)
+        del(self.H)
+        del(self.LE)
+        del(self.LEliq)
+        del(self.LEveg)
+        del(self.LEsoil)
+        del(self.LEpot)
+        del(self.LEref)
+        del(self.G)
+  
+        del(self.sw_ls)
+        del(self.sw_rad)
+        del(self.sw_sl)
+        del(self.sw_wind)
+        del(self.sw_shearwe)
+
+# class for storing mixed-layer model output data
class model_output:
    """Container for the mixed-layer model output time series.

    Every attribute listed in ``_FIELDS`` is allocated as a
    zero-initialized float array of length ``tsteps`` and filled in by
    ``model.store()`` once per time step.
    """

    # output field names, grouped by theme (units as stored by store())
    _FIELDS = (
        'time',                                   # time [h]
        # mixed-layer state: height [m], potential temperatures [K],
        # lapse rates [K m-1] and kinematic heat fluxes [K m s-1]
        'h', 'theta', 'gammatheta', 'gammaq', 'gammau', 'gammav',
        'thetav', 'dtheta', 'dthetav',
        'wtheta', 'wthetav', 'wthetae', 'wthetave',
        # moisture [kg kg-1], fluxes [kg kg-1 m s-1], vapor pressures [Pa]
        'q', 'dq', 'wq', 'wqe', 'wqM', 'qsat', 'e', 'esat',
        # carbon dioxide [ppm] and CO2 fluxes [mgC m-2 s-1]
        'CO2', 'dCO2', 'wCO2', 'wCO2A', 'wCO2R', 'wCO2e', 'wCO2M',
        # momentum: wind [m s-1], jumps [m s-1], fluxes [m2 s-2]
        'u', 'du', 'uw', 'v', 'dv', 'vw',
        # 2 m diagnostics [K, kg kg-1, m s-1, Pa]
        'T2m', 'q2m', 'u2m', 'v2m', 'e2m', 'esat2m',
        # ground variables
        'Tsoil', 'T2', 'Ts', 'wg',
        # surface-layer variables
        'thetasurf', 'thetavsurf', 'qsurf', 'ustar',
        'z0m', 'z0h', 'Cm', 'Cs', 'L', 'Rib',
        # radiation components and net radiation [W m-2]
        'Swin', 'Swout', 'Lwin', 'Lwout', 'Q',
        # land-surface resistances [s m-1] and energy fluxes [W m-2]
        'ra', 'rs', 'H', 'LE', 'LEliq', 'LEveg', 'LEsoil',
        'LEpot', 'LEref', 'G',
        # mixed-layer top and cumulus variables
        'zlcl', 'RH_h', 'ac', 'M', 'dz',
        # number of additional substep time integrations needed [-]
        'substeps',
    )

    def __init__(self, tsteps):
        for name in self._FIELDS:
            setattr(self, name, np.zeros(tsteps))
+
+# class for storing mixed-layer model input data
+class model_input:
+    """Container for the input parameters of the mixed-layer (CLASS) model.
+
+    Every attribute is initialized to None rather than to a default value:
+    inheriting defaults from another class4gl_input caused problems, and the
+    user is expected to specify all required parameters explicitly (an error
+    is raised otherwise).
+    """
+    def __init__(self):
+
+        # All values start as None on purpose -- see the class docstring.
+
+        # general model variables
+        self.runtime    = None  # duration of model run [s]
+        self.dt         = None  # time step [s]
+
+        # mixed-layer variables
+        self.sw_ml      = None  # mixed-layer model switch
+        self.sw_shearwe = None  # Shear growth ABL switch
+        self.sw_fixft   = None  # Fix the free-troposphere switch
+        self.h          = None  # initial ABL height [m]
+        self.Ps         = None  # surface pressure [Pa]
+        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
+        self.fc         = None  # Coriolis parameter [s-1]
+        
+        self.theta      = None  # initial mixed-layer potential temperature [K]
+        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
+
+        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
+
+        self.dtheta     = None  # initial temperature jump at h [K]
+        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = None  # advection of heat [K s-1]
+        self.beta       = None  # entrainment ratio for virtual heat [-]
+        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
+        
+        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
+        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
+
+        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = None  # advection of moisture [kg kg-1 s-1]
+        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
+
+        # NOTE(review): the original comments on the CO2 block were
+        # copy-pasted from the theta block ("potential temperature [K]");
+        # units are presumably ppm -- confirm against the CLASS model.
+        self.CO2        = None  # initial mixed-layer CO2 concentration
+        self.dCO2       = None  # initial CO2 jump at h
+        self.gammaCO2   = None  # free atmosphere CO2 lapse rate
+        self.advCO2     = None  # advection of CO2
+        self.wCO2       = None  # surface kinematic CO2 flux
+        
+        self.sw_wind    = None  # prognostic wind switch
+        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
+        self.du         = None  # initial u-wind jump at h [m s-1]
+        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = None  # advection of u-wind [m s-2]
+
+        self.v          = None  # initial mixed-layer v-wind speed [m s-1]
+        self.dv         = None  # initial v-wind jump at h [m s-1]
+        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = None  # advection of v-wind [m s-2]
+
+        # surface layer variables
+        self.sw_sl      = None  # surface layer switch
+        self.ustar      = None  # surface friction velocity [m s-1]
+        self.z0m        = None  # roughness length for momentum [m]
+        self.z0h        = None  # roughness length for scalars [m]
+        self.Cm         = None  # drag coefficient for momentum [-]
+        self.Cs         = None  # drag coefficient for scalars [-]
+        self.L          = None  # Obukhov length [m]
+        self.Rib        = None  # bulk Richardson number [-]
+
+        # radiation parameters
+        self.sw_rad     = None  # radiation switch
+        self.lat        = None  # latitude [deg]
+        self.lon        = None  # longitude [deg]
+        self.doy        = None  # day of the year [-]
+        self.tstart     = None  # time of the day [h UTC]
+        self.cc         = None  # cloud cover fraction [-]
+        self.Q          = None  # net radiation [W m-2] 
+        self.dFz        = None  # cloud top radiative divergence [W m-2] 
+
+        # land surface parameters
+        self.sw_ls      = None  # land surface switch
+        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
+        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = None  # temperature top soil layer [K]
+        self.T2         = None  # temperature deeper soil layer [K]
+        
+        self.a          = None  # Clapp and Hornberger retention curve parameter a
+        self.b          = None  # Clapp and Hornberger retention curve parameter b
+        self.p          = None  # Clapp and Hornberger retention curve parameter p 
+        self.CGsat      = None  # saturated soil conductivity for heat
+        
+        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
+        self.wfc        = None  # volumetric water content field capacity [-]
+        self.wwilt      = None  # volumetric water content wilting point [-]
+        
+        self.C1sat      = None  # force-restore soil-moisture coefficient -- TODO confirm meaning
+        self.C2ref      = None  # force-restore soil-moisture coefficient -- TODO confirm meaning
+
+        self.c_beta     = None  # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = None  # leaf area index [-]
+        self.gD         = None  # correction factor transpiration for VPD [-]
+        self.rsmin      = None  # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = None  # surface albedo [-]
+        
+        self.Ts         = None  # initial surface temperature [K]
+        
+        self.cveg       = None  # vegetation fraction [-]
+        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
+        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
+        
+        self.Lambda     = None  # thermal diffusivity skin layer [-]
+
+        # A-Gs parameters
+        self.c3c4       = None  # Plant type ('c3' or 'c4')
+
+        # Cumulus parameters
+        self.sw_cu      = None  # Cumulus parameterization switch
+        self.dz_h       = None  # Transition layer thickness [m]
+        
+# BEGIN -- HW 20171027
+        # self.cala       = None      # soil heat conductivity [W/(K*m)]
+        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+# END -- HW 20171027
diff --git a/dist/class4gl-0.1dev/setup.py b/dist/class4gl-0.1dev/setup.py
new file mode 100644
index 0000000..a806fa0
--- /dev/null
+++ b/dist/class4gl-0.1dev/setup.py
@@ -0,0 +1,9 @@
+from distutils.core import setup
+
+# Minimal packaging script for the class4gl distribution.
+# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
+# migrating to setuptools.setup. Also confirm that 'lib' and 'bin' are real
+# importable packages (i.e. contain __init__.py) -- generic directory names
+# like these usually indicate the package layout still needs restructuring.
+setup(
+        name='class4gl',
+        version='0.1dev',
+        packages=['lib','bin'],
+        license='GPLv3 licence',
+        long_description=open('README.md').read(),
+)
diff --git a/examples/run_soundings/batch_run_soundings.py b/examples/run_soundings/batch_run_soundings.py
new file mode 100644
index 0000000..c4fc40e
--- /dev/null
+++ b/examples/run_soundings/batch_run_soundings.py
@@ -0,0 +1,76 @@
+
+import argparse
+
+import pandas as pd
+import os
+import math
+import numpy as np
+import sys
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+df_stations = pd.read_csv(fn_stations)
+
+# if 'path-soundings' in args.__dict__.keys():
+#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+# else:
+
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
+    parser.add_argument('--experiments')#should be ';'-seperated list
+    parser.add_argument('--split-by',default=-1)
+    args = parser.parse_args()
+
+experiments = args.experiments.split(';')
+#SET = 'GLOBAL'
+SET = args.dataset
+print(args.experiments)
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+for expname in experiments:
+    #exp = EXP_DEFS[expname]
+    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+    os.system('rm -R '+path_exp)
+
+totalchunks = 0
+for istation,current_station in all_stations.iterrows():
+    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
+    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
+    totalchunks +=chunks_current_station
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
+                                       ',split_by='+str(args.split_by)+\
+                                       ',exec='+str(args.exec)+\
+                                       ',experiments='+str(args.experiments))
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/examples/run_soundings/run.py b/examples/run_soundings/run.py
new file mode 100644
index 0000000..46eeea1
--- /dev/null
+++ b/examples/run_soundings/run.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+if __name__ == '__main__':
+    # Command-line interface of the per-chunk sounding run; the companion
+    # batch_run_soundings.py submits one array job per (station, chunk).
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--error-handling',default='dump_on_success')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk')
+    parser.add_argument('--c4gl-path',default='')
+    args = parser.parse_args()
+
+
+
+# NOTE(review): everything below runs at import time and references 'args',
+# which is only defined inside the __main__ guard above -- importing this
+# file as a module therefore raises NameError. Consider moving the
+# module-level code under the guard.
+if args.c4gl_path == '': 
+    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+else:
+    sys.path.insert(0, args.c4gl_path)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+# NOTE(review): argparse stores '--path-soundings' as attribute
+# 'path_soundings', so the key 'path-soundings' is never present in
+# args.__dict__; this always falls through to the hard-coded default path
+# and the command-line option is silently ignored.
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    run_station_chunk = 0
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        try:
+                            c4gl.run()
+                        except:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_succes':
+                        try:
+                            c4gl.run()
+                            print('run not succesfull')
+                            onerun = True
+
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except:
+                            print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/examples/run_soundings/run_iter.py b/examples/run_soundings/run_iter.py
new file mode 100644
index 0000000..5dfbaff
--- /dev/null
+++ b/examples/run_soundings/run_iter.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk',default=0)
+    args = parser.parse_args()
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+# NOTE(review): argparse defines every declared option as an attribute on
+# args (value None when the flag is omitted), so this membership test is
+# always True; the sibling run.py uses "args.global_chunk is not None",
+# which is what is meant here. When --global-chunk is omitted,
+# int(args.global_chunk) below raises TypeError.
+if 'global_chunk' in args.__dict__.keys():
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    # walk the stations, accumulating chunk counts, until the counter
+    # passes the requested global chunk index
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    # NOTE(review): 'run_stations' is read here before it is ever assigned
+    # in this branch (run.py initializes it from all_stations first), so
+    # this path raises NameError as written.
+    if 'last_station' in args.__dict__.keys():
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    
+    if 'first_station' in args.__dict__.keys():
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    if 'station_chunk' in args.__dict__.keys():
+        run_station_chunk = args.station_chunk
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+    for istation,current_station in run_stations.iterrows():
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                #if iexp == 11:
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+
+                        c4gli_morning.pars.itersteps = i
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                   #   timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    except:
+                        print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/examples/run_soundings/run_iter_test.py b/examples/run_soundings/run_iter_test.py
new file mode 100644
index 0000000..eefd475
--- /dev/null
+++ b/examples/run_soundings/run_iter_test.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
# Experiment definitions: maps an experiment name to the switch settings that
# are merged into the model input via c4gli_morning.update(source=expname,...).
# 'sw_ac' lists the circulation terms to activate — presumably 'adv' =
# advection and 'w' = subsidence/vertical velocity (TODO confirm against the
# class4gl package); the semantics of 'sw_ap' and 'sw_lit' are defined there
# as well.
EXP_DEFS  =\
{
  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
}
+
+import argparse
+
if __name__ == '__main__':
    # Command-line interface for the iteration experiment driver.
    # NOTE(review): `args` is bound only when the file runs as a script, yet
    # it is used unconditionally at module level below, so importing this
    # module would raise NameError — presumably script-only use; confirm.
    parser = argparse.ArgumentParser()
    parser.add_argument('--global-chunk')    # single chunk index counted over all stations
    parser.add_argument('--first-station')   # inclusive start of a station range
    parser.add_argument('--last-station')    # inclusive end of a station range
    parser.add_argument('--dataset')         # sounding dataset name (subdirectory)
    parser.add_argument('--path-soundings')  # root directory of the sounding sets
    parser.add_argument('--experiments')     # semicolon-separated EXP_DEFS keys
    parser.add_argument('--split-by',default=-1)# station soundings are split
                                                # up in chunks
    parser.add_argument('--station-chunk')   # chunk index within one station
    args = parser.parse_args()
+
+
#SET = 'GLOBAL'
SET = args.dataset

# BUGFIX: argparse stores "--path-soundings" as the attribute
# "path_soundings" (dashes become underscores), so the original test
# `'path-soundings' in args.__dict__` could never succeed and the option was
# silently ignored; worse, the intended branch indexed the dashed key, which
# would have raised KeyError. Use the underscore attribute and only honour it
# when actually given on the command line.
if getattr(args, 'path_soundings', None) is not None:
    path_soundingsSET = args.path_soundings+'/'+SET+'/'
else:
    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
# Table of all stations available in the sounding set (project helper;
# refetch_stations=True forces a rebuild of the cached station table).
all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table

# All morning records of every station; used below to work out how many
# --split-by-sized chunks each station contributes.
all_records_morning = get_records(all_stations,\
                              path_soundingsSET,\
                              subset='morning',
                              refetch_records=False,
                              )
+
if args.global_chunk is not None:
    # Translate the global chunk number into a (station, station-chunk) pair:
    # walk through the stations, counting how many --split-by-sized chunks of
    # soundings each station has, until the requested global chunk index falls
    # inside the current station.
    # NOTE(review): this path assumes --split-by was given (> 0); with the
    # default of -1 the ceil() below goes negative — confirm callers always
    # pass --split-by together with --global-chunk.
    totalchunks = 0
    # Renamed local (was `stations_iterator`), which shadowed the
    # stations_iterator imported from interface_multi.
    station_row_iterator = all_stations.iterrows()
    in_current_chunk = False
    while not in_current_chunk:
        istation,current_station = station_row_iterator.__next__()
        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))

        if in_current_chunk:
            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
            run_station_chunk = int(args.global_chunk) - totalchunks

        totalchunks +=chunks_current_station

else:
    # No global chunk: select an explicit station range and station chunk.
    run_stations = pd.DataFrame(all_stations)
    if args.last_station is not None:
        run_stations = run_stations.iloc[:(int(args.last_station)+1)]
    if args.first_station is not None:
        run_stations = run_stations.iloc[int(args.first_station):]
    run_station_chunk = 0
    if args.station_chunk is not None:
        # BUGFIX: argparse yields a string; the original assignment kept it as
        # one, which breaks the later slice arithmetic
        # `int(args.split_by)*run_station_chunk` (string repetition, then a
        # TypeError when used as a slice bound). Convert to int here.
        run_station_chunk = int(args.station_chunk)
+
#print(all_stations)
print(run_stations)
print(args.__dict__.keys())
# Fetch the morning and afternoon sounding record tables for the selected
# stations (project helper; cached unless refetch_records=True).
records_morning = get_records(run_stations,\
                              path_soundingsSET,\
                              subset='morning',
                              refetch_records=False,
                              )
records_afternoon = get_records(run_stations,\
                                path_soundingsSET,\
                                subset='afternoon',
                                refetch_records=False,
                                )

# align afternoon records with the noon records, and set same index
# (pairs each morning sounding with the afternoon sounding of the same local
# date; assumes at most one sounding per date per station — TODO confirm)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index

# semicolon-separated experiment names; each must be a key of EXP_DEFS
experiments = args.experiments.split(';')
+
# Main experiment loop: for every experiment configuration, run the CLASS4GL
# model for each selected station/chunk and iteratively adjust the soil
# moisture (wg, w2) between wilting point and field capacity until the
# modelled evaporative fraction EF = LE/(H+LE) matches the observed one,
# using a false-position/bisection hybrid root search.
for expname in experiments:
    exp = EXP_DEFS[expname]
    # output directory for this dataset/experiment combination
    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'

    os.system('mkdir -p '+path_exp)
    for istation,current_station in run_stations.iterrows():
        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
        # skip chunk numbers beyond this station's number of soundings
        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
            print("warning: outside of profile number range for station "+\
                  str(current_station)+". Skipping chunk number for this station.")
        else:
            # input sounding files (read) and per-chunk output files (write)
            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                     str(int(run_station_chunk))+'_ini.yaml'
            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                     str(int(run_station_chunk))+'_mod.yaml'
            file_ini = open(fn_ini,'w')
            file_mod = open(fn_mod,'w')

            #iexp = 0
            # set to True once any record was written, so that empty output
            # files can be removed afterwards
            onerun = False

            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
                #if iexp == 11:
            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():


                    # morning sounding provides the initial model state
                    c4gli_morning = get_record_yaml(file_morning, 
                                                    record_morning.index_start, 
                                                    record_morning.index_end,
                                                    mode='ini')

                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)


                    # matching afternoon sounding of the same (STNID,chunk,index)
                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
                    c4gli_afternoon = get_record_yaml(file_afternoon, 
                                                      record_afternoon.index_start, 
                                                      record_afternoon.index_end,
                                                    mode='ini')

                    # run length = daylight time between the two soundings
                    c4gli_morning.update(source='pairs',pars={'runtime' : \
                                        int((c4gli_afternoon.pars.datetime_daylight - 
                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
                    c4gli_morning.update(source=expname, pars=exp)

                    c4gl = class4gl(c4gli_morning)

                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
                    EFobs = c4gli_morning.pars.EF

                    # root-search bracket for the soil moisture:
                    # b = wilting point, c = field capacity
                    b = c4gli_morning.pars.wwilt
                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)


                    try:
                        # evaluate the EF mismatch at the lower bracket
                        #fb = f(b)
                        c4gli_morning.pars.wg = b
                        c4gli_morning.pars.w2 = b
                        c4gl = class4gl(c4gli_morning)
                        c4gl.run()
                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
                        fb = EFmod - EFobs
                        EFmodb = EFmod
                        c4glb = c4gl
                        c4gli_morningb = c4gli_morning

                        # evaluate the EF mismatch at the upper bracket
                        #fc = f(c)
                        c4gli_morning.pars.wg = c
                        c4gli_morning.pars.w2 = c
                        c4gl = class4gl(c4gli_morning)
                        c4gl.run()
                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
                        fc = EFmod - EFobs
                        print (EFmodb,EFobs,fb)
                        print (EFmod,EFobs,fc)
                        c4glc = c4gl
                        c4gli_morningc = c4gli_morning
                        i=0


                        # no sign change over [wwilt,wfc]: no root can be
                        # bracketed; keep the endpoint with the smaller error
                        if fc*fb > 0.:
                            if abs(fb) < abs(fc):
                                c4gl = c4glb
                                c4gli_morning = c4gli_morningb
                            else:
                                c4gl = c4glc
                                c4gli_morning = c4gli_morningc
                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")

                        else:
                            print('starting ITERATION!!!')
                            # first false-position (secant) estimate of the root
                            cn  = c - fc/(fc-fb)*(c-b)


                            #fcn = f(cn)
                            # NOTE(review): np.asscalar is deprecated/removed
                            # in recent NumPy; item()/float() is the successor.
                            c4gli_morning.pars.wg = np.asscalar(cn)
                            c4gli_morning.pars.w2 = np.asscalar(cn)
                            c4gl = class4gl(c4gli_morning)
                            c4gl.run()
                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs

                            # tolerances on the soil-moisture step (tol) and on
                            # the EF mismatch (ftol), plus an iteration cap
                            tol = 0.02
                            ftol = 10.
                            maxiter = 10

                            is1=0
                            is1max=1
                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
                                # keep b and c on opposite sides of the root
                                if fc * fcn > 0:
                                    temp = c
                                    c = b
                                    b = temp

                                # shift the three-point history (a,b,c)
                                a = b
                                fa = fb
                                b = c
                                fb = fc
                                c = cn
                                fc = fcn

                                print(i,a,b,c,fcn)

                                # two candidate secant steps from the history
                                s1 = c - fc/(fc-fb)*(c-b) 
                                s2 = c - fc/(fc-fa)*(c-a)


                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger


                                if (abs(s1-b) < abs(s2-b)):
                                    is1 = 0
                                else:
                                    is1 +=1

                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
                                if is1 < is1max:           
                                    s = s1
                                    print('s1')
                                else:
                                    is1 = 0
                                    s = s2
                                    print('s2')

                                # current search interval [l, r]
                                if c > b:
                                    l = b
                                    r = c
                                else:
                                    l = c
                                    r = b

                                m = (b+c)/2.

                                # accept the secant step only if it lies inside
                                # the bracket, otherwise bisect.
                                # NOTE(review): the debug labels appear swapped
                                # ('midpoint' is printed for the secant step,
                                # 'bissection' for the true midpoint m).
                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
                                    cn = s
                                    print('midpoint')
                                else:
                                    cn = m
                                    print('bissection')


                                #fcn = f(cn)
                                c4gli_morning.pars.wg = np.asscalar(cn)
                                c4gli_morning.pars.w2 = np.asscalar(cn)
                                c4gl = class4gl(c4gli_morning)
                                c4gl.run()
                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs


                                i+=1

                            if i == maxiter:
                                raise StopIteration('did not converge')




                        #c4gl = class4gl(c4gli_morning)
                        #c4gl.run()
                        onerun = True

                        c4gli_morning.pars.itersteps = i
                    # deliberately broad: this script variant keeps going when a
                    # model run crashes (see the module header comment)
                    except:
                        print('run not succesfull')
                    # The dumps sit outside the try/except on purpose: output is
                    # written even for crashed runs (module header).
                    # NOTE(review): after a failure this writes the last
                    # successful or partially-updated state — confirm intended.
                    c4gli_morning.dump(file_ini)


                    c4gl.dump(file_mod,\
                                  include_input=False,\
                               #   timeseries_only=timeseries_only,\
                             )
                    onerun = True

                #iexp = iexp +1
            file_ini.close()
            file_mod.close()
            file_morning.close()
            file_afternoon.close()

            # rebuild the record tables for this chunk if anything was written;
            # otherwise remove the (empty) output files
            if onerun:
                records_ini = get_records(pd.DataFrame([current_station]),\
                                                           path_exp,\
                                                           getchunk = int(run_station_chunk),\
                                                           subset='ini',
                                                           refetch_records=True,
                                                           )
                records_mod = get_records(pd.DataFrame([current_station]),\
                                                           path_exp,\
                                                           getchunk = int(run_station_chunk),\
                                                           subset='mod',\
                                                           refetch_records=True,\
                                                           )
            else:
                # remove empty files
                os.system('rm '+fn_ini)
                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/examples/run_soundings/trash/run_test.py b/examples/run_soundings/trash/run_test.py
new file mode 100644
index 0000000..767d960
--- /dev/null
+++ b/examples/run_soundings/trash/run_test.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
if __name__ == '__main__':
    # Command-line interface for the test experiment driver.
    # NOTE(review): `args` is bound only when the file runs as a script, yet
    # it is used unconditionally at module level below, so importing this
    # module would raise NameError — presumably script-only use; confirm.
    parser = argparse.ArgumentParser()
    parser.add_argument('--global-chunk')    # single chunk index counted over all stations
    parser.add_argument('--first-station')   # inclusive start of a station range
    parser.add_argument('--last-station')    # inclusive end of a station range
    parser.add_argument('--dataset')         # sounding dataset name (subdirectory)
    parser.add_argument('--path-soundings')  # root directory of the sounding sets
    parser.add_argument('--experiments')     # semicolon-separated EXP_DEFS keys
    parser.add_argument('--split-by',default=-1)# station soundings are split
                                                # up in chunks
    parser.add_argument('--station-chunk')   # chunk index within one station
    parser.add_argument('--c4gl-path',default='')  # optional class4gl checkout path
    args = parser.parse_args()
+
# Put the class4gl package on the import path: use --c4gl-path when given,
# otherwise fall back to the cluster-wide installation.
if args.c4gl_path == '': 
    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
else:
    sys.path.insert(0, args.c4gl_path)
from class4gl import class4gl_input, data_global,class4gl
from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
# Experiment definitions: maps an experiment name to the switch settings that
# are merged into the model input. 'sw_ac' lists the circulation terms to
# activate — presumably 'adv' = advection and 'w' = subsidence/vertical
# velocity (TODO confirm against the class4gl package); 'sw_ap' and 'sw_lit'
# semantics are defined there as well.
EXP_DEFS  =\
{
  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
}
+
+
#SET = 'GLOBAL'
SET = args.dataset

# BUGFIX: argparse stores "--path-soundings" as the attribute
# "path_soundings" (dashes become underscores), so the original test
# `'path-soundings' in args.__dict__` could never succeed and the option was
# silently ignored; worse, the intended branch indexed the dashed key, which
# would have raised KeyError. Use the underscore attribute and only honour it
# when actually given on the command line.
if getattr(args, 'path_soundings', None) is not None:
    path_soundingsSET = args.path_soundings+'/'+SET+'/'
else:
    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
# Table of all stations available in the sounding set (project helper;
# refetch_stations=True forces a rebuild of the cached station table).
all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table

# All morning records of every station; used below to work out how many
# --split-by-sized chunks each station contributes.
all_records_morning = get_records(all_stations,\
                              path_soundingsSET,\
                              subset='morning',
                              refetch_records=False,
                              )
+
if args.global_chunk is not None:
    # Translate the global chunk number into a (station, station-chunk) pair:
    # walk through the stations, counting how many --split-by-sized chunks of
    # soundings each station has, until the requested global chunk index falls
    # inside the current station.
    # NOTE(review): this path assumes --split-by was given (> 0); with the
    # default of -1 the ceil() below goes negative — confirm callers always
    # pass --split-by together with --global-chunk.
    totalchunks = 0
    # Renamed local (was `stations_iterator`), which shadowed the
    # stations_iterator imported from interface_multi.
    station_row_iterator = all_stations.iterrows()
    in_current_chunk = False
    while not in_current_chunk:
        istation,current_station = station_row_iterator.__next__()
        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))

        if in_current_chunk:
            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
            run_station_chunk = int(args.global_chunk) - totalchunks

        totalchunks +=chunks_current_station

else:
    # No global chunk: select an explicit station range and station chunk.
    run_stations = pd.DataFrame(all_stations)
    if args.last_station is not None:
        run_stations = run_stations.iloc[:(int(args.last_station)+1)]
    if args.first_station is not None:
        run_stations = run_stations.iloc[int(args.first_station):]
    run_station_chunk = 0
    if args.station_chunk is not None:
        # BUGFIX: argparse yields a string; the original assignment kept it as
        # one, which breaks the later slice arithmetic
        # `int(args.split_by)*run_station_chunk` (string repetition, then a
        # TypeError when used as a slice bound). Convert to int here.
        run_station_chunk = int(args.station_chunk)
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+# Run every requested experiment on every selected station chunk: pair each
+# morning sounding with its afternoon counterpart, run class4gl, and dump the
+# initial conditions (_ini) and model output (_mod) yaml files.
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        # Skip chunks that lie beyond this station's number of records.
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            # NOTE(review): the extra indentation level below looks like a
+            # leftover from a removed 'if' statement inside this loop.
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    # Model runtime = daylight interval between the two soundings.
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    # NOTE(review): bare except silently swallows any failure of
+                    # c4gl.run(); c4gl.dump below still executes on a failed run —
+                    # confirm this best-effort behaviour is intended.
+                    try:
+                        c4gl.run()
+                    except:
+                        print('run not succesfull')
+                    onerun = True
+
+                    c4gli_morning.dump(file_ini)
+                    
+                    
+                    c4gl.dump(file_mod,\
+                              include_input=False,\
+                              #timeseries_only=timeseries_only,\
+                             )
+                    # NOTE(review): duplicate of the assignment above; harmless.
+                    onerun = True
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                # Refresh the cached record tables for this station chunk.
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/examples/setup_global.py b/examples/setup_global.py
deleted file mode 100644
index 8b13789..0000000
--- a/examples/setup_global.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/examples/setup_soundings/setup_bllast.py b/examples/setup_soundings/setup_bllast.py
new file mode 100644
index 0000000..af8c8bb
--- /dev/null
+++ b/examples/setup_soundings/setup_bllast.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+# Read data from BLLAST campaing and convert it to class4gl input
+
+# WARNING!! stupid tab versus space formatting, grrrmmmlmlmlll!  the following command needs to be executed first: 
+#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+
def replace_iter(iterable, search, replace):
    """Yield each string from *iterable* with *search* replaced by *replace*.

    Bug fix: ``str.replace`` returns a new string (strings are immutable);
    the original discarded that result and yielded the unmodified value.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
+# definition of the humpa station
+current_station = pd.Series({ "latitude"  : 42.971834,
+                  "longitude" : 0.3671169,
+                  "name" : "the BLLAST experiment"
+                })
+current_station.name = 90001
+
+
+
+
+
+# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
+# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
+
+HOUR_FILES = \
+{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110619_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110620_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110625_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110702_1655_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
+}
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
def esat(T):
    """Saturation vapour pressure [Pa] at temperature T [K] (Tetens formula)."""
    # Same arithmetic (and float op order) as the original one-liner,
    # just broken into named intermediates for readability.
    exponent_num = 17.2694 * (T - 273.16)
    exponent_den = T - 35.86
    return 611.0 * np.exp(exponent_num / exponent_den)
def efrom_rh100_T(rh100, T):
    """Actual vapour pressure [Pa] from relative humidity (percent) and T [K]."""
    # saturation pressure scaled by RH; op order matches the original exactly
    saturation = esat(T)
    return saturation * rh100 / 100.
def qfrom_e_p(e, p):
    """Specific humidity [kg/kg] from vapour pressure e [Pa] and pressure p [Pa]."""
    # partial pressure of dry air plus the epsilon-weighted vapour term
    denominator = p - (1. - epsilon) * e
    return epsilon * e / denominator
+
+def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        """Parse one BLLAST MODEM radiosounding file into a class4gl input.
+
+        Reads the tab-separated sounding, derives state variables
+        (t, p, u, v, z, q, theta, thetav), estimates the mixed-layer height
+        via blh(), builds an idealized profile (3-point head + filtered tail),
+        fills in timing/solar parameters, and dumps the result to
+        file_sounding.
+
+        Parameters (types inferred from usage — confirm against callers):
+          balloon_file : open handle of a *.cor.fmt sounding file
+          file_sounding: open output yaml file the object is dumped to
+          ldate        : datetime of the sounding day (local, tz-aware)
+          hour         : launch hour added to ldate
+          c4gli        : optional class4gl_input to update in place; a new one
+                         is created (and global data attached) when None
+
+        Returns the updated class4gl_input object.
+        """
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
+                                     #widths=[14]*19,
+                                     #skiprows=9,
+                                     #skipfooter=15,
+                                     #decimal='.',
+                                     #header=None,
+                                     #names = columns,
+                                     #na_values='-----')
+        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
+        print(air_balloon_in.columns)
+        # Map raw file columns to model variables (SI units, height above site).
+        rowmatches = {
+            't':      lambda x: x['TaRad']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['Press']*100.,
+            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
+            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
+            'z':      lambda x: x['Altitude'] -582.,
+            # from virtual temperature to absolute humidity
+            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        # Derived quantities computed from the first pass (R, theta, thetav).
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+        
+        # Keep only rows with no NaNs and non-negative height above ground.
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            # Mixed-layer height (best guess, upper and lower bound) from the
+            # bulk-Richardson method implemented in blh().
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        # Surface pressure = pressure of the lowest valid sample.
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                # no valid samples at all: all-NaN placeholder
+                # NOTE(review): unlike the other branches this assigns a
+                # DataFrame (not a Series of means) — confirm downstream use.
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        # Profile head: 3 synthetic points (near-surface, h, just above h).
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+        # filter data so that potential temperature always increases with
+        # height 
+        cols = []
+        for column in air_ap_tail.columns:
+            #if column != 'z':
+                cols.append(column)
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        # Merge consecutive tail samples until the averaged layer is both
+        # >10 m above the previous level and has a positive theta gradient.
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        # 
+        # # we copy the pressure at ground level from balloon sounding. The
+        # # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        # Hydrostatic estimate of pressure at the head points.
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually
+        # write local solar time, we need to assign the timezone to UTC (which
+        # is WRONG!!!). Otherwise ruby cannot understand it (it always converts
+        # tolocal computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise']+dt.timedelta(hours=2))\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        print('ldatetime_daylight',dpars['ldatetime_daylight'])
+        print('ldatetime',dpars['ldatetime'])
+        print('lSunrise',dpars['lSunrise'])
+        dpars['day'] = dpars['ldatetime'].day
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        print('tstart',dpars['tstart'])
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                # NOTE(review): this rounding block appears unintentionally
+                # nested inside the numpy-conversion loop — it only executes
+                # when at least one numpy-typed value exists, and re-runs for
+                # every such value. Confirm it was meant to sit at function
+                # level (executed once, unconditionally).
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        # NOTE(review): 'column' is a leftover loop variable from the rounding
+        # block above (NameError if that block never ran).
+        print(column)
+        c4gli.update(source='bllast',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+# Convert every BLLAST sounding listed in HOUR_FILES into class4gl yaml
+# records: one file with the morning profiles and one with the afternoon ones.
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
+    
+    print(humpafn)
+    # latin-1: raw sounding files contain non-UTF-8 characters (degree signs)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+# Pair each morning record with the afternoon record of the same local date,
+# run class4gl for every pair, and cache the resulting ini/mod record tables.
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    # Model runtime = daylight interval between morning and afternoon sounding.
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+
+    
+    # default switches for this IOPS run (no advection, prescribed profiles)
+    c4gli_morning.pars.sw_ac = []
+    c4gli_morning.pars.sw_ap = True
+    c4gli_morning.pars.sw_lit = False
+    c4gli_morning.dump(file_ini)
+    
+    # NOTE(review): unlike the chunked batch loop earlier in this patch,
+    # c4gl.run() is not wrapped in try/except here — one failing pair aborts
+    # the whole script. Confirm that is intended.
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/examples/setup_soundings/setup_global.py b/examples/setup_soundings/setup_global.py
new file mode 100644
index 0000000..79224d9
--- /dev/null
+++ b/examples/setup_soundings/setup_global.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <index>
+    where <index> is an integer indicating the row index of the station list
+    under odir+'/'+fn_stations (see below)
+
+this script should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
+#calculate the root mean square error
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    
+    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# initialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_csv(fn_stations)
+
+
+STNlist = list(df_stations.iterrows())
+NUMSTNS = len(STNlist)
+# the work is split over PROCS pbs tasks; each task handles BATCHSIZE stations
+PROCS = 100
+BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+
+
+# task index, passed on the command line by setup_global.pbs
+iPROC = int(sys.argv[1])
+
+
+# Main per-station loop: this pbs task (iPROC) processes its batch of
+# stations. For each station, all Wyoming soundings since 1981 are scanned,
+# qualifying morning/afternoon pairs are selected, and the pairs are dumped
+# to the station's morning/afternoon YAML files.
+for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
+    # becomes True as soon as one valid sounding pair is written
+    one_run = False
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
+    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout, \
+         open(fnout_afternoon,'w') as fileout_afternoon:
+        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
+        wy_strm.set_STNM(int(STN['ID']))
+
+        # we consider all soundings after 1981
+        wy_strm.find_first(year=1981)
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+        # so we continue as long as we can find a new sounding
+                
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            try:
+                c4gli.get_profile_wyoming(wy_strm)
+                #print(STN['ID'],c4gli.pars.datetime)
+                #c4gli.get_global_input(globaldata)
+
+                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+                # quality criteria for accepting this sounding as a morning
+                # profile; all criteria must hold
+                logic = dict()
+                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+                logic['daylight'] = \
+                    ((c4gli.pars.ldatetime_daylight - 
+                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
+                
+                logic['springsummer'] = (c4gli.pars.theta > 278.)
+                
+                # we take 3000 because previous analysis (ie., HUMPPA) has
+                # focussed towards such altitude
+                le3000 = (c4gli.air_balloon.z <= 3000.)
+                logic['10measurements'] = (np.sum(le3000) >= 10) 
+
+                leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+                logic['mlerrlow'] = (\
+                        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                        (not np.isnan(c4gli.pars.theta)) and \
+                        (rmse(c4gli.air_balloon.theta[leh] , \
+                              c4gli.pars.theta,filternan_actual=True) < 1.)\
+                              )
+    
+
+                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+                
+                print('logic:', logic)
+                # the result: the mean of the boolean criteria equals 1.0
+                # only if every criterion passed
+                morning_ok = np.mean(list(logic.values()))
+                print(morning_ok,c4gli.pars.ldatetime)
+
+            # NOTE(review): bare except hides the actual failure; consider
+            # catching specific exceptions instead
+            except:
+                morning_ok =False
+                print('obtain morning not good')
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            wy_strm.find_next()
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+            if morning_ok == 1.:
+                print('MORNING OK!')
+                # we get the current date
+                current_date = dt.date(c4gli.pars.ldatetime.year, \
+                                       c4gli.pars.ldatetime.month, \
+                                       c4gli.pars.ldatetime.day)
+                c4gli_afternoon.clear()
+                print('AFTERNOON PROFILE CLEARED')
+                try:
+                    c4gli_afternoon.get_profile_wyoming(wy_strm)
+                    print('AFTERNOON PROFILE OK')
+
+                    if wy_strm.current is not None:
+                        current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                    else:
+                        # a dummy date: this will be ignored anyway
+                        current_date_afternoon = dt.date(1900,1,1)
+
+                    # we will dump the latest afternoon sounding that fits the
+                    # minimum criteria specified by logic_afternoon
+                    print(current_date,current_date_afternoon)
+                    c4gli_afternoon_for_dump = None
+                    # walk through the remaining soundings of the same day
+                    while ((current_date_afternoon == current_date) and \
+                           (wy_strm.current is not None)):
+                        logic_afternoon =dict()
+
+                        logic_afternoon['afternoon'] = \
+                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                        logic_afternoon['daylight'] = \
+                          ((c4gli_afternoon.pars.ldatetime - \
+                            c4gli_afternoon.pars.ldatetime_daylight \
+                           ).total_seconds()/3600. <= 0.)
+
+
+                        le3000_afternoon = \
+                            (c4gli_afternoon.air_balloon.z <= 3000.)
+                        logic_afternoon['5measurements'] = \
+                            (np.sum(le3000_afternoon) >= 5) 
+
+                        # we only store the last afternoon sounding that fits these
+                        # minimum criteria
+
+                        afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+                        print('logic_afternoon: ',logic_afternoon)
+                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+                        if afternoon_ok == 1.:
+                            # # doesn't work :(
+                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+                            
+                            # so we just create a new one from the same wyoming profile
+                            c4gli_afternoon_for_dump = class4gl_input()
+                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+                        wy_strm.find_next()
+                        c4gli_afternoon.clear()
+                        c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                        if wy_strm.current is not None:
+                            current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                        else:
+                            # a dummy date: this will be ignored anyway
+                            current_date_afternoon = dt.date(1900,1,1)
+
+                        # Only in the case we have a good pair of soundings, we
+                        # dump them to disk
+                    if c4gli_afternoon_for_dump is not None:
+                        # model runtime = daylight span between the soundings [s]
+                        c4gli.update(source='pairs',pars={'runtime' : \
+                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+                                 c4gli.pars.datetime_daylight).total_seconds())})
+    
+    
+                        print('ALMOST...')
+                        if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
+                                
+        
+                            # complete the input with the gridded global datasets
+                            c4gli.get_global_input(globaldata)
+                            print('VERY CLOSE...')
+                            if c4gli.check_source_globaldata() and \
+                                (c4gli.check_source(source='wyoming',\
+                                                   check_only_sections='pars')):
+                                c4gli.dump(fileout)
+                                
+                                c4gli_afternoon_for_dump.dump(fileout_afternoon)
+                                
+                                
+                                # for keyEXP,dictEXP in experiments.items():
+                                #     
+                                #     c4gli.update(source=keyEXP,pars = dictEXP)
+                                #     c4gl = class4gl(c4gli)
+                                #     # c4gl.run()
+                                #     
+                                #     c4gl.dump(c4glfiles[key])
+                                
+                                print('HIT!!!')
+                                one_run = True
+                # NOTE(review): bare except hides the actual failure; consider
+                # catching specific exceptions instead
+                except:
+                    print('get profile failed')
+                
+    # cache the record tables for the interface when at least one pair was
+    # written; otherwise remove the (empty) output files again
+    if one_run:
+        STN.name = STN['ID']
+        all_records_morning = get_records(pd.DataFrame([STN]),\
+                                      odir,\
+                                      subset='morning',
+                                      refetch_records=True,
+                                      )
+        all_records_afternoon = get_records(pd.DataFrame([STN]),\
+                                      odir,\
+                                      subset='afternoon',
+                                      refetch_records=True,
+                                      )
+    else:
+        os.system('rm '+fnout)
+        os.system('rm '+fnout_afternoon)
+
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
diff --git a/examples/setup_soundings/setup_goamazon.py b/examples/setup_soundings/setup_goamazon.py
new file mode 100644
index 0000000..f9efe2c
--- /dev/null
+++ b/examples/setup_soundings/setup_goamazon.py
@@ -0,0 +1,740 @@
+# -*- coding: utf-8 -*-
+
+import xarray as xr
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+import glob
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+# initialize the global (gridded) datasets used to complete the model input
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md (ratio of dry-air to water-vapour gas constants)
+
+# ARM radiosonde archive of the GOAMAZON campaign (netCDF soundings)
+path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
+
def replace_iter(iterable, search, replace):
    """Lazily yield each string of *iterable* with *search* replaced by *replace*.

    BUGFIX: str.replace returns a new string; the previous implementation
    discarded that result and yielded the element unchanged.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
+# definition of the GOAMAZON station
+current_station = pd.Series({ "latitude"  : -3.21,
+                  "longitude" : -60.6,
+                  "name" : "the GOAMAZON experiment"
+                })
+# the Series name doubles as the numeric station identifier (STNID)
+current_station.name = 90002
+
+# we define the columns ourselves because it is a mess in the file itself.
+columns =\
+['Time[min:sec]',
+ 'P[hPa]',
+ 'T[C]',
+ 'U[%]',
+ 'Wsp[m/s]',
+ 'Wdir[Grd]',
+ 'Lon[°]',
+ 'Lat[°]',
+ 'Altitude[m]',
+ 'GeoPot[m]',
+ 'MRI',
+ 'RI',    
+ 'DewPoint[C]',
+ 'Virt. Temp[C]',
+ 'Rs[m/min]',
+ 'D[kg/m3]',
+ 'Azimut[°]',
+ 'Elevation[°]',
+ 'Range[m]',
+]
+
+# scan the archive for days that have BOTH a ~05:30 UTC (morning) and a
+# ~17:30 UTC (afternoon) sounding file
+DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
+DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)
+
+
+DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))]
+HOUR_FILES = {}
+for iDT, DT in enumerate(DTS):
+    morning_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf')
+    if len(possible_files)>0:
+        morning_file= possible_files[0]
+    afternoon_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*.cdf')
+    if len(possible_files)>0:
+        afternoon_file= possible_files[0]
+
+    # keep the day only when the pair is complete;
+    # entry format: [local decimal hour, filename]
+    if (morning_file is not None) and (afternoon_file is not None):
+        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
+                          'afternoon':[17.5,afternoon_file]}
+
+print(HOUR_FILES)
+
+# HOUR_FILES = \
+# {
+#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
+#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+# }
+
+
+
+
+#only include the following timeseries in the model output
+# (keeps the dumped "mod" YAML files compact)
+# NOTE(review): 'wthetae' appears twice in this list; harmless duplication?
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
def esat(T):
    """Saturation vapour pressure [Pa] at temperature T [K] (Tetens formula)."""
    T_triple = 273.16  # triple-point temperature [K]
    exponent = 17.2694 * (T - T_triple) / (T - 35.86)
    return 0.611e3 * np.exp(exponent)
def efrom_rh100_T(rh100, T):
    """Actual vapour pressure [Pa] from relative humidity rh100 [%] and T [K]."""
    # scale the saturation vapour pressure by the relative-humidity fraction
    e_saturation = esat(T)
    return e_saturation * rh100 / 100.
def qfrom_e_p(e, p):
    """Specific humidity [kg/kg] from vapour pressure e and total pressure p."""
    # partial pressure of dry air plus the epsilon-weighted vapour share
    denominator = p - (1. - epsilon) * e
    return epsilon * e / denominator
+
+
+
+def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
+        print(balloon_file)
+        
+        xrin = balloon_file
+        air_balloon = pd.DataFrame()
+
+        air_balloon['t'] = xrin.tdry.values+273.15
+        air_balloon['p'] = xrin.pres.values*100.
+        
+        air_balloon['u'] = xrin.u_wind.values
+        air_balloon['v'] = xrin.v_wind.values
+        air_balloon['WSPD'] = xrin['wspd'].values
+        
+        print(xrin.rh.values.shape)
+        air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)
+        
+
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        # air_balloon_in = pd.read_fwf(balloon_file,
+        #                              widths=[14]*19,
+        #                              skiprows=9,
+        #                              skipfooter=15,
+        #                              decimal=',',
+        #                              header=None,
+        #                              names = columns,
+        #                              na_values='-----')
+    
+
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
+            'rho': lambda x: x.p /x.t / x.R ,
+        }
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        print('alt in xrin?:','alt' in xrin)
+        if 'alt' in xrin:
+            air_balloon['z'] = xrin.alt.values
+        else:
+            air_balloon['z'] = 0.
+            for irow,row in air_balloon.iloc[1:].iterrows():
+                air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
+                        2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \
+                        (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])
+                        
+             
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+
+        # there are issues with the lower measurements in the HUMPPA campaign,
+        # for which a steady decrease of potential temperature is found, which
+        # is unrealistic.  Here I filter them away
+        ifirst = 0
+        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
+            ifirst = ifirst+1
+        print ('ifirst:',ifirst)
+        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            print(air_balloon.z.shape,air_balloon.thetav.shape,)
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
+        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='humppa',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
# Output directory for the parsed IOP soundings (yaml format).
path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'


# Parse every morning sounding listed in HOUR_FILES and append each parsed
# profile to a single station-wide yaml file.
# NOTE(review): the soundings are opened with xr.open_dataset here (netCDF),
# unlike the fixed-width text files read elsewhere — confirm the file format.
file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
for date,pair  in HOUR_FILES.items(): 
    print(pair['morning'])
    humpafn =pair['morning'][1]
    print(humpafn)
    balloon_file = xr.open_dataset(humpafn)

    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
file_morning.close()

# Same procedure for the afternoon soundings of each campaign day.
file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
for date,pair  in HOUR_FILES.items(): 
    humpafn = pair['afternoon'][1]
    balloon_file = xr.open_dataset(humpafn)

    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
# Build the record index of all morning soundings written above;
# refetch_records=True forces a rescan of the yaml files on disk.
records_morning = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='morning',
                                           refetch_records=True,
                                           )
print('records_morning_ldatetime',records_morning.ldatetime)

records_afternoon = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='afternoon',
                                           refetch_records=True,
                                           )

# align afternoon records with noon records, and set same index
# (reindex the afternoon table by calendar date so each afternoon sounding
# lines up with the morning sounding of the same day)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index
# Experiment output directory for the class4gl initial states and model runs.
path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'

os.system('mkdir -p '+path_exp)
# Re-open the sounding files read-only, and open the experiment output files
# for writing the initial ('ini') and modelled ('mod') states.
file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
# Main loop: for every morning/afternoon sounding pair, load both profiles,
# set the model runtime to the observed daylight interval between them,
# run class4gl, and dump the initial state and the model output.
for (STNID,chunk,index),record_morning in records_morning.iterrows():
    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]

    c4gli_morning = get_record_yaml(file_morning, 
                                    record_morning.index_start, 
                                    record_morning.index_end,
                                    mode='ini')
    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
    
    
    c4gli_afternoon = get_record_yaml(file_afternoon, 
                                      record_afternoon.index_start, 
                                      record_afternoon.index_end,
                                    mode='ini')

    # runtime = seconds between the (daylight-clipped) morning and afternoon
    # sounding times, so the simulation spans the observed interval.
    c4gli_morning.update(source='pairs',pars={'runtime' : \
                        int((c4gli_afternoon.pars.datetime_daylight - 
                             c4gli_morning.pars.datetime_daylight).total_seconds())})
    c4gli_morning.update(source='manual',
                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
    c4gli_morning.dump(file_ini)
    
    c4gl = class4gl(c4gli_morning)
    c4gl.run()
    
    # only the variables listed in timeseries_only are written out
    c4gl.dump(file_mod,\
              include_input=False,\
              timeseries_only=timeseries_only)
file_ini.close()
file_mod.close()
file_morning.close()
file_afternoon.close()
+
# Rebuild record tables for the initial ('ini') and modelled ('mod') states
# that were written by the loop above.
records_ini = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='ini',
                                           refetch_records=True,
                                           )
records_mod = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='mod',
                                           refetch_records=True,
                                           )

# ini and mod records were written pairwise, so they share the same ordering.
records_mod.index = records_ini.index

# align afternoon records with initial records, and set same index
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
records_afternoon.index = records_ini.index
+
+
+"""
+stations_for_iter = stations(path_exp)
+for STNID,station in stations_iterator(stations_for_iter):
+    records_current_station_index = \
+            (records_ini.index.get_level_values('STNID') == STNID)
+    file_current_station_mod = STNID
+
+    with \
+    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+        for (STNID,index),record_ini in records_iterator(records_ini):
+            c4gli_ini = get_record_yaml(file_station_ini, 
+                                        record_ini.index_start, 
+                                        record_ini.index_end,
+                                        mode='ini')
+            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+
+            record_mod = records_mod.loc[(STNID,index)]
+            c4gl_mod = get_record_yaml(file_station_mod, 
+                                        record_mod.index_start, 
+                                        record_mod.index_end,
+                                        mode='mod')
+            record_afternoon = records_afternoon.loc[(STNID,index)]
+            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+                                        record_afternoon.index_start, 
+                                        record_afternoon.index_end,
+                                        mode='ini')
+"""
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersectino index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/examples/setup_soundings/setup_humppa.py b/examples/setup_soundings/setup_humppa.py
new file mode 100644
index 0000000..ff37628
--- /dev/null
+++ b/examples/setup_soundings/setup_humppa.py
@@ -0,0 +1,732 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
# Load the global input datasets once at module level; c4gli.get_global_input()
# reads from this object later on.  NOTE(review): data_global comes from the
# class4gl package; recalc=0 presumably reuses cached data — confirm.
globaldata = data_global()
globaldata.load_datasets(recalc=0)

# Thermodynamic constants used in the humidity/temperature conversions below.
Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
epsilon = Rd/Rv # or mv/md
+
+
def replace_iter(iterable, search, replace):
    """Yield each value of *iterable* with *search* replaced by *replace*.

    BUGFIX: ``str.replace`` returns a new string (strings are immutable);
    the original code discarded that result and yielded the unmodified
    value, making this generator a no-op.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
# Metadata of the HUMPPA campaign station.  The pandas ``name`` attribute
# (90000) serves as the numeric station ID elsewhere in this script.
current_station = pd.Series(
    data={
        "latitude": 61.8448,
        "longitude": 24.2882,
        "name": "the HUMMPA experiment",
    },
    name=90000,
)
+
# we define the columns ourselves because it is a mess in the file itself.
# These are the 19 fixed-width columns of the raw radiosonde text files;
# they are passed to pd.read_fwf(..., header=None, names=columns) below, so
# the order here must match the column order in the files exactly.
columns =\
['Time[min:sec]',
 'P[hPa]',
 'T[C]',
 'U[%]',
 'Wsp[m/s]',
 'Wdir[Grd]',
 'Lon[°]',
 'Lat[°]',
 'Altitude[m]',
 'GeoPot[m]',
 'MRI',
 'RI',    
 'DewPoint[C]',
 'Virt. Temp[C]',
 'Rs[m/min]',
 'D[kg/m3]',
 'Azimut[°]',
 'Elevation[°]',
 'Range[m]',
]
+
+
# Mapping of campaign day -> {'morning': [local launch hour, filename],
#                             'afternoon': [local launch hour, filename]}
# used below to pair each morning sounding with an afternoon sounding.
HOUR_FILES = \
{ dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[18,'humppa_071310_1800.txt']},
  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
  dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
  dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
  dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
  dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[21,'humppa_071910_2100.txt']},
#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[18,'humppa_072210_1800.txt']},
 # something is wrong with this profile
 # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[18,'humppa_072910_1800.txt']},
  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[15,'humppa_073110_1500.txt']},
  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[18,'humppa_080210_1800.txt']},
  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
  # BUGFIX: this entry points at the 2010-08-04 sounding files but reused the
  # 2010-08-03 key, silently overwriting the previous entry in the dict and
  # dropping Aug 3 from the campaign; the key is corrected to Aug 4.
  dt.datetime(2010,8, 4,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[18,'humppa_081010_1800.txt']},
}
+
+
+
+
+
+
# Only include the following time series in the model output (passed to
# c4gl.dump via ``timeseries_only=``; keeps the dumped yaml files small).
# BUGFIX: 'wthetae' was listed twice; the duplicate is removed.
timeseries_only = \
['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'zlcl']
+
+
+def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_fwf(balloon_file,
+                                     widths=[14]*19,
+                                     skiprows=9,
+                                     skipfooter=15,
+                                     decimal=',',
+                                     header=None,
+                                     names = columns,
+                                     na_values='-----')
+    
+        rowmatches = {
+            't':      lambda x: x['T[C]']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['P[hPa]']*100.,
+            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
+            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
+            'z':      lambda x: x['Altitude[m]'],
+            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+
+        # there are issues with the lower measurements in the HUMPPA campaign,
+        # for which a steady decrease of potential temperature is found, which
+        # is unrealistic.  Here I filter them away
+        ifirst = 0
+        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
+            ifirst = ifirst+1
+        print ('ifirst:',ifirst)
+        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # 
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunately!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts to local computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='humppa',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cash the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
+    print(humpafn)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+    c4gli_morning.update(source='manual',
+                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/examples/setup_soundings/trash/setup_global_old.py b/examples/setup_soundings/trash/setup_global_old.py
new file mode 100644
index 0000000..d812684
--- /dev/null
+++ b/examples/setup_soundings/trash/setup_global_old.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <index>
+    where <index> is an integer indicating the row index of the station list
+    under odir+'/'+fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
+#calculate the root mean square error
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    
+    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# initialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_csv(fn_stations)
+
+
+STNlist = list(df_stations.iterrows())
+NUMSTNS = len(STNlist)
+PROCS = 100
+BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+
+
+iPROC = int(sys.argv[1])
+
+
+for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
+    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout, \
+         open(fnout_afternoon,'w') as fileout_afternoon:
+        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
+        wy_strm.set_STNM(int(STN['ID']))
+
+        # we consider all soundings after 1981
+        wy_strm.find_first(year=1981)
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+        # so we continue as long as we can find a new sounding
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            c4gli.get_profile_wyoming(wy_strm)
+            #print(STN['ID'],c4gli.pars.datetime)
+            #c4gli.get_global_input(globaldata)
+
+            print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+            logic = dict()
+            logic['morning'] =  (c4gli.pars.ldatetime.hour < 12.)
+            logic['daylight'] = \
+                ((c4gli.pars.ldatetime_daylight - 
+                  c4gli.pars.ldatetime).total_seconds()/3600. <= 5.)
+            
+            logic['springsummer'] = (c4gli.pars.theta > 278.)
+            
+            # we take 3000 because previous analysis (ie., HUMPPA) has
+            # focussed towards such altitude
+            le3000 = (c4gli.air_balloon.z <= 3000.)
+            logic['10measurements'] = (np.sum(le3000) >= 10) 
+
+            leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+            try:
+                logic['mlerrlow'] = (\
+                        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                        (not np.isnan(c4gli.pars.theta)) and \
+                        (rmse(c4gli.air_balloon.theta[leh] , \
+                              c4gli.pars.theta,filternan_actual=True) < 1.)\
+                              )
+    
+            except:
+                logic['mlerrlow'] = False
+                print('rmse probably failed')
+
+            logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+            
+            print('logic:', logic)
+            # the result
+            morning_ok = np.mean(list(logic.values()))
+            print(morning_ok,c4gli.pars.ldatetime)
+            
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            wy_strm.find_next()
+
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+            if morning_ok == 1.:
+                # we get the current date
+                current_date = dt.date(c4gli.pars.ldatetime.year, \
+                                       c4gli.pars.ldatetime.month, \
+                                       c4gli.pars.ldatetime.day)
+                c4gli_afternoon.clear()
+                c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                if wy_strm.current is not None:
+                    current_date_afternoon = \
+                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                       c4gli_afternoon.pars.ldatetime.month, \
+                                       c4gli_afternoon.pars.ldatetime.day)
+                else:
+                    # a dummy date: this will be ignored anyway
+                    current_date_afternoon = dt.date(1900,1,1)
+
+                # we will dump the latest afternoon sounding that fits the
+                # minimum criteria specified by logic_afternoon
+                c4gli_afternoon_for_dump = None
+                while ((current_date_afternoon == current_date) and \
+                       (wy_strm.current is not None)):
+                    logic_afternoon =dict()
+
+                    logic_afternoon['afternoon'] = \
+                        (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                    logic_afternoon['daylight'] = \
+                      ((c4gli_afternoon.pars.ldatetime - \
+                        c4gli_afternoon.pars.ldatetime_daylight \
+                       ).total_seconds()/3600. <= 2.)
+
+
+                    le3000_afternoon = \
+                        (c4gli_afternoon.air_balloon.z <= 3000.)
+                    logic_afternoon['5measurements'] = \
+                        (np.sum(le3000_afternoon) >= 5) 
+
+                    # we only store the last afternoon sounding that fits these
+                    # minimum criteria
+
+                    afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+                    print('logic_afternoon: ',logic_afternoon)
+                    print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+                    if afternoon_ok == 1.:
+                        # # doesn't work :(
+                        # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+                        
+                        # so we just create a new one from the same wyoming profile
+                        c4gli_afternoon_for_dump = class4gl_input()
+                        c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+                    wy_strm.find_next()
+                    c4gli_afternoon.clear()
+                    c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                    if wy_strm.current is not None:
+                        current_date_afternoon = \
+                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                       c4gli_afternoon.pars.ldatetime.month, \
+                                       c4gli_afternoon.pars.ldatetime.day)
+                    else:
+                        # a dummy date: this will be ignored anyway
+                        current_date_afternoon = dt.date(1900,1,1)
+
+                    # Only in the case we have a good pair of soundings, we
+                    # dump them to disk
+                if c4gli_afternoon_for_dump is not None:
+                    c4gli.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+                             c4gli.pars.datetime_daylight).total_seconds())})
+    
+    
+                    print('ALMOST...')
+                    if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
+                            
+        
+                        c4gli.get_global_input(globaldata)
+                        print('VERY CLOSE...')
+                        if c4gli.check_source_globaldata() and \
+                            (c4gli.check_source(source='wyoming',\
+                                               check_only_sections='pars')):
+                            c4gli.dump(fileout)
+                            
+                            c4gli_afternoon_for_dump.dump(fileout_afternoon)
+                            
+                            
+                            # for keyEXP,dictEXP in experiments.items():
+                            #     
+                            #     c4gli.update(source=keyEXP,pars = dictEXP)
+                            #     c4gl = class4gl(c4gli)
+                            #     # c4gl.run()
+                            #     
+                            #     c4gl.dump(c4glfiles[key])
+                            
+                            print('HIT!!!')
+                
+                
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
diff --git a/lib/class4gl.py b/lib/class4gl.py
new file mode 100644
index 0000000..7baaa51
--- /dev/null
+++ b/lib/class4gl.py
@@ -0,0 +1,1611 @@
+# -*- coding: utf-8 -*-
+
+"""
+
+Created on Mon Jan 29 12:33:51 2018
+
+Module file for class4gl, which extends the CLASS model to be able to take
+global air profiles as input. It exists of:
+
+CLASSES:
+    - an input object, namely class4gl_input. It includes:
+        - a function to read Wyoming sounding data from a wyoming stream object
+        - a function to read global data from a globaldata library object 
+    - the model object: class4gl
+    - ....    
+
+DEPENDENCIES:
+    - xarray
+    - numpy
+    - data_global
+    - Pysolar
+    - yaml
+
+@author: Hendrik Wouters
+
+"""
+
+
+
+""" Setup of envirnoment """
+
+# Standard modules of the stand class-boundary-layer model
+from model import model
+from model import model_output as class4gl_output
+from model import model_input
+from model import qsat
+#from data_soundings import wyoming 
+import Pysolar
+import yaml
+import logging
+import warnings
+import pytz
+
+#formatter = logging.Formatter()
# Configure module-wide logging. Keep the format on a single line: the
# original backslash continuation embedded the source-code indentation
# (long runs of spaces) into every emitted log record.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+
+# Generic Python Packages
+import numpy as np
+import datetime as dt
+import pandas as pd
+import xarray as xr
+import io
+#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
+from data_global import data_global
+grav = 9.81
+
+# this is just a generic input object
class generic_input(object):
    """Bare container object; callers attach whatever attributes they need."""

    def __init__(self):
        # marker attribute showing the object went through __init__
        self.init = True
+
# all units from all variables in CLASS(4GL) should be defined here!
# (maps variable name -> unit string, used for labelling output)
units = dict(
    h='m',              # mixed-layer height
    theta='K',          # potential temperature
    q='kg/kg',          # specific humidity
    cc='-',             # cloud cover fraction
    cveg='-',           # vegetation fraction
    wg='m3 m-3',        # top-soil volumetric water content
    w2='m3 m-3',        # deeper-soil volumetric water content
    Tsoil='K',          # top-soil temperature
    T2='K',             # deeper-soil temperature
    z0m='m',            # momentum roughness length
    alpha='-',          # surface albedo
    LAI='-',            # leaf area index
    dhdt='m/h',         # mixed-layer height tendency
    dthetadt='K/h',     # potential-temperature tendency
    dqdt='kg/kg/h',     # specific-humidity tendency
    BR='-',             # Bowen ratio
    EF='-',             # evaporative fraction
)
+
+class class4gl_input(object):
+# this was the way it was defined previously.
+#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+
+    def __init__(self,set_pars_defaults=True,debug_level=None):
+
+
+        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
+        """
+
+        print('hello')
+        self.logger = logging.getLogger('class4gl_input')
+        print(self.logger)
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+
+        # # create logger
+        # self.logger = logging.getLogger('class4gl_input')
+        # self.logger.setLevel(debug_level)
+
+        # # create console handler and set level to debug
+        # ch = logging.StreamHandler()
+        # ch.setLevel(debug_level)
+
+        # # create formatter
+        # formatter = logging.Formatter('%(asctime)s - \
+        #                                %(name)s - \
+        #                                %(levelname)s - \
+        #                                %(message)s')
+        # add formatter to ch
+        # ch.setFormatter(formatter)
+     
+        # # add ch to logger
+        # self.logger.addHandler(ch)
+
+        # """ end set up logger """
+
+
+
+        # these are the standard model input single-value parameters for class
+        self.pars = model_input()
+
+        # diagnostic parameters of the initial profile
+        self.diag = dict()
+
+        # In this variable, we keep track of the different parameters from where it originates from. 
+        self.sources = {}
+
+        if set_pars_defaults:
+            self.set_pars_defaults()
+
    def set_pars_defaults(self):

        """ 
        Fill self.pars with the standard CLASS default case.

        A model_input object is populated with the default single-value
        parameters (time stepping, mixed-layer state, wind, surface layer,
        radiation, land surface and soil) and registered through
        self.update() under the source name 'defaults', so the origin of
        every parameter is tracked in self.sources.
        """
        defaults = dict( 
        dt         = 60.    , # time step [s] 
        runtime    = 6*3600 ,  # total run time [s]
        
        # mixed-layer input
        sw_ml      = True   ,  # mixed-layer model switch
        sw_shearwe = False  ,  # shear growth mixed-layer switch
        sw_fixft   = False  ,  # Fix the free-troposphere switch
        h          = 200.   ,  # initial ABL height [m]
        Ps         = 101300.,  # surface pressure [Pa]
        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
        
        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
        dtheta     = 1.     ,  # initial temperature jump at h [K]
        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
        advtheta   = 0.     ,  # advection of heat [K s-1]
        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
        
        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
        
        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
        sw_wind    = True  ,  # prognostic wind switch
        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
        du         = 0.     ,  # initial u-wind jump at h [m s-1]
        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
        advu       = 0.     ,  # advection of u-wind [m s-2]
        v          = 0.0    , # initial mixed-layer v-wind speed [m s-1]
        dv         = 0.0    ,  # initial v-wind jump at h [m s-1]
        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
        advv       = 0.     ,  # advection of v-wind [m s-2]
        sw_sl      = True   , # surface layer switch
        ustar      = 0.3    ,  # surface friction velocity [m s-1]
        z0m        = 0.02   ,  # roughness length for momentum [m]
        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
        sw_rad     = True   , # radiation switch
        lat        = 51.97  ,  # latitude [deg]
        lon        = -4.93  ,  # longitude [deg]
        doy        = 268.   ,  # day of the year [-]
        tstart     = 6.8    ,  # time of the day [h UTC]
        cc         = 0.0    ,  # cloud cover fraction [-]
        Q          = 400.   ,  # net radiation [W m-2] 
        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
        cveg       = 0.85   ,  # vegetation fraction [-]
        Tsoil      = 295.   ,  # temperature top soil layer [K]
        Ts         = 295.   ,    # initial surface temperature [K]
        T2         = 296.   ,  # temperature deeper soil layer [K]
        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
        p          = 4.     ,  # Clapp and Hornberger retention curve parameter p
        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
        wfc        = 0.323  ,  # volumetric water content field capacity [-]
        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
        C1sat      = 0.132  ,  
        C2ref      = 1.8    ,
        LAI        = 2.     ,  # leaf area index [-]
        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
        rssoilmin  = 50.    ,  # minimum resistance soil evaporation [s m-1]
        alpha      = 0.25   ,  # surface albedo [-]
        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
        sw_cu      = False  ,  # Cumulus parameterization switch
        dz_h       = 150.   ,  # Transition layer thickness [m]
        cala       = None   ,  # soil heat conductivity [W/(K*m)]
        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
        sw_ls      = True   ,
        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsidence) fields as input from eg., ERA-INTERIM
        sw_lit     = False,
        )
        # copy the defaults into a fresh model_input and register them so
        # that self.sources records them under 'defaults'
        pars = model_input()
        for key in defaults:
            pars.__dict__[key] = defaults[key]
        
        self.update(source='defaults',pars=pars)
+        
+    def clear(self):
+        """ this procudure clears the class4gl_input """
+
+        for key in list(self.__dict__.keys()):
+            del(self.__dict__[key])
+        self.__init__()
+
+    def dump(self,file):
+        """ this procedure dumps the class4gl_input object into a yaml file
+            
+            Input: 
+                - self.__dict__ (internal): the dictionary from which we read 
+            Output:
+                - file: All the parameters in self.__init__() are written to
+                the yaml file, including pars, air_ap, sources etc.
+        """
+        file.write('---\n')
+        index = file.tell()
+        file.write('# CLASS4GL input; format version: 0.1\n')
+
+        # write out the position of the current record
+        yaml.dump({'index':index}, file, default_flow_style=False)
+
+        # we do not include the none values
+        for key,data in self.__dict__.items():
+            #if ((type(data) == model_input) or (type(class4gl_input):
+            if key == 'pars':
+
+                pars = {'pars' : self.__dict__['pars'].__dict__}
+                parsout = {}
+                for key in pars.keys():
+                    if pars[key] is not None:
+                        parsout[key] = pars[key]
+
+                yaml.dump(parsout, file, default_flow_style=False)
+            elif type(data) == dict:
+                if key == 'sources':
+                    # in case of sources, we want to have a
+                    # condensed list format as well, so we leave out
+                    # 'default_flow_style=False'
+                    yaml.dump({key : data}, file)
+                else: 
+                    yaml.dump({key : data}, file,
+                              default_flow_style=False)
+            elif type(data) == pd.DataFrame:
+                # in case of dataframes (for profiles), we want to have a
+                # condensed list format as well, so we leave out
+                # 'default_flow_style=False'
+                yaml.dump({key: data.to_dict(orient='list')},file)
+
+                # # these are trials to get it into a more human-readable
+                # fixed-width format, but it is too complex
+                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
+                #file.write(stream)
+                
+                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
+                #file.write(key+': !!str |\n')
+                #file.write(str(data)+'\n')
+       
+    def load_yaml_dict(self,yaml_dict,reset=True):
+        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
+            
+            Input: 
+                - yaml_dict: the dictionary from which we read 
+                - reset: reset data before reading        
+            Output:
+                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
+        """
+        
+        if reset:
+            for key in list(self.__dict__.keys()):
+                del(self.__dict__[key])
+            self.__init__()
+
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                self.__dict__[key] = model_input()
+                self.__dict__[key].__dict__ = data
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            elif key == 'sources':
+                self.__dict__[key] = data
+            elif key == 'diag':
+                self.__dict__[key] = data
+            else: 
+                warnings.warn("Key '"+key+"' may not be implemented.")
+                self.__dict__[key] = data
+
+    def update(self,source,**kwargs):
+        """ this procedure is to make updates of input parameters and tracking
+        of their source more convenient. It implements the assignment of
+        parameter source/sensitivity experiment IDs ('eg.,
+        'defaults', 'sounding balloon', any satellite information, climate
+        models, sensitivity tests etc.). These are all stored in a convenient
+        way with as class4gl_input.sources.  This way, the user can always consult with
+        from where parameters data originates from.  
+        
+        Input:
+            - source:    name of the underlying dataset
+            - **kwargs: a dictionary of data input, for which the key values
+            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
+            the values is a again a dictionary/dataframe of datakeys/columns
+            ('wg','PRES','datetime', ...) and datavalues (either single values,
+            profiles ...), eg., 
+
+                pars = {'wg': 0.007  , 'w2', 0.005}
+                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
+                                     300.,...]}
+            
+        Output:
+            - self.__dict__[datatype] : object to which the parameters are
+                                        assigned. They can be consulted with
+                                        self.pars, self.profiles, etc.
+                                        
+            - self.sources[source] : It supplements the overview overview of
+                                     data sources can be consulted with
+                                     self.sources. The structure is as follows:
+                                     as:
+                self.sources = { 
+                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
+                'GLEAM' :  ['pars:wg','pars:w2', ...],
+                 ...
+                }
+        
+        """
+
+        #print(source,kwargs)
+
+        for key,data in kwargs.items():
+
+            #print(key)
+            # if the key is not in class4gl_input object, then just add it. In
+            # that case, the update procedures below will just overwrite it 
+            if key not in self.__dict__:
+                self.__dict__[key] = data
+
+
+            
+
+            #... we do an additional check to see whether there is a type
+            # match. I not then raise a key error
+            if (type(data) != type(self.__dict__[key]) \
+                # we allow dict input for model_input pars
+                and not ((key == 'pars') and (type(data) == dict) and \
+                (type(self.__dict__[key]) == model_input))):
+
+                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
+
+
+            # This variable keeps track of the added data that is supplemented
+            # by the current source. We add this to class4gl_input.sources
+            datakeys = []
+
+            #... and we update the class4gl_input data, and this depends on the
+            # data type
+
+            if type(self.__dict__[key]) == pd.DataFrame:
+                # If the data type is a dataframe, then we update the columns
+                for column in list(data.columns):
+                    #print(column)
+                    self.__dict__[key][column] = data[column]
+                    datakeys.append(column)
+                    
+
+            elif type(self.__dict__[key]) == model_input:
+                # if the data type is a model_input, then we update its internal
+                # dictionary of parameters
+                if type(data) == model_input:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data.__dict__}
+                    datakeys = list(data.__dict__.keys())
+                elif type(data) == dict:
+                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
+                                                   **data}
+                    datakeys = list(data.keys())
+                else:
+                    raise TypeError('input key '+key+' is not of the same type\
+                                    as the one in the class4gl_object')
+
+            elif type(self.__dict__[key]) == dict:
+                # if the data type is a dictionary, we update the
+                # dictionary 
+                self.__dict__[key] = {self.__dict__[key] , data}
+                datakeys = list(data.keys())
+
+
+            # if source entry is not existing yet, we add it
+            if source not in self.sources.keys():
+                self.sources[source] = []
+
+
+            # self.logger.debug('updating section "'+\
+            #                  key+' ('+' '.join(datakeys)+')'\
+            #                  '" from source \
+            #                  "'+source+'"')
+
+            # Update the source dictionary: add the provided data keys to the
+            # specified source list
+            for datakey in datakeys:
+                # At first, remove the occurences of the keys in the other
+                # source lists
+                for sourcekey,sourcelist in self.sources.items():
+                    if key+':'+datakey in sourcelist:
+                        self.sources[sourcekey].remove(key+':'+datakey)
+                # Afterwards, add it to the current source list
+                self.sources[source].append(key+':'+datakey)
+
+
+        # # in case the datatype is a class4gl_input_pars, we update its keys
+        # # according to **kwargs dictionary
+        # if type(self.__dict__[datatype]) == class4gl_input_pars:
+        #     # add the data parameters to the datatype object dictionary of the
+        #     # datatype
+        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
+        #                                        **kwargs}
+        # # in case, the datatype reflects a dataframe, we update the columns according
+        # # to the *args list
+        # elif type(self.__dict__[datatype]) == pd.DataFrame:
+        #     for dataframe in args:
+        #         for column in list(dataframe.columns):
+        #             self.__dict__[datatype][column] = dataframe[column]
+        
+
+    def get_profile(self,IOBJ, *args, **argv):
+        # if type(IOBJ) == wyoming:
+        self.get_profile_wyoming(IOBJ,*args,**argv)
+        # else:
+        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
+        
+    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
+        """ 
+            Purpose: 
+                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
+
+            Input:
+                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
+                function will take the profile at the stream's current
+                position. 
+                2. air_ap_mode: which air profile do we take? 
+                    - b : best
+                    - l : according to lower limit for the mixed-layer height
+                            estimate
+                    - u : according to upper limit for the mixed-layer height
+                            estimate
+
+
+            Output:
+                1. all single-value parameters are stored in the
+                   class4gl_input.pars object
+                2. the souding profiles are stored in the in the
+                   class4gl_input.air_balloon dataframe
+                3. modified sounding profiles for which the mixed layer height
+                   is fitted
+                4. ...
+
+        """
+
+
+        # Raise an error in case the input stream is not the correct object
+        # if type(wy_strm) is not wyoming:
+        #    raise TypeError('Not a wyoming type input stream')
+
+        # Let's tell the class_input object that it is a Wyoming fit type
+        self.air_ap_type = 'wyoming'
+        # ... and which mode of fitting we apply
+        self.air_ap_mode = air_ap_mode
+
+        """ Temporary variables used for output """
+        # single value parameters derived from the sounding profile
+        dpars = dict()
+        # profile values
+        air_balloon = pd.DataFrame()
+        # fitted profile values
+        air_ap = pd.DataFrame()
+        
+        string = wy_strm.current.find_next('pre').text
+        string = string.split('\n')[:-1]
+        string =  '\n'.join(string)
+        
+        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
+        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
+        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
+        
+        #string =  soup.pre.next_sibling.next_sibling
+        
+        string = wy_strm.current.find_next('pre').find_next('pre').text
+        
+        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
+        dpars = {**dpars,
+                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
+               }
+        
+        # we get weird output when it's a numpy Timestamp, so we convert it to
+        # pd.datetime type
+
+        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
+        dpars['STNID'] = dpars['Station number']
+
+        # altitude above ground level
+        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
+        # absolute humidity in g/kg
+        air_balloon['q']= (air_balloon.MIXR/1000.) \
+                              / \
+                             (air_balloon.MIXR/1000.+1.)
+        # convert wind speed from knots to m/s
+        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
+        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+        
+        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
+        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
+
+        
+
+        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+
+        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
+        air_balloon['p'] = air_balloon.PRES*100.
+
+
+        # Therefore, determine the sounding that are valid for 'any' column 
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        #is_valid = (air_balloon.z >= 0)
+        # # this is an alternative pipe/numpy method
+        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
+        valid_indices = air_balloon.index[is_valid].values
+        print(valid_indices)
+
+        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+
+        air_balloon['t'] = air_balloon['TEMP']+273.15
+        air_balloon['theta'] = (air_balloon.t) * \
+                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
+        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
+
+        if len(valid_indices) > 0:
+            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
+            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
+            
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            
+            # the final mixed-layer height that will be used by class. We round it
+            # to 1 decimal so that we get a clean yaml output format
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+
+
+        if np.isnan(dpars['h']):
+            dpars['Ps'] = np.nan
+
+
+
+
+        if ~np.isnan(dpars['h']):
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u 
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+            
+
+
+
+        # First 3 data points of the mixed-layer fit. We create a empty head
+        # first
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+        
+        #calculate mixed-layer jump ( this should be larger than 0.1)
+        
+        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        air_ap_head['HGHT'] = air_ap_head['z'] \
+                                + \
+                                np.round(dpars[ 'Station elevation'],1)
+        
+        # make a row object for defining the jump
+        jump = air_ap_head.iloc[0] * np.nan
+            
+        if air_ap_tail.shape[0] > 1:
+
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = dpars['theta']
+        z_low =     dpars['h']
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                (z_mean > (z_low+10.)) and \
+                (theta_mean > (theta_low+0.2) ) and \
+                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+
+
+
+
+
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        #print(air_ap['PRES'].iloc[0])
+
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+
+        
+        dpars['lat'] = dpars['Station latitude']
+        dpars['latitude'] = dpars['lat']
+        
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        dpars['longitude'] = dpars['Station longitude']
+        
+        dpars['ldatetime'] = dpars['datetime'] \
+                            + \
+                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise'])\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+
+        # # we make a pars object that is similar to the destination object
+        # pars = model_input()
+        # for key,value in dpars.items():
+        #     pars.__dict__[key] = value
+
+
+        # we round the columns to a specified decimal, so that we get a clean
+        # output format for yaml
+        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
+                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
+                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
+# 
+        for column,decimal in decimals.items():
+            air_balloon[column] = air_balloon[column].round(decimal)
+            air_ap[column] = air_ap[column].round(decimal)
+
+        self.update(source='wyoming',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+
+        
+    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
+    
+        """
+        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
+                 according to the position (lat lon) and the class datetime and timespan
+                 globaldata should be a globaldata multifile object
+        
+        Input: 
+            - globaldata: this is the library object
+            - only_keys: only extract specified keys
+            - exclude_keys: do not inherit specified keys
+        """
+        classdatetime      = np.datetime64(self.pars.datetime_daylight)
+        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
+                                           + \
+                                           dt.timedelta(seconds=self.pars.runtime)\
+                                          )
+
+
+        # # list of variables that we get from global ground data
+        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
+        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
+        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
+        #                 'texture', 'itex', 'isoil', 'BR',
+        #                 'b', 'cveg',
+        #                 'C1sat', 
+        #                 'C2ref', 'p', 'a',
+        #                 ] #globaldata.datasets.keys():
+
+        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
+        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
+
+
+        if type(globaldata) is not data_global:
+            raise TypeError("Wrong type of input library") 
+
+        # by default, we get all dataset keys
+        keys = list(globaldata.datasets.keys())
+
+        # We add LAI manually, because it is not listed in the datasets and
+        #they its retreival is hard coded below based on LAIpixel and cveg
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            keys.append('LAI')
+
+        # # In case there is surface pressure, we also calculate the half-level
+        # # and full-level pressure fields
+        # if ('sp' in keys):
+        #     keys.append('pfull')
+        #     keys.append('phalf')
+
+        # If specified, we only take the keys that are in only_keys
+        if only_keys is not None:
+            for key in keys:
+                if key not in only_keys:
+                    keys.remove(key)
+                
+        # If specified, we take out keys that are in exclude keys
+        if exclude_keys is not None:
+            for key in keys:
+                if key in exclude_keys:
+                    keys.remove(key)
+
+        # we set everything to nan first in the pars section (non-profile parameters
+        # without lev argument), so that we can check afterwards whether the
+        # data is well-fetched or not.
+        for key in keys:
+            if not ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None) and \
+                ('lev' in globaldata.datasets[key].page[key].dims)):
+                self.update(source='globaldata',pars={key:np.nan})
+            # # we do not check profile input for now. We assume it is
+            # # available
+            #else:
+            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
+
+        self.logger.debug('getting keys "'+', '.join(keys)+'\
+                          from global data')
+
+        for key in keys:
+            # If we find it, then we obtain the variables
+            if ((key in globaldata.datasets) and \
+                (globaldata.datasets[key].page is not None)):
+
+                # check first whether the dataset has a height coordinate (3d space)
+                if 'lev' in globaldata.datasets[key].page[key].dims:
+
+                    # first, we browse to the correct file that has the current time
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+                        globaldata.datasets[key].browse_page(time=classdatetime)
+
+                    
+                    if (globaldata.datasets[key].page is not None):
+                        # find longitude and latitude coordinates
+                        ilats = (np.abs(globaldata.datasets[key].page.lat -
+                                        self.pars.latitude) < 0.5)
+                        ilons = (np.abs(globaldata.datasets[key].page.lon -
+                                        self.pars.longitude) < 0.5)
+                        
+                        # if we have a time dimension, then we look up the required timesteps during the class simulation
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            itimes = ((globaldata.datasets[key].page.time >= \
+                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
+
+                            # In case we didn't find any correct time, we take the
+                            # closest one.
+                            if np.sum(itimes) == 0.:
+
+
+                                classdatetimemean = \
+                                    np.datetime64(self.pars.datetime_daylight + \
+                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
+                                                ))
+
+                                dstimes = globaldata.datasets[key].page.time
+                                time = dstimes.sel(time=classdatetimemean,method='nearest')
+                                itimes = (globaldata.datasets[key].page.time ==
+                                          time)
+                                
+                        else:
+                            # we don't have a time coordinate so it doesn't matter
+                            # what itimes is
+                            itimes = 0
+
+                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
+
+                        # over which dimensions we take a mean:
+                        dims = globaldata.datasets[key].page[key].dims
+                        namesmean = list(dims)
+                        namesmean.remove('lev')
+                        idxmean = [dims.index(namemean) for namemean in namesmean]
+                        
+                        value = \
+                        globaldata.datasets[key].page[key].isel(time=itimes,
+                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
+
+                        # Ideally, source should be equal to the datakey of globaldata.library 
+                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
+                        #  but therefore the globaldata class requires a revision to make this work
+                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
+
+                else:
+                    # this procedure is for reading the ground fields (2d space). 
+                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
+
+    
+                    if 'time' in list(globaldata.datasets[key].page[key].dims):
+    
+                       # first, we browse to the correct file
+                       #print(key)
+                       globaldata.datasets[key].browse_page(time=classdatetime)
+    
+                    if globaldata.datasets[key].page is not None:
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - self.pars.latitude))
+                        ilat = np.where((DIST) == np.min(DIST))[0][0]
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - self.pars.longitude))
+                        ilon = np.where((DIST) == np.min(DIST))[0][0]
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
+                                - (self.pars.latitude + 0.5)))
+                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmax = ilat
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
+                                - (self.pars.longitude  + 0.5)))
+                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmax = ilon
+                        
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lat.values\
+                                - (self.pars.latitude - 0.5)))
+                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                            ilatmin = ilat
+                        DIST = \
+                        np.abs((globaldata.datasets[key].page.lon.values\
+                                - (self.pars.longitude  - 0.5)))
+                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                            ilonmin = ilon        
+                        
+                        if ilatmin < ilatmax:
+                            ilatrange = range(ilatmin,ilatmax+1)
+                        else:
+                            ilatrange = range(ilatmax,ilatmin+1)
+                            
+                        if ilonmin < ilonmax:
+                            ilonrange = range(ilonmin,ilonmax+1)
+                        else:
+                            ilonrange = range(ilonmax,ilonmin+1)     
+                            
+                        if 'time' in list(globaldata.datasets[key].page[key].dims):
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                            
+                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                idatetime += 1
+                            
+                            classdatetimeend = np.datetime64(\
+                                                             self.pars.datetime +\
+                                                             dt.timedelta(seconds=self.pars.runtime)\
+                                                            ) 
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
+                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
+                                idatetimeend -= 1
+                            idatetime = np.min((idatetime,idatetimeend))
+                            #for gleam, we take the previous day values
+                            if key in ['wg', 'w2']:
+                                idatetime = idatetime - 1
+                                idatetimeend = idatetimeend - 1
+
+                            # in case of soil temperature, we take the exact
+                            # timing (which is the morning)
+                            if key in ['Tsoil','T2']:
+                                idatetimeend = idatetime
+                            
+                            idts = range(idatetime,idatetimeend+1)
+                            
+                            count = 0
+                            self.__dict__[key] = 0.
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    for iidts in idts:
+                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
+                                        count += 1
+                            value = value/count
+                            self.update(source='globaldata',pars={key:value.item()})
+                                
+                        else:
+                                
+                            count = 0
+                            value = 0.
+                            for iilat in ilatrange:
+                                for iilon in ilonrange:
+                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
+                                    count += 1
+                            value = value/count                        
+
+                            self.update(source='globaldata',pars={key:value.item()})
+
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            self.logger.debug('also update LAI based on LAIpixel and cveg') 
+            # I suppose LAI pixel is already determined in the previous
+            # procedure. Anyway...
+            key = 'LAIpixel'
+
+            if globaldata.datasets[key].page is not None:
+                # first, we browse to the correct file that has the current time
+                if 'time' in list(globaldata.datasets[key].page[key].dims):
+                    globaldata.datasets[key].browse_page(time=classdatetime)
+            
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - self.pars.latitude))
+                ilat = np.where((DIST) == np.min(DIST))[0][0]
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - self.pars.longitude))
+                ilon = np.where((DIST) == np.min(DIST))[0][0]
+                 
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude + 0.5)))
+                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmax = ilat
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values \
+                        - (self.pars.longitude  + 0.5)))
+                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmax = ilon
+                
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lat.values\
+                        - (self.pars.latitude - 0.5)))
+                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
+                    ilatmin = ilat
+                DIST = \
+                np.abs((globaldata.datasets[key].page.lon.values\
+                        - (self.pars.longitude  - 0.5)))
+                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
+                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
+                    ilonmin = ilon        
+                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                
+                
+                if ilatmin < ilatmax:
+                    ilatrange = range(ilatmin,ilatmax+1)
+                else:
+                    ilatrange = range(ilatmax,ilatmin+1)
+                    
+                if ilonmin < ilonmax:
+                    ilonrange = range(ilonmin,ilonmax+1)
+                else:
+                    ilonrange = range(ilonmax,ilonmin+1)           
+                
+                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
+                LAIpixel = 0.
+                count = 0
+                for iilat in [ilat]: #ilatrange
+                    for iilon in [ilon]: #ilonrange
+                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
+                        
+                                        
+                        # if np.isnan(tarray[idatetime]):
+                        #     print("interpolating GIMMS LAIpixel nan value")
+                        #     
+                        #     mask = np.isnan(tarray)
+                        #     
+                        #     #replace each nan value with a interpolated value
+                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+                        #         
+                        #     else:
+                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
+                    
+                        #         tarray *= np.nan 
+                        
+                        count += 1
+                        #tarray_res += tarray
+                LAIpixel = LAIpixel/count
+                
+                count = 0
+                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
+  
+                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
+                #print('LAIpixel:',self.__dict__['LAIpixel'])
+                #print('cveg:',self.__dict__['cveg'])
+                
+                # finally, we rescale the LAI according to the vegetation
+                # fraction
+                value = 0. 
+                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
+                   value =self.pars.LAIpixel/self.pars.cveg
+                else:
+                    # in case of small vegetation fraction, we take just a standard 
+                    # LAI value. It doesn't have a big influence anyway for
+                    # small vegetation
+                    value = 2.
+                #print('LAI:',self.__dict__['LAI'])
+                self.update(source='globaldata',pars={'LAI':value}) 
+
+
+        # in case we have 'sp', we also calculate the 3d pressure fields at
+        # full level and half level
+        if ('sp' in keys) and ('sp' in self.pars.__dict__):
+            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
+
+            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # hydrostatic thickness of each model layer
+            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
+            # # dz = rhodz/(R * T / pfull)
+
+
+            # # subsidence multiplied by density. We calculate the subsidence of
+            # # the in class itself
+            # wrho = np.zeros_like(phalf)
+            # wrho[-1] = 0. 
+
+            # for ihlev in range(0,wrho.shape[0]-1):
+            #     # subsidence multiplied by density is the integral of
+            #     # divergences multiplied by the layer thicknessies
+            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
+            #                     self.air_ac['divU_y'][ihlev:]) * \
+            #                    delpdgrav[ihlev:]).sum()
+
+
+            
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'p':list(pfull)}))
+            self.update(source='globaldata',\
+                        air_ach=pd.DataFrame({'p':list(phalf)}))
+            self.update(source='globaldata',\
+                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
+            # self.update(source='globaldata',\
+            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
+
+    def check_source(self,source,check_only_sections=None):
+        """ this procedure checks whether data of a specified source is valid.
+
+        INPUT:
+            source: the data source we want to check
+            check_only_sections: a string or list with sections to be checked
+        OUTPUT:
+            returns True or False
+        """
+
+        # we set source ok to false as soon as we find a invalid input
+        source_ok = True
+
+        # convert to a single-item list in case of a string
+        check_only_sections_def = (([check_only_sections]) if \
+                                   type(check_only_sections) is str else \
+                                    check_only_sections)
+                                  
+        if source not in self.sources.keys():
+            self.logger.info('Source '+source+' does not exist')
+            source_ok = False
+
+        for sectiondatakey in self.sources[source]:                             
+            section,datakey = sectiondatakey.split(':')                         
+            if ((check_only_sections_def is None) or \
+                (section in check_only_sections_def)):                          
+                checkdatakeys = []
+                if type(self.__dict__[section]) is pd.DataFrame:
+                    checkdata = self.__dict__[section]
+                elif type(self.__dict__[section]) is model_input:
+                    checkdata = self.__dict__[section].__dict__
+
+                if (datakey not in checkdata):                              
+                    # self.logger.info('Expected key '+datakey+\
+                    #                  ' is not in parameter input')                        
+                    source_ok = False                                           
+                elif (checkdata[datakey] is None) or \
+                     (pd.isnull(checkdata[datakey]) is True):                    
+        
+                    # self.logger.info('Key value of "'+datakey+\
+                    #                  '" is invalid: ('+ \
+                    # str(self.__dict__[section].__dict__[datakey])+')')         
+                    source_ok = False
+
+        return source_ok
+
+    def check_source_globaldata(self):
+        """ this procedure checks whether all global parameter data is
+        available, according to the keys in the self.sources"""
+
+        source_globaldata_ok = True
+
+        #self.get_values_air_input()
+
+        # and now we can get the surface values
+        #class_settings = class4gl_input()
+        #class_settings.set_air_input(input_atm)
+        
+        # we only allow non-polar stations
+        if not (self.pars.lat <= 60.):
+            source_globaldata_ok = False
+            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+        
+        # check lat and lon
+        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
+            source_globaldata_ok = False
+            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
+            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
+        else:
+            # we only check the ground parameter data (pars section). The 
+            # profile data (air_ap section) are supposed to be valid in any 
+            # case.
+            source_ok = self.check_source(source='globaldata',\
+                                          check_only_sections=['air_ac',\
+                                                               'air_ap',\
+                                                               'pars'])
+            if not source_ok:
+                source_globaldata_ok = False
+        
+            # Additional check: we exclude desert-like
+            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
+                source_globaldata_ok = False
+                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
+                source_globaldata_ok = False
+                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
+            elif self.pars.cveg < 0.02:
+                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
+                source_globaldata_ok = False
+
+        return source_globaldata_ok
+
+
class c4gli_iterator():
    """ Iterator that walks through an entire yaml file and yields the
        records sequentially as class4gl_input objects.

        For background on writing such iterator classes, see:
        https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
    """
    def __init__(self,file):
        # keep a handle on the IO stream and build a lazy yaml document
        # generator on top of it.
        # NOTE(review): yaml.load_all without an explicit Loader is
        # deprecated and unsafe on untrusted input — confirm the yaml
        # files are trusted.
        self.file = file
        self.yaml_generator = yaml.load_all(file)
        self.current_dict = {}
        self.current_class4gl_input = class4gl_input()
        # consume the dummy separator line, then validate the format header
        _separator = self.file.readline()
        self.header = self.file.readline()
        expected_header = '# CLASS4GL record; format version: 0.1\n'
        if self.header != expected_header:
            raise NotImplementedError("Wrong format version: '"+self.header+"'")
    def __iter__(self):
        # the iterator is its own iterable
        return self
    def __next__(self):
        # advance to the next yaml document and load it into the reusable
        # class4gl_input instance
        self.current_dict = next(self.yaml_generator)
        self.current_class4gl_input.load_yaml_dict(self.current_dict)
        return self.current_class4gl_input
+
+
+
+#get_cape and lift_parcel are adapted from the SkewT package
+    
class gl_dia(object):
    """ Diagnostics derived from a class model run. """
    def get_lifted_index(self,timestep=-1):
        """ Compute the lifted index at the given output timestep and store
            it on self.LI. """
        surface_pressure = self.input.Ps
        temperature_2m = self.out.T2m[timestep]
        specific_humidity = self.out.q[timestep]
        self.LI = get_lifted_index(surface_pressure,
                                   temperature_2m,
                                   specific_humidity,
                                   self.p_pro,
                                   self.theta_pro,
                                   endp=50000.)
+    
+#from SkewT
+#def get_lcl(startp,startt,startdp,nsteps=101):
+#    from numpy import interp
+#    #--------------------------------------------------------------------
+#    # Lift a parcel dry adiabatically from startp to LCL.
+#    # Init temp is startt in K, Init dew point is stwrtdp,
+#    # pressure levels are in Pa    
+#    #--------------------------------------------------------------------
+#
+#    assert startdp<=startt
+#
+#    if startdp==startt:
+#        return np.array([startp]),np.array([startt]),np.array([startdp]),
+#
+#    # Pres=linspace(startp,60000.,nsteps)
+#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
+#
+#    # Lift the dry parcel
+#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
+#    # Mixing ratio isopleth
+#    starte=VaporPressure(startdp)
+#    startw=MixRatio(starte,startp)
+#    e=Pres*startw/(.622+startw)
+#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
+#
+#    # Solve for the intersection of these lines (LCL).
+#    # interp requires the x argument (argument 2)
+#    # to be ascending in order!
+#    P_lcl=interp(0.,T_iso-T_dry,Pres)
+#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
+#
+#    # # presdry=linspace(startp,P_lcl)
+#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
+#
+#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
+#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
+#
+#    return P_lcl,T_lcl
+
+
+
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """ Calculate mixed-layer height from temperature and wind speed profile

        The mixed-layer height is the level where the bulk Richardson number
        (RiB) first exceeds the critical value RiBc, linearly interpolated
        between the two bracketing levels.  A second estimate based on the
        alternative critical value RiBce widens the uncertainty interval.

        Input:
            HAGL: height coordinates [m]
            THTV: virtual potential temperature profile [K]
            WSPD: wind speed profile [m/s]
            RiBc: critical bulk Richardson number for the best guess [-]
            RiBce: alternative critical bulk Richardson number used for the
                   error estimate [-]

        Output:
            BLH: best-guess mixed-layer height
            BLHu: upper limit of mixed-layer height
            BLHl: lower limit of mixed-layer height

    """
    eps = 2. # security limit [m]: minimum half-width of the error interval

    # reference virtual potential temperature: first non-nan level
    iTHTV_0 = np.where(~np.isnan(THTV))[0]
    if len(iTHTV_0) > 0:
        iTHTV_0 = iTHTV_0[0]
        THTV_0 = THTV[iTHTV_0]
    else:
        # no valid level: RiB becomes all-nan and the nan branch below runs
        THTV_0 = np.nan

    # bulk Richardson number; wind speed is clipped away from zero to avoid
    # division by zero in calm conditions
    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.

    # first level where RiB exceeds the critical value
    BLHi = np.where(RiB > RiBc)[0]
    if len(BLHi) > 0:
        BLHi = BLHi[0]
        # interpolate linearly in RiB between the bracketing levels
        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]

        # possible error is derived from the height levels used for the interpolation
        BLHu = np.max([BLH,HAGL[BLHi]-eps])
        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])

        # alternative BLH based on the other critical Richardson number (RiBce)
        BLHi = np.where(RiB > RiBce)[0]
        if len(BLHi) > 0:
            BLHi = BLHi[0]
            # BUGFIX: interpolate towards RiBce (not RiBc) on the segment
            # bracketing the RiBce crossing; the original reused RiBc here,
            # which contradicts the stated purpose of this estimate
            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBce - RiB[BLHi-1]) + HAGL[BLHi-1]
            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])

            # widen the interval with the spread between the two estimates
            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
        else:
            # unreachable when RiBce < RiBc; kept for robustness against
            # unconventional argument choices
            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
    else:
        BLH,BLHu,BLHl = np.nan, np.nan,np.nan

    return BLH,BLHu,BLHl
+
+
+
+#from class
+def get_lcl(startp,startt,startqv):
+        # Find lifting condensation level iteratively
+    lcl = 20.
+    RHlcl = 0.5
+    
+    itmax = 30
+    it = 0
+    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0:
+        iTHTV_0 = iTHTV_0[0]
+        THTV_0 = THTV[iTHTV_0]
+    else:
+        THTV_0 = np.nan
+    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
+    
+    
+    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
+    #RiB - RiBc = 0
+    
+    #best guess of BLH
+    
+    #print("RiB: ",RiB)
+    #print("RiBc: ",RiBc)
+    
+    
+    
+    BLHi = np.where(RiB > RiBc)[0]
+    if len(BLHi ) > 0:
+        BLHi = BLHi[0]
+        #print("BLHi: ",BLHi)
+        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+        
+        # possible error is calculated as the difference height levels used for the interpolation
+        BLHu = np.max([BLH,HAGL[BLHi]-eps])
+        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
+        # calculate an alternative BLH based on another critical Richardson number (RiBce):
+        BLHi =np.where(RiB > RiBce)[0]
+        if len(BLHi ) > 0:    
+            BLHi = BLHi[0]
+                
+            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
+            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
+            
+            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
+            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
+        
+        else:
+            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
+
+    else:
+        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
+        
+    return BLH,BLHu,BLHd
+
def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
    """Return the list of datetimes from STARTTIME (inclusive) up to but
    excluding ENDTIME, stepped by TIMEJUMP (default: one day).

    The number of steps is truncated, so a partial trailing interval is
    dropped.
    """
    n_steps = int((ENDTIME - STARTTIME).total_seconds() / TIMEJUMP.total_seconds())
    return [STARTTIME + i * TIMEJUMP for i in range(n_steps)]
+
+
+#from os import listdir
+#from os.path import isfile #,join
+import glob
+
+
class wyoming(object):
    """Reader for locally archived Wyoming sounding HTML pages.

    The archive is organised as PATH/<year>/SOUNDINGS_<year>_<station>.html,
    one file per station-year.  Soundings inside a file are located through
    their <h2> headers.  get_values_air_input() parses the currently selected
    sounding into CLASS-ready scalar parameters (self.PARAMS) and profile
    tables (self.ONE_COLUMN), according to the mixed-layer-height mode
    self.mode.
    """

    def __init__(self):
        self.status = 'init'
        self.found = False          # whether a sounding is currently selected
        self.DT = None              # datetime of the selected sounding
        self.current = None         # <h2> tag of the selected sounding
        # mixed-layer-height mode used by get_values_air_input():
        # 'o' original profile, 'b' best guess, 'u' upper limit, 'd' lower limit.
        # BUGFIX: this assignment was commented out, but get_values_air_input
        # reads self.mode unconditionally and raised AttributeError.
        self.mode = 'b'
        self.profile_type = 'wyoming'
        self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
        self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"

    def set_STNM(self,STNM):
        """Select a station by number and build its sorted list of data files."""
        self.__init__()
        self.STNM = STNM
        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
        self.current = None
        self.found = False
        self.FILES.sort()

    def find_first(self,year=None,get_atm=False):
        """Position on the first sounding of the archive (or of `year`).

        Sets self.current, self.DT and self.found.  When get_atm is True the
        sounding is also parsed via get_values_air_input().  When `year` is
        given but its file holds no sounding header, the search continues
        into the following files.
        """
        self.found = False

        # check first file/year or specified year
        if year is None:
            self.iFN = 0
            self.FN = self.FILES[self.iFN]
        else:
            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
        self.current = self.sounding_series.find('h2')

        # go through the following files until a sounding header shows up.
        # NOTE(review): iFN is advanced even when the first file matched, so
        # after find_first() iFN points one past the opened file — confirm
        # this offset is intended by find_next()'s rollover.
        self.iFN = self.iFN+1
        # BUGFIX: also stop at the end of the file list (the original could
        # index one file past the end when the last file had no <h2>)
        keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
        while keepsearching:
            self.FN = self.FILES[self.iFN]
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')
            self.iFN = self.iFN+1
            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
        self.found = (self.current is not None)

        self.status = 'fetch'
        if self.found:
            # the header text ends in e.g. "... 00Z 01 Jan 2005"
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))

        if self.found and get_atm:
            self.get_values_air_input()

    def find(self,DT,get_atm=False):
        """Position on the sounding taken exactly at datetime DT.

        Assumes the soundings in a file are in chronological order; when DT
        is passed without an exact match, self.current is cleared.
        """
        self.found = False
        keepsearching = True
        # open a new file only when needed; otherwise just scroll forward in
        # the currently loaded year
        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
            self.DT = DT
            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
            self.iFN = self.FILES.index(self.FN)
            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
            self.current = self.sounding_series.find('h2')

        keepsearching = (self.current is not None)
        while keepsearching:
            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            if DTcurrent == DT:
                self.found = True
                keepsearching = False
                if get_atm:
                    self.get_values_air_input()
                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
            elif DTcurrent > DT:
                # we passed DT without a match: give up
                keepsearching = False
                self.current = None
            else:
                self.current = self.current.find_next('h2')
                if self.current is None:
                    keepsearching = False
        self.found = (self.current is not None)
        self.status = 'fetch'

    def find_next(self,get_atm=False):
        """Advance to the sounding following the current one, rolling over to
        the next file(s) when the current file is exhausted.  Falls back to
        find_first() when no sounding is selected yet.
        """
        self.found = False
        self.DT = None
        if self.current is None:
            self.find_first()
        else:
            self.current = self.current.find_next('h2')
            self.found = (self.current is not None)
            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
            while keepsearching:
                self.iFN = self.iFN+1
                self.FN = self.FILES[self.iFN]
                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
                self.current = self.sounding_series.find('h2')

                self.found = (self.current is not None)
                # BUGFIX: bound on the NEXT index (iFN+1) as in the loop entry
                # condition; the original compared iFN itself and could index
                # one file past the end of the list
                keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
        if self.found:
            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
        if self.found and get_atm:
            self.get_values_air_input()

    def get_values_air_input(self,latitude=None,longitude=None):
        """Parse the currently selected sounding into CLASS input.

        Fills self.PARAMS (scalar parameters, transposed DataFrame) and
        self.ONE_COLUMN (profile table chosen according to self.mode).
        latitude/longitude, when given, override the station coordinates
        found in the sounding header.

        NOTE(review): relies on np.float (removed in NumPy >= 1.24) and on
        DataFrame.drop(..., 1) with a positional axis (removed in pandas 2.0);
        requires correspondingly old library versions.
        """
        # first <pre> block: the fixed-width profile table.
        # workaround: the last line carries a stray carriage return which
        # would otherwise produce a string-like first column, so drop it
        string = self.current.find_next('pre').text
        string = string.split('\n')[:-1]
        string =  '\n'.join(string)
        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']
        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]

        # second <pre> block: the station and sounding indices (key: value)
        string = self.current.find_next('pre').find_next('pre').text
        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T

        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))

        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
        # height above ground level
        HAGL = HGHT - np.float(PARAMS['Station elevation'])
        ONE_COLUMN.insert(0,'HAGL',HAGL)

        # specific humidity [kg/kg] from mixing ratio [g/kg]
        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
        ONE_COLUMN.insert(0,'QABS',QABS)

        # wind speed: knots -> m/s
        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')

        # mixed-layer height (best guess, upper and lower limit), floored at 10 m
        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
        BLHV = np.max((BLHV,10.))
        BLHVu = np.max((BLHVu,10.))
        BLHVd = np.max((BLHVd,10.))

        # fit new profiles for each of the three mixed-layer heights: the
        # fitted height axis is [2 m, BLH, BLH] followed by the original
        # levels above the mixed layer
        ONE_COLUMNNEW = []
        for BLH in [BLHV,BLHVu,BLHVd]:
            ONE_COLUMNNEW.append(pd.DataFrame())

            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)

            listHAGLNEW = list(HAGLNEW)
            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):

                # index of the lowest valid observation (varies per column)
                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
                if len(idxvalid) > 0:
                    # mixed-layer mean; requires at least 3 levels below BLH
                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
                    else:
                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)
                else:
                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)

                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
                # jump at the top of the mixed layer: interpolate the first
                # free-troposphere segment down to BLH and subtract the
                # mixed-layer mean
                if col in ['THTA','THTV',]:
                    if len(listHAGLNEW) > 4:
                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl
                        # temperature jump is kept at >= 0.1 K
                        dtheta = np.max((0.1,dtheta_pre))
                    else:
                        dtheta = np.nan
                else:
                    if len(listHAGLNEW) > 4:
                        # moisture/wind/pressure jumps may have either sign
                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl )
                    else:
                        dtheta = np.nan

                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)

        # duplicate the fields under the names expected by CLASS
        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))

            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)

            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))

        # assign the mixed-layer height adopted by CLASS
        if self.mode == 'o': #original
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'b':
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'u':
            PARAMS.insert(0,'h',   BLHVu)
        elif self.mode == 'd':
            PARAMS.insert(0,'h',   BLHVd)
        else:
            PARAMS.insert(0,'h',   BLHV)

        try:
            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
        except:
            print("could not convert latitude coordinate")
            PARAMS.insert(0,'latitude', np.nan)
            PARAMS.insert(0,'lat', np.nan)
        try:
            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwich)
            PARAMS.insert(0,'lon', 0.)
        except:
            print("could not convert longitude coordinate")
            PARAMS.insert(0,'longitude', np.nan)
            PARAMS.insert(0,'lon', 0.)

        if latitude is not None:
            print('overwriting latitude with specified value')
            PARAMS['latitude'] = np.float(latitude)
            PARAMS['lat'] = np.float(latitude)
        if longitude is not None:
            print('overwriting longitude with specified value')
            PARAMS['longitude'] = np.float(longitude)
        try:
            # local suntime: shift by longitude only (as if at Greenwich)
            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.)
            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
            # nearest datetime when the sun is up (for CLASS).
            # BUGFIX: use the builtin min/max; np.min/np.max with two
            # positional arguments treats the second one as `axis` and raised
            # a TypeError here, so the except branch below always ran
            PARAMS['ldatetime_daylight'] = min(max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value)
            # apply the same time shift for UTC datetime
            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)

        except:
            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
            PARAMS['SolarAltitude'] = np.nan
            PARAMS['SolarAzimuth'] = np.nan
            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1)
            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
            PARAMS['datetime_daylight'] =PARAMS['datetime'].value

        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
        # as we are forcing lon equal to zero this is expressed in local suntime
        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)

        ONE_COLUMNb = ONE_COLUMNNEW[0]
        ONE_COLUMNu = ONE_COLUMNNEW[1]
        ONE_COLUMNd = ONE_COLUMNNEW[2]

        # mixed-layer mean virtual potential temperature and humidity
        THTVM = np.nanmean(THTV[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)

        QABSM = np.nanmean(QABS[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)

        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)

        # mixed-layer height uncertainty: largest deviation of the limits
        BLHVe = abs(BLHV - BLHVu)
        BLHVe = max(BLHVe,abs(BLHV - BLHVd))

        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)
        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)

        # select the profile table according to mode; BLCOLUMN is always a
        # fitted profile and is used for the quality checks below
        if self.mode == 'o': #original
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb
        elif self.mode == 'b': # best BLH
            USE_ONECOLUMN = ONE_COLUMNb
            BLCOLUMN = ONE_COLUMNb
        elif self.mode == 'u': # upper-limit BLH
            USE_ONECOLUMN = ONE_COLUMNu
            BLCOLUMN = ONE_COLUMNu
        elif self.mode == 'd': # lower-limit BLH
            USE_ONECOLUMN = ONE_COLUMNd
            BLCOLUMN = ONE_COLUMNd
        else:
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb

        # quality flag for using this sounding as analysis/model input
        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
        PARAMS.insert(0,'OK',
                      ((BLHVe < 200.) and
                       ( len(np.where(lt6000)[0]) > 5) and
                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # last level reaches at least 6000 m
                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
                       (len(np.where(lt2500)[0]) > 10.) and # enough vertical resolution below BLHV + 2500 m
                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
                      )
                     )

        # mixed-layer values (index 1) and jumps at the mixed-layer top
        # (index 2 minus index 1) of the fitted profile
        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))

        PARAMS = PARAMS.T

        self.PARAMS = PARAMS
        self.ONE_COLUMN = USE_ONECOLUMN
diff --git a/lib/data_global.py b/lib/data_global.py
new file mode 100644
index 0000000..9c3d9b5
--- /dev/null
+++ b/lib/data_global.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: Hendrik Wouters
+
+Purpose: provides class routines for ground and atmosphere conditions used for
+the CLASS mixed-layer model
+
+Usage:
+    from data_global import data_global
+    from class4gl import class4gl_input
+    from data_soundings import wyoming
+
+    # create a data_global object and load initial data pages
+    globaldata = data_global()
+    globaldata.load_datasets()
+    # create a class4gl_input object
+    c4gli = class4gl_input()
+    # Initialize it with profile data. We need to do this first. Actually this
+    # will set the coordinate parameters (datetime, latitude, longitude) in
+    # class4gl_input.pars.__dict__, which is required to read point data from
+    # the data_global object.
+
+    # open a Wyoming stream for a specific station
+    wy_strm = wyoming(STNM=91376)
+    # load the first profile
+    wy_strm.find_first()
+    # load the profile data into the class4gl_input object
+    c4gli.get_profile_wyoming(wy_strm)
+    
+    # and finally, read the global input data for this profile
+    c4gli.get_global_input(globaldata)
+
+
+"""
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+#import pynacolada as pcd
+import pandas as pd
+import xarray as xr
+import os
+import glob
+import sys
+import errno
+import warnings
+import logging
+
+
#formatter = logging.Formatter()
# Single-line format string.  BUGFIX: the previous backslash-continued string
# literal embedded the source-code indentation (long runs of spaces) into
# every emitted log record.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
class book(object):
    """ this is a class for a dataset spread over multiple files. It has a
    similar purpose to open_mfdataset, but only 1 file (called current 'page')
    is loaded at a time. This saves precious memory.  """

    def __init__(self,fn,concat_dim = None,debug_level=None):
        """fn: glob pattern matching the dataset files.
        concat_dim: dimension along which the pages are stacked; defaults to
        the first dimension of the first page.
        debug_level: optional logging level for this instance's logger.

        Raises FileNotFoundError when fn matches no file.
        """
        self.logger = logging.getLogger('book')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        # filenames are expanded as a list and sorted by filename
        self.pages = glob.glob(fn); self.pages.sort()
        # an empty list means no file matched fn, so we raise an error
        if len(self.pages) == 0:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
        self.ipage = -1; self.page = None
        self.renames = {} # renaming applied each time a file is opened
        self.set_page(0)

        # we consider that the outer dimension is the one we concatenate
        self.concat_dim = concat_dim
        if self.concat_dim is None:
            self.concat_dim = list(self.page.dims.keys())[0]

    def sel(self,*args, **kwargs):
        """Wrap xarray's .sel: first browse to the page covering the requested
        coordinate along concat_dim, then select on that page.

        BUGFIX: the original definition lacked `self` and returned
        `page.sel(...)` with an unbound name, raising NameError on every call.
        """
        for dim in kwargs.keys():
            if dim == self.concat_dim:
                self.browse_page(**{dim: kwargs[dim]})
        return self.page.sel(*args,**kwargs)

    def set_renames(self,renames):
        """Replace the variable/dimension renaming mapping.

        The current page (if any) is first reverted to its original names,
        after which the new renaming is applied.
        """
        reverse_renames = dict((v,k) for k,v in self.renames.items())
        self.renames = renames
        if self.page is not None:
            self.page = self.page.rename(reverse_renames)
            self.page = self.page.rename(self.renames)

    def set_page(self,ipage,page=None):
        """ this sets the right page according to ipage:
                - We do not switch the page if we are already at the right one
                - we set the correct renamings (level -> lev, latitude -> lat,
                etc.)
                - The dataset is also squeezed.
        """
        if ((ipage != self.ipage) or (page is not None)):

            if self.page is not None:
                self.page.close()

            self.ipage = ipage
            if page is not None:
                self.page = page
            else:
                if self.ipage == -1:
                    # -1 means: no page selected
                    self.page = None
                else:
                    self.logger.info("Switching to page "+str(self.ipage)+': '\
                                     +self.pages[self.ipage])
                    self.page = xr.open_dataset(self.pages[self.ipage])

            # do some final corrections to the dataset to make them uniform
            if self.page is not None:
                if 'latitude' in self.page.dims:
                    self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
                if 'level' in self.page.dims:
                    self.page = self.page.rename({'level':'lev'})

                self.page = self.page.rename(self.renames)
                self.page = self.page.squeeze(drop=True)

    def browse_page(self,rewind=2,**args):
        """Find and open the page that covers the requested coordinate(s).

        `args` maps dimension names to requested coordinate values; at the
        moment this is only tested with files stacked along the time
        dimension.  The search starts `rewind` pages before the current one
        and wraps around the page list.  When no page matches, the current
        page is unset (ipage = -1).
        """
        if self.ipage == -1:
            self.set_page(0)

        found = False
        iipage = 0
        startipage = self.ipage - rewind
        while (iipage < len(self.pages)) and not found:
            ipage = (iipage+startipage) % len(self.pages)
            # BUGFIX: initialise the verdict once per page; the original reset
            # this_file inside the dimension loop, so only the last dimension
            # effectively decided (and zero dimensions raised NameError)
            this_file = True
            for dim in args.keys():

                # the coordinates of each page are cached in a dictionary so
                # that they are only loaded once
                if 'dims' not in self.__dict__:
                    self.dims = {}
                if dim not in self.dims.keys():
                    self.dims[dim] = [None]*len(self.pages)

                if self.dims[dim][ipage] is None:
                    self.logger.info('Loading coordinates of dimension "'+dim+\
                                     '" of page "' +str(ipage)+'".')
                    self.set_page(ipage)
                    self.dims[dim][ipage] = self.page[dim].values

                # coordinate range covered by this page, extended by half a
                # grid spacing on both sides
                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.

                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
                    this_file = False

            if this_file:
                found = True
                self.set_page(ipage)
            else:
                iipage += 1

        if not found:
            self.logger.info("Page not found. Setting to page -1")
            self.set_page(-1)

        if self.ipage != -1:
            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
        else:
            self.logger.debug("I'm now at page "+ str(self.ipage))
+
+
+class data_global(object):
+    def __init__(self,sources= {
+        # # old gleam
+        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
+        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
+        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
+        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
+        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
+        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
+        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
+        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
+        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
+        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
+        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
+        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
+        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
+        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
+        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
+        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
+        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
+        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
+        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
+        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
+        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
+        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
+        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
+        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
+        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
+        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
+        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
+        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
+        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
+        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
+        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
+        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
+        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
+        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
+        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
+        },debug_level=None):
+        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
+        self.sources = sources
+        self.datarefs = {}
+        self.datasets = {}
+        self.datetime = dt.datetime(1981,1,1)
+
+        self.logger = logging.getLogger('data_global')
+        if debug_level is not None:
+            self.logger.setLevel(debug_level)
+        self.debug_level = debug_level
+
+        warnings.warn('omitting pressure field p and advection')
+
+    def in_library(self,fn):
+        if fn not in self.library.keys():
+            return False
+        else:
+            print("Warning: "+fn+" is already in the library.")
+            return True
+
+    def add_to_library(self,fn):
+        if not self.in_library(fn):
+            print("opening: "+fn)
+            self.library[fn] = \
+                book(fn,concat_dim='time',debug_level=self.debug_level)
+
+            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
+            #if 'latitude' in self.library[fn].variables:
+            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
+
+
    # default procedure for loading datasets into the globaldata library
    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
        """Register variable(s) of file (glob) *input_fn* under destination
        name(s) in ``self.datasets``/``self.datarefs``.

        Parameters
        ----------
        input_fn : str
            Filename or glob pattern; opened once via ``add_to_library``.
        varssource : str or list of str or None
            Variable name(s) as stored in the file.  A single string is
            promoted to a one-element list.  When None, auto-detection is
            attempted (see NOTE below).
        varsdest : str or list of str or None
            Destination name(s), aligned index-wise with *varssource*;
            defaults to *varssource*.

        When a destination name differs from the source name, a separate
        ``book`` entry keyed "<input_fn>.<varsource>.<vardest>" is created
        with a rename mapping, so the same file can serve several aliases.
        """
        if type(varssource) is str:
            varssource = [varssource]
        if type(varsdest) is str:
            varsdest = [varsdest]

        self.add_to_library(input_fn)

        # NOTE(review): this auto-detection branch looks broken --
        # self.sources maps "<KEY>:<var>" keys to *path strings*, which have
        # no .variables attribute; presumably self.library[input_fn] was
        # intended.  Confirm before relying on varssource=None.
        if varssource is None:
            varssource = []
            for var in self.sources[input_fn].variables:
                avoid = \
                ['lat','lon','latitude','longitude','time','lev','level']
                if ((len(list(var.shape)) >= 2) & (var not in avoid)): # keep only >=2-dimensional, non-coordinate variables
                    varssource.append(var)

        if varsdest is None:
            varsdest = varssource

        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
        for ivar,vardest in enumerate(varsdest):
            varsource = varssource[ivar]
            print('setting '+vardest+' as '+varsource+' from '+input_fn)

            if vardest in self.datarefs.keys():
                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
            #self.add_to_library(fn,varsource,vardest)
            if vardest != varsource:
                # alias: open a dedicated book for this rename so the
                # original entry keeps its source variable name
                libkey = input_fn+'.'+varsource+'.'+vardest
                if libkey not in self.library.keys():
                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
                    self.library[libkey] = book(input_fn,\
                                                debug_level=self.debug_level)
                    self.library[libkey].set_renames({varsource: vardest})

                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
            else:
                self.datarefs[vardest] = input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]

            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
            #     print('Warning: '+ vardest "not in " + input_fn)
+
+
+    def load_datasets(self,sources = None,recalc=0):
+
+        if sources is None:
+            sources = self.sources
+        for key in sources.keys():
+            #datakey,vardest,*args = key.split(':')
+            datakey,vardest = key.split(':')
+            #print(datakey)
+
+            fnvarsource = sources[key].split(':')
+            if len(fnvarsource) > 2:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource,fnargs = fnvarsource
+                fnargs = [fnargs]
+            elif len(fnvarsource) > 1:
+                #fn,varsource,*fnargs = fnvarsource
+                fn,varsource = fnvarsource
+                fnargs = []
+            else:
+                fn = sources[key]
+                varsource = vardest
+            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
+
+    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
+            # the default way of loading a 2d dataset
+            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
+                self.load_dataset_default(fn,varsource,vardest)
+            elif datakey == 'IGBPDIS':
+                if vardest == 'alpha':
+                    ltypes = ['W','B','H','TC']
+                    for ltype in ltypes:
+                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
+                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
+
+
+                    # landfr = {}
+                    # for ltype in ['W','B','H','TC']:
+                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
+
+
+
+                    keytemp = 'alpha'
+                    fnkeytemp = fn+':IGBPDIS:alpha'
+                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
+                        self.library[fnkeytemp]  = book(fnkeytemp,
+                                                        debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+                    else:
+                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
+                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
+                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
+                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
+                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
+                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
+                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
+
+                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+
+                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
+                        for ltype in ltypes:
+                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
+
+                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
+                        print('writing file to: '+fnkeytemp)
+                        os.system('rm '+fnkeytemp)
+                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
+                        self.library[fnkeytemp].close()
+
+
+                        self.library[fnkeytemp]  = \
+                            book(fnkeytemp,debug_level=self.debug_level)
+                        self.datasets[keytemp] = self.library[fnkeytemp]
+                        self.datarefs[keytemp] = fnkeytemp
+
+
+                else:
+                    self.load_dataset_default(fn,varsource,vardest)
+
+
+            elif datakey == 'GLAS':
+                self.load_dataset_default(fn,varsource,vardest)
+                if vardest == 'z0m':
+                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
+                elif vardest == 'z0h':
+                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
+            elif datakey == 'DSMW':
+
+
+                # Procedure of the thermal properties:
+                # 1. determine soil texture from DSMW/10.
+                # 2. soil type with look-up table (according to DWD/EXTPAR)
+                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
+                #    with parameter look-up table from Noilhan and Planton (1989).
+                #    Note: The look-up table is inspired on DWD/COSMO
+
+                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first
+
+
+
+                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
+                self.load_dataset_default(fn,'DSMW')
+                print('calculating texture')
+                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
+                TEMP  = {}
+                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
+                TEMP3 = {}
+                for SPKEY in SPKEYS:
+
+
+                    keytemp = SPKEY+'_values'
+                    fnoutkeytemp = fnout+':DSMW:'+keytemp
+                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                    else:
+                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
+                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
+                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
+                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+                        # for faster computation, we need to get it to memory out of Dask.
+                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
+                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
+
+                # yes, I know I only check the last file.
+                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
+                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
+                        print('idx',idx,SPKEY)
+                        SEL = (TEMP2 == idx)
+                    #     print(idx,len(TEMP3))
+                        for SPKEY in SPKEYS:
+                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
+
+                    for SPKEY in SPKEYS:
+                        keytemp = SPKEY+'_values'
+                        fnoutkeytemp = fnout+':DSMW:'+keytemp
+                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
+                        os.system('rm '+fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
+                        self.datasets[SPKEY+'_values'].close()
+
+
+                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
+                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
+
+
+                keytemp = 'texture'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+                else:
+                    self.library[fn+':DSMW:texture'] = xr.Dataset()
+                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
+                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
+                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
+                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+
+                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
+
+                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
+                    zundef[zundef < 0] = np.nan
+                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
+                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
+
+                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+
+
+                    self.library[fnoutkeytemp]  = \
+                        book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
+                    self.datarefs[keytemp] =fn+':DSMW:texture'
+
+
+                print('calculating texture type')
+
+
+
+                keytemp = 'itex'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+                else:
+                    self.library[fnoutkeytemp] = xr.Dataset()
+                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+                    X = self.datasets['texture'].page['texture'].values*100
+                    X[pd.isnull(X)] = -9
+
+
+                    self.datasets[keytemp][keytemp].values = X
+
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
+                    self.datasets['itex'].close()
+
+
+                    self.library[fnoutkeytemp] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
+                    self.datarefs[keytemp] =fn+':DSMW:itex'
+
+
+                keytemp = 'isoil'
+                fnoutkeytemp=fnout+':DSMW:'+keytemp
+                isoil_reprocessed = False
+                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+                else:
+                    isoil_reprocessed = True
+                    print('calculating soil type')
+                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                    ITEX = self.datasets['itex'].page['itex'].values
+                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
+                    LOOKUP = [
+                              [-10 ,9],# ocean
+                              [0 ,7],# fine textured, clay (soil type 7)
+                              [20,6],# medium to fine textured, loamy clay (soil type 6)
+                              [40,5],# medium textured, loam (soil type 5)
+                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
+                              [80,3],# coarse textured, sand (soil type 3)
+                              [100,9],# coarse textured, sand (soil type 3)
+                            ]
+                    for iitex,iisoil in LOOKUP:
+                        ISOIL[ITEX > iitex] = iisoil
+                        print('iitex,iisoil',iitex,iisoil)
+
+
+                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
+                    LOOKUP = [
+                              [9001, 1 ], # ice, glacier (soil type 1)
+                              [9002, 2 ], # rock, lithosols (soil type 2)
+                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
+                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
+                              [9,    9 ], # undefined (ocean)
+                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
+                              [9000, 9 ], # undefined (inland lake)
+                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
+                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
+                            ]
+                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
+                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
+
+                    CODE_VALUES[ITEX == 901200] = 9012
+                    for icode,iisoil in LOOKUP:
+                        ISOIL[CODE_VALUES == icode] = iisoil
+
+                    self.datasets['isoil']['isoil'].values = ISOIL
+                    os.system('rm '+fnoutkeytemp)
+                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
+                    self.datasets[keytemp].close()
+                    print('saved inbetween file to: '+fnoutkeytemp)
+
+                    self.library[fn+':DSMW:isoil'] = \
+                            book(fnoutkeytemp,debug_level=self.debug_level)
+                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                    self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+                #adopted from data_soil.f90 (COSMO5.0)
+                SP_LOOKUP = {
+                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
+                  # (by index)                                           loam                    loam                                water      ice
+                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
+                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
+                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
+                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
+                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
+                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
+                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
+                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
+                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
+                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
+                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
+                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
+                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
+                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
+                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
+                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
+                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
+                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
+                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
+                  # Important note: For peat, the unknown values below are set equal to that of loam
+                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
+                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
+                  #error in table 2 of NP89: values need to be multiplied by e-6
+                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
+                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
+
+                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
+                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
+                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
+                }
+
+
+                # isoil_reprocessed = False
+                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
+
+                #     self.library[fn+':DSMW:isoil'] = \
+                #             book(fnoutkeytemp,debug_level=self.debug_level)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+                # else:
+                #     isoil_reprocessed = True
+                #     print('calculating soil type')
+                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
+                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
+                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
+                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
+
+
+
+
+                # this should become cleaner in future but let's hard code it for now.
+                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
+                print('calculating soil parameter')
+                DATATEMPSPKEY = {}
+                if (recalc < 1) and (isoil_reprocessed == False): 
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        keytemp = SPKEY
+                        fnoutkeytemp=fnout+':DSMW:'+keytemp
+                        self.library[fn+':DSMW:'+SPKEY] =\
+                                book(fnoutkeytemp,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
+                        self.datarefs[SPKEY] =fnoutkeytemp
+                else:
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+
+                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
+                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
+                    ISOIL = self.datasets['isoil'].page['isoil'].values
+                    print(np.where(ISOIL>0.))
+                    for i in range(11):
+                        SELECT = (ISOIL == i)
+                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
+
+                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
+                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
+
+                        os.system('rm '+fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
+                        self.datasets[SPKEY].close()
+                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
+
+                        self.library[fn+':DSMW:'+SPKEY] = \
+                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
+                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
+                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
+
+
+            else:
+                self.load_dataset_default(fn,varsource,vardest)
+
+
+
+
+
+
+#
+#                 # only print the last parameter value in the plot
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'cala'
+#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#                 #inputs.append(cp.deepcopy(class_settings))
+#                 #var = 'crhoc'
+#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
+#                 #valnew = class_settings.__dict__[var]
+#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#     key = "CERES"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         CERES_start_date = dt.datetime(2000,3,1)
+#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
+#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
+#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
+#
+#         var = 'cc'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#         print(class_settings.lat,class_settings.lon)
+#
+#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
+#
+#         input_nc.close()
+#
+
+
+#     key = "GIMMS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
+#         print("Reading Leaf Area Index from "+input_fn)
+#         var = 'LAI'
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
+#
+#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
+#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
+#
+#         if np.isnan(tarray[idatetime]):
+#             print("interpolating GIMMS cveg nan value")
+#
+#             mask = np.isnan(tarray)
+#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
+#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
+#             else:
+#                 print("Warning. Could not interpolate GIMMS cveg nan value")
+#
+#         class_settings.__dict__[var] = tarray[idatetime]
+#
+#         input_nc.close()
+#
+#     key = "IGBPDIS_ALPHA"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         var = 'alpha'
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
+#         print("Reading albedo from "+input_fn)
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#
+#         landfr = {}
+#         for ltype in ['W','B','H','TC']:
+#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
+#
+#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
+#
+#         alpha=0.
+#         for ltype in landfr.keys():
+#             alpha += landfr[ltype]*aweights[ltype]
+#
+#
+#         class_settings.__dict__[var] = alpha
+#         input_nc.close()
+#
+#
+#     key = "ERAINT_ST"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         print("Reading soil temperature from "+input_fn)
+#
+#         var = 'Tsoil'
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
+#
+#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
+#         var = 'T2'
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
+#
+#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
+#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
+#
+#
+#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
+#
+#
+#         input_nc.close()
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #var = 'T2'
+#     #valold = class_settings.__dict__[var]
+#     #
+#     #class_settings.__dict__[var] = 305.
+#     #class_settings.__dict__['Tsoil'] = 302.
+#     #valnew = class_settings.__dict__[var]
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     #inputs.append(cp.deepcopy(class_settings))
+#     #
+#     #var = 'Lambda'
+#     #valold = class_settings.__dict__[var]
+#
+#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
+#     ## I need to ask Chiel.
+#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
+#     #
+#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
+#     #class_settings.__dict__[var] = valnew
+#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
+#
+#
+#
+#     key = "GLAS"
+#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
+#
+#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
+#         print("Reading canopy height for determining roughness length from "+input_fn)
+#         var = 'z0m'
+#
+#
+#         #plt.plot
+#
+#         input_nc = nc4.Dataset(input_fn,'r')
+#
+#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
+#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
+#
+#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
+#
+#         lowerlimit = 0.01
+#         if testval < lowerlimit:
+#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
+#             class_settings.__dict__[var] = lowerlimit
+#         else:
+#             class_settings.__dict__[var] = testval
+#
+#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
+#
+#
+#         input_nc.close()
+
+
+
+
+
diff --git a/lib/interface_functions.py b/lib/interface_functions.py
new file mode 100644
index 0000000..3e483f3
--- /dev/null
+++ b/lib/interface_functions.py
@@ -0,0 +1,506 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+#from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
+from matplotlib.colors import LinearSegmentedColormap
+
class records_iterator(object):
    """Iterator over the rows of a records table (a pandas DataFrame).

    Yields (index_value, row) pairs, much like DataFrame.iterrows(), but
    additionally supports stepping backwards with __prev__.
    """
    def __init__(self, records):
        # records: pandas DataFrame holding one record per row
        self.records = records
        self.ix = -1  # position of the last returned row; -1 = before start

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        # advance by `jump` positions (negative jump steps backwards)
        self.ix = (self.ix + jump)
        if self.ix >= len(self.records.index):
            raise StopIteration

        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bug fix: original called self.__next__(self, jump=-1), passing
        # `self` twice, which raised a TypeError on every call
        return self.__next__(jump=-1)
+
+
+#'_afternoon.yaml'
def get_record_yaml(yaml_file, index_start, index_end, mode='mod'):
    """Extract one record from an already-open multi-document yaml archive.

    Parameters
    ----------
    yaml_file : open file handle of the yaml archive
    index_start, index_end : int
        byte offsets delimiting the record inside the file
    mode : str
        'mod' -> return a class4gl model-output object,
        'ini' -> return a class4gl_input object.
        Any other value falls through and implicitly returns None.

    The yaml snippet is written to a temporary buffer file, converted to
    json by an external Ruby one-liner (hard-coded cluster path), and
    parsed back with json.load. Buffer files are removed afterwards.
    """
    filename = yaml_file.name
    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
    #yaml_file = open(filename)

    #print('going to next observation',filename)
    # jump to the start of the requested record
    yaml_file.seek(index_start)

    # sanitize values the Ruby/json round-trip cannot digest; stripping
    # '---' leaves exactly one yaml document in the buffer
    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')

    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
    filebuffer.write(buf)
    filebuffer.close()
    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))

    # convert the yaml buffer to json with Ruby (presumably much faster
    # than PyYAML for these large soundings — TODO confirm)
    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '

    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
    print(command)
    os.system(command)
    jsonstream = open(filename+'.buffer.json.'+str(index_start))
    record_dict = json.load(jsonstream)
    jsonstream.close()
    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))


    if mode =='mod':
        modelout = class4gl()
        modelout.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))

        return modelout
    elif mode == 'ini':


        # datetimes are incorrectly converted to strings. We need to convert them
        # again to datetimes
        for key,value in record_dict['pars'].items():
            # we don't want the key with columns that have none values
            if value is not None:
                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
               # elif (type(value) == str):
                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")

            # NOTE(review): 'nan' was rewritten to '9e19' (== 9e19) above, so
            # this 0.9e19 / '.9e19' test looks unreachable — confirm which
            # sentinel the yaml writer actually emits
            if (value == 0.9e19) or (value == '.9e19'):
                record_dict['pars'][key] = np.nan
        for key in record_dict.keys():
            #print(key)
            # restore NaNs inside the profile tables as well
            if key in ['air_ap','air_balloon',]:
                #NNprint('check')
                for datakey,datavalue in record_dict[key].items():
                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]

        #os.system('rm '+filename+'.buffer.json.'+str(index_start))

        c4gli = class4gl_input()
        print(c4gli.logger,'hello')
        c4gli.load_yaml_dict(record_dict)
        os.system('rm '+filename+'.buffer.json.'+str(index_start))
        return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
+
class stations(object):
    """Table of sounding stations found in a class4gl yaml directory.

    The station list (STNID, latitude, longitude, filename) is cached in
    <path>/stations_list.csv and only rebuilt from the yaml record files
    when the cache is missing or refetch_stations is True. The resulting
    DataFrame is exposed as self.table, indexed by STNID.
    """
    def __init__(self, path, suffix='ini', refetch_stations=False):
        self.path = path

        self.file = self.path + '/stations_list.csv'
        if os.path.isfile(self.file) and (not refetch_stations):
            self.table = pd.read_csv(self.file)
        else:
            self.table = self.get_stations(suffix=suffix)
            self.table.to_csv(self.file)

        self.table = self.table.set_index('STNID')
        #print(self.table)

    def get_stations(self, suffix):
        """Scan the yaml files in self.path and build the stations table.

        Both the chunked ('#####_0_<suffix>.yaml') and the single-file
        ('#####_<suffix>.yaml') naming conventions are supported; only the
        first record of each file is read for the station metadata.
        """
        stations_list_files = glob.glob(self.path + '/?????_0_' + suffix + '.yaml')
        if len(stations_list_files) == 0:
            stations_list_files = glob.glob(self.path + '/?????_' + suffix + '.yaml')
        stations_list_files.sort()
        print(stations_list_files)
        if len(stations_list_files) == 0:
            raise ValueError('no stations found that match "' + self.path + '/?????[_0]_' + suffix + '.yaml' + '"')
        stations_list = []
        for stations_list_file in stations_list_files:
            thisfile = open(stations_list_file, 'r')
            yamlgen = yaml.load_all(thisfile)
            try:
                # idiom fix: next(gen) instead of gen.__next__()
                first_record = next(yamlgen)
            except Exception:
                # bug fix: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit; keep the best-effort skip
                # of empty/unreadable files, but catch Exception only
                first_record = None
            if first_record is not None:
                stations_list.append({})
                for column in ['STNID', 'latitude', 'longitude']:
                    #print(first_record['pars'].keys())
                    stations_list[-1][column] = first_record['pars'][column]
                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
            yamlgen.close()
            thisfile.close()

        print(stations_list)
        return pd.DataFrame(stations_list)
+
class stations_iterator(object):
    """Bidirectional iterator over the rows of a stations table.

    Yields (STNID, row) pairs and wraps around once past the end of the
    table. The position can also be set directly, either by row number
    (set_row) or by station id (set_STNID).
    """
    def __init__(self, stations):
        # stations: object exposing a pandas DataFrame as .table
        # (indexed by STNID)
        self.stations = stations
        self.ix = -1  # position of the last returned row; -1 = before start

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        self.ix = (self.ix + jump)
        if (self.ix >= len(self.stations.table.index)) or (self.ix < 0):
            raise StopIteration
        # wrap the position so subsequent calls cycle through the table
        self.ix = np.mod(self.ix, len(self.stations.table))
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_row(self, row):
        """Jump to an absolute row position; return its (STNID, row)."""
        self.ix = row
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def set_STNID(self, STNID):
        """Jump to the station with the given id; return its (STNID, row)."""
        self.ix = np.where(self.stations.table.index == STNID)[0][0]
        print(self.ix)
        print(self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]

    def __prev__(self):
        # bug fix: original called self.__next__(self, jump=-1), passing
        # `self` twice -> TypeError
        return self.__next__(jump=-1)

    def close(self):
        # bug fix: original signature was "def close():" (no self), so any
        # call raised a TypeError and `del(self.ix)` referenced an
        # undefined name
        del self.ix
+
class records_iterator(object):
    """Iterator over the rows of a records table, wrapping at the end.

    Yields (index_value, row) pairs; __prev__ steps one record back
    (wrapping to the last record when stepping back from the first).

    NOTE(review): this shadows the identically-named class defined earlier
    in the file; the only difference is the np.mod wrap-around.
    """
    def __init__(self, records):
        # records: pandas DataFrame holding one record per row
        self.records = records
        self.ix = -1  # position of the last returned row; -1 = before start

    def __iter__(self):
        return self

    def __next__(self, jump=1):
        self.ix = (self.ix + jump)
        if self.ix >= len(self.records.index):
            raise StopIteration
        # wrap negative positions (reached via __prev__) to the table end
        self.ix = np.mod(self.ix, len(self.records))
        return self.records.index[self.ix], self.records.iloc[self.ix]

    def __prev__(self):
        # bug fix: original called self.__next__(self, jump=-1), passing
        # `self` twice -> TypeError
        return self.__next__(jump=-1)
+
+
+# #'_afternoon.yaml'
+# def get_record_yaml(yaml_file,index_start,index_end):
+#     filename = yaml_file.name
+#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
+#     #yaml_file = open(filename)
+# 
+#     #print('going to next observation',filename)
+#     yaml_file.seek(index_start)
+# 
+#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
+# 
+#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+#     filebuffer.write(buf)
+#     filebuffer.close()
+#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
+#     
+#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+# 
+#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
+#     print(command)
+#     os.system(command)
+#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
+#     record_dict = json.load(jsonstream)
+#     jsonstream.close()
+#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+#  
+#     # datetimes are incorrectly converted to strings. We need to convert them
+#     # again to datetimes
+#     for key,value in record_dict['pars'].items():
+#         # we don't want the key with columns that have none values
+#         if value is not None: 
+#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
+#            # elif (type(value) == str):
+#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+#                 
+#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
+# 
+#         if (value == 0.9e19) or (value == '.9e19'):
+#             record_dict['pars'][key] = np.nan
+#     for key in record_dict.keys():
+#         print(key)
+#         if key in ['air_ap','air_balloon',]:
+#             print('check')
+#             for datakey,datavalue in record_dict[key].items():
+#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
+# 
+#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
+# 
+#     c4gli = class4gl_input()
+#     c4gli.load_yaml_dict(record_dict)
+#     return c4gli
+
+
+
+
+
+
+        # self.frames['stats']['records_current_station_index'] = \
+        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+        #      == \
+        #      self.frames['stats']['current_station'].name)
+
+        # # create the value table of the records of the current station
+        # tab_suffixes = \
+        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+        # for tab_suffix in tab_suffixes:
+        #     self.frames['stats']['records_current_station'+tab_suffix] = \
+        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+# class records_selection(object):
+#     def __init__
+
+# class records(object):
+#     def __init__(self,stations,path_obs,path_mod):
+#         self.stations = stations
+#         self.path_obs = path_obs
+#         self.path_mod = path_mod
+# 
+#         self.ini =       self.get_records(self.path_mod,'ini')
+#         self.mod =       self.get_records(self.path_mod,'mod')
+#         #self.morning =   self.get_records(self.path_obs,'morning')
+#         self.afternoon = self.get_records(self.path_obs,'afternoon')
+# 
+#         
+#         self.afternoon.index = self.afternoon.ldatetime.dt.date
+#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
+# 
+#         self.index = self.ini.index
+#         self.mod.index = self.index
+#         self.afternoon.index = self.index
+# 
+# 
+#         #self.records_iterator = records_current_station_mod.iterrows()
+
+
+
def get_records(stations, path_yaml, getchunk='all', subset='morning', refetch_records=False):
    """Build an index table of the yaml records of a set of stations.

    For every station, each record's scalar parameters plus the byte
    offsets (index_start/index_end) of the record inside its yaml file are
    collected into a pandas DataFrame with a (STNID, chunk, index)
    MultiIndex. Per-file tables are cached as .pkl files next to the yaml
    files and only regenerated when the cache is missing, older than the
    yaml, or refetch_records is True.

    Parameters
    ----------
    stations : pandas DataFrame indexed by STNID (e.g. stations().table)
    path_yaml : directory holding the '#####[_chunk]_<subset>.yaml' files
    getchunk : 'all' to scan every chunk of a station, or a single chunk
        number
    subset : filename suffix ('morning', 'afternoon', 'ini', 'mod', ...)
    refetch_records : force regeneration of the .pkl cache files
    """

    records = pd.DataFrame()
    for STNID, station in stations.iterrows():
        dictfnchunks = []
        # bug fix: was "getchunk is 'all'" — identity comparison with a
        # string literal, which only works by CPython interning accident
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually
            # for original profile pairs)
            fn = path_yaml + '/' + format(STNID, '05d') + '_' + subset + '.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn, chunk=chunk))

            # otherwise, we use the new multi-chunk filename format
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml + '/' + format(STNID, '05d') + '_' + str(chunk) + '_' + subset + '.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn, chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1

        else:
            fn = path_yaml + '/' + format(STNID, '05d') + '_' + str(getchunk) + '_' + subset + '.yaml'
            dictfnchunks.append(dict(fn=fn, chunk=getchunk))

        if len(dictfnchunks) > 0:
            for dictfnchunk in dictfnchunks:
                yamlfilename = dictfnchunk['fn']
                chunk = dictfnchunk['chunk']
                print(chunk)

                pklfilename = yamlfilename.replace('.yaml', '.pkl')

                # decide whether the cached table can be reused
                generate_pkl = False
                if not os.path.isfile(pklfilename):
                    print('pkl file does not exist. I generate "' +
                          pklfilename + '" from "' + yamlfilename + '"...')
                    generate_pkl = True
                elif not (os.path.getmtime(yamlfilename) <
                          os.path.getmtime(pklfilename)):
                    print('pkl file older than yaml file, so I regenerate "' +
                          pklfilename + '" from "' + yamlfilename + '"...')
                    generate_pkl = True

                if refetch_records:
                    print('refetch_records flag is True. I regenerate "' +
                          pklfilename + '" from "' + yamlfilename + '"...')
                    generate_pkl = True
                if not generate_pkl:
                    records = pd.concat([records, pd.read_pickle(pklfilename)])
                else:
                    with open(yamlfilename) as yaml_file:

                        dictout = {}

                        # skip ahead to the first '---' document separator
                        next_record_found = False
                        end_of_file = False
                        while (not next_record_found) and (not end_of_file):
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        next_tell = yaml_file.tell()

                        while not end_of_file:

                            print(' next record:', next_tell)
                            current_tell = next_tell
                            next_record_found = False
                            yaml_file.seek(current_tell)
                            # copy one yaml document into a buffer file,
                            # sanitizing inf/nan which Ruby cannot parse
                            filebuffer = open(yamlfilename + '.buffer.yaml.' + str(current_tell), 'w')
                            linebuffer = ''
                            while (not next_record_found) and (not end_of_file):
                                filebuffer.write(linebuffer.replace('inf', '0').replace('nan', '0'))
                                linebuffer = yaml_file.readline()
                                next_record_found = (linebuffer == '---\n')
                                end_of_file = (linebuffer == '')
                            filebuffer.close()

                            next_tell = yaml_file.tell()
                            index_start = current_tell
                            index_end = next_tell

                            # convert the buffer to json with an external
                            # Ruby one-liner (hard-coded cluster path)
                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "' + "puts YAML.load_file('" + yamlfilename + ".buffer.yaml." + str(current_tell) + "').to_json" + '" > ' + yamlfilename + '.buffer.json.' + str(current_tell) + ' '
                            print(command)

                            os.system(command)
                            jsonstream = open(yamlfilename + '.buffer.json.' + str(current_tell))
                            record = json.load(jsonstream)
                            dictouttemp = {}
                            for key, value in record['pars'].items():
                                # we don't want the keys with columns that
                                # have none values
                                if value is not None:
                                    regular_numeric_types = [type(x) for x in [0, False, 0.0]]
                                    if type(value) in regular_numeric_types:
                                        dictouttemp[key] = value
                                    elif key in ['lSunrise', 'lSunset', 'datetime', 'ldatetime', 'datetime_daylight', 'datetime_daylight', 'ldatetime_daylight', 'ldatetime_daylight']:
                                        dictouttemp[key] = dt.datetime.strptime(value, "%Y-%m-%d %H:%M:%S %z")
                                        # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                        dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                            recordindex = record['index']
                            dictouttemp['chunk'] = chunk
                            dictouttemp['index_start'] = index_start
                            dictouttemp['index_end'] = index_end
                            os.system('rm ' + yamlfilename + '.buffer.json.' + str(current_tell))
                            for key, value in dictouttemp.items():
                                if key not in dictout.keys():
                                    dictout[key] = {}
                                dictout[key][(STNID, chunk, recordindex)] = dictouttemp[key]
                            print(' obs record registered')
                            jsonstream.close()
                            os.system('rm ' + yamlfilename + '.buffer.yaml.' + str(current_tell))
                    records_station = pd.DataFrame.from_dict(dictout)
                    records_station.index.set_names(('STNID', 'chunk', 'index'), inplace=True)
                    print('writing table file (' + pklfilename + ') for station '
                          + str(STNID))
                    records_station.to_pickle(pklfilename)
                    records = pd.concat([records, records_station])
    return records
+
def stdrel(mod, obs, columns):
    """Per-station standardized comparison of model values vs observations.

    For every requested column, each model value is expressed in units of
    the per-station observed standard deviation: the station-mean bias
    (model mean minus observed mean) plus the model anomaly (value minus
    model mean), both divided by the observed per-station std.
    """
    result = pd.DataFrame(columns=columns)
    for col in columns:
        # per-station statistics, broadcast back onto the rows
        mod_mean = mod.groupby('STNID')[col].transform('mean')
        obs_mean = obs.groupby('STNID')[col].transform('mean')
        obs_std = obs.groupby('STNID')[col].transform('std')
        result[col] = (mod_mean - obs_mean) / obs_std + (mod[col] - mod_mean) / obs_std
    return result
+
def pct(obs, columns):
    """Return the fractional (0, 1] percentile rank of each value per column.

    Parameters
    ----------
    obs : pandas DataFrame with the observation records
    columns : column names to rank

    Uses pandas Series.rank(pct=True); NaN values keep NaN ranks.
    """
    ranks = pd.DataFrame(columns=columns)
    for column in columns:
        # fix: dropped the redundant `pct[column] = ""` initialisation that
        # was immediately overwritten (and raises ValueError on modern
        # pandas when assigned to an empty frame)
        ranks[column] = obs[column].rank(pct=True)
    return ranks
+
def tendencies(mod_afternoon, obs_afternoon, obs_morning, keys):
    """Hourly tendencies d<key>/dt between morning and afternoon records.

    Parameters
    ----------
    mod_afternoon : DataFrame with the afternoon (model) values
    obs_afternoon, obs_morning : DataFrames whose ldatetime columns bound
        the time interval of each record pair
    keys : variable names to differentiate

    Returns a DataFrame with columns 'd<key>dt' in units of <key> per hour.
    """
    stats = pd.DataFrame()
    for key in keys:
        # bug fix: .dt.seconds returns only the seconds *component*
        # (0-86399) and silently drops whole days; .dt.total_seconds()
        # gives the full interval. (Also dropped the dead `= ""`
        # pre-assignment.)
        elapsed = (obs_afternoon.ldatetime - obs_morning.ldatetime).dt.total_seconds()
        stats['d' + key + 'dt'] = (mod_afternoon[key] - obs_morning[key]) / elapsed * 3600.
    return stats
+
diff --git a/lib/interface_multi.py b/lib/interface_multi.py
new file mode 100644
index 0000000..83148e5
--- /dev/null
+++ b/lib/interface_multi.py
@@ -0,0 +1,2061 @@
+import pandas as pd
+import numpy as np
+import datetime as dt
+import os
+import xarray as xr
+import sys
+from contextlib import suppress
+from time import sleep
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl,units
+from interface_functions import *
+# from data_soundings import wyoming
+import yaml
+import glob
+import pandas as pd
+import json
+import io
+import subprocess
+import pytz
+from scipy.stats import mstats
+
from matplotlib.colors import LinearSegmentedColormap
# red -> grey -> blue diverging colormap used by the statistics views.
# NOTE(review): the same dict is re-created inside
# c4gl_interface_soundings.plot() -- presumably one of the two copies is
# redundant; confirm which one is actually used.
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


# NOTE(review): os.system runs the command in a subshell, so environment
# changes made by 'module load Ruby' do not persist into this Python
# process or its children -- confirm whether this call has any effect.
os.system('module load Ruby')
+
+class c4gl_interface_soundings(object):
    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
        """ creates an interactive interface for analysing class4gl experiments

        INPUT:
            path_exp : path of the experiment output
            path_obs : path of the observations 
            globaldata: global data that is being shown on the map
            refetch_records: rebuild the per-station record tables instead of
                             using the cached pickle files?
            refetch_stations: do we need to build the list of the stations again?
            inputkeys: surface/ground input variables selectable on the map.
                       NOTE(review): mutable default argument -- harmless as
                       long as it is never mutated, which the code below does
                       not do, but a None-sentinel would be safer.
        OUTPUT:
            the procedure returns an interface object with interactive plots

        """
        
        # set the ground
        self.globaldata = globaldata

 
        self.path_exp = path_exp
        self.path_obs = path_obs
        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')

        # # get the list of stations
        # stationsfile = self.path_exp+'/stations_list.csv'
        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
        #     stations = pd.read_csv(stationsfile)
        # else:
        #     stations = get_stations(self.path_exp)
        #     stations.to_csv(stationsfile)

        # stations = stations.set_index('STNID')

        # the interface is organized in three linked "frames":
        #   'worldmap' : the station map, 'stats' : the per-record statistics
        #   views, 'profiles' : the profiles of the current record
        self.frames = {}

        self.frames['stats'] = {}
        self.frames['worldmap'] = {}
                
        # (the next line is repeated in the original; kept as-is)
        self.frames['profiles'] = {}
        self.frames['profiles'] = {}
        self.frames['profiles']['DT'] = None
        self.frames['profiles']['STNID'] = None

        #self.frames['worldmap']['stationsfile'] = stationsfile
        self.frames['worldmap']['stations'] = stations(self.path_exp, \
                                                       suffix='ini',\
                                                       refetch_stations=refetch_stations)

        # Initially, the stats frame inherets the values/iterators of
        # worldmap
        for key in self.frames['worldmap'].keys():
            self.frames['stats'][key] = self.frames['worldmap'][key]

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_ini'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='ini',\
                                           refetch_records=refetch_records
                                           )
        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_mod'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_exp,\
                                           subset='mod',\
                                           refetch_records=refetch_records
                                           )

        # get its records and load it into the stats frame
        self.frames['stats']['records_all_stations_obs_afternoon'] =\
                        get_records(self.frames['stats']['stations'].table,\
                                           self.path_obs,\
                                           subset='afternoon',\
                                           refetch_records=refetch_records
                                           )

        # model records are assumed to line up one-to-one with the morning
        # (ini) records they were initialized from
        self.frames['stats']['records_all_stations_mod'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        self.frames['stats']['records_all_stations_ini']['dates'] = \
            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date

        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)


        # match every afternoon observation to the morning record of the same
        # station and local date, then re-align the afternoon table on the
        # (STNID, chunk, index) index of the morning records
        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index

        self.frames['stats']['records_all_stations_obs_afternoon'] = \
            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]

        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
            self.frames['stats']['records_all_stations_ini'].index 

        # diagnostics shown in the statistics scatter views
        self.frames['stats']['viewkeys'] = ['h','theta','q']
        print('Calculating table statistics')
        self.frames['stats']['records_all_stations_mod_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_mod'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )
        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_obs_afternoon'],\
                           self.frames['stats']['records_all_stations_ini'],\
                           self.frames['stats']['viewkeys']\
                          )

        self.frames['stats']['inputkeys'] = inputkeys
        
        # self.frames['stats']['inputkeys'] = \
        #     [ key for key in \
        #       self.globaldata.datasets.keys() \
        #       if key in \
        #       list(self.frames['stats']['records_all_stations_obs'].columns)]


        # get units from the class4gl units database
        self.units = dict(units)
        # for those that don't have a definition yet, we just ask a question
        # mark
        # NOTE(review): this sets '?' for *every* input key, also keys that
        # do have a definition in `units` -- confirm whether a
        # `if var not in self.units` guard was intended
        for var in self.frames['stats']['inputkeys']:
            self.units[var] = '?'

        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
        self.frames['stats']['records_all_stations_ini_pct'] = \
                  pct(self.frames['stats']['records_all_stations_ini'], \
                      columns = self.frames['stats']['inputkeys'])

        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
        #     mod['

        # 
        # 
        # \
        #        self.frames['stats']['records_all_stations_mod'], \



        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
        #               columns = [ 'd'+key+'dt' for key in \
        #                           self.frames['stats']['viewkeys']], \
        #              )

        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
        #               obs = self.frames['stats']['records_all_stations_ini'], \
        #               columns = self.frames['stats']['viewkeys'], \
        #              )
        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
        
        print('filtering pathological data')
        # some observational sounding still seem problematic, which needs to be
        # investigated. In the meantime, we filter them
        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))

        # we filter ALL data frames!!!
        for key in self.frames['stats'].keys():
            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
               (self.frames['stats'][key].index.names == indextype):
                self.frames['stats'][key] = self.frames['stats'][key][valid]
        print(str(len(valid) - np.sum(valid))+' soundings are filtered')

        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index


        print("filtering stations from interface that have no records")
        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
                    == STNID).sum() == 0):
                print("dropping", STNID)
                self.frames['worldmap']['stations'].table = \
                        self.frames['worldmap']['stations'].table.drop(STNID)
                    
        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
        
        # TO TEST: should be removed, since it's is also done just below
        self.frames['stats']['stations_iterator'] = \
            self.frames['worldmap']['stations_iterator'] 

        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
        # NOTE(review): redundant self-assignment on the next line
        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
        self.next_station()

        # self.goto_datetime_worldmap(
        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
        #     'after')
+    def sel_station(self,STNID=None,rownumber=None):
+
+        if (STNID is not None) and (rownumber is not None):
+            raise ValueError('Please provide either STNID or rownumber, not both.')
+
+        if (STNID is None) and (rownumber is None):
+            raise ValueError('Please provide either STNID or rownumber.')
+            
+        if STNID is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
+            print(
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+            )
+            self.update_station()
+        elif rownumber is not None:
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
+            self.update_station()
+
+
+
+    def next_station(self,event=None,jump=1):
+        with suppress(StopIteration):
+            self.frames['worldmap']['STNID'],\
+            self.frames['worldmap']['current_station'] \
+                = self.frames['worldmap']['stations_iterator'].__next__(jump)
+            # self.frames['worldmap']['stations_iterator'].close()
+            # del(self.frames['worldmap']['stations_iterator'])
+            # self.frames['worldmap']['stations_iterator'] = \
+            #                 selfself.frames['worldmap']['stations'].iterrows()
+            # self.frames['worldmap']['STNID'],\
+            # self.frames['worldmap']['current_station'] \
+            #     = self.frames['worldmap']['stations_iterator'].__next__()
+
+        self.update_station()
+
+    def prev_station(self,event=None):
+        self.next_station(jump = -1,event=event)
    def update_station(self):
        """ refresh all frames after the current station changed: rebuild the
        per-station record tables, (re)open the station's yaml files and jump
        to the station's first record. """
        # the stats frame follows the worldmap station selection
        for key in ['STNID','current_station','stations_iterator']: 
            self.frames['stats'][key] = self.frames['worldmap'][key] 



        # generate index of the current station
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts out as a copy of the stats frame
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        # (re)open the station's yaml files, closing the files of the
        # previously selected station first
        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently

        self.update_record()
+
+    def next_record(self,event=None,jump=1):
+        with suppress(StopIteration):
+            (self.frames['profiles']['STNID'] , \
+            self.frames['profiles']['current_record_chunk'] , \
+            self.frames['profiles']['current_record_index']) , \
+            self.frames['profiles']['current_record_mod'] = \
+                      self.frames['profiles']['records_iterator'].__next__(jump)
+        # except (StopIteration):
+        #     self.frames['profiles']['records_iterator'].close()
+        #     del( self.frames['profiles']['records_iterator'])
+        #     self.frames['profiles']['records_iterator'] = \
+        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
+        #     (self.frames['profiles']['STNID'] , \
+        #     self.frames['profiles']['current_record_index']) , \
+        #     self.frames['profiles']['current_record_mod'] = \
+        #                     self.frames['profiles']['records_iterator'].__next__()
+
+        for key in self.frames['profiles'].keys():
+            self.frames['stats'][key] = self.frames['profiles'][key]
+        self.update_record()
+
+    def prev_record(self,event=None):
+        self.next_record(jump=-1,event=event)
+
    def update_record(self):
        """ refresh all frames after the current record changed: look up the
        record's rows in the per-station and all-station tables, and load the
        corresponding yaml profiles (ini, model and afternoon observation)
        from the already-opened station files. """
        # row lookups by the (STNID, chunk, index) key
        self.frames['profiles']['current_record_ini'] =  \
            self.frames['profiles']['records_current_station_ini'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'],\
                  self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon'] =  \
            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'] , \
                  self.frames['profiles']['current_record_index'])]

        self.frames['profiles']['current_record_mod_stats'] = \
                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
                    self.frames['profiles']['STNID'], \
                    self.frames['profiles']['current_record_chunk'], \
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_ini_pct'] = \
                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]

        # the stats frame mirrors the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        # frame
        # note that the current station, record is the same as the stats frame for initialization

        # select first 
        #self.frames['profiles']['current_record_index'], \
        #self.frames['profiles']['record_yaml_mod'] = \
        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
        #                   self.frames['stats']['current_record_index'])
        # read the yaml profiles from the opened station files; index_start /
        # index_end delimit the record's byte range within the file
        self.frames['profiles']['record_yaml_mod'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_mod'], \
               self.frames['profiles']['current_record_mod'].index_start,
               self.frames['profiles']['current_record_mod'].index_end,
               mode='mod')
                                
        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_ini'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_ini'], \
               record_ini.index_start,
               record_ini.index_end,
                mode='ini')

        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_obs_afternoon'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_afternoon'], \
               record_afternoon.index_start,
               record_afternoon.index_end,
                mode='ini')


        key = self.frames['worldmap']['inputkey']
        # only redraw the map if the current world map has a time
        # dimension
        if 'time' in self.globaldata.datasets[key].page[key].dims:
            self.goto_datetime_worldmap(
                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                'after')
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap',
                                                  'profiles'])
        else:
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap_stations',
                                                  'profiles'])
+
+    def abline(self,slope, intercept,axis):
+        """Plot a line from slope and intercept"""
+        #axis = plt.gca()
+        x_vals = np.array(axis.get_xlim())
+        y_vals = intercept + slope * x_vals
+        axis.plot(x_vals, y_vals, 'k--')
+
    def plot(self):
        """ build the interactive matplotlib figure: the statistics scatter
        axes, the worldmap with its station overlay and colorbars, the
        profile/output axes, and the navigation buttons. """
        import pylab as pl
        from matplotlib.widgets import Button
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        '''
        Definition of the axes for the sounding table stats
        '''
        
        fig = pl.figure(figsize=(14,9))
        axes = {} #axes
        btns = {} #buttons

        # frames, which sets attributes for a group of axes, buttens, 
        # one scatter axis (left column) per statistics column
        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
            label = 'stats_'+str(key)
            axes[label] = fig.add_subplot(\
                            len(self.frames['stats']['viewkeys']),\
                            5,\
                            5*ikey+1,label=label)
            # Actually, the axes should be a part of the frame!
            #self.frames['stats']['axes'] = axes[

            # pointer to the axes' point data
            axes[label].data = {}

            # pointer to the axes' color fields
            axes[label].fields = {}


        fig.tight_layout()
        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)

        label ='stats_colorbar'
        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
        axes[label].fields = {}

        # NOTE(review): this duplicates the module-level cdictpres /
        # statsviewcmap definition -- consider reusing that one
        from matplotlib.colors import LinearSegmentedColormap
        cdictpres = {'blue': (\
                           (0.,    0.,  0.),
                           (0.25,  0.25, 0.25),
                           (0.5,  .70, 0.70),
                           (0.75, 1.0, 1.0),
                           (1,     1.,  1.),
                           ),
               'green': (\
                           (0. ,   0., 0.0),
                           (0.25,  0.50, 0.50),
                           (0.5,  .70, 0.70),
                           (0.75,  0.50, 0.50),
                           (1  ,    0,  0.),
                           ),
               'red':  (\
                          (0 ,  1.0, 1.0),
                          (0.25 ,  1.0, 1.0),
                           (0.5,  .70, 0.70),
                          (0.75 , 0.25, 0.25),
                          (1,    0., 0.),
                          )}
        
        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


        label = 'times'
               
        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}


        label = 'worldmap'
               
        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}
        axes[label].lat = None
        axes[label].lon = None

        label = 'worldmap_colorbar'
        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
        axes[label].fields = {}

        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
        label = 'worldmap_stations'
        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
        axes[label].data = {}

        # wire up mouse interaction (picking stations, hover highlighting)
        fig.canvas.mpl_connect('pick_event', self.on_pick)
        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)


        """ buttons definitions """
        
        label = 'bprev_dataset'
        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous dataset')
        btns[label].on_clicked(self.prev_dataset)

        label = 'bnext_dataset'
        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Next dataset')
        btns[label].on_clicked(self.next_dataset)

        label = 'bprev_datetime'
        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous datetime')
        btns[label].on_clicked(self.prev_datetime)

        label = 'bnext_datetime'
        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Next datetime')
        btns[label].on_clicked(self.next_datetime)


        label = 'bprev_station'
        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous station')
        btns[label].on_clicked(self.prev_station)

        label = 'bnext_station'
        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Next station')
        btns[label].on_clicked(self.next_station)

        label = 'bprev_record'
        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous record')
        btns[label].on_clicked(self.prev_record)

        label = 'bnext_record'
        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Next record')
        btns[label].on_clicked(self.next_record)


        # self.nstatsview = nstatsview
        # self.statsviewcmap = statsviewcmap
        self.fig = fig
        self.axes = axes
        self.btns = btns
        self.tbox = {}
        # self.hover_active = False

        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
        #                                transform=plt.gcf().transFigure)

        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
                                          transform=plt.gcf().transFigure)

        # profile axes (air potential temperature / humidity) and the
        # output time-series and surface-energy-balance axes
        label = 'air_ap:theta'
        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)

        label = 'air_ap:q'
        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)

        label = 'out:h'
        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)

        label = 'out:theta'
        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)

        label = 'out:q'
        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)

        label = 'SEB'
        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)


        self.hover_active = False
        self.fig = fig
        self.fig.show()
        self.fig.canvas.draw()
        self.refresh_plot_interface()
+
+
+    # def scan_stations(self):
+    #     blabla
+        
+
+
+    # def get_records(current_file):
+    #     records = pd.DataFrame()
+
+    #     # initial position
+    #     next_record_found = False
+    #     while(not next_record_found):
+    #         next_record_found = (current_file.readline() == '---\n')
+    #     next_tell = current_file.tell() 
+    #     end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #     while not end_of_file:
+    #         current_tell = next_tell
+    #         next_record_found = False
+    #         current_file.seek(current_tell)
+    #         while ( (not next_record_found) and (not end_of_file)):
+    #             current_line = current_file.readline()
+    #             next_record_found = (currentline == '---\n')
+    #             end_of_file = (currentline == '') # an empty line means we are at the end
+
+    #         # we store the position of the next record
+    #         next_tell = current_file.tell() 
+    #         
+    #         # we get the current record. Unfortunately we need to reset the
+    #         # yaml record generator first.
+    #         current_yamlgen.close()
+    #         current_yamlgen = yaml.load_all(current_file)
+    #         current_file.seek(current_tell)
+    #         current_record_mod = current_yamlgen.__next__()
+    #     current_yamlgen.close()
+
+    #     return records
+
+       #      next_record_found = False
+       #      while(not record):
+       #          next_record_found = (self.current_file.readline() == '---\n')
+       #      self.current_tell0 = self.current_file.tell() 
+
+       #  
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell0 = self.current_file.tell() 
+
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell1 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell0)
+       #  self.r0 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell1)
+       #  next_record_found = False
+       #  while ( (not next_record_found) and (not end_of_file):
+       #      current_line = self.current_file.readline()
+       #      next_record_found = (currentline == '---\n')
+       #      end_of_file = (currentline == '') # an empty line means we are at the end
+
+       #  self.current_tell2 = self.current_file.tell() 
+
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell1)
+       #  self.r1 = self.current_yamlgen.__next__()
+
+       #  self.current_file.seek(self.current_tell2)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell3 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell2)
+       #  self.r2 = self.current_yamlgen.__next__()
+
+       #  # go to position of next record in file
+       #  self.current_file.seek(self.current_tell3)
+       #  next_record_found = False
+       #  while(not next_record_found):
+       #      next_record_found = (self.current_file.readline() == '---\n')
+       #  self.current_tell4 = self.current_file.tell() 
+
+       #  self.current_yamlgen.close()
+       #  self.current_yamlgen = yaml.load_all(self.current_file)
+       #  self.current_file.seek(self.current_tell3)
+       #  self.r3 = self.current_yamlgen.__next__()
+ 
+       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
+
+    def goto_datetime_worldmap(self,DT,shift=None):
+        """Move the worldmap frame to the dataset time step closest to DT.
+
+        DT : datetime-like; converted to numpy.datetime64 for comparison
+            against the dataset's 'time' coordinate.
+        shift : str, optional
+            'after'  -> if the nearest step lies strictly before DT, advance
+                        one step; 'before' -> if it lies strictly after DT,
+                        step back one.
+
+        Side effects: pages the active dataset via browse_page(time=DT) and
+        updates self.frames['worldmap']['iDT'] and ['DT'].  Does nothing when
+        the active dataset has no 'time' dimension.
+        """
+        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+            # absolute distance of every available time step to the target DT;
+            # the first index attaining the minimum becomes the current step
+            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+            # optionally bias the pick to the step strictly after/before DT.
+            # NOTE(review): the +=1 / -=1 (and the -=2 below) are not clamped,
+            # so iDT can run past the ends of the time axis near the series
+            # boundaries -- confirm the callers never hit that case.
+            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                self.frames['worldmap']['iDT'] += 1
+            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                self.frames['worldmap']['iDT'] -= 1 
+            # for gleam, we take the values of the previous day
+            # NOTE(review): subtracting 2 steps presumably equals one day for
+            # the 'wg'/'w2' (GLEAM soil-moisture) datasets -- verify cadence.
+            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                self.frames['worldmap']['iDT'] -= 2 
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+        #else:
+        #    self.frames['worldmap'].pop('DT')
+
+    def next_datetime(self,event=None):
+        """Step the worldmap to the next time index, wrapping at the end.
+
+        event: unused widget-callback argument (button handler signature).
+        Only acts when the active dataset has a 'time' dimension; redraws
+        the worldmap panel if the figure has already been created.
+        """
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            # modular increment: wraps back to index 0 past the last step
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def prev_datetime(self,event=None):
+        """Step the worldmap to the previous time index, wrapping at index 0.
+
+        event: unused widget-callback argument (button handler signature).
+        Mirror of next_datetime(); only acts when the active dataset has a
+        'time' dimension, and redraws the worldmap if the figure exists.
+        """
+        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+            # for now we don't go to different files, so we cannot go to
+            # another file 
+            # modular decrement: index 0 wraps to the last time step
+            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
+            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            if "fig" in self.__dict__.keys():
+                self.refresh_plot_interface(only='worldmap') 
+
+    def next_dataset(self,event=None):
+        """Activate the next dataset key (cyclic) via sel_dataset().
+
+        event: unused widget-callback argument (button handler signature).
+        """
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+    def prev_dataset(self,event=None):
+        """Activate the previous dataset key (cyclic) via sel_dataset().
+
+        event: unused widget-callback argument (button handler signature).
+        """
+        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
+        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
+        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
+
+
+    def sel_dataset(self,inputkey):
+        """Make *inputkey* the active worldmap dataset and resync the views.
+
+        Updates the worldmap and stats frames, jumps the worldmap to the
+        dataset time at/after the current profile's datetime, and refreshes
+        the affected panels if the figure has already been created.
+        """
+        self.frames['worldmap']['inputkey'] = inputkey
+        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
+        self.goto_datetime_worldmap(
+            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+            'after')# get nearest datetime of the current dataset to the profile
+        if "fig" in self.__dict__.keys():
+            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+       
+    # def prev_station(self,event=None):
+    #     self.istation = (self.istation - 1) % self.stations.shape[0]
+    #     self.update_station()
+
+
+
+
+    #def update_datetime(self):
+    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
+    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
+    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
+    #        print(self.worldmapfocus['DT'])
+    #        self.refresh_plot_interface(only='worldmap')
+
+    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
+
+        #print('r1')
+        for argkey in args.keys():
+            self.__dict__[arg] = args[argkey]
+
+        axes = self.axes
+        tbox = self.tbox
+        frames = self.frames
+        fig = self.fig
+ 
+        if (only is None) or ('worldmap' in only):
+            globaldata = self.globaldata
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+            else:
+                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+            keystotranspose = ['lat','lon']
+            for key in dict(datasetxr.dims).keys():
+                if key not in keystotranspose:
+                    keystotranspose.append(key)
+
+            datasetxr = datasetxr.transpose(*keystotranspose)
+            datasetxr = datasetxr.sortby('lat',ascending=False)
+
+            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+            lonleft = lonleft - 360.
+            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+            label = 'worldmap'
+            axes[label].clear()
+            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+
+        if (only is None) or ('worldmap' in only):
+            #if 'axmap' not in self.__dict__ :
+            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
+            #else:
+
+            #stations = self.stations
+
+
+            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
+            #     resolution = 'l', 
+            # area_thresh = 0.1,
+            #     llcrnrlon=-180., llcrnrlat=-90.0,
+            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
+            # 
+            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
+            # self.gmap.drawcountries(color='white',linewidth=0.3)
+            # #self.gmap.fillcontinents(color = 'gray')
+            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
+            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
+            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
+            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
+            # #self.ax5.shadedrelief()
+
+           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
+
+
+            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+
+            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
+            if 'lev' in field.dims:
+                field = field.isel(lev=-1)
+
+            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+            axes[label].axis('off')
+
+            from matplotlib import cm
+            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+            
+            
+            title=frames['worldmap']['inputkey']
+            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            axes[label].set_title(title)
+
+            label ='worldmap_colorbar'
+            axes[label].clear()
+            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+
+
+            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+            # x,y = self.gmap(lons,lats)
+            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+
+        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+
+            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            store_xlim = {}
+            store_ylim = {}
+            for ikey, key in enumerate(statskeys_out):
+                if (only is not None) and ('stats_lightupdate' in only):
+                    store_xlim[key] = axes['stats_'+key].get_xlim()
+                    store_ylim[key] = axes['stats_'+key].get_ylim()
+                self.axes['stats_'+key].clear()    
+
+            label = 'times'
+            self.axes[label].clear()
+
+            key = 'dthetadt'
+            x = self.frames['stats']['records_all_stations_ini']['datetime']
+            #print(x)
+            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            #print(y)
+            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            #print(z)
+
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            self.axes[label].data[label] = self.axes[label].scatter(x.values,
+                                                                    y.values,
+                                                                    c=z.values,
+                                                                    cmap=self.statsviewcmap,
+                                                                    s=2,
+                                                                    vmin=0.,
+                                                                    vmax=1.,
+                                                                    alpha=alpha_cloud_pixels)
+
+            
+            x = self.frames['stats']['records_current_station_ini']['datetime']
+            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+
+            x = self.frames['profiles']['records_current_station_ini']['datetime']
+            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
+            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
+
+            for ikey, key in enumerate(statskeys_out):
+
+                # show data of all stations
+                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                qvalmax = x.quantile(0.999)
+                qvalmin = x.quantile(0.001)
+                print('applying extra filter over extreme values for plotting stats')
+                selx = (x >= qvalmin) & (x < qvalmax)
+                sely = (x >= qvalmin) & (x < qvalmax)
+                x = x[selx & sely]
+                y = y[selx & sely]
+                z = z[selx & sely]
+                self.axes['stats_'+key].data['stats_'+key] = \
+                       self.axes['stats_'+key].scatter(x,y, c=z,\
+                                cmap=self.statsviewcmap,\
+                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
+
+                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
+
+                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
+                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
+                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
+                                cmap=self.statsviewcmap,\
+                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
+
+                if len(x) > 1:
+                    fit = np.polyfit(x, y, deg=1)
+                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
+                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
+
+                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
+                y = self.frames['stats']['current_record_mod_stats'][key]
+                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
+
+                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
+                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
+                    axes['stats_'+key].annotate(text, \
+                                               xy=(x,y),\
+                                               xytext=(0.05,0.05),\
+                                               textcoords='axes fraction',\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               color='white',\
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                # self.axes['stats_'+key].data[key+'_current_record'] = \
+                #        self.axes['stats_'+key].scatter(x,y, c=z,\
+                #                 cmap=self.statsviewcmap,\
+                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
+
+                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
+                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
+                # # highlight data for curent station
+                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+
+                if ikey == len(statskeys_out)-1:
+                    self.axes['stats_'+key].set_xlabel('external')
+                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
+                axes['stats_'+key].set_ylabel('model')
+
+
+                if (only is not None) and ('stats_lightupdate' in only):
+                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
+                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
+                else:
+                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
+                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
+                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
+                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
+                self.abline(1,0,axis=self.axes['stats_'+key])
+
+        if (only is None) or ('stats_colorbar' in only):
+            label ='stats_colorbar'
+            axes[label].clear()
+            import matplotlib as mpl
+            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
+            self.axes[label].fields[label] = \
+             mpl.colorbar.ColorbarBase(self.axes[label],\
+                        orientation='horizontal',\
+                        label="percentile of "+self.frames['worldmap']['inputkey'],
+                        alpha=1.,
+                                cmap=self.statsviewcmap,\
+                                       norm=norm
+                         )
+
+        #print('r1')
+        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
+            #print('r2')
+            label = 'worldmap_stations'
+            axes[label].clear()
+            
+            stations = self.frames['worldmap']['stations'].table
+            globaldata = self.globaldata
+            
+            key = label
+
+            #print('r3')
+            if (stations is not None):
+                xlist = []
+                ylist = []
+                #print('r4')
+                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
+            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    xlist.append(x)
+                    ylist.append(y)
+                #picker is needed to make it clickable (pick_event)
+                axes[label].data[label] = axes[label].scatter(xlist,ylist,
+                                                              c='r', s=15,
+                                                              picker = 15,
+                                                              label=key,
+                                                              edgecolor='k',
+                                                              linewidth=0.8)
+
+            # cb.set_label('Wilting point [kg kg-3]')
+                #print('r5')
+
+                
+            #     xseries = []
+            #     yseries = []
+            #     for iSTN,STN in stations.iterrows():
+            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
+            #         xseries.append(x)                    
+            #         yseries.append(y)
+            #         
+            #         
+            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
+                    
+                if ('current_station' in frames['worldmap']):
+                    #print('r5')
+                    STN = frames['stats']['current_station']
+                    STNID = frames['stats']['STNID']
+                    #print('r5')
+
+                    x,y = len(axes['worldmap'].lon)* \
+                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
+                          len(axes['worldmap'].lat)* \
+                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
+                    #print('r6')
+                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
+                    #                          == \
+                    #                          self.frames['worldmap']['STNID'])\
+                    #                         & \
+                    #                         (self.seltablestats['DT'] \
+                    #                          == self.axes['statsview0].focus['DT']) \
+                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
+                    #print('r7')
+                    text = 'STNID: '+ format(STNID,'10.0f') + \
+                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
+                            ', LON: '+format(STN['longitude'],'3.3f')+ \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+
+                            #+', VAL: '+format(VAL,'.3e')
+
+                    axes[label].scatter(x,y, c='r', s=30,\
+                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
+                    #print('r8')
+            
+                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
+                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
+                    #colorstation = max((min((1.,colorstation)),0.))
+                    colorstation =0.2
+                    from matplotlib import cm
+                    axes[label].annotate(text,
+                                         xy=(x,y),
+                                         xytext=(0.05,0.05),
+                                         textcoords='axes fraction', 
+                                         bbox=dict(boxstyle="round",
+                                         fc = cm.viridis(colorstation)),
+                                         arrowprops=dict(arrowstyle="->",
+                                                         linewidth=1.1),
+                                         color='white' if colorstation < 0.5 else 'black')
+                    #print('r9')
+
+                    # #pos = sc.get_offsets()[ind["ind"][0]]
+                    # 
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label].data[label+'statannotate'].set_text(text)
+                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
+                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
+            #print('r9')
+            axes[label].axis('off')
+            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
+            axes[label].set_ylim((len(axes['worldmap'].lat),0))
+            #print('r10')
+
+        if (only is None) or ('profiles' in only): 
+            #print('r11')
+
+            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
+            # # self.update_station(goto_first_sounding=False)
+            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
+            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
+            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
+            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
+
+            label = 'air_ap:theta'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # +\
+                # ' -> '+ \
+                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+            
+            
+            
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+            #print('r12')
+
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
+            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
+            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            #print('r13')
+            # 
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r14')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+
+            #print('r15')
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+                          
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r16')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r17')
+            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            print(hmax)
+            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            if valid_mod:
+
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="mod "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+
+            #print('r18')
+            axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('theta [K]')
+
+            label = 'air_ap:q'
+            axes[label].clear()
+
+            tbox['datetime'].set_text(\
+                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+            # 
+
+            #print('r19')
+            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
+            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # 
+            if valid_mod:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            else:
+                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
+                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+            # 
+            #print('r20')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+            #print('r21')
+
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            zco = range(zidxmax)
+
+
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                             label="obs "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+            zco = range(zidxmax)
+
+            #print('r23')
+            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                             label="fit "+\
+                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                             +'LT')
+
+            #print('r24')
+            if valid_mod:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zco = range(zidxmax)
+                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                                 label="fit ")#+\
+                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #+'LT')
+            #print('r25')
+            #axes[label].legend()
+
+            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
+            #axes[label].set_ylabel('height [m]')
+            axes[label].set_xlabel('q [kg/kg]')
+
+            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
+
+            # #pl.subplots_adjust(right=0.6)
+
+            # label = 'q_pro'
+            # axes[label].clear()
+
+            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
+            # 
+            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
+            # 
+            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
+
+            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
+            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
+            # 
+            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
+            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
+            # 
+            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
+            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
+            # #pl.subplots_adjust(right=0.6)
+            # axes[label].set_xlabel('specific humidity [kg/kg]')
+ 
+
+            #print('r26')
+            time = self.frames['profiles']['record_yaml_mod'].out.time
+            for ilabel,label in enumerate(['h','theta','q']):
+                axes["out:"+label].clear()
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].set_ylabel(label)
+                if ilabel == 2:
+                    axes["out:"+label].set_xlabel('local sun time [h]')
+                
+            #print('r27')
+            label = 'SEB'
+            axes[label].clear()
+            
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
+            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
+            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
+                
+            #print('r28')
+            
+            axes[label].legend()
+            
+            #         for ax in self.fig_timeseries_axes:
+#             ax.clear()
+#         
+#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
+#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
+#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
+#         #print(self.morning_sounding.c4gl.out.Swin)
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
+#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
+#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
+#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
+#         self.fig_timeseries_axes[3].legend()
+#         self.fig.canvas.draw()
+            
+
+
+
+
+
+
+        #self.ready()
+        #print('r29')
+        fig.canvas.draw()
+        #fig.show()
+
+        self.axes = axes
+        self.tbox = tbox
+        self.fig = fig
+
+    def on_pick(self,event):
+        """Matplotlib ``pick_event`` callback: jump the interface to the
+        picked station or record.
+
+        The picked artist is identified through its label (the axes object
+        itself is not directly recoverable from a pick event — see the
+        workaround note below).  Depending on that label the handler either
+        switches the focused station (worldmap picks) or switches the
+        focused sounding record, re-opening the matching ini/mod/afternoon
+        yaml files (stats picks).
+
+        Parameters
+        ----------
+        event :
+            matplotlib ``PickEvent``; ``event.artist`` carries the label
+            used for dispatching and ``event.ind`` the indices of the
+            picked data points.
+        """
+        #print("HELLO")
+        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
+        #self.axes['theta_pro'].clear()
+        #self.axes['q_pro'].clear()
+        
+
+        # Workaround: the axes label cannot be tracked from the pick event
+        # itself, only the label of the picked data points.  Behaviour of
+        # this handler depends on which axes was clicked, so build a mapping
+        # from data-point labels ('key') to the axes they belong to.
+        keys_to_axes = {}
+        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
+            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
+
+        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
+        keys_to_axes['worldmap'] = 'worldmap'
+        
+        axes = self.axes
+        #nstatsview = self.nstatsview
+        #statsviewcmap = self.statsviewcmap
+        stations = self.frames['worldmap']['stations'].table
+
+
+        #print("p1")
+        current = event
+        artist = event.artist
+        
+        # the artist label tells us which data series (and hence which axes)
+        # was picked
+        selkey = artist.get_label()
+        
+        #print(keys_to_axes)
+        
+        label = keys_to_axes[selkey]
+        #print("HELLO",selkey,label)
+
+        # # Get to know in which axes we are
+        # label = None
+        # for axeskey in axes.keys():
+        #     if event.inaxes == axes[axeskey]:
+        #         label = axeskey
+        #         
+
+        # cont, pos = None, None
+        
+        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
+        ind = event.ind
+        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
+        # Recover the picked point coordinates from the first scatter
+        # collection of the target axes instead (presumably the picked
+        # artist is that collection — TODO confirm).
+        d = axes[label].collections[0]
+        #d.set_offset_position('data')
+        xy = d.get_offsets()
+        x, y =  xy[:,0],xy[:,1]
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+
+        #print("p2")
+        if len(ind) > 0:
+            #print("p3")
+            # coordinates of the first picked point
+            pos = x[ind[0]], y[ind[0]]
+
+            #if label[:-1] == 'statsview':
+            #    #seltablestatsstdrel = self.seltablestatsstdrel
+            #    #seltablestatspct = self.seltablestatspct
+
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    
+            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
+            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+            #    
+            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
+            #el
+            if (label == 'worldmap') or (label == 'worldmap_stations'):
+                self.hover_active = False
+                if (self.frames['worldmap']['STNID'] !=
+                    self.frames['profiles']['STNID']):
+                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
+                # so we just need to perform update_station
+                    self.update_station()
+            elif (label[:5] == 'stats'):
+
+                self.hover_active = False
+                # only act when the hovered record differs from the record
+                # currently shown in the profiles pane
+                if (self.frames['stats']['STNID'] !=
+                self.frames['profiles']['STNID']) or \
+                   (self.frames['stats']['current_record_chunk'] != 
+                    self.frames['profiles']['current_record_chunk']) or \
+                   (self.frames['stats']['current_record_index'] != 
+                    self.frames['profiles']['current_record_index']):
+
+
+
+                    # propagate the station selected in the stats pane to the
+                    # worldmap pane ...
+                    for key in ['STNID','current_station','stations_iterator']: 
+                        self.frames['worldmap'][key] = self.frames['stats'][key] 
+
+                    # ... and the full stats state to the profiles pane
+                    for key in self.frames['stats'].keys():
+                        self.frames['profiles'][key] = self.frames['stats'][key]
+
+                    STNID = self.frames['profiles']['STNID']
+                    chunk = self.frames['profiles']['current_record_chunk']
+                    # (re-)open the yaml files of the newly focused
+                    # station/chunk, closing any previously opened handles
+                    # first to avoid leaking file descriptors
+                    if 'current_station_file_ini' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_ini'].close()
+                    self.frames['profiles']['current_station_file_ini'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+                    if 'current_station_file_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_mod'].close()
+                    self.frames['profiles']['current_station_file_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_afternoon'].close()
+                    self.frames['profiles']['current_station_file_afternoon'] = \
+                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+
+                    # go to hovered record of current station
+                    self.frames['profiles']['records_iterator'] = \
+                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # ... and go to the record of the profile window (last one that
+                    # was picked by the user)
+                    # NOTE: STNID/chunk/index are reassigned by the iterator
+                    # below, shadowing the values read above.
+                    found = False
+                    EOF = False
+                    while (not found) and (not EOF):
+                        try:
+                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
+                            #print("hello*")
+                            #print(self.frames['profiles']['current_record_index'])
+                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
+                               (index == self.frames['profiles']['current_record_index']) and \
+                               (STNID == self.frames['profiles']['STNID']):
+                                #print('found!')
+                                found = True
+                        except StopIteration:
+                            EOF = True
+                    if found:
+                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_chunk'] = chunk
+                        self.frames['stats']['current_record_index'] = index
+                    # # for the profiles we make a distinct record iterator, so that the
+                    # # stats iterator can move independently
+                    # self.frames['profiles']['records_iterator'] = \
+                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    # (self.frames['profiles']['STNID'] , \
+                    # self.frames['profiles']['current_record_index']) , \
+                    # self.frames['profiles']['current_record_mod'] = \
+                    #                 self.frames['profiles']['records_iterator'].__next__()
+
+
+                    # for the profiles we make a distinct record iterator, so that the
+                    # stats iterator can move independently
+
+                    self.update_record()
+
+
+
+    def on_plot_hover(self,event):
+        axes = self.axes
+        #print('h1')
+
+        # Get to know in which axes we are
+        label = None
+        for axeskey in axes.keys():
+            if event.inaxes == axes[axeskey]:
+                label = axeskey
+                
+        #print('h2')
+
+        cont, pos = None, None
+        #print (label)
+        
+        if label is not None:
+            if  ('data' in axes[label].__dict__.keys()) and \
+                (label in axes[label].data.keys()) and \
+                (axes[label].data[label] is not None):
+                
+                #print('h3')
+                cont, ind =  axes[label].data[label].contains(event)
+                selkey = axes[label].data[label].get_label()
+                if len(ind["ind"]) > 0:
+                    #print('h4')
+                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
+                    #print('pos',pos,selkey)
+
+
+                    #if label[:-1] == 'statsview':
+                    #    seltablestatsstdrel = self.seltablestatsstdrel
+                    #    seltablestatspct = self.seltablestatspct
+
+                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
+                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
+                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
+                    #    self.hover_active = True
+                    #    
+                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
+                    #    
+                    #el
+                    #print(label[:5])
+                    if (label[:5] == 'stats') or (label == 'times'):
+                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
+                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
+                        
+
+                        if label[:5] == 'stats':
+                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            (self.frames['stats']['STNID'] ,
+                             self.frames['stats']['current_record_chunk'], 
+                             self.frames['stats']['current_record_index']) = \
+                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                        # elif label[:5] == 'stats':
+                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
+                        #     (self.frames['stats']['STNID'] ,
+                        #      self.frames['stats']['current_record_chunk'], 
+                        #      self.frames['stats']['current_record_index']) = \
+                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+
+
+                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+                        
+                        # # TO TEST: should be removed, since it's is also done just below
+                        # self.frames['stats']['stations_iterator'] = \
+                        #     self.frames['worldmap']['stations_iterator'] 
+                
+                
+                        # self.goto_datetime_worldmap(
+                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
+                        #     'after')
+
+
+                        # scrolling to the right station
+                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                        EOF = False
+                        found = False
+                        while (not found and not EOF):
+                            if (STNID == self.frames['stats']['STNID']):
+                                   found = True 
+                            if not found:
+                                try:
+                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
+                                except (StopIteration):
+                                    EOF = True
+                        if found:
+                        #    self.frames['stats']['STNID'] = STNID
+                            self.frames['stats']['current_station'] =  station
+
+                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
+                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
+
+
+                        # generate index of the current station
+                        self.frames['stats']['records_current_station_index'] = \
+                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                             == self.frames['stats']['STNID'])
+
+
+                        tab_suffixes = \
+                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            self.frames['stats']['records_current_station'+tab_suffix] = \
+                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+
+                        # go to hovered record of current station
+                        self.frames['stats']['records_iterator'] = \
+                                        records_iterator(self.frames['stats']['records_current_station_mod'])
+
+
+                        # ... and go to the record of the profile window (last one that
+                        # was picked by the user)
+                        found = False
+                        EOF = False
+                        while (not found) and (not EOF):
+                            try:
+                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                                #print("hello*")
+                                #print(self.frames['profiles']['current_record_index'])
+                                if (index == self.frames['stats']['current_record_index']) and \
+                                   (chunk == self.frames['stats']['current_record_chunk']) and \
+                                   (STNID == self.frames['stats']['STNID']):
+                                    #print('found!')
+                                    found = True
+                            except StopIteration:
+                                EOF = True
+                        if found:
+                            #print('h5')
+                            self.frames['stats']['current_record_mod'] = record
+                            self.frames['stats']['current_record_chunk'] = chunk
+                            self.frames['stats']['current_record_index'] = index
+
+                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
+                        tab_suffixes = \
+                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        for tab_suffix in tab_suffixes:
+                            #print(tab_suffix)
+                            #print(self.frames['stats']['records_current_station'+tab_suffix])
+                            self.frames['stats']['current_record'+tab_suffix] =  \
+                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                      (self.frames['stats']['STNID'] , \
+                                       self.frames['stats']['current_record_chunk'] , \
+                                       self.frames['stats']['current_record_index'])]
+
+
+                        self.hover_active = True
+                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                        # print('h13')
+                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
+                        #     self.goto_datetime_worldmap(
+                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
+                        #         'after')
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap',
+                        #                                           'profiles'])
+                        # else:
+                        #     if "fig" in self.__dict__.keys():
+                        #         self.refresh_plot_interface(only=['stats_lightupdate',
+                        #                                           'worldmap_stations',
+                        #                                           'profiles'])
+
+
+
+                    elif label in ['worldmap_stations','worldmap']:
+                        #print('h5')
+
+                        if (self.axes['worldmap'].lat is not None) and \
+                           (self.axes['worldmap'].lon is not None):
+
+
+                            #self.loading()
+                            self.fig.canvas.draw()
+                            self.fig.show()
+
+
+                            # get position of 
+                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
+                                                                 self.axes['worldmap'].lat[0]) + \
+                                           self.axes['worldmap'].lat[0],4)
+                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
+                                                                 self.axes['worldmap'].lon[0]) + \
+                                           self.axes['worldmap'].lon[0],4)
+                        
+                            stations = self.frames['worldmap']['stations'].table
+                            #print('h7')
+                        
+                            #reset stations iterator:
+                            # if 'stations_iterator' in self.frames['worldmap'].keys():
+                            #     self.frames['worldmap']['stations_iterator'].close()
+                            #     del(self.frames['worldmap']['stations_iterator'])
+                            # if 'stations_iterator' in self.frames['stats'].keys():
+                            #     self.frames['stats']['stations_iterator'].close()
+                            #     del(self.frames['stats']['stations_iterator'])
+                            self.frames['worldmap']['stations_iterator'] =\
+                               stations_iterator(self.frames['worldmap']['stations'])
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                            EOF = False
+                            found = False
+                            while (not found and not EOF):
+                                #print('h8',station.latitude,latmap)
+                                #print('h8',station.longitude,lonmap)
+                                if (round(station.latitude,3) == round(latmap,3)) and \
+                                    (round(station.longitude,3) == round(lonmap,3)):
+                                       found = True 
+                                if not found:
+                                    try:
+                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                                    except (StopIteration):
+                                        EOF = True
+                            if found:
+                                self.frames['worldmap']['STNID'] = STNID
+                                self.frames['worldmap']['current_station'] = \
+                                        station
+                        
+                            self.frames['stats']['stations_iterator'] = \
+                                self.frames['worldmap']['stations_iterator'] 
+                            #print('h8')
+                            # inherit station position for the stats frame...
+                            for key in self.frames['worldmap'].keys():
+                                self.frames['stats'][key] = self.frames['worldmap'][key]
+                                
+                            ## fetch records of current station...
+                            #self.frames['stats']['records_current_station_mod'] =\
+                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                            # ... and their indices
+                            self.frames['stats']['records_current_station_index'] = \
+                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                                     == \
+                                     self.frames['stats']['current_station'].name)
+
+
+                            tab_suffixes = \
+                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['records_current_station'+tab_suffix] = \
+                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+
+                            # ... create a record iterator ...
+                            #self.frames['stats']['records_iterator'].close()
+                            del(self.frames['stats']['records_iterator'])
+                            self.frames['stats']['records_iterator'] = \
+                                self.frames['stats']['records_current_station_mod'].iterrows()
+
+
+
+                        
+                            #print('h9')
+                            # ... and go to to the first record of the current station
+                            (self.frames['stats']['STNID'] , \
+                             self.frames['stats']['current_record_chunk'] , \
+                             self.frames['stats']['current_record_index']) , \
+                            self.frames['stats']['current_record_mod'] = \
+                                self.frames['stats']['records_iterator'].__next__()
+                        
+
+
+
+                            #print('h10')
+                            # cash the current record
+                            tab_suffixes = \
+                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                            for tab_suffix in tab_suffixes:
+                                self.frames['stats']['current_record'+tab_suffix] =  \
+                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                                          (self.frames['stats']['STNID'] , \
+                                           self.frames['stats']['current_record_chunk'] , \
+                                           self.frames['stats']['current_record_index'])]
+
+                            #print('h11')
+                            
+                            self.hover_active = True
+                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
+                            #print('h13')
+
+                        
+
+            #if (stations is not None):
+            #    for iSTN,STN in stations.iterrows():
+            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
+            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
+            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
+            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
+
+        # self.fig.show()
+ 
+        # we are hovering on nothing, so we are going back to the position of
+        # the profile sounding
+        if pos is None:
+            if self.hover_active == True:
+                #print('h1*')
+                
+                #self.loading()
+                # to do: reset stations iterators
+
+                # get station and record index from the current profile
+                for key in ['STNID', 'current_station']:
+                    self.frames['stats'][key] = self.frames['profiles'][key]
+
+                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
+                self.frames['stats']['current_station'] = \
+                        self.frames['profiles']['current_station']
+                #print('h3a*')
+                self.frames['stats']['records_current_station_mod'] = \
+                        self.frames['profiles']['records_current_station_mod']
+                #print('h3b*')
+
+                # the next lines recreate the records iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+
+                # reset stations iterator...
+                #self.frames['stats']['records_iterator'].close()
+                del(self.frames['stats']['records_iterator'])
+                self.frames['stats']['records_iterator'] = \
+                    self.frames['stats']['records_current_station_mod'].iterrows()
+                #print('h4*')
+
+                # ... and go to the record of the profile window (last one that
+                # was picked by the user)
+                found = False
+                EOF = False
+                while (not found) and (not EOF):
+                    try:
+                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
+                        #print("hello*")
+                        #print(self.frames['profiles']['current_record_index'])
+                        #print(self.frames['profiles']['STNID'])
+                        #print(STNID,index)
+                        if (index == self.frames['profiles']['current_record_index']) and \
+                            (chunk == self.frames['profiles']['current_record_chunk']) and \
+                            (STNID == self.frames['profiles']['STNID']):
+                            #print('found!')
+                            found = True
+                    except StopIteration:
+                        EOF = True
+                if found:
+                    #print('h5*')
+                    self.frames['stats']['current_record_mod'] = record
+                    self.frames['stats']['current_record_chunk'] = chunk
+                    self.frames['stats']['current_record_index'] = index
+
+                #print('h6*')
+
+
+
+                # # fetch records of current station...
+                # self.frames['stats']['records_current_station_mod'] =\
+                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+
+                # ... and their indices
+                self.frames['stats']['records_current_station_index'] = \
+                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
+                         == \
+                         self.frames['stats']['current_station'].name)
+
+
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['records_current_station'+tab_suffix] = \
+                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
+
+                
+
+                # cash the records of the current stations
+                tab_suffixes = \
+                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                for tab_suffix in tab_suffixes:
+                    self.frames['stats']['current_record'+tab_suffix] =  \
+                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
+                              (self.frames['stats']['STNID'] , \
+                               self.frames['stats']['current_record_chunk'] , \
+                               self.frames['stats']['current_record_index'])]
+
+
+                # the next lines recreate the stations iterator. Probably it's
+                # better to just copy the profile iterator and its position to
+                # the worldmap/stats 
+                #print('h7*')
+
+                # reset the stations iterators
+                for framekey in ['stats','worldmap']:
+                    ##print(framekey)
+                    if 'stations_iterator' in self.frames[framekey]:
+                        #self.frames[framekey]['stations_iterator'].close()
+                        del(self.frames[framekey]['stations_iterator'])
+
+                self.frames['worldmap']['current_station'] = \
+                        self.frames['profiles']['current_station']
+
+                #recreate the stations iterator for the worldmap...
+                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
+
+                # ... and go the position of the profile
+                #print('h8*')
+                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                EOF = False
+                found = False
+                while (not found and not EOF):
+                    if STNID == self.frames['profiles']['STNID'] :
+                        found = True 
+                    if not found:
+                        try:
+                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
+                        except (StopIteration):
+                            EOF = True
+                if found:
+                    self.frames['worldmap']['current_station'] = station
+                    self.frames['worldmap']['STNID'] = STNID
+                #print('h9*')
+                self.frames['stats']['stations_iterator'] = \
+                    self.frames['worldmap']['stations_iterator'] 
+
+                # the stats window now inherits the current station from the
+                # worldmap
+                for key in ['STNID','current_station','stations_iterator']: 
+                    self.frames['stats'][key] = self.frames['worldmap'][key] 
+                #print('h10*')
+
+                # # we now only need inherit station position and go to first record
+                # for key in self.frames['worldmap'].keys():
+                #     self.frames['stats'][key] = self.frames['worldmap'][key]
+
+                # self.frames['stats']['records_current_station'] =\
+                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
+
+                # #print(self.frames['stats']['records_current_station'])
+                # self.frames['stats']['records_iterator'] = \
+                #                 self.frames['stats']['records_current_station'].iterrows()
+                # (self.frames['stats']['STNID'] , \
+                # self.frames['stats']['current_record_index']) , \
+                # self.frames['stats']['current_record_mod'] = \
+                #                 self.frames['stats']['records_iterator'].__next__()
+                
+
+
+
+
+
+
+                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
+                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
+                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
+                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
+                self.hover_active = False
+                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
+    # def loading(self):
+    #     self.tbox['loading'].set_text('Loading...')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+    #     sleep(0.1)
+    # def ready(self):
+    #     self.tbox['loading'].set_text('Ready')
+    #     self.fig.canvas.draw()
+    #     self.fig.show()
+
+
+
diff --git a/lib/model.py b/lib/model.py
new file mode 100644
index 0000000..8760411
--- /dev/null
+++ b/lib/model.py
@@ -0,0 +1,2214 @@
+# 
+# CLASS
+# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
+# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
+# Copyright (c) 2011-2015 Chiel van Heerwaarden
+# Copyright (c) 2011-2015 Bart van Stratum
+# Copyright (c) 2011-2015 Kees van den Dries
+# 
+# This file is part of CLASS
+# 
+# CLASS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# 
+# CLASS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with CLASS.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import copy as cp
+import numpy as np
+import sys
+import warnings
+import pandas as pd
+from ribtol_hw import zeta_hs2 , funcsche
+import logging
+#from SkewT.thermodynamics import Density
+#import ribtol
+
# gravitational acceleration [m s-2]
grav = 9.81


def esat(T):
    """Saturation vapour pressure [Pa] at absolute temperature T [K].

    Uses the Tetens-type exponential fit over liquid water.
    """
    numerator = 17.2694 * (T - 273.16)
    denominator = T - 35.86
    return 611.0 * np.exp(numerator / denominator)


def qsat(T, p):
    """Saturation specific humidity [kg kg-1] at temperature T [K] and pressure p [Pa]."""
    return 0.622 * esat(T) / p
+
+
def ribtol(Rib, zsl, z0m, z0h): 
    """Invert the bulk Richardson number to obtain the Obukhov length L [m].

    Solves, by Newton-Raphson iteration, the implicit surface-layer relation

        Rib = zsl/L * (ln(zsl/z0h) - psih(zsl/L) + psih(z0h/L))
                    / (ln(zsl/z0m) - psim(zsl/L) + psim(z0m/L))**2

    for L, approximating the derivative by a finite difference over
    +/- 0.1% of the current estimate of L.

    Parameters:
        Rib : bulk Richardson number [-]
        zsl : surface-layer reference height [m]
        z0m : roughness length for momentum [m]
        z0h : roughness length for heat [m]

    Returns:
        Obukhov length L [m]; the sign of the initial guess (and hence of
        the result) follows the sign of Rib (stable: L > 0, unstable: L < 0).
        Iteration stops when successive estimates agree within 0.001, or is
        aborted once |L| exceeds 1e12 (near-neutral conditions, L diverges).
    """
    # force double precision so the iteration behaves the same for any input type
    Rib = np.float64(Rib)
    zsl = np.float64(zsl)
    z0m = np.float64(z0m)
    z0h = np.float64(z0h)
    #print(Rib,zsl,z0m,z0h)
    # initial guesses: L and L0 just need to differ enough to enter the loop;
    # the sign encodes the stratification regime
    if(Rib > 0.):
        L    = 1.
        L0   = 2.
    else:
        L  = -1.
        L0 = -2.
    #print(Rib,zsl,z0m,z0h)
    while (abs(L - L0) > 0.001):
        L0      = L
        # residual of the implicit Rib(L) relation at the current estimate
        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
        # numerical derivative d(fx)/dL via central difference at L*(1 -/+ 0.001)
        Lstart  = L - 0.001*L
        Lend    = L + 0.001*L
        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
        # Newton step
        L       = L - fx / fxdif
        #print(L,fx/fxdif)
        # bail out when L diverges (near-neutral case)
        if(abs(L) > 1e12):
            break

    return L
+  
def psim(zeta):
    """Integrated stability correction for momentum, psi_m(zeta).

    zeta = z/L is the dimensionless stability parameter.  The unstable /
    neutral branch (zeta <= 0) uses the (1 - 16*zeta)**(1/4) form; the
    stable branch uses an exponential formulation.
    """
    if zeta <= 0:
        phi = (1. - 16. * zeta) ** 0.25
        return (3.14159265 / 2. - 2. * np.arctan(phi)
                + np.log((1. + phi) ** 2. * (1. + phi ** 2.) / 8.))
    return (-2. / 3. * (zeta - 5. / 0.35) * np.exp(-0.35 * zeta)
            - zeta - (10. / 3.) / 0.35)
+  
def psih(zeta):
    """Integrated stability correction for heat, psi_h(zeta).

    zeta = z/L is the dimensionless stability parameter.  The unstable /
    neutral branch (zeta <= 0) uses the (1 - 16*zeta)**(1/4) form; the
    stable branch uses an exponential formulation.
    """
    if zeta <= 0:
        phi = (1. - 16. * zeta) ** 0.25
        return 2. * np.log((1. + phi * phi) / 2.)
    return (-2. / 3. * (zeta - 5. / 0.35) * np.exp(-0.35 * zeta)
            - (1. + (2. / 3.) * zeta) ** 1.5 - (10. / 3.) / 0.35 + 1.)
+ 
+class model:
    def __init__(self, model_input = None,debug_level=None):

        """Set up the model logger and copy the input into self.input.

        Parameters:
            model_input: either a class4gl-style input object (recognised by
                the presence of a 'pars' attribute, with optional 'air_ac' /
                'air_ap' profile tables) or a classical CLASS input object;
                None leaves the model without input.
            debug_level: optional logging level passed to the 'model' logger.
        """

        self.logger = logging.getLogger('model')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        """ initialize the different components of the model """ 

        if model_input is not None:
            # class4gl style input
            if 'pars' in model_input.__dict__.keys():

                # we make a reference to the full input first, so we can dump it
                # afterwards
                self.input_c4gl = model_input

                # we copy the regular parameters first. We keep the classical input
                # format as self.input so that we don't have to change the entire
                # model code.
                self.input = cp.deepcopy(model_input.pars)

                # we copy other sections we are interested in, such as profile
                # data, and store it also under input

                # I know we mess up a bit the structure of the class4gl_input, but
                # we will make it clean again at the time of dumping data

                # So here, we copy the profile data into self.input
                # 1. Air circulation data 
                if 'sw_ac' in self.input.__dict__.keys() \
                   and self.input.__dict__['sw_ac']:
                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']

                    # correct pressure of levels according to surface pressure
                    # error (so that interpolation is done in a consistent way)
                    # NOTE(review): irow takes index *labels* (iterated bottom-up)
                    # but .iloc is positional — this assumes air_ac has a default
                    # 0..n-1 RangeIndex; TODO confirm with the producer of air_ac.
                    # NOTE(review): chained assignment (.p.iloc[irow] = ...) may
                    # not write back on a copy in newer pandas — verify.

                    p_e = self.input.Ps - self.input.sp
                    for irow in self.input.air_ac.index[::-1]:
                       self.input.air_ac.p.iloc[irow] =\
                        self.input.air_ac.p.iloc[irow] + p_e
                       p_e = p_e -\
                       (self.input.air_ac.p.iloc[irow]+p_e)/\
                        self.input.air_ac.p.iloc[irow] *\
                        self.input.air_ac.delpdgrav.iloc[irow]*grav



                # 2. Air profile data (e.g. fitted balloon soundings)
                if 'sw_ap' in self.input.__dict__.keys() \
                   and self.input.__dict__['sw_ap']:
                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']

            # standard class input
            else:
                self.input = cp.deepcopy(model_input)
+
+    def load_yaml_dict(self,yaml_dict):
+        for key,data in yaml_dict.items():
+            if key == 'pars':
+                for keydata,value in data.items():
+                    self.__dict__[keydata] = value
+            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
+                self.__dict__[key] = pd.DataFrame(data)
+            #elif key == 'sources':
+            #    self.__dict__[key] = data
+            elif key == 'out':
+                # lets convert it to a list of dictionaries
+                dictouttemp = pd.DataFrame(data).to_dict('list')
+            else: 
+                 warnings.warn("Key '"+key+"' is be implemented.")
+            #     self.__dict__[key] = data
+
+
+        self.tsteps = len(dictouttemp['h'])
+        self.out = model_output(self.tsteps)
+        for keydictouttemp in dictouttemp.keys():
+            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+
+
+  
+    def run(self):
+        # initialize model variables
+        self.init()
+  
+        # time integrate model 
+        #for self.t in range(self.tsteps):
+        while self.t < self.tsteps:
+          
+            # time integrate components
+            self.timestep()
+  
+        # delete unnecessary variables from memory
+        self.exitmodel()
+    
+    def init(self):
+        # assign variables from input data
+        # initialize constants
+        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
+        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+        self.rho        = 1.2                   # density of air [kg m-3]
+        self.k          = 0.4                   # Von Karman constant [-]
+        self.g          = 9.81                  # gravity acceleration [m s-2]
+        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+        self.bolz       = 5.67e-8               # Bolzman constant [-]
+        self.rhow       = 1000.                 # density of water [kg m-3]
+        self.S0         = 1368.                 # solar constant [W m-2]
+
+        # A-Gs constants and settings
+        # Plant type:       -C3-     -C4-
+        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
+        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
+        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
+        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
+        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
+        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
+        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
+        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
+        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
+        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
+        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
+        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
+        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
+
+        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
+        self.mair       =  28.9;                # molecular weight air [g mol -1]
+        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
+
+        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
+        self.wmax       =  0.55;                # upper reference value soil water [-]
+        self.wmin       =  0.005;               # lower reference value soil water [-]
+        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
+        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
+
+        # Read switches
+        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
+        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
+        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
+        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
+        self.sw_sl      = self.input.sw_sl      # surface layer switch
+        self.sw_rad     = self.input.sw_rad     # radiation switch
+        self.sw_ls      = self.input.sw_ls      # land surface switch
+        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
+        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
+
+        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
+        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
+        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
+  
+        # initialize mixed-layer
+        self.h          = self.input.h          # initial ABL height [m]
+        self.Ps         = self.input.Ps         # surface pressure [Pa]
+        self.sp         = self.input.sp         # This is also surface pressure
+                                                #but derived from the global data [Pa]
+        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
+        self.ws         = None                  # large-scale vertical velocity [m s-1]
+        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
+        self.we         = -1.                   # entrainment velocity [m s-1]
+       
+         # Temperature 
+        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
+        
+        
+        self.substep    = False
+        self.substeps   = 0
+
+
+
+        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
+        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
+        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
+        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
+        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
+ 
+        self.wstar      = 0.                    # convective velocity scale [m s-1]
+ 
+        # 2m diagnostic variables 
+        self.T2m        = None                  # 2m temperature [K]
+        self.q2m        = None                  # 2m specific humidity [kg kg-1]
+        self.e2m        = None                  # 2m vapor pressure [Pa]
+        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
+        self.u2m        = None                  # 2m u-wind [m s-1]
+        self.v2m        = None                  # 2m v-wind [m s-1]
+ 
+        # Surface variables 
+        self.thetasurf  = self.input.theta      # surface potential temperature [K]
+        self.thetavsurf = None                  # surface virtual potential temperature [K]
+        self.qsurf      = None                  # surface specific humidity [g kg-1]
+
+        # Mixed-layer top variables
+        self.P_h        = None                  # Mixed-layer top pressure [pa]
+        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
+        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
+        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
+        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
+        self.dz_h       = None                  # Transition layer thickness [-]
+        self.lcl        = None                  # Lifting condensation level [m]
+
+        # Virtual temperatures and fluxes
+        self.thetav     = None                  # initial mixed-layer potential temperature [K]
+        self.dthetav    = None                  # initial virtual temperature jump at h [K]
+        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
+        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
+       
+        
+        
+        
+        
+        
+        # Moisture 
+        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
+
+        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
+        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
+        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
+  
+        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
+        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
+        self.e          = None                  # mixed-layer vapor pressure [Pa]
+        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
+        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
+      
+        
+        
+        # CO2
+        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
+        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
+        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
+        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
+        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
+        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
+        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
+        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
+        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
+       
+        # Wind 
+        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
+        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
+        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = self.input.advu       # advection of u-wind [m s-2]
+        
+        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
+        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
+        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = self.input.advv       # advection of v-wind [m s-2]
+         
+  # BEGIN -- HW 20170606
+        # z-coordinate for vertical profiles of stratification above the mixed-layer height
+
+        if self.sw_ac:
+        # this is the data frame with the grided profile on the L60 grid
+        # (subsidence, and advection) 
+            self.air_ac      = self.input.air_ac  # full level air circulation
+                                                  # forcing
+            # self.air_ach     = self.input.air_ach # half level air circulation
+            #                                       # forcing
+            
+
+        if self.sw_ap:
+        # this is the data frame with the fitted profile (including HAGL,
+        # THTA,WSPD, SNDU,WNDV PRES ...)
+            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
+
+            # just for legacy reasons...
+            if 'z' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
+            if 'p' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
+
+            indexh = np.where(self.air_ap.z.values == self.h)
+            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
+                raise ValueError("Error input profile consistency: mixed- \
+                                 layer height needs to be equal to the second \
+                                 and third \
+                                 level of the vertical profile input!")
+            # initialize q from its profile when available
+            p_old = self.Ps
+            p_new = self.air_ap.p[indexh[0][0]]
+            
+            if ((p_old is not None) & (p_old != p_new)):
+                print("Warning: Ps input was provided ("+str(p_old)+\
+                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+                    +str(p_new)+"Pa).")
+                                    
+            self.Ps = p_new
+            # these variables/namings are more convenient to work with in the code
+            # we will update the original variables afterwards
+            #self.air_ap['q'] = self.air_ap.QABS/1000.
+
+            self.air_ap = \
+                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
+            # we require the temperature fields, since we need to consider
+            # advection
+            # if self.sw_ac:
+            #     #self.air_ap['theta'] = self.air_ap['t'] *
+
+            #     # we consider self.sp in case of air-circulation input (for
+            #     # consistence)
+            #     self.air_ap['t'] = \
+            #                 self.air_ap.theta *  \
+            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
+            # else:
+            # we consider self.Ps in case of balloon input only 
+            self.air_ap = self.air_ap.assign(t = lambda x: \
+                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
+
+            #self.air_ap['theta'] = self.air_ap.THTA
+            if 'u' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
+            if 'v' not in list(self.air_ap.columns):
+                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
+
+            for var in ['theta','q','u','v']:
+
+                
+                if self.air_ap[var][1] != self.air_ap[var][0]:
+                    raise ValueError("Error input profile consistency: two \
+                                     lowest profile levels for "+var+" should \
+                                     be equal.")
+                
+                # initialize the value from its profile when available
+                value_old = self.__dict__[var]
+                value_new = self.air_ap[var][indexh[0][0]]
+                
+                if ((value_old is not None) & (value_old != value_new)):
+                    warnings.warn("Warning:  input was provided \
+                                     ("+str(value_old)+ "kg kg-1), \
+                                     but it is now overwritten by the first \
+                                     level (index 0) of air_ap]var\ which is \
+                                     different (" +str(value_new)+"K).")
+                                        
+                self.__dict__[var] = value_new
+
+                # make a profile of the stratification 
+                # please note that the stratification between z_pro[i] and
+                # z_pro[i+1] is given by air_ap.GTHT[i]
+
+                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
+                # np.gradient(self.z_pro)
+                with np.errstate(divide='ignore'):
+                    gammavar = list(np.array(self.air_ap[var][1:].values - \
+                                             self.air_ap[var][:-1].values) \
+                                    / np.array(self.air_ap['z'][1:].values - \
+                                               self.air_ap['z'][:-1].values))
+
+                # add last element twice (since we have one element less)
+                gammavar.append(gammavar[-1])
+                gammavar = np.array(gammavar)
+                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
+
+
+                self.__dict__['gamma'+var] = \
+                    self.air_ap['gamma'+var][np.where(self.h >= \
+                                                     self.air_ap.z)[0][-1]]
+
+
+
+        # the variable p_pro is just for diagnosis of lifted index
+            
+            
+
+            # input Ph is wrong, so we correct it according to hydrostatic equation
+            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
+
+            #if self.sw_ac:
+                # note that we use sp as surface pressure, which is determined
+                # from era-interim instead of the observations. This is to
+                # avoid possible failure of the interpolation routine
+                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
+                #                          + \
+                #                          list(self.air_ap.p[3:]))
+
+            # else:
+                # in the other case, it is updated at the time of calculting
+                # the statistics 
+
+# END -- HW 20170606      
+        #print(self.air_ap)
+
+        if self.sw_ac and not self.sw_ap:
+            raise ValueError("air circulation switch only possible when air \
+                             profiles are given")
+        
+        if self.sw_ac:
+
+            # # # we comment this out, because subsidence is calculated
+            # according to advection
+            # #interpolate subsidence towards the air_ap height coordinate
+            # self.air_ap['w'] = np.interp(self.air_ap.p,\
+            #                               self.air_ac.p,\
+            #                               self.air_ac.w) 
+            # #subsidence at the mixed-layer top
+            # self.w = self.air_ap.w[1]
+        
+            self.P_h    = self.Ps - self.rho * self.g * self.h
+            in_ml = (self.air_ac.p >= self.P_h)
+
+            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
+                # in case we didn't find any points, we just take the lowest one.
+                # actually, this can happen if ERA-INTERIM pressure levels are
+                # inconsistent with 
+                if in_ml.sum() == 0:
+                    warnings.warn(" no circulation points in the mixed layer \
+                                  found. We just take the bottom one.")
+                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
+
+                for var in ['t','q','u','v']:
+    
+                   # calculation of the advection variables for the mixed layer
+                   # we weight by the hydrostatic thickness of each layer and
+                   # divide by the total thickness
+                   self.__dict__['adv'+var] = \
+                            ((self.air_ac['adv'+var+'_x'][in_ml] \
+                             + \
+                             self.air_ac['adv'+var+'_y'][in_ml])* \
+                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                            self.air_ac['delpdgrav'][in_ml].sum()
+
+                   # calculation of the advection variables for the profile above
+                   # (lowest 3 values are not used by class)
+                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
+                   self.air_ap['adv'+var] = \
+                           np.interp(self.air_ap.p,\
+                                     self.air_ac.p,\
+                                     self.air_ac['adv'+var+'_x']) \
+                           + \
+                           np.interp(self.air_ap.p, \
+                                       self.air_ac.p, \
+                                       self.air_ac['adv'+var+'_y'])
+
+                # as an approximation, we consider that advection of theta in the
+                # mixed layer is equal to advection of t. This is a sufficient
+                # approximation since theta and t are very similar at the surface
+                # pressure.
+                self.__dict__['advtheta'] = self.__dict__['advt']
+
+
+            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
+            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
+            # # # CALCULATING THE ADVECTION PROFILES
+            # # interpolate subsidence x density
+            # self.air_ap['wrho'] = \
+            #            np.interp(self.air_ap.p,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) \
+            #     
+            # self.air_ap['w'] = \
+            #     self.air_ap['wrho']/(self.air_ap.p/ \
+            #                          (self.Rd*(1.-self.air_ap.q) + \
+            #                           self.Rv*self.air_ap.q)* \
+            #                          self.air_ap.TEMP)
+            # self.wrho = np.interp(self.P_h,\
+            #                      self.air_ach.p,\
+            #                      self.air_ach['wrho']) 
+            # self.ws   = self.air_ap.w.iloc[1]
+
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                self.air_ap = self.air_ap.assign(wp = 0.)
+                self.air_ap['wp'] = np.interp(self.air_ap.p, \
+                                              self.air_ac.p, \
+                                              self.air_ac['wp'])
+                self.air_ap = self.air_ap.assign(R = 0.)
+                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
+                                                     self.Rv*self.air_ap.q)
+                self.air_ap = self.air_ap.assign(rho = 0.)
+                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
+                
+                self.air_ap = self.air_ap.assign(w = 0.)
+                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
+                #print('hello w ini')
+
+                # Note: in case of sw_ac is False, we update it from prescribed
+                # divergence
+                self.ws   = self.air_ap.w[1]
+
+                # self.ws   = self.wrho/self.rho
+                # self.ws   = self.wrho/(self.P_h/ \
+                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
+                #                         self.theta) # this should be T!!!
+
+                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
+                #                         + \
+                #                         self.air_ac['divU_y'][in_ml])* \
+                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
+                #             self.air_ac['delpdgrav'][in_ml].sum() \
+        
+
+        # Tendencies 
+        self.htend      = None                  # tendency of CBL [m s-1]
+        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
+        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
+        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
+        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
+        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
+        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
+        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
+        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
+        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
+        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
+        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
+  
+        # initialize surface layer
+        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
+        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
+        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
+        self.z0m        = self.input.z0m        # roughness length for momentum [m]
+        self.z0h        = self.input.z0h        # roughness length for scalars [m]
+        self.Cm         = 1e12                  # drag coefficient for momentum [-]
+        self.Cs         = 1e12                  # drag coefficient for scalars [-]
+        self.L          = None                  # Obukhov length [m]
+        self.Rib        = None                  # bulk Richardson number [-]
+        self.ra         = None                  # aerodynamic resistance [s m-1]
+  
+        # initialize radiation
+        self.lat        = self.input.lat        # latitude [deg]
+        #self.fc         = self.input.fc         # coriolis parameter [s-1]
+        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
+        self.lon        = self.input.lon        # longitude [deg]
+        self.doy        = self.input.doy        # day of the year [-]
+        self.tstart     = self.input.tstart     # time of the day [-]
+        self.cc         = self.input.cc         # cloud cover fraction [-]
+        self.Swin       = None                  # incoming short wave radiation [W m-2]
+        self.Swout      = None                  # outgoing short wave radiation [W m-2]
+        self.Lwin       = None                  # incoming long wave radiation [W m-2]
+        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
+        self.Q          = self.input.Q          # net radiation [W m-2]
+        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
+  
+        # initialize land surface
+        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
+        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
+        self.T2         = self.input.T2         # temperature deeper soil layer [K]
+                           
+        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
+        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
+        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
+        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
+                           
+        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
+        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
+        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
+                           
+        self.C1sat      = self.input.C1sat      
+        self.C2ref      = self.input.C2ref      
+
+        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
+        
+        self.LAI        = self.input.LAI        # leaf area index [-]
+        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
+        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = self.input.alpha      # surface albedo [-]
+  
+        self.rs         = 1.e6                  # resistance transpiration [s m-1]
+        self.rssoil     = 1.e6                  # resistance soil [s m-1]
+                           
+        self.Ts         = self.input.Ts         # surface temperature [K]
+                           
+        self.cveg       = self.input.cveg       # vegetation fraction [-]
+        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
+        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
+        self.cliq       = None                  # wet fraction [-]
+                          
+        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
+  
+        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
+        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
+        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
+  
+        self.H          = None                  # sensible heat flux [W m-2]
+        self.LE         = None                  # evapotranspiration [W m-2]
+        self.LEliq      = None                  # open water evaporation [W m-2]
+        self.LEveg      = None                  # transpiration [W m-2]
+        self.LEsoil     = None                  # soil evaporation [W m-2]
+        self.LEpot      = None                  # potential evaporation [W m-2]
+        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
+        self.G          = None                  # ground heat flux [W m-2]
+
+        # initialize A-Gs surface scheme
+        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
+
+        # initialize cumulus parameterization
+        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
+        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
+        self.ac         = 0.                    # Cloud core fraction [-]
+        self.M          = 0.                    # Cloud core mass flux [m s-1] 
+        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
+  
+        # initialize time variables
+        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
+        self.dt     = self.input.dt
+        self.dtcur      = self.dt
+        self.firsttime = True
+        self.t      = 0
+ 
+        # Some sanity checks for valid input
+        if (self.c_beta is None): 
+            self.c_beta = 0                     # Zero curvature; linear response
+        assert(self.c_beta >= 0 or self.c_beta <= 1)
+
+        # initialize output
+        self.out = model_output(self.tsteps)
+ 
+        self.statistics()
+  
+        # calculate initial diagnostic variables
+        if(self.sw_rad):
+            self.run_radiation()
+ 
+        if(self.sw_sl):
+            for i in range(10): 
+                self.run_surface_layer()
+  
+        if(self.sw_ls):
+            self.run_land_surface()
+
+        if(self.sw_cu):
+            self.run_mixed_layer()
+            self.run_cumulus()
+        
+        if(self.sw_ml):
+            self.run_mixed_layer()
+
+    def timestep(self):
+        """Advance the model state over one (sub)timestep.
+
+        Order of operations: diagnostics (statistics), then the enabled
+        component models (radiation, surface layer, land surface, cumulus,
+        mixed layer) to obtain tendencies, then time integration of the
+        land-surface and mixed-layer equations.
+
+        The allowed step self.dtmax is limited so that the mixed-layer top
+        h cannot cross the first input-profile level above it (sw_ap).  If
+        the remaining time self.dtcur exceeds dtmax, only dtmax is
+        integrated now (a "substep") and the remainder is consumed by the
+        next call; self.t is only incremented once a full dt is completed.
+        """
+
+        # no timestep restriction until one is derived below
+        self.dtmax = +np.inf
+        self.logger.debug('before stats') 
+        self.statistics()
+
+        # run radiation model
+        self.logger.debug('before rad') 
+        if(self.sw_rad):
+            self.run_radiation()
+  
+        # run surface layer model
+        if(self.sw_sl):
+            self.logger.debug('before surface layer') 
+            self.run_surface_layer()
+        
+        # run land surface model
+        if(self.sw_ls):
+            self.logger.debug('before land surface') 
+            self.run_land_surface()
+ 
+        # run cumulus parameterization
+        if(self.sw_cu):
+            self.logger.debug('before cumulus') 
+            self.run_cumulus()
+   
+        self.logger.debug('before mixed layer') 
+        # run mixed-layer model
+        if(self.sw_ml):
+            self.run_mixed_layer()
+        self.logger.debug('after mixed layer') 
+ 
+        #get first profile data point above mixed layer
+        if self.sw_ap:
+            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
+            
+            if (self.sw_ac is not None) and ('w' in self.sw_ac):
+                # here we correct for the fact that the upper profile also
+                # shifts in the vertical.
+
+                # approach speed of h relative to the (vertically moving)
+                # first profile level above it
+                diffhtend = self.htend - self.air_ap.w[zidx_first]
+                if diffhtend > 0:
+                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            else:
+                if self.htend > 0:
+                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
+                    self.dtmax= min(dtmax_new,self.dtmax)
+            #print(self.h,zidx_first,self.ws,self.air_ap.z)
+
+        
+        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
+        self.logger.debug('before store') 
+        # a substep is needed when the remaining time exceeds the allowed step
+        self.substep =  (self.dtcur > self.dtmax)
+        if self.substep:
+            dtnext = self.dtcur - self.dtmax
+            self.dtcur = self.dtmax
+
+        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
+
+        # HW: this will be done multiple times in case of a substep is needed
+        # store output before time integration
+        if self.firsttime:
+            self.store()
+  
+        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
+        # time integrate land surface model
+        if(self.sw_ls):
+            self.integrate_land_surface()
+        self.logger.debug('before integrate mixed layer') 
+        # time integrate mixed-layer model
+        if(self.sw_ml):
+            self.integrate_mixed_layer() 
+        self.logger.debug('after integrate mixed layer') 
+        if self.substep:
+            # continue with the remainder of the step on the next call
+            self.dtcur = dtnext
+            self.firsttime = False
+            self.substeps += 1
+        else:
+            # full step completed: reset the substep bookkeeping
+            self.dtcur = self.dt
+            self.t += 1 
+            self.firsttime = True
+            self.substeps = 0
+        self.logger.debug('going to next step')
+        
+        
+  
+    def statistics(self):
+        # Calculate virtual temperatures 
+        self.thetav   = self.theta  + 0.61 * self.theta * self.q
+        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
+        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
+        # Mixed-layer top properties
+        self.P_h    = self.Ps - self.rho * self.g * self.h
+        # else:
+            # in the other case, it is updated at the time that the profile is
+            # updated (and at the initialization
+
+        self.T_h    = self.theta - self.g/self.cp * self.h
+
+        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
+        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
+
+        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
+
+        # Find lifting condensation level iteratively
+        if(self.t == 0):
+            self.lcl = self.h
+            RHlcl = 0.5
+        else:
+            RHlcl = 0.9998 
+
+        itmax = 30
+        it = 0
+        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it<itmax):
+            self.lcl    += (1.-RHlcl)*1000.
+            p_lcl        = self.Ps - self.rho * self.g * self.lcl
+            T_lcl        = self.theta - self.g/self.cp * self.lcl
+            RHlcl        = self.q / qsat(T_lcl, p_lcl)
+            it          += 1
+
+        if((RHlcl <= 0.9999) or (RHlcl >= 1.0001)):
+            print("LCL calculation not converged!!")
+            print("RHlcl = %f, zlcl=%f"%(RHlcl, self.lcl))
+
+    def run_cumulus(self):
+        # NOTE(review): the patch line here was truncated ("... and it 0):"),
+        # fusing the end of statistics() with the start of run_cumulus().
+        # The span above is reconstructed from the reference CLASS slab-model
+        # code -- verify against the upstream model.py before applying.
+        # mixed-layer top moisture and CO2 variance
+        if(self.wstar > 0):
+            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
+            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
+        else:
+            self.q2_h   = 0.
+            self.CO22_h = 0.
+
+        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
+        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
+        self.M      = self.ac * self.wstar
+        self.wqM    = self.M * self.q2_h**0.5
+
+        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
+        if(self.dCO2 < 0):
+            self.wCO2M  = self.M * self.CO22_h**0.5
+        else:
+            self.wCO2M  = 0.
+
+    def run_mixed_layer(self):
+        """Compute the mixed-layer (slab) tendencies.
+
+        Calculates subsidence, the entrainment velocity and fluxes, and the
+        tendencies of h, theta, q, CO2 (and u, v when sw_wind), of their
+        jumps at the mixed-layer top, and of the transition-layer thickness
+        dz_h.  When the potential-temperature jump would drop below 0.1 K,
+        entrainment is disabled (l_entrainment = False) and the simplified
+        formulation of Wouters et al. (2013) is used instead; self.dtmax
+        may be reduced so the jump does not undershoot within one step.
+        """
+        if(not self.sw_sl):
+            # decompose ustar along the wind components
+            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
+            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
+
+
+
+        # calculate large-scale vertical velocity (subsidence)
+        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
+            self.ws = -self.divU * self.h
+        # else:
+        #     in case the air circulation switch is turned on, subsidence is
+        #     calculated from the circulate profile at the initialization and
+        #     in the integrate_mixed_layer routine
+              
+        # calculate compensation to fix the free troposphere in case of subsidence 
+        if(self.sw_fixft):
+            w_th_ft  = self.gammatheta * self.ws
+            w_q_ft   = self.gammaq     * self.ws
+            w_CO2_ft = self.gammaCO2   * self.ws 
+        else:
+            w_th_ft  = 0.
+            w_q_ft   = 0.
+            w_CO2_ft = 0. 
+      
+        # calculate mixed-layer growth due to cloud top radiative divergence
+        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
+       
+        # calculate convective velocity scale w* 
+        if(self.wthetav > 0.):
+            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
+        else:
+            # stable case: small positive value avoids division by zero later
+            self.wstar  = 1e-6;
+      
+        # Virtual heat entrainment flux 
+        self.wthetave    = -self.beta * self.wthetav 
+        
+        # compute mixed-layer tendencies
+        if(self.sw_shearwe):
+            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
+        else:
+            self.we    = -self.wthetave / self.dthetav
+        # Don't allow boundary layer shrinking if wtheta < 0 
+        if(self.we < 0):
+            self.we = 0.
+
+        # Calculate entrainment fluxes
+        self.wthetae     = -self.we * self.dtheta
+        self.wqe         = -self.we * self.dq
+        self.wCO2e       = -self.we * self.dCO2
+        
+        # preliminary tendencies assuming entrainment stays active
+        htend_pre       = self.we + self.ws + self.wf - self.M
+        
+        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+        
+ 
+        #print('thetatend_pre',thetatend_pre)
+        
+        #preliminary boundary-layer top change
+        #htend_pre = self.we + self.ws + self.wf - self.M
+        #preliminary change in temperature jump
+        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
+                          thetatend_pre + w_th_ft
+        
+        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
+        l_entrainment = True
+
+        # guard the lower limit (0.1 K) of the temperature jump at h
+        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
+            l_entrainment = False
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "Warning! temperature jump is at the lower limit \
+                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+        elif dtheta_pre < 0.1:
+            # jump would undershoot within this step: shrink dtmax so it
+            # reaches exactly 0.1 K, and keep entrainment for this substep
+            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
+            l_entrainment = True
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          " Warning! Potential temperature jump at mixed- \
+                          layer height would become too low limiting timestep \
+                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+            self.dtmax = min(self.dtmax,dtmax_new)
+            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
+                          "next subtimestep, entrainment will be disabled")
+            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
+
+
+
+        # when entrainment is disabled, we just use the simplified formulation
+        # as in Wouters et al., 2013 (section 2.2.1)
+
+        # l_entrainment is a bool used as a 0/1 weight to blend the
+        # entraining and non-entraining formulations
+        self.dthetatend = l_entrainment*dthetatend_pre + \
+                        (1.-l_entrainment)*0.
+        self.thetatend = l_entrainment*thetatend_pre + \
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+        self.htend = l_entrainment*htend_pre + \
+                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
+        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
+        #stop
+
+
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+
+
+        # self.qtend = l_entrainment*qtend_pre + \
+        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
+        # self.CO2tend = l_entrainment*CO2tend_pre + \
+        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+
+
+        #     # part of the timestep for which the temperature mixed-layer jump
+        #     # was changing, and for which entrainment took place. For the other
+        #     # part, we don't assume entrainment anymore, and we use the
+        #     # simplified formulation  of Wouters et al., 2013
+
+        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
+        #   
+        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
+        #                      self.dthetatend + w_th_ft) + \
+        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
+        #     self.htend = fac*self.htend + \
+        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
+        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
+        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
+
+        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+
+        # else:
+        #     #self.htend = htend_pre
+        #     self.dthetatend = dthetatend_pre
+        #     self.thetatend = thetatend_pre
+        
+        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
+        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
+     
+        # assume u + du = ug, so ug - u = du
+        if(self.sw_wind):
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
+  
+            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
+            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
+        
+        # tendency of the transition layer thickness
+        if(self.ac > 0 or self.lcl - self.h < 300):
+            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
+        else:
+            self.dztend = 0.
+
+   
    def integrate_mixed_layer(self):
        """Advance the mixed-layer prognostic variables one (sub)time step.

        Forward-Euler integration over self.dtcur of h, theta/dtheta, q/dq,
        CO2/dCO2, dz_h and (if sw_wind) u/du, v/dv, using the tendencies
        computed earlier.  When an atmospheric-circulation coupling is active
        (self.sw_ac contains 'adv' and/or 'w'), the upper-air profile table
        self.air_ap is advected/subsided as well, and when sw_ap is set the
        profile is rebuilt around the new mixed-layer top and the gamma
        (lapse-rate) values are re-derived from it.

        Side effects only; no return value.
        """
        # set values previous time step
        h0      = self.h
        
        theta0  = self.theta
        dtheta0 = self.dtheta
        q0      = self.q
        dq0     = self.dq
        CO20    = self.CO2
        dCO20   = self.dCO2
        
        u0      = self.u
        du0     = self.du
        v0      = self.v
        dv0     = self.dv

        dz0     = self.dz_h
  
        # integrate mixed-layer equations
        
            

# END -- HW 20170606        
        self.h        = h0      + self.dtcur * self.htend
        # print(self.h,self.htend)
        # stop
        self.theta    = theta0  + self.dtcur * self.thetatend
        #print(dtheta0,self.dtcur,self.dthetatend)
        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
        self.q        = q0      + self.dtcur * self.qtend
        self.dq       = dq0     + self.dtcur * self.dqtend
        self.CO2      = CO20    + self.dtcur * self.CO2tend
        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
        self.dz_h     = dz0     + self.dtcur * self.dztend
            
        # Limit dz to minimal value
        # NOTE: dz0 is reused here as the 50 m floor for the transition-layer
        # thickness; it no longer holds the previous dz_h after this point.
        dz0 = 50
        if(self.dz_h < dz0):
            self.dz_h = dz0 
  
        if(self.sw_wind):
            self.u        = u0      + self.dtcur * self.utend
            self.du       = du0     + self.dtcur * self.dutend
            self.v        = v0      + self.dtcur * self.vtend
            self.dv       = dv0     + self.dtcur * self.dvtend

        # --- advection applied to the whole upper-air profile (air_ap) ---
        if (self.sw_ac is not None) and ('adv' in self.sw_ac):

            for var in ['t','q','u','v']:
                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):

            # take into account advection for the whole profile
                
                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]

            var = 'z'
            #print(self.air_ap[var])
                #     print(self.air_ap['adv'+var])




            #moving the profile vertically according to the vertical wind
                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):


            # air_apvarold = pd.Series(np.array(self.air_ap.z))
            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
            # stop


                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)

            #As t is updated, we also need to recalculate theta (and R)
            # gas constant of moist air for each profile level
            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                                 self.Rv*self.air_ap.q)

            # air_aptheta_old = pd.Series(self.air_ap['theta'])
            self.air_ap['theta'] = \
                        self.air_ap.t * \
                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
        # --- subsidence: shift profile levels above h with the vertical wind ---
        if (self.sw_ac is not None) and ('w' in self.sw_ac):
            # first profile index strictly above the mixed-layer top
            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
                                         self.dtcur * self.air_ap.w[zidx_first:]

#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
#            print(self.t, self.dtcur,self.dt,self.htend)

            # # the pressure levels of the profiles are recalculated according to
            # # there new height (after subsidence)
            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
            #         * self.dtcur *  self.air_ap.w[zidx_first:]

            # pressure levels follow the pressure tendency (wp) directly
            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
                    self.dtcur * self.air_ap.wp[zidx_first:]

            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
        # note that theta and q itself are updatet by class itself

    
        if self.sw_ap:
            # Just for model consistency preservation purposes, we set the
            # theta variables of the mixed-layer to nan values, since the
            # mixed-layer values should overwritte by the mixed-layer
            # calculations of class.
            self.air_ap['theta'][0:3] = np.nan 
            self.air_ap['p'][0:3] = np.nan 
            self.air_ap['q'][0:3] = np.nan 
            self.air_ap['u'][0:3] = np.nan 
            self.air_ap['v'][0:3] = np.nan 
            self.air_ap['t'][0:3] = np.nan 
            self.air_ap['z'][0:3] = np.nan 

            # Update the vertical profiles: 
            #   - new mixed layer properties( h, theta, q ...)
            #   - any data points below the new ixed-layer height are removed

            # Three data points at the bottom that describe the mixed-layer
            # properties
            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
                                           # columns as air_ap
            # air_ap_head['z'].iloc[0] = 2.
            # air_ap_head['z'].iloc[1] = self.__dict__['h']
            # air_ap_head['z'].iloc[2] = self.__dict__['h']
            # NOTE(review): writing through .values bypasses pandas'
            # chained-assignment machinery; relies on air_ap_head being a view.
            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
                        [2.,self.__dict__['h'],self.__dict__['h']]
            for var in ['theta','q','u','v']:

                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
                        [self.__dict__[var], \
                         self.__dict__[var], \
                         self.__dict__[var] + self.__dict__['d'+var]]
                
            #print(self.air_ap)

            # This is the remaining profile considering the remaining
            # datapoints above the mixed layer height
            air_ap_tail = self.air_ap.iloc[3:]
            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]

            # print('h',self.h)
            # # only select samples monotonically increasing with height
            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
            # air_ap_tail = pd.DataFrame()
            # theta_low = self.theta
            # z_low =     self.h
            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            # for ibottom in range(1,len(air_ap_tail_orig)):
            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)




            # make theta increase strong enough to avoid numerical
            # instability
            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
            air_ap_tail = pd.DataFrame()
            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
            theta_low = self.theta
            z_low =     self.h
            ibottom = 0
            itop = 0
            # print(air_ap_tail_orig)
            # stop

            # HW: this is the lower limit that we use for gammatheta, which is
            # there to avoid model crashes. Besides on this limit, the upper
            # air profile is modified in a way that is still conserves total
            # quantities of moisture and temperature. The limit is set by trial
            # and error. The numerics behind the crash should be investigated
            # so that a cleaner solution can be provided.
            gammatheta_lower_limit = 0.002
            # merge consecutive profile points until their mean lapse rate
            # exceeds the lower limit (or the profile is exhausted)
            while ((itop in range(0,1)) or (itop != ibottom)):
                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
                if (
                    #(z_mean > (z_low+0.2)) and \
                    #(theta_mean > (theta_low+0.02) ) and \
                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
                  (itop >= (len(air_ap_tail_orig)-1)) \
                   :

                    # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                    # this requires an older pandas — TODO confirm pinned version.
                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
                    ibottom = itop+1
                    theta_low = air_ap_tail.theta.iloc[-1]
                    z_low =     air_ap_tail.z.iloc[-1]
    

                itop +=1
                # elif  (itop > len(air_ap_tail_orig)-10):
                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
                #print(itop,ibottom)

            if itop > 1:
                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          "Warning! Temperature profile was too steep. \
                                  Modifying profile: "+ \
                                  str(itop - 1)+ " measurements were dropped \
                                  and replaced with its average \
                                  Modifying profile. \
                                  mean with next profile point(s).") 


            # reassemble the profile: 3 mixed-layer points + merged tail
            self.air_ap = pd.concat((air_ap_head,\
                                     air_ap_tail,\
                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
                                                                      axis=1)

            if  self.sw_ac:
                qvalues = \
                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]

                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
                # hydrostatic estimate of the pressure at the mixed-layer top
                self.P_h    = self.Ps - self.rho * self.g * self.h
                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
                        [self.Ps,  self.P_h, self.P_h-0.1]

                self.air_ap.t = \
                            self.air_ap.theta * \
                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)


        # WARNING: self.sw_ac always requires self.sw_ap for now!!!




        # else:
            # in the other case, it is updated at the time the statistics are
            # calculated 

        # --- mixed-layer advection terms for the NEXT time step ---
        if (self.sw_ac is not None) and ('adv' in self.sw_ac):


            self.P_h    = self.Ps - self.rho * self.g * self.h
            # mask of circulation levels that lie inside the mixed layer
            in_ml = (self.air_ac.p >= self.P_h)

            if in_ml.sum() == 0:
                warnings.warn(" no circulation points in the mixed layer \
                              found. We just take the bottom one.")
                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
            for var in ['t','q','u','v']:

                # calculation of the advection variables for the mixed-layer
                # these will be used for the next timestep
                # Warning: w is excluded for now.

                # mass-weighted (delpdgrav) average over the mixed layer
                self.__dict__['adv'+var] = \
                        ((self.air_ac['adv'+var+'_x'][in_ml] \
                         + \
                         self.air_ac['adv'+var+'_y'][in_ml])* \
                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
                        self.air_ac['delpdgrav'][in_ml].sum()

                # calculation of the advection variables for the profile above
                # the mixed layer (also for the next timestep)
                self.air_ap['adv'+var] = \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p,\
                                              self.air_ac['adv'+var+'_x']) \
                                    + \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p, \
                                              self.air_ac['adv'+var+'_y'])
                # if var == 't':
                #     print(self.air_ap['adv'+var])
                #     stop

            # as an approximation, we consider that advection of theta in the
            # mixed layer is equal to advection of t. This is a sufficient
            # approximation since theta and t are very similar at the surface
            # pressure.

            self.__dict__['advtheta'] = self.__dict__['advt']

        if (self.sw_ac is not None) and ('w' in self.sw_ac):
            # update the vertical wind profile
            self.air_ap['wp'] = np.interp(self.air_ap.p, \
                                          self.air_ac.p, \
                                          self.air_ac['wp'])
            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                                 self.Rv*self.air_ap.q)
            # ideal-gas density at each profile level
            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
            
            air_apwold = self.air_ap['w']
            # convert pressure velocity (wp) to geometric vertical wind
            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
            #print('hello w upd')

            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
            # # interpolate subsidence x density
            # self.air_ap['wrho'] = \
            #            np.interp(self.air_ap.p,\
            #                      self.air_ach.p,\
            #                      self.air_ach['wrho']) \
            #     
            # self.air_ap['w'] = \
            #     self.air_ap['wrho']/(self.air_ap.p/ \
            #                          (self.Rd*(1.-self.air_ap.q) + \
            #                           self.Rv*self.air_ap.q)* \
            #                          self.air_ap.TEMP)
            # # self.wrho = np.interp(self.P_h,\
            # #                      self.air_ach.p,\
            # #                      self.air_ach['wrho']) \



            # Also update the vertical wind at the mixed-layer height
            # (subsidence)
            self.ws   = self.air_ap.w[1]
        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])

            ## Finally, we update he 
            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
            #                        + \
            #                        self.air_ac['divU_y'][in_ml])* \
            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
            #            self.air_ac['delpdgrav'][in_ml].sum() 
            

        if self.sw_ap:
            for var in ['theta','q','u','v']:

                # update of the slope (gamma) for the different variables, for
                # the next timestep!

                # there is an warning message that tells about dividing through
                # zero, which we ignore

                with np.errstate(divide='ignore'):
                    # finite-difference lapse rate between consecutive levels
                    gammavar = list(np.array(self.air_ap[var][1:].values - \
                                             self.air_ap[var][:-1].values) \
                                    / np.array(self.air_ap['z'][1:].values - \
                                               self.air_ap['z'][:-1].values))

                    # add last element twice (since we have one element less)
                gammavar.append(gammavar[-1])
                gammavar = np.array(gammavar)
                self.air_ap['gamma'+var] = gammavar

                # Based on the above, update the gamma value at the mixed-layer
                # top
                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
                                                                     self.air_ap.z)[0][-1]]
+            
+    def run_radiation(self):
+        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
+        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
+        sinlea = max(sinlea, 0.0001)
+        
+        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
+  
+        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
+  
+        self.Swin  = self.S0 * Tr * sinlea
+        self.Swout = self.alpha * self.S0 * Tr * sinlea
+        
+        
+        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
+        self.Lwout = self.bolz * self.Ts ** 4.
+          
+        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
+        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
+  
+    def run_surface_layer(self):
+        # HW: I had to raise the minimum wind speed to make the simulation with
+        # the non-iterative solution stable (this solution was a wild guess, so I don't
+        # know the exact problem of the instability in case of very low wind
+        # speeds yet)
+        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        # version of 20180730 where there are still some runs crashing. Maybe
+        # an upper limit should be set on the monin-obukhov length instead of
+        # a lower limmit on the wind speed?
+        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
+
+        
+        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
+        qsatsurf       = qsat(self.thetasurf, self.Ps)
+        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
+        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
+
+        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
+  
+        zsl       = 0.1 * self.h
+        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
+        
+
+
+        if self.sw_lit:
+            self.Rib  = min(self.Rib, 0.2)
+            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
+            self.zeta  = zsl/self.L
+            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
+            
+        
+            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
+            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
+            
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+        
+     
+            # diagnostic meteorological variables
+            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
+            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
+            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
+            
+            # diagnostic meteorological variables
+        else:
+            
+            ## circumventing any iteration with Wouters et al., 2012
+            self.zslz0m = np.max((zsl/self.z0m,10.))
+            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
+            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
+            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
+            self.L = zsl/self.zeta
+            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
+        
+            self.Cm = self.k**2.0/funm/funm
+            self.Cs = self.k**2.0/funm/funh
+            
+            self.ustar = np.sqrt(self.Cm) * ueff
+            self.uw    = - self.Cm * ueff * self.u
+            self.vw    = - self.Cm * ueff * self.v
+            
+            # extrapolation from mixed layer (instead of from surface) to 2meter
+            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
+            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
+            self.u2m    =                - self.uw     / self.ustar / self.k * funm
+            self.v2m    =                - self.vw     / self.ustar / self.k * funm
+        
+        
+        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
+        self.e2m    = self.q2m * self.Ps / 0.622
+     
+    def ribtol(self, Rib, zsl, z0m, z0h): 
+        if(Rib > 0.):
+            L    = 1.
+            L0   = 2.
+        else:
+            L  = -1.
+            L0 = -2.
+        #print(Rib,zsl,z0m,z0h)
+        
+        while (abs(L - L0) > 0.001):
+            L0      = L
+            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
+            Lstart  = L - 0.001*L
+            Lend    = L + 0.001*L
+            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
+                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
+                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+            L       = L - fx / fxdif
+            #print(L)
+            if(abs(L) > 1e12):
+                break
+
+        return L
+      
+    def psim(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
+            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psim = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+        return psim
+      
+    def psih(self, zeta):
+        if(zeta <= 0):
+            x     = (1. - 16. * zeta)**(0.25)
+            psih  = 2. * np.log( (1. + x*x) / 2.)
+            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
+            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
+        else:
+            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+        return psih
+ 
+    def jarvis_stewart(self):
+        # calculate surface resistances using Jarvis-Stewart model
+        if(self.sw_rad):
+            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
+        else:
+            f1 = 1.
+  
+        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
+            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
+        else:
+            f2 = 1.e8
+ 
+        # Limit f2 in case w2 > wfc, where f2 < 1
+        f2 = max(f2, 1.);
+ 
+        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
+        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
+  
+        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
+
+    def factorial(self,k):
+        factorial = 1
+        for n in range(2,k+1):
+            factorial = factorial * float(n)
+        return factorial;
+
+    def E1(self,x):
+        E1sum = 0
+        for k in range(1,100):
+            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
+        return -0.57721566490153286060 - np.log(x) - E1sum
+ 
    def ags(self):
        """A-gs plant physiology scheme: surface resistance and CO2 fluxes.

        Computes the canopy surface resistance (self.rs) and the CO2 fluxes
        from assimilation (self.wCO2A), soil respiration (self.wCO2R) and
        their sum (self.wCO2), using per-plant-type coefficient arrays
        indexed by c (0 for C3, 1 for C4 vegetation).

        Relies on the module-level helper esat() (defined elsewhere in this
        file) and on self.E1 / many self.* coefficients set up elsewhere.
        """
        # Select index for plant type
        if(self.c3c4 == 'c3'):
            c = 0
        elif(self.c3c4 == 'c4'):
            c = 1
        else:
            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)

        # calculate CO2 compensation concentration
        # (Q10 temperature response around the 298 K reference)
        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  

        # calculate mesophyll conductance
        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
        gm            = gm / 1000. # conversion from mm s-1 to m s-1
  
        # calculate CO2 concentration inside the leaf (ci)
        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
  
        # vapour pressure deficit at the skin temperature
        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
        D0            = (self.f0[c] - fmin) / self.ad[c]
  
        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
        ci            = cfrac * (co2abs - CO2comp) + CO2comp
  
        # calculate maximal gross primary production in high light conditions (Ag)
        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
  
        # calculate effect of soil moisture stress on gross assimilation rate
        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
  
        # calculate stress function
        if (self.c_beta == 0):
            # linear response to soil moisture
            fstr = betaw;
        else:
            # Following Combe et al (2016)
            if (self.c_beta < 0.25):
                P = 6.4 * self.c_beta
            elif (self.c_beta < 0.50):
                P = 7.6 * self.c_beta - 0.3
            else:
                P = 2**(3.66 * self.c_beta + 0.34) - 1
            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
  
        # calculate gross assimilation rate (Am)
        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
        Rdark        = (1. / 9.) * Am
        # photosynthetically active radiation (floor avoids division issues)
        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
  
        # calculate  light use efficiency
        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
  
        # calculate gross primary productivity
        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
  
        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
  
        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
        a1           = 1. / (1. - self.f0[c])
        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
  
        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
  
        # calculate surface resistance for moisture and carbon dioxide
        self.rs      = 1. / (1.6 * gcco2)
        rsCO2        = 1. / gcco2
  
        # calculate net flux of CO2 into the plant (An)
        An           = -(co2abs - ci) / (self.ra + rsCO2)
  
        # CO2 soil surface flux
        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
  
        # CO2 flux
        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
        self.wCO2    = self.wCO2A + self.wCO2R
+ 
    def run_land_surface(self):
        """Solve the land-surface energy balance for the current time step.

        Computes the aerodynamic resistance, the surface resistance (via
        Jarvis-Stewart or A-gs depending on self.ls_type), an implicit skin
        temperature self.Ts, the surface energy fluxes (LE, H, G and the
        potential/reference evaporation), soil tendencies (Tsoiltend,
        wgtend, Wltend) and the kinematic heat/moisture fluxes (wtheta, wq).

        Relies on module-level helpers esat() and qsat() defined elsewhere
        in this file.
        """
        # compute ra (aerodynamic resistance)
        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
        #print('ueff',self.u,self.v,self.wstar)

        if(self.sw_sl):
          self.ra = (self.Cs * ueff)**-1.
        else:
          self.ra = ueff / max(1.e-3, self.ustar)**2.

        #print('ra',self.ra,self.ustar,ueff)

        # first calculate essential thermodynamic variables
        self.esat    = esat(self.theta)
        self.qsat    = qsat(self.theta, self.Ps)
        # slope of the saturation curve (Clausius-Clapeyron, Tetens form)
        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
        self.dqsatdT = 0.622 * desatdT / self.Ps
        self.e       = self.q * self.Ps / 0.622

        # surface resistance from the selected vegetation scheme
        if(self.ls_type == 'js'): 
            self.jarvis_stewart() 
        elif(self.ls_type == 'ags'):
            self.ags()
        else:
            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)

        # recompute f2 using wg instead of w2
        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
        else:
          f2        = 1.e8
        self.rssoil = self.rssoilmin * f2 
 
        # maximum water storage on the canopy and the wet (liquid) fraction
        Wlmx = self.LAI * self.Wmax
        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
        self.cliq = min(1., self.Wl / Wlmx) 
     
        # calculate skin temperature implictly
        # (linearized energy balance: vegetation, bare-soil and wet-canopy
        # evaporation branches plus the ground heat flux term Lambda*Tsoil)
        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)

        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
        #print('Ts',self.rs)

        esatsurf      = esat(self.Ts)
        self.qsatsurf = qsat(self.Ts, self.Ps)

        # latent heat flux components: transpiration, interception, bare soil
        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
  
        # canopy water storage decreases by the interception evaporation
        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
  
        self.LE     = self.LEsoil + self.LEveg + self.LEliq
        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
        #print('H',self.ra,self.Ts,self.theta)
        self.G      = self.Lambda * (self.Ts - self.Tsoil)
        # Penman-Monteith style potential and reference evaporation
        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
        
        # force-restore soil heat transfer coefficient
        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
  
        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
   
        # force-restore soil moisture tendency for the top d1 = 0.1 m layer
        d1          = 0.1
        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
  
        # calculate kinematic heat fluxes
        self.wtheta   = self.H  / (self.rho * self.cp)
        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
        self.wq       = self.LE / (self.rho * self.Lv)
+ 
+    def integrate_land_surface(self):
+        """Advance the prognostic land-surface state by one time step.
+
+        Explicit (forward) Euler integration over self.dtcur of the
+        tendencies computed by the land-surface routine:
+          - Tsoil : temperature of the top soil layer [K]
+          - wg    : volumetric water content of the top soil layer [m3 m-3]
+          - Wl    : equivalent water-layer depth on wet vegetation [m]
+        self.dtcur is the current integration step length [s]
+        (presumably dt divided by the number of substeps -- see
+        self.substeps; confirm against the caller).
+        """
+        # integrate soil equations
+        Tsoil0        = self.Tsoil
+        wg0           = self.wg
+        Wl0           = self.Wl
+  
+        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
+        self.wg       = wg0     + self.dtcur * self.wgtend
+        self.Wl       = Wl0     + self.dtcur * self.Wltend
+  
+    # store model output
+    def store(self):
+        """Copy the instantaneous model state into the output time series.
+
+        Writes the current value of every diagnostic/prognostic variable
+        into slot t of the preallocated arrays of self.out (a
+        model_output instance), where t = self.t is the current step
+        index. The time axis is stored in hours (t*dt/3600 + tstart).
+        """
+        t                      = self.t
+        
+        self.out.time[t]          = t * self.dt / 3600. + self.tstart
+
+        # in case we are at the end of the simulation, we store the vertical
+        # profiles to the output
+        
+        # if t == (len(self.out.time) - 1):
+        #     self.out.air_ac = self.air_ac
+        #     self.out.air_ap = self.air_ap
+
+        
+        # this way, we only need to define the output variables in the output class, so we don't need to specify it again here.
+        #  for key in self.out.__dict__.keys():
+        #      if key in self.__dict__:
+        #          self.out.__dict__[key][t]  = self.__dict__[key]
+        
+        self.out.h[t]          = self.h
+        
+        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
+        
+        self.out.gammatheta[t] = self.gammatheta
+        self.out.gammau[t]     = self.gammau
+        self.out.gammav[t]     = self.gammav
+        self.out.gammaq[t]     = self.gammaq
+        self.out.theta[t]      = self.theta
+        self.out.thetav[t]     = self.thetav
+        self.out.dtheta[t]     = self.dtheta
+        self.out.dthetav[t]    = self.dthetav
+        self.out.wtheta[t]     = self.wtheta
+        self.out.wthetav[t]    = self.wthetav
+        self.out.wthetae[t]    = self.wthetae
+        self.out.wthetave[t]   = self.wthetave
+        
+        self.out.q[t]          = self.q
+        self.out.dq[t]         = self.dq
+        self.out.wq[t]         = self.wq
+        self.out.wqe[t]        = self.wqe
+        self.out.wqM[t]        = self.wqM
+      
+        self.out.qsat[t]       = self.qsat
+        self.out.e[t]          = self.e
+        self.out.esat[t]       = self.esat
+      
+        # rho*mco2/mair converts the kinematic CO2 fluxes to the mass
+        # fluxes [mgC m-2 s-1] declared in model_output
+        fac = (self.rho*self.mco2)/self.mair
+        self.out.CO2[t]        = self.CO2
+        self.out.dCO2[t]       = self.dCO2
+        self.out.wCO2[t]       = self.wCO2  * fac
+        self.out.wCO2e[t]      = self.wCO2e * fac
+        self.out.wCO2R[t]      = self.wCO2R * fac
+        self.out.wCO2A[t]      = self.wCO2A * fac
+
+        self.out.u[t]          = self.u
+        self.out.du[t]         = self.du
+        self.out.uw[t]         = self.uw
+        
+        self.out.v[t]          = self.v
+        self.out.dv[t]         = self.dv
+        self.out.vw[t]         = self.vw
+        
+        self.out.T2m[t]        = self.T2m
+        self.out.q2m[t]        = self.q2m
+        self.out.u2m[t]        = self.u2m
+        self.out.v2m[t]        = self.v2m
+        self.out.e2m[t]        = self.e2m
+        self.out.esat2m[t]     = self.esat2m
+
+
+        self.out.Tsoil[t]      = self.Tsoil
+        self.out.T2[t]         = self.T2
+        self.out.Ts[t]         = self.Ts
+        self.out.wg[t]         = self.wg
+        
+        self.out.thetasurf[t]  = self.thetasurf
+        self.out.thetavsurf[t] = self.thetavsurf
+        self.out.qsurf[t]      = self.qsurf
+        self.out.ustar[t]      = self.ustar
+        self.out.Cm[t]         = self.Cm
+        self.out.Cs[t]         = self.Cs
+        self.out.L[t]          = self.L
+        self.out.Rib[t]        = self.Rib
+  
+        self.out.Swin[t]       = self.Swin
+        self.out.Swout[t]      = self.Swout
+        self.out.Lwin[t]       = self.Lwin
+        self.out.Lwout[t]      = self.Lwout
+        self.out.Q[t]          = self.Q
+  
+        self.out.ra[t]         = self.ra
+        self.out.rs[t]         = self.rs
+        self.out.H[t]          = self.H
+        self.out.LE[t]         = self.LE
+        self.out.LEliq[t]      = self.LEliq
+        self.out.LEveg[t]      = self.LEveg
+        self.out.LEsoil[t]     = self.LEsoil
+        self.out.LEpot[t]      = self.LEpot
+        self.out.LEref[t]      = self.LEref
+        self.out.G[t]          = self.G
+
+        self.out.zlcl[t]       = self.lcl
+        self.out.RH_h[t]       = self.RH_h
+
+        self.out.ac[t]         = self.ac
+        self.out.M[t]          = self.M
+        self.out.dz[t]         = self.dz_h
+        self.out.substeps[t]   = self.substeps
+  
+    # delete class variables to facilitate analysis in ipython
+    def exitmodel(self):
+        """Delete all working attributes from the instance.
+
+        Called at the end of a run so that only the output container(s)
+        remain on the object, keeping interactive (ipython) inspection
+        uncluttered. NOTE: each del raises AttributeError if the
+        attribute was never set (e.g. when a model switch disabled the
+        routine that creates it) -- assumes a complete run preceded it.
+        """
+        # physical constants
+        del(self.Lv)
+        del(self.cp)
+        del(self.rho)
+        del(self.k)
+        del(self.g)
+        del(self.Rd)
+        del(self.Rv)
+        del(self.bolz)
+        del(self.S0)
+        del(self.rhow)
+  
+        # time control
+        del(self.t)
+        del(self.dt)
+        del(self.tsteps)
+         
+        # mixed-layer state
+        del(self.h)          
+        del(self.Ps)        
+        del(self.fc)        
+        del(self.ws)
+        del(self.we)
+        
+        del(self.theta)
+        del(self.dtheta)
+        del(self.gammatheta)
+        del(self.advtheta)
+        del(self.beta)
+        del(self.wtheta)
+    
+        # 2m diagnostics
+        del(self.T2m)
+        del(self.q2m)
+        del(self.e2m)
+        del(self.esat2m)
+        del(self.u2m)
+        del(self.v2m)
+        
+        # surface thermodynamic state
+        del(self.thetasurf)
+        del(self.qsatsurf)
+        del(self.thetav)
+        del(self.dthetav)
+        del(self.thetavsurf)
+        del(self.qsurf)
+        del(self.wthetav)
+        
+        # moisture state
+        del(self.q)
+        del(self.qsat)
+        del(self.dqsatdT)
+        del(self.e)
+        del(self.esat)
+        del(self.dq)
+        del(self.gammaq)
+        del(self.advq)
+        del(self.wq)
+        
+        # wind state
+        del(self.u)
+        del(self.du)
+        del(self.gammau)
+        del(self.advu)
+        
+        del(self.v)
+        del(self.dv)
+        del(self.gammav)
+        del(self.advv)
+  
+        # tendencies
+        del(self.htend)
+        del(self.thetatend)
+        del(self.dthetatend)
+        del(self.qtend)
+        del(self.dqtend)
+        del(self.utend)
+        del(self.dutend)
+        del(self.vtend)
+        del(self.dvtend)
+     
+        del(self.Tsoiltend) 
+        del(self.wgtend)  
+        del(self.Wltend) 
+  
+        # surface-layer variables
+        del(self.ustar)
+        del(self.uw)
+        del(self.vw)
+        del(self.z0m)
+        del(self.z0h)        
+        del(self.Cm)         
+        del(self.Cs)
+        del(self.L)
+        del(self.Rib)
+        del(self.ra)
+  
+        # geography / time of run
+        del(self.lat)
+        del(self.lon)
+        del(self.doy)
+        del(self.tstart)
+   
+        # radiation
+        del(self.Swin)
+        del(self.Swout)
+        del(self.Lwin)
+        del(self.Lwout)
+        del(self.cc)
+  
+        # land-surface / soil parameters
+        del(self.wg)
+        del(self.w2)
+        del(self.cveg)
+        del(self.cliq)
+        del(self.Tsoil)
+        del(self.T2)
+        del(self.a)
+        del(self.b)
+        del(self.p)
+        del(self.CGsat)
+  
+        del(self.wsat)
+        del(self.wfc)
+        del(self.wwilt)
+  
+        del(self.C1sat)
+        del(self.C2ref)
+  
+        del(self.LAI)
+        del(self.rs)
+        del(self.rssoil)
+        del(self.rsmin)
+        del(self.rssoilmin)
+        del(self.alpha)
+        del(self.gD)
+  
+        del(self.Ts)
+  
+        del(self.Wmax)
+        del(self.Wl)
+  
+        del(self.Lambda)
+        
+        # surface energy balance
+        del(self.Q)
+        del(self.H)
+        del(self.LE)
+        del(self.LEliq)
+        del(self.LEveg)
+        del(self.LEsoil)
+        del(self.LEpot)
+        del(self.LEref)
+        del(self.G)
+  
+        # model switches
+        del(self.sw_ls)
+        del(self.sw_rad)
+        del(self.sw_sl)
+        del(self.sw_wind)
+        del(self.sw_shearwe)
+
+# class for storing mixed-layer model output data
+class model_output:
+    """Preallocated time series (length tsteps) for every model diagnostic.
+
+    One np.zeros array per output variable; the model's store() method
+    fills slot t of each array every time step.
+    """
+    def __init__(self, tsteps):
+        self.time          = np.zeros(tsteps)    # time [h UTC] (store() writes t*dt/3600 + tstart)
+
+        # mixed-layer variables
+        self.h          = np.zeros(tsteps)    # ABL height [m]
+        
+        self.theta      = np.zeros(tsteps)    # mixed-layer potential temperature [K]
+        self.gammatheta = np.zeros(tsteps)    # free atmosphere potential temperature lapse rate [K m-1]
+        self.gammaq     = np.zeros(tsteps)    # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.gammau     = np.zeros(tsteps)    # free atmosphere u-wind speed lapse rate [s-1]
+        self.gammav     = np.zeros(tsteps)    # free atmosphere v-wind speed lapse rate [s-1]
+        self.thetav     = np.zeros(tsteps)    # mixed-layer virtual potential temperature [K]
+        self.dtheta     = np.zeros(tsteps)    # potential temperature jump at h [K]
+        self.dthetav    = np.zeros(tsteps)    # virtual potential temperature jump at h [K]
+        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
+        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
+        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
+        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
+        
+        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
+        self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
+        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
+        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
+        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]
+
+        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
+        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
+        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]
+
+        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
+        self.dCO2       = np.zeros(tsteps)    # initial CO2 jump at h [ppm]
+        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
+        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
+        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
+        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
+        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
+        
+        self.u          = np.zeros(tsteps)    # mixed-layer u-wind speed [m s-1]
+        self.du         = np.zeros(tsteps)    # u-wind jump at h [m s-1]
+        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
+        
+        self.v          = np.zeros(tsteps)    # mixed-layer v-wind speed [m s-1]
+        self.dv         = np.zeros(tsteps)    # v-wind jump at h [m s-1]
+        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]
+
+        # diagnostic meteorological variables
+        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
+        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
+        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
+        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
+        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
+        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
+
+        # ground variables
+        self.Tsoil       = np.zeros(tsteps)   # temperature top soil layer [K]
+        self.T2          = np.zeros(tsteps)   # temperature deeper soil layer [K]
+        self.Ts          = np.zeros(tsteps)   # surface (skin) temperature [K]
+        self.wg          = np.zeros(tsteps)   # volumetric water content top soil layer [m3 m-3]
+
+        # surface-layer variables
+        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
+        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
+        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
+        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
+        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
+        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
+        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
+        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
+        self.L          = np.zeros(tsteps)    # Obukhov length [m]
+        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]
+
+        # radiation variables
+        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
+        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
+        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
+        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
+        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]
+
+        # land surface variables
+        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
+        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
+        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
+        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
+        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
+        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
+        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
+        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
+        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
+        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]
+
+        # Mixed-layer top variables
+        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
+        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]
+
+        # cumulus variables
+        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
+        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
+        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
+        
+        
+        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
+
+# class for storing mixed-layer model input data
+class model_input:
+    """Container for all mixed-layer model input parameters.
+
+    Every attribute defaults to None; the user (or an inheriting
+    class4gl input object) is expected to set all parameters required
+    by the enabled model switches before the run starts.
+    """
+    def __init__(self):
+
+        # # comment not valid
+        # The initialization is commented out because of a problem when
+        # inheriting values from another class4gl input object. We also
+        # expect the user to specify all required parameters (if not, an
+        # error is raised).
+
+        # general model variables
+        self.runtime    = None  # duration of model run [s]
+        self.dt         = None  # time step [s]
+
+        # mixed-layer variables
+        self.sw_ml      = None  # mixed-layer model switch
+        self.sw_shearwe = None  # Shear growth ABL switch
+        self.sw_fixft   = None  # Fix the free-troposphere switch
+        self.h          = None  # initial ABL height [m]
+        self.Ps         = None  # surface pressure [Pa]
+        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
+        self.fc         = None  # Coriolis parameter [s-1]
+        
+        self.theta      = None  # initial mixed-layer potential temperature [K]
+        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
+
+        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
+
+        self.dtheta     = None  # initial temperature jump at h [K]
+        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
+        self.advtheta   = None  # advection of heat [K s-1]
+        self.beta       = None  # entrainment ratio for virtual heat [-]
+        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
+        
+        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
+        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
+        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
+
+        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
+        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+        self.advq       = None  # advection of moisture [kg kg-1 s-1]
+        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
+
+        self.CO2        = None  # initial mixed-layer CO2 [ppm]
+        self.dCO2       = None  # initial CO2 jump at h [ppm]
+        self.gammaCO2   = None  # free atmosphere CO2 lapse rate [ppm m-1]
+        self.advCO2     = None  # advection of CO2 [ppm s-1]
+        self.wCO2       = None  # surface kinematic CO2 flux [ppm m s-1]
+        
+        self.sw_wind    = None  # prognostic wind switch
+        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
+        self.du         = None  # initial u-wind jump at h [m s-1]
+        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
+        self.advu       = None  # advection of u-wind [m s-2]
+
+        self.v          = None  # initial mixed-layer v-wind speed [m s-1]
+        self.dv         = None  # initial v-wind jump at h [m s-1]
+        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
+        self.advv       = None  # advection of v-wind [m s-2]
+
+        # surface layer variables
+        self.sw_sl      = None  # surface layer switch
+        self.ustar      = None  # surface friction velocity [m s-1]
+        self.z0m        = None  # roughness length for momentum [m]
+        self.z0h        = None  # roughness length for scalars [m]
+        self.Cm         = None  # drag coefficient for momentum [-]
+        self.Cs         = None  # drag coefficient for scalars [-]
+        self.L          = None  # Obukhov length [m]
+        self.Rib        = None  # bulk Richardson number [-]
+
+        # radiation parameters
+        self.sw_rad     = None  # radiation switch
+        self.lat        = None  # latitude [deg]
+        self.lon        = None  # longitude [deg]
+        self.doy        = None  # day of the year [-]
+        self.tstart     = None  # time of the day [h UTC]
+        self.cc         = None  # cloud cover fraction [-]
+        self.Q          = None  # net radiation [W m-2] 
+        self.dFz        = None  # cloud top radiative divergence [W m-2] 
+
+        # land surface parameters
+        self.sw_ls      = None  # land surface switch
+        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
+        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
+        self.Tsoil      = None  # temperature top soil layer [K]
+        self.T2         = None  # temperature deeper soil layer [K]
+        
+        self.a          = None  # Clapp and Hornberger retention curve parameter a
+        self.b          = None  # Clapp and Hornberger retention curve parameter b
+        self.p          = None  # Clapp and Hornberger retention curve parameter p 
+        self.CGsat      = None  # saturated soil conductivity for heat
+        
+        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
+        self.wfc        = None  # volumetric water content field capacity [-]
+        self.wwilt      = None  # volumetric water content wilting point [-]
+        
+        self.C1sat      = None  # coefficient force term moisture [-]
+        self.C2ref      = None  # coefficient restore term moisture [-]
+
+        self.c_beta     = None  # Curvatur plant water-stress factor (0..1) [-]
+        
+        self.LAI        = None  # leaf area index [-]
+        self.gD         = None  # correction factor transpiration for VPD [-]
+        self.rsmin      = None  # minimum resistance transpiration [s m-1]
+        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
+        self.alpha      = None  # surface albedo [-]
+        
+        self.Ts         = None  # initial surface temperature [K]
+        
+        self.cveg       = None  # vegetation fraction [-]
+        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
+        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
+        
+        self.Lambda     = None  # thermal diffusivity skin layer [-]
+
+        # A-Gs parameters
+        self.c3c4       = None  # Plant type ('c3' or 'c4')
+
+        # Cumulus parameters
+        self.sw_cu      = None  # Cumulus parameterization switch
+        self.dz_h       = None  # Transition layer thickness [m]
+        
+# BEGIN -- HW 20171027
+        # self.cala       = None      # soil heat conductivity [W/(K*m)]
+        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
+# END -- HW 20171027
diff --git a/lib/ribtol/Makefile b/lib/ribtol/Makefile
new file mode 100644
index 0000000..e23e3e1
--- /dev/null
+++ b/lib/ribtol/Makefile
@@ -0,0 +1,8 @@
+# Build the ribtol Boost.Python extension module (Linux, Python 2.6).
+# NOTE(review): '-Wl -z -def' looks like a typo for '-Wl,-z,defs'
+# (linker options must be comma-joined after -Wl) -- confirm before use.
+ribtol.so : ribtol.o
+	g++ -O3 -shared -Wl -z -def -o ribtol.so -lpython2.6 -lboost_python ribtol.o
+
+ribtol.o : ribtol.cpp
+	g++ -c -O3 -fPIC ribtol.cpp -I/usr/include/python2.6
+
+clean : 
+	rm -rf ribtol.o ribtol.so
diff --git a/lib/ribtol/MakefileMac b/lib/ribtol/MakefileMac
new file mode 100644
index 0000000..bf34ea8
--- /dev/null
+++ b/lib/ribtol/MakefileMac
@@ -0,0 +1,9 @@
+# Note: boost-python needs to be installed: brew install boost-python -with-python3 -without-python
+# macOS build of the ribtol Boost.Python extension (Python 3.6, Homebrew paths).
+# NOTE(review): both -lpython3.6m and a bare -lpython are linked; the
+# second looks redundant (or a Python-2 leftover) -- confirm.
+ribtol.so : ribtol.o
+	clang++ -O3 -shared -o ribtol.so -L/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib -lpython3.6m -L/usr/local/lib -lboost_python3-mt -lpython ribtol.o
+
+ribtol.o : ribtol.cpp
+	clang++ -c -O3 -fPIC ribtol.cpp -I/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/include/python3.6m -I/usr/local/include
+
+clean : 
+	rm -rf ribtol.o ribtol.so
diff --git a/lib/ribtol/ribtol.cpp b/lib/ribtol/ribtol.cpp
new file mode 100644
index 0000000..148b0d3
--- /dev/null
+++ b/lib/ribtol/ribtol.cpp
@@ -0,0 +1,81 @@
+// fast conversion of bulk Richardson number to Obukhov length
+
+#include 
+#include 
+#include 
+using namespace std;
+
+// Integrated stability function for momentum, psi_m(zeta), with
+// zeta = z/L the dimensionless stability parameter.
+inline double psim(double zeta)
+{
+  double psim;
+  double x;
+  if(zeta <= 0.)
+  {
+    //x     = (1. - 16. * zeta) ** (0.25)
+    //psim  = 3.14159265 / 2. - 2. * arctan(x) + log( (1.+x) ** 2. * (1. + x ** 2.) / 8.)
+    // unstable branch -- NOTE(review): this form differs from the
+    // commented-out Businger-Dyer expression above and from the one in
+    // ribtol.pyx; confirm which formulation is intended.
+    x    = pow(1. + pow(3.6 * abs(zeta),2./3.), -0.5);
+    psim = 3. * log( (1. + 1. / x) / 2.);
+  }
+  else
+  {
+    // stable branch (Beljaars-Holtslag (1991)-type expression)
+    psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35;
+  }
+  return psim;
+}
+    
+// Integrated stability function for heat/scalars, psi_h(zeta), with
+// zeta = z/L the dimensionless stability parameter.
+inline double psih(double zeta)
+{
+  double psih;
+  double x;
+  if(zeta <= 0.)
+  {
+    // x     = (1. - 16. * zeta) ** (0.25)
+    // psih  = 2. * log( (1. + x ** 2.) / 2. )
+    // unstable branch -- NOTE(review): differs from the commented-out
+    // Businger-Dyer expression above and from ribtol.pyx; confirm.
+    x     = pow(1. + pow(7.9 * abs(zeta), (2./3.)), -0.5);
+    psih  = 3. * log( (1. + 1. / x) / 2.);
+  }
+  else
+  {
+    // stable branch (Beljaars-Holtslag (1991)-type expression)
+    psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - pow(1. + (2./3.) * zeta, 1.5) - (10./3.) / 0.35 + 1.;
+  }
+  return psih;
+}
+
+
+// Convert a bulk Richardson number to the Obukhov length L [m].
+//
+//   Rib : bulk Richardson number [-]
+//   zsl : surface-layer height [m]
+//   z0m : roughness length for momentum [m]
+//   z0h : roughness length for heat/scalars [m]
+//
+// Solves Rib = zsl/L * F_h(L) / F_m(L)^2 for L by Newton iteration,
+// approximating the derivative with a finite difference over +/-0.1%
+// of L. The sign of L is fixed by the initial guess (stable Rib > 0:
+// L > 0; unstable: L < 0). Converges when |L - L0| <= 0.001 m.
+double ribtol(double Rib, double zsl, double z0m, double z0h)
+{
+  double L, L0;
+  double Lstart, Lend;
+  double fx, fxdif;
+
+  if(Rib > 0.)
+  {
+    L    = 1.;
+    L0   = 2.;
+  }
+  else
+  {
+    L  = -1.;
+    L0 = -2.;
+  }
+    
+  while (abs(L - L0) > 0.001)
+  {
+    L0      = L;
+    fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / pow(log(zsl / z0m) - psim(zsl / L) + psim(z0m / L), 2.);
+    Lstart  = L - 0.001 * L;
+    Lend    = L + 0.001 * L;
+    fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / pow(log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart), 2.)) - (-zsl / Lend * (log(zsl / z0h) - psih(zsl / Lend) + psih(z0h / Lend)) / pow(log(zsl / z0m) - psim(zsl / Lend) + psim(z0m / Lend), 2.)) ) / (Lstart - Lend);
+    L       = L - fx / fxdif;
+
+    // BUGFIX: stop when L diverges towards near-neutral conditions
+    // (|L| -> infinity). The Cython twin of this routine (ribtol.pyx)
+    // has this safeguard; without it this loop can iterate forever
+    // when the Newton step fails to converge.
+    if (abs(L) > 1.e15)
+      break;
+  }
+  
+  return L;
+
+}
+
+// Expose ribtol() to Python as module 'ribtol' via Boost.Python.
+BOOST_PYTHON_MODULE(ribtol)
+{
+    using namespace boost::python;
+    def("ribtol", ribtol);
+}
+
diff --git a/lib/ribtol/ribtol.pyx b/lib/ribtol/ribtol.pyx
new file mode 100644
index 0000000..e11a147
--- /dev/null
+++ b/lib/ribtol/ribtol.pyx
@@ -0,0 +1,48 @@
+#cython: boundscheck=False
+#cython: wraparound=False
+
+from libc.math cimport atan, log, exp, fabs
+
+# Integrated stability function for momentum, psi_m(zeta), zeta = z/L.
+# Unstable branch: Businger-Dyer form; stable branch:
+# Beljaars-Holtslag (1991)-type expression.
+cdef double psim(double zeta):
+    cdef double x, psim
+
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psim  = 3.14159265 / 2. - 2. * atan(x) + log((1. + x)**2. * (1. + x**2.) / 8.)
+    else:
+        psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
+    return psim
+      
+# Integrated stability function for heat/scalars, psi_h(zeta), zeta = z/L.
+# Unstable branch: Businger-Dyer form; stable branch:
+# Beljaars-Holtslag (1991)-type expression.
+# NOTE(review): unlike psim(), x and psih are not declared 'cdef double'
+# here, so they are handled as Python objects -- works, but slower;
+# confirm whether typing them was intended.
+cdef double psih(double zeta):
+    if(zeta <= 0):
+        x     = (1. - 16. * zeta)**(0.25)
+        psih  = 2. * log( (1. + x*x) / 2.)
+    else:
+        psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
+    return psih
+
+def ribtol(double Rib, double zsl, double z0m, double z0h): 
+    """Convert a bulk Richardson number Rib to the Obukhov length L [m].
+
+    Rib : bulk Richardson number [-]
+    zsl : surface-layer height [m]
+    z0m : roughness length for momentum [m]
+    z0h : roughness length for scalars [m]
+
+    Newton iteration with a finite-difference derivative over +/-0.1%
+    of L; the sign of L is fixed by the sign of Rib. Stops when L
+    changes by less than 0.001 m, or when |L| exceeds 1e15 (diverging
+    towards the neutral limit).
+    """
+    cdef double L, L0, fx, Lstart, Lend, fxdif
+
+    if(Rib > 0.):
+        L    = 1.
+        L0   = 2.
+    else:
+        L  = -1.
+        L0 = -2.
+    
+    while (fabs(L - L0) > 0.001):
+        L0      = L
+        fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
+        Lstart  = L - 0.001*L
+        Lend    = L + 0.001*L
+        fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
+                                      (log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
+                  - (-zsl /  Lend   * (log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
+                                      (log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
+        L       = L - fx / fxdif
+
+        # safeguard against divergence to near-neutral conditions
+        if(fabs(L) > 1e15):
+            break
+
+    return L
diff --git a/lib/ribtol/ribtol_hw.py b/lib/ribtol/ribtol_hw.py
new file mode 100644
index 0000000..1946cc8
--- /dev/null
+++ b/lib/ribtol/ribtol_hw.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jan 12 10:46:20 2018
+
+@author: vsc42247
+"""
+
+
+
+# purpose of calc_cm_ch: calculate momentum and thermal turbulent diffusion coefficients of the surface layer with a non-iterative procedure (Wouters et al., 2012)
+
+# input:
+
+# zrib = bulk Richardson number = (g/T)* DT * z/(ua^2)
+#   with:
+#     g = 9.81 m/s2 the gravitational acceleration
+#     z = height (in meters) of the surface layer under consideration 
+#     T = (reference) temperature (in Kelvin) at height z 
+#     DT = (T - T_s) = temperature (in Kelvin) gradient between the surface and height z 
+#     u_a^2 = u^2 +  v^2 is the squared horizontal absolute wind speed 
+# zzz0m = ratio z/z0 between the height z and the momentum roughness length z0m
+# zkbm = ln(z0m/z0h), with z0m, z0h the momentum and thermal roughness length, respectively.
+
+# output: diffusion coefficients (CM and CH) which can be used to determine surface-layer turbulent transport
+# u'w' = - CM ua^2.
+# w'T' = - CH ua DT 
+
+
+# Reference:
+# Wouters, H., De Ridder, K., and Lipzig, N. P. M.: Comprehensive
+# Parametrization of Surface-Layer Transfer Coefficients for Use
+# in Atmospheric Numerical Models, Bound.-Lay. Meteorol., 145,
+# 539–550, doi:10.1007/s10546-012-9744-3, 2012.
+
+import numpy as np
+
+def calc_cm_ch (zeta,zzz0m,zkbm):
+    krm = 0.4
+
+    #ZETA = zeta_hs2(zrib,zzz0m,zkbm)
+    FUNM,FUNH = funcsche(ZETA,zzz0m,zkbm)
+    CM = krm**2.0/FUNM/FUNM
+    CH = krm**2.0/FUNM/FUNH
+
+    # FUNMn,FUNHn = funcsche(0.,zzz0m,zkbm)
+    # CMn = krm**2.0/FUNMn/FUNMn
+    # CHn = krm**2.0/FUNMn/FUNHn
+
+    # print ZETA,FUNM,FUNH
+    # print 'CMCMN',CM/CMn
+    # print 'CHCHN',CH/CHn
+
+    return CM,CH
+
+
+def zeta_hs2(RiB,zzz0m,kBmin1):
+    """Stability parameter zeta = z/L from the bulk Richardson number.
+
+    Non-iterative empirical approximation (Wouters et al., 2012) with
+    separate fits for the unstable (RiB < 0) and stable (RiB >= 0)
+    regimes.
+
+    RiB    : bulk Richardson number [-]
+    kBmin1 : kB^-1 = ln(z0m/z0h) [-]
+    zzz0m  : ratio z/z0m [-]
+    """
+    #print(RiB,zzz0m,kBmin1)
+    # empirical fit constants
+    mum=2.59
+    muh=0.95
+    nu=0.5
+    lam=1.5
+
+    betah = 5.0
+
+    zzz0h = zzz0m*np.exp(kBmin1)
+    zzzs = zzz0m*0.06 # to be changed!! r. 101 nog bekijken!! (= check line 101)
+
+    L0M = np.log(zzz0m)
+    L0H = np.log(zzz0h)
+    # roughness-sublayer correction terms
+    facM = np.log(1.+lam/mum/zzzs)*np.exp(-mum*zzzs)/lam
+    facH = np.log(1.+lam/muh/zzzs)*np.exp(-muh*zzzs)/lam
+    L0Ms = L0M + facM 
+    L0Hs = L0H + facH
+
+    if RiB < 0.:
+        # unstable regime: polynomial fit in p = ln(1 - RiB)
+        p = np.log(1.-RiB)
+        Q = -0.486 +0.219*p - 0.0331*p**2-4.93*np.exp(-L0H) - 3.65/L0H +\
+            0.38*p/L0H+ 14.8/L0H/L0H-0.946*p/L0H/L0H-10.0/L0H**3+ \
+            0.392*L0M/L0H-0.084*p*L0M/L0H+0.368*L0M/L0H/L0H
+        # print 'p: ',p
+        # print 'Q: ',Q
+        zeta = (1. + p*Q)* L0Ms**2/L0Hs * RiB
+    else:
+        # stable regime: piecewise fit around the threshold (zetat, RiBt)
+        betam = 4.76+7.03/zzz0m +0.24*zzz0m/zzz0h # to be changed
+        # betam = 5.0 + 1.59*10.**(-5.)*(np.exp(13.0-L0M)-1.0) \
+        #         +0.24*(np.exp(-kBmin1)-1.0) # to be changed!!
+        # print('betam',betam)
+        lL0M = np.log(L0M)
+        S0Ms = 1.-1./zzz0m + (1.+nu/mum/zzzs)*facM
+        S0Hs = 1.-1./zzz0h + (1.+nu/muh/zzzs)*facH
+        zetat = -0.316-0.515*np.exp(-L0H) + 25.8 *np.exp(-2.*L0H) + 4.36/L0H \
+                -6.39/L0H/L0H+0.834*lL0M - 0.0267*lL0M**2
+        # print('zetat',zetat)
+        RiBt = zetat *(L0Hs+ S0Hs*betah*zetat)/(L0Ms+S0Ms*betam*zetat)**2 
+        # print('RiBt',RiBt)
+
+        if (RiB > RiBt):
+            # linear extension beyond the threshold
+            D = (L0Ms+S0Ms*betam*zetat)**3/\
+                (L0Ms*L0Hs+zetat*(2.*S0Hs * betah * L0Ms - S0Ms*betam*L0Hs))
+            zeta = zetat + D*(RiB-RiBt)
+        else:
+            r = RiB - S0Hs*betah/(S0Ms*betam)**2
+            B = S0Ms*betam*L0Hs- 2.*S0Hs*betah*L0Ms
+            C = 4.*(S0Ms*betam)**2 * L0Ms *(S0Hs*betah*L0Ms-S0Ms*betam*L0Hs)
+            zeta = - L0Ms / S0Ms/betam - B*C/(4.*(S0Ms*betam)**3 *(B**2+abs(C*r)))
+            if r != 0:
+                zeta = zeta + (B-np.sqrt(B**2+C*r) + B*C*r/(2.*(B**2+abs(C*r))))/(2.*(S0Ms*betam)**3*r)
+    # print('zeta',zeta)
+    return zeta
+
+def funcsche(zeta,zzz0,kBmin1):
+    """Integrated similarity functions (funm, funh) of the surface layer.
+
+    zeta   : stability parameter z/L [-]
+    zzz0   : ratio z/z0m [-]
+    kBmin1 : kB^-1 = ln(z0m/z0h) [-]
+
+    Returns (funm, funh), the log-law profiles corrected with the
+    integrated stability functions psi and a roughness-sublayer
+    (psistr) term; calc_cm_ch() turns them into CM = k^2/(funm*funm)
+    and CH = k^2/(funm*funh).
+
+    NOTE(review): mum/muh (2.5/0.9) differ slightly from the values
+    used in zeta_hs2 (2.59/0.95) -- confirm which set is intended.
+    """
+
+    mum=2.5
+    muh=0.9
+    nu=0.5
+    lam=1.5
+    
+    p2=3.141592/2.
+    
+    lnzzz0=np.log(zzz0)
+    zzzs=zzz0*0.06
+    zetamcorr=(1.+nu/(mum*zzzs))*zeta
+    zetam0=zeta/zzz0
+    zetahcorr=(1.+nu/(muh*zzzs))*zeta
+    zetah0=zeta/(zzz0*np.exp(kBmin1))
+    
+    if (zeta <= 0.):
+    
+        # unstable branch: Businger-Dyer-type psi functions
+        gamma=15.2
+        alfam=0.25
+        xx=(1.-gamma*zeta)**alfam
+        psim=2.*np.log((1.+xx)/2.)+np.log((1.+xx**2.)/2.)-2.*np.arctan(xx)+p2
+        xx0=(1.-gamma*zetam0)**alfam
+        psim0=2.*np.log((1.+xx0)/2.)+np.log((1.+xx0**2.)/2.)-2.*np.arctan(xx0)+p2
+        phimcorr=(1.-gamma*zetamcorr)**(-alfam)
+        
+        alfah=0.5
+        yy=(1.-gamma*zeta)**alfah
+        psih=2.*np.log((1.+yy)/2.)
+        yy0=(1.-gamma*zetah0)**alfah
+        psih0=2.*np.log((1.+yy0)/2.)
+        phihcorr=(1.-gamma*zetahcorr)**(-alfah)
+    else: 
+    
+        # stable branch
+        aa=6.1
+        bb=2.5
+        psim=-aa*np.log(zeta+(1.+zeta**bb)**(1./bb))
+        psim0=-aa*np.log(zetam0+(1.+zetam0**bb)**(1./bb))
+        phimcorr=1.+aa*(zetamcorr+zetamcorr**bb*(1.+zetamcorr**bb)**((1.-bb)/bb))/(zetamcorr+(1.+zetamcorr**bb)**(1./bb))
+        
+        cc=5.3
+        dd=1.1
+        psih=-cc*np.log(zeta+(1.+zeta**dd)**(1./dd))
+        psih0=-cc*np.log(zetah0+(1.+zetah0**dd)**(1./dd))
+        phihcorr=1.+cc*(zetahcorr+zetahcorr**dd*(1.+zetahcorr**dd)**((1.-dd)/dd))/(zetahcorr+(1.+zetahcorr**dd)**(1./dd))
+    
+    # roughness-sublayer corrections
+    psistrm=phimcorr*(1./lam)*np.log(1.+lam/(mum*zzzs))*np.exp(-mum*zzzs)
+    psistrh=phihcorr*(1./lam)*np.log(1.+lam/(muh*zzzs))*np.exp(-muh*zzzs)
+    
+    funm=lnzzz0-psim+psim0 +psistrm
+    funh=lnzzz0+kBmin1-psih+psih0 +psistrh
+    return funm,funh
+
diff --git a/lib/ribtol/setup.py b/lib/ribtol/setup.py
new file mode 100644
index 0000000..bfb44db
--- /dev/null
+++ b/lib/ribtol/setup.py
@@ -0,0 +1,12 @@
+# build with "python setup.py build_ext --inplace"
+from distutils.core import setup
+from distutils.extension import Extension
+from Cython.Build import cythonize
+import numpy as np
+import os
+
+os.environ["CC"] = "g++-7"
+
+setup(
+    ext_modules = cythonize((Extension("ribtol", sources=["ribtol.pyx"], include_dirs=[np.get_include()], ), ))
+)
diff --git a/setup.py b/setup.py
index bfb44db..a806fa0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,9 @@
-# build with "python setup.py build_ext --inplace"
 from distutils.core import setup
-from distutils.extension import Extension
-from Cython.Build import cythonize
-import numpy as np
-import os
-
-os.environ["CC"] = "g++-7"
 
 setup(
-    ext_modules = cythonize((Extension("ribtol", sources=["ribtol.pyx"], include_dirs=[np.get_include()], ), ))
+        name='class4gl',
+        version='0.1dev',
+        packages=['lib','bin'],
+        license='GPLv3 licence',
+        long_description=open('README.md').read(),
 )
diff --git a/trash/data_ground.py b/trash/data_ground.py
new file mode 100644
index 0000000..d4e0b5a
--- /dev/null
+++ b/trash/data_ground.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Nov  7 10:51:03 2017
+
+@author: vsc42247
+
+Purpose: Set surface conditions for the CLASS boundary-layer model
+"""
+
+
+import netCDF4 as nc4
+import numpy as np
+import datetime as dt
+#you can install with
+import pynacolada as pcd
+import pandas as pd
+
def get_class4gl_ground(class_settings, **kwargs):
    """Fill *class_settings* in place with surface/ground forcing for CLASS.

    Each data source is handled in its own section, gated by a keyword flag:

    - ``IGBPDIS``       soil hydraulic limits (wsat, wfc, wwilt)
    - ``GLEAM``         soil moisture (wg, w2)
    - ``MOD44B``        vegetation fraction (cveg)
    - ``DSMW``          soil thermal / force-restore parameters
    - ``CERES``         cloud cover (cc)
    - ``GIMMS``         leaf area index (LAI)
    - ``IGBPDIS_ALPHA`` surface albedo (alpha)
    - ``ERAINT_ST``     soil temperatures (Tsoil, T2)
    - ``GLAS``          roughness lengths (z0m, z0h)

    Parameters
    ----------
    class_settings :
        object holding at least ``lat``, ``lon`` and ``datetime`` attributes
        (plus ``runtime`` for CERES and ``cveg`` for GIMMS); the selected
        fields are written into its ``__dict__`` in place.
    **kwargs :
        when empty, every section runs; otherwise only the sections whose
        key is present and truthy (e.g. ``GLEAM=True``) run.
    """

    def _enabled(key):
        # run a section when no flags are given at all, or when the section
        # is explicitly switched on
        return (kwargs == {}) or ((key in kwargs) and kwargs[key])

    def _grid_index(input_nc, latname='lat', lonname='lon'):
        # nearest-neighbour grid selection as used throughout this module:
        # last grid latitude >= target, first grid longitude >= target
        ilat = np.where(input_nc.variables[latname][:] >= class_settings.lat)[0][-1]
        ilon = np.where(input_nc.variables[lonname][:] >= class_settings.lon)[0][0]
        return ilat, ilon

    if _enabled("IGBPDIS"):

        # NOTE(review): the IGBP-DIS files are indexed [lon, lat] below,
        # unlike most other datasets in this function -- kept as original.
        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
        print('reading soil water saturation from '+input_fn)
        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)
        class_settings.__dict__['wsat'] = input_nc.variables['wsat'][ilon,ilat]
        input_nc.close()

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc"
        print('reading soil water field capacity from '+input_fn)
        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)
        class_settings.__dict__['wfc'] = input_nc.variables['wfc'][ilon,ilat]
        input_nc.close()

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc"
        print('reading soil wilting point from '+input_fn)
        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)
        class_settings.__dict__['wwilt'] = input_nc.variables['wwp'][ilon,ilat]
        input_nc.close()

    if _enabled("GLEAM"):

        gleam_path = "/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/"
        # fixed: the original print had the concatenation inside the string
        # literal ('... "+str(class_settings,datetime.year)+" ...'), so it
        # printed that text verbatim instead of the year
        print('reading soil-water content for '
              + str(class_settings.datetime.year) + ' from ' + gleam_path)

        gleam_files = {}
        gleam_vars = ['SMroot','SMsurf']
        for VAR in gleam_vars:
            gleam_files[VAR] = nc4.Dataset(gleam_path+'/'+str(class_settings.datetime.year)+'/'+VAR+'_'+str(class_settings.datetime.year)+'_GLEAM_v3.1a.nc','r')

        day = class_settings.datetime.day
        ilat, ilon = _grid_index(gleam_files['SMsurf'])

        # GLEAM files are daily; day-of-month is used as the time index here
        # NOTE(review): this looks like it should be day-of-year -- confirm
        VAR = 'SMsurf'; class_settings.wg = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
        VAR = 'SMroot'; class_settings.w2 = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]

        for VAR in gleam_vars:
            gleam_files[VAR].close()

    if _enabled("MOD44B"):

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc"
        print('initializing vegetation fraction from '+input_fn)
        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)
        class_settings.__dict__['cveg'] = input_nc.variables['fv'][ilon,ilat]
        input_nc.close()

    if _enabled("DSMW"):
        # Procedure for the soil thermal properties:
        # 1. determine soil texture from DSMW
        # 2. soil type with look-up table (according to DWD/EXTPAR)
        # 3. thermal properties used in the force-restore method (Clapp and
        #    Hornberger, 1987) with parameter look-up table from Noilhan and
        #    Planton (1989).  Note: the look-up table is inspired on DWD/COSMO

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc"
        print("deriving soil thermal properties for the force-restore methods from the soil texture file "+ input_fn)

        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)
        DSMW = input_nc.variables['DSMW'][ilat,ilon]

        # EXTPAR: zfine = soil_texslo(soil_unit)%tex_fine
        SP = {}; SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code']
        for SPKEY in SPKEYS:
            SP[SPKEY] = np.array(input_nc.variables[SPKEY][DSMW])
        input_nc.close()

        # texture index: coarse counts fully, medium half, fine not at all
        SP['texture'] = (0.5*SP['tex_medium']+1.0*SP['tex_coarse']) /(SP['tex_coarse']+SP['tex_medium']+SP['tex_fine'])

        if pd.isnull(SP['texture']):
            print('Warning, texture is invalid> Setting to Ocean')
            SP['itex'] = 9
        else:
            SP['itex'] = int(SP['texture']*100)

        # adopted from mo_agg_soil.f90 (EXTPAR3.0)
        # np.int/np.float were removed in numpy >= 1.24 -> use the builtins
        SP['isoil'] = np.zeros_like(SP['itex'],dtype=int)
        LOOKUP = [
                  [0 ,7],# fine textured, clay (soil type 7)
                  [20,6],# medium to fine textured, loamy clay (soil type 6)
                  [40,5],# medium textured, loam (soil type 5)
                  [60,4],# coarse to medium textured, sandy loam (soil type 4)
                  [80,3],# coarse textured, sand (soil type 3)
                ]
        for iitex,iisoil in LOOKUP:
            SP['isoil'][SP['itex'] >= iitex ] = iisoil

        # special DSMW legend codes override the texture-derived soil type
        # (adopted from mo_agg_soil.f90, EXTPAR3.0)
        LOOKUP = [
                  [9001, 1 ], # ice, glacier (soil type 1)
                  [9002, 2 ], # rock, lithosols (soil type 2)
                  [9003, 3 ], # salt, set soiltype to sand (soil type 3)
                  [9004, 8 ], # histosol, e.g. peat (soil type 8)
                  [9,    9 ], # undefined (ocean)
                  [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
                  [9000, 9 ], # undefined (inland lake)
                  [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
                  [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
                ]
        # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
        for icode,iisoil in LOOKUP:
            SP['isoil'][SP['code'] == icode] = iisoil

        # adopted from data_soil.f90 (COSMO5.0)
        SP_LOOKUP = {
          # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
          # (by index)                                           loam                    loam                                water      ice
          'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
          'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
          'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
          'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
          'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
          'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
          'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
          'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
          'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
          'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
          'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
          'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
          'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
          'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
          'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
          'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
          'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
          'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
          'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
          # supplement: Noilhan and Planton 1989 soil texture parameters for the force-restore method.
          'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , np.nan    , np.nan   ,  np.nan  ],
          # error in table 2 of NP89: values need to be multiplied by e-6
          'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
          'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , np.nan    , np.nan   ,  np.nan  ],
          'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , np.nan    , np.nan   ,  np.nan  ],
          'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , np.nan    , np.nan   ,  np.nan  ],
          'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , np.nan    , np.nan   ,  np.nan  ],
        }

        for SPKEY in SP_LOOKUP.keys():
            SP[SPKEY] = np.zeros_like(SP['isoil'],dtype=float)

        for i in range(11):
            SELECT = (SP['isoil'] == i)
            for SPKEY in SP_LOOKUP.keys():
                SP[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]

        # only the last 6 entries (b, CGsat, p, a, C1sat, C2ref) are the
        # Noilhan & Planton force-restore parameters passed on to CLASS
        for SPKEY in list(SP_LOOKUP.keys())[-6:]:
            class_settings.__dict__[SPKEY] = float(SP[SPKEY])

    if _enabled("CERES"):

        # CERES SYN1deg subsets are stored in 61-day chunks starting 2000-03-01
        CERES_start_date = dt.datetime(2000,3,1)
        DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
        DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
        print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)

        input_nc = nc4.Dataset(input_fn,'r')

        # first time index at or after the simulation start
        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
        ilat, ilon = _grid_index(input_nc)
        print(class_settings.lat,class_settings.lon)

        # mean hourly cloud-area fraction over the simulation window, % -> [0,1]
        class_settings.__dict__['cc'] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:(idatetime+class_settings.runtime),ilat,ilon])/100.

        input_nc.close()

    if _enabled("GIMMS"):

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
        print("Reading Leaf Area Index from "+input_fn)

        input_nc = nc4.Dataset(input_fn,'r')
        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
        ilat, ilon = _grid_index(input_nc)

        # divide by cveg, since the value only reflects the LAI of the
        # vegetated fraction and not of the entire (satellite) grid cell
        print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
        tarray = np.array(input_nc.variables['LAI'][:,ilat,ilon])/class_settings.cveg

        if np.isnan(tarray[idatetime]):
            print("interpolating GIMMS cveg nan value")
            mask = np.isnan(tarray)
            # only gap-fill when less than 25% of the time series is missing
            if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
                tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
            else:
                print("Warning. Could not interpolate GIMMS cveg nan value")

        class_settings.__dict__['LAI'] = tarray[idatetime]
        input_nc.close()

    if _enabled("IGBPDIS_ALPHA"):

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
        print("Reading albedo from "+input_fn)

        input_nc = nc4.Dataset(input_fn,'r')
        ilat, ilon = _grid_index(input_nc)

        # land-cover fractions (presumably Water, Bare, Herbaceous, Tall
        # Canopy -- TODO confirm against the dataset documentation)
        landfr = {}
        for ltype in ['W','B','H','TC']:
            landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]

        # per-cover albedo values; the cell albedo is the weighted mean
        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
        alpha = 0.
        for ltype in landfr.keys():
            alpha += landfr[ltype]*aweights[ltype]

        class_settings.__dict__['alpha'] = alpha
        input_nc.close()

    if _enabled("ERAINT_ST"):

        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
        print("Reading soil temperature from "+input_fn)

        input_nc = nc4.Dataset(input_fn,'r')
        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
        ilat, ilon = _grid_index(input_nc,'latitude','longitude')
        class_settings.__dict__['Tsoil'] = input_nc.variables['stl1'][idatetime,ilat,ilon]
        # fixed: the original re-bound input_nc without closing, leaking the
        # stl1 file handle
        input_nc.close()

        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
        input_nc = nc4.Dataset(input_fn,'r')
        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
        ilat, ilon = _grid_index(input_nc,'latitude','longitude')
        class_settings.__dict__['T2'] = input_nc.variables['stl2'][idatetime,ilat,ilon]
        input_nc.close()

    if _enabled("GLAS"):

        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
        print("Reading canopy height for determining roughness length from "+input_fn)

        input_nc = nc4.Dataset(input_fn,'r')

        # NOTE: this dataset intentionally takes the FIRST latitude >= target
        # ([0][0], not [0][-1] as in _grid_index) -- kept as in the original
        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]

        # z0m is taken as canopy height / 10 (common rule of thumb --
        # TODO confirm units of Band1)
        testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.

        lowerlimit = 0.01
        if testval < lowerlimit:
            print('forest canopy height very very small. We take a value of '+str(lowerlimit))
            class_settings.__dict__['z0m'] = lowerlimit
        else:
            class_settings.__dict__['z0m'] = testval

        # heat roughness length taken as z0m / 10
        class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.

        input_nc.close()
+        

From 54b4cc3e68a7ce66d52062af6d4d9a4f9b08ee4e Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 21 Aug 2018 21:54:14 +0200
Subject: [PATCH 008/129] restructure

---
 MANIFEST                                      |  14 +-
 build/lib/bin/__init__.py                     |   1 -
 build/lib/{lib => class4gl}/__init__.py       |   0
 build/lib/{lib => class4gl}/class4gl.py       |   0
 build/lib/{lib => class4gl}/data_air.py       |   0
 build/lib/{lib => class4gl}/data_global.py    |   0
 .../{lib => class4gl}/interface_functions.py  |   0
 .../lib/{lib => class4gl}/interface_multi.py  |   0
 build/lib/{lib => class4gl}/model.py          |   0
 class4gl/setup/batch_setup_global.py          |  42 +
 class4gl/setup/setup_bllast.py                | 719 +++++++++++++++++
 class4gl/setup/setup_global.py                | 310 ++++++++
 class4gl/setup/setup_goamazon.py              | 740 ++++++++++++++++++
 class4gl/setup/setup_humppa.py                | 732 +++++++++++++++++
 class4gl/setup/trash/setup_global_old.py      | 284 +++++++
 class4gl/simulations/batch_simulations.py     |  77 ++
 class4gl/simulations/runmodel.py              | 130 +++
 class4gl/simulations/simulations.py           | 260 ++++++
 class4gl/simulations/simulations_iter.py      | 364 +++++++++
 class4gl/simulations/simulations_iter_test.py | 367 +++++++++
 class4gl/simulations/trash/run_test.py        | 241 ++++++
 setup.py                                      |   2 +-
 22 files changed, 4274 insertions(+), 9 deletions(-)
 rename build/lib/{lib => class4gl}/__init__.py (100%)
 rename build/lib/{lib => class4gl}/class4gl.py (100%)
 rename build/lib/{lib => class4gl}/data_air.py (100%)
 rename build/lib/{lib => class4gl}/data_global.py (100%)
 rename build/lib/{lib => class4gl}/interface_functions.py (100%)
 rename build/lib/{lib => class4gl}/interface_multi.py (100%)
 rename build/lib/{lib => class4gl}/model.py (100%)
 create mode 100644 class4gl/setup/batch_setup_global.py
 create mode 100644 class4gl/setup/setup_bllast.py
 create mode 100644 class4gl/setup/setup_global.py
 create mode 100644 class4gl/setup/setup_goamazon.py
 create mode 100644 class4gl/setup/setup_humppa.py
 create mode 100644 class4gl/setup/trash/setup_global_old.py
 create mode 100644 class4gl/simulations/batch_simulations.py
 create mode 100644 class4gl/simulations/runmodel.py
 create mode 100644 class4gl/simulations/simulations.py
 create mode 100644 class4gl/simulations/simulations_iter.py
 create mode 100644 class4gl/simulations/simulations_iter_test.py
 create mode 100644 class4gl/simulations/trash/run_test.py

diff --git a/MANIFEST b/MANIFEST
index 534f2f4..1dde1bb 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -1,10 +1,10 @@
 # file GENERATED by distutils, do NOT edit
 setup.py
 bin/__init__.py
-lib/__init__.py
-lib/class4gl.py
-lib/data_air.py
-lib/data_global.py
-lib/interface_functions.py
-lib/interface_multi.py
-lib/model.py
+class4gl/__init__.py
+class4gl/class4gl.py
+class4gl/data_air.py
+class4gl/data_global.py
+class4gl/interface_functions.py
+class4gl/interface_multi.py
+class4gl/model.py
diff --git a/build/lib/bin/__init__.py b/build/lib/bin/__init__.py
index a21583b..58f6cca 100644
--- a/build/lib/bin/__init__.py
+++ b/build/lib/bin/__init__.py
@@ -1,4 +1,3 @@
-from . import model,class4gl,interface_multi,data_air,data_global
 
 __version__ = '0.1.0'
 
diff --git a/build/lib/lib/__init__.py b/build/lib/class4gl/__init__.py
similarity index 100%
rename from build/lib/lib/__init__.py
rename to build/lib/class4gl/__init__.py
diff --git a/build/lib/lib/class4gl.py b/build/lib/class4gl/class4gl.py
similarity index 100%
rename from build/lib/lib/class4gl.py
rename to build/lib/class4gl/class4gl.py
diff --git a/build/lib/lib/data_air.py b/build/lib/class4gl/data_air.py
similarity index 100%
rename from build/lib/lib/data_air.py
rename to build/lib/class4gl/data_air.py
diff --git a/build/lib/lib/data_global.py b/build/lib/class4gl/data_global.py
similarity index 100%
rename from build/lib/lib/data_global.py
rename to build/lib/class4gl/data_global.py
diff --git a/build/lib/lib/interface_functions.py b/build/lib/class4gl/interface_functions.py
similarity index 100%
rename from build/lib/lib/interface_functions.py
rename to build/lib/class4gl/interface_functions.py
diff --git a/build/lib/lib/interface_multi.py b/build/lib/class4gl/interface_multi.py
similarity index 100%
rename from build/lib/lib/interface_multi.py
rename to build/lib/class4gl/interface_multi.py
diff --git a/build/lib/lib/model.py b/build/lib/class4gl/model.py
similarity index 100%
rename from build/lib/lib/model.py
rename to build/lib/class4gl/model.py
diff --git a/class4gl/setup/batch_setup_global.py b/class4gl/setup/batch_setup_global.py
new file mode 100644
index 0000000..4a3f623
--- /dev/null
+++ b/class4gl/setup/batch_setup_global.py
@@ -0,0 +1,42 @@
+
+
'''
Purpose:
    launch an array job that converts soundings and other global forcing
    data into class4gl input format

Usage:
    python start_setup_global.py

Author:
    Hendrik Wouters
'''

import pandas as pd
import os
import math
import numpy as np
import sys

# station list produced by the IGRA selection step; one array-job task is
# launched per station
odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
fn_stations = odir+'/igra-stations_sel.txt'
df_stations = pd.read_csv(fn_stations)

STNlist = list(df_stations.iterrows())
NUMSTNS = len(STNlist)
PROCS = len(STNlist)
print(PROCS)
# stations per task (1 here, since PROCS == NUMSTNS).
# np.float was removed in numpy >= 1.24; the builtin float is equivalent.
BATCHSIZE = math.ceil(float(NUMSTNS)/float(PROCS))
os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))

# alternative launcher (wsub), kept for reference:
#     STNlist = list(df_stations.iterrows())
#     NUMSTNS = len(STNlist)
#     PROCS = NUMSTNS
#     BATCHSIZE = 1
#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))
diff --git a/class4gl/setup/setup_bllast.py b/class4gl/setup/setup_bllast.py
new file mode 100644
index 0000000..af8c8bb
--- /dev/null
+++ b/class4gl/setup/setup_bllast.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+# Read data from the BLLAST campaign and convert it to class4gl input
+
+# WARNING: the raw sounding files mix tabs and spaces, so the following command needs to be executed first:
+#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+
+def replace_iter(iterable, search, replace):
+    for value in iterable:
+        value.replace(search, replace)
+        yield value
+
+from class4gl import blh,class4gl_input
+
+# definition of the BLLAST station
+current_station = pd.Series({ "latitude"  : 42.971834,
+                  "longitude" : 0.3671169,
+                  "name" : "the BLLAST experiment"
+                })
+current_station.name = 90001
+
+
+
+
+
+# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
+# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
+
+HOUR_FILES = \
+{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110619_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110620_1750_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110625_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110702_1655_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
+}
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+def esat(T):
+    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
+def efrom_rh100_T(rh100,T):
+    return esat(T)*rh100/100.
+def qfrom_e_p(e,p):
+    return epsilon * e/(p - (1.-epsilon)*e)
+
+def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
+                                     #widths=[14]*19,
+                                     #skiprows=9,
+                                     #skipfooter=15,
+                                     #decimal='.',
+                                     #header=None,
+                                     #names = columns,
+                                     #na_values='-----')
+        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
+        print(air_balloon_in.columns)
+        rowmatches = {
+            't':      lambda x: x['TaRad']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['Press']*100.,
+            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
+            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
+            'z':      lambda x: x['Altitude'] -582.,
+            # from virtual temperature to absolute humidity
+            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #lower limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has another definition than the
+            # variable theta that we need which should be the temperature that
+            # one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+        # filter data so that potential temperature always increases with
+        # height 
+        cols = []
+        for column in air_ap_tail.columns:
+            #if column != 'z':
+                cols.append(column)
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        # 
+        # # we copy the pressure at ground level from balloon sounding. The
+        # # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # WARNING / WORKAROUND: even though we actually write local solar
+        # time, we need to assign the timezone as UTC (which is WRONG!).
+        # Otherwise ruby cannot understand it (it always converts to local
+        # computer time otherwise).
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise']+dt.timedelta(hours=2))\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        print('ldatetime_daylight',dpars['ldatetime_daylight'])
+        print('ldatetime',dpars['ldatetime'])
+        print('lSunrise',dpars['lSunrise'])
+        dpars['day'] = dpars['ldatetime'].day
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        print('tstart',dpars['tstart'])
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='bllast',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cache the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
+    
+    print(humpafn)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+
+    
+    c4gli_morning.pars.sw_ac = []
+    c4gli_morning.pars.sw_ap = True
+    c4gli_morning.pars.sw_lit = False
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = records_morning.index
+# 
+# 
+# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cache the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/setup_global.py b/class4gl/setup/setup_global.py
new file mode 100644
index 0000000..79224d9
--- /dev/null
+++ b/class4gl/setup/setup_global.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <i>
+    where <i> is an integer indicating the row index of the station list
+    under odir+'/'+fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
# calculate the root mean square error
def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
    """Root mean squared error between a reference and a prediction.

        INPUT:
            y_actual: reference dataset
            y_predicted: predicting dataset; either sampled at z_predicted,
                         or a single value (eg., a mixed-layer estimate)
            z_actual: coordinate values of reference dataset
            z_predicted: coordinate values of the predicting dataset; when
                         both coordinates are given, y_predicted is linearly
                         interpolated onto z_actual before comparison

            filternan_actual: throw away reference values that have nans
                              (and their coordinates)

        OUTPUT:
            scalar root-mean-squared error

        RAISES:
            ValueError: when only one of z_actual/z_predicted is given
    """

    y_actual_temp = np.array(y_actual)
    y_predicted_temp = np.array(y_predicted)

    if z_actual is not None:
        z_actual_temp = np.array(z_actual)
    else:
        z_actual_temp = None

    if filternan_actual:
        # bug fix: build the mask BEFORE filtering y, so the same mask can
        # be applied to the coordinates (the previous version masked z with
        # the isnan of the already-filtered y, a wrong-length index)
        valid = ~np.isnan(y_actual_temp)
        y_actual_temp = y_actual_temp[valid]
        if z_actual_temp is not None:
            z_actual_temp = z_actual_temp[valid]

    if ((z_actual_temp is not None) or (z_predicted is not None)):
        if (z_actual_temp is None) or (z_predicted is None):
            raise ValueError('Input z_actual and z_predicted need '
                             'to be specified simultaneously.')
        # evaluate the prediction at the reference coordinates
        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)

    else:
        # this catches the situation that y_predicted is a single value (eg.,
        # which is the case for evaluating eg., mixed-layer estimates)
        y_predicted_temp = y_actual_temp*0. + y_predicted_temp

    # plain-numpy RMSE; equivalent to
    # sqrt(sklearn.metrics.mean_squared_error(...)) but without the
    # sklearn dependency
    return np.sqrt(np.mean((y_actual_temp - y_predicted_temp)**2))
+
+
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
# initialize global data ...
globaldata = data_global()
# ...  and load initial data pages
globaldata.load_datasets(recalc=0)

# read the list of stations with valid ground data (list generated with
# get_valid_stations.py)
idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"

df_stations = pd.read_csv(fn_stations)


STNlist = list(df_stations.iterrows())
NUMSTNS = len(STNlist)
PROCS = 100                # number of parallel pbs jobs (see setup_global.pbs)
BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))


# index of this job's batch in the station list (passed by the pbs script)
iPROC = int(sys.argv[1])


# each job processes one batch of stations: for every station we scan all
# Wyoming soundings chronologically and dump valid morning/afternoon pairs
# to the station's yaml files
for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:
    # becomes True as soon as at least one valid sounding pair is dumped
    one_run = False
# for iSTN,STN in STNlist[5:]:

    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"


    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
    #                   for EXP in experiments.keys()])

    with open(fnout,'w') as fileout, \
         open(fnout_afternoon,'w') as fileout_afternoon:
        # NOTE(review): the wyoming class of this commit has a zero-argument
        # constructor — confirm that data_soundings.wyoming accepts the
        # PATH/STNM keywords used here.
        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
        wy_strm.set_STNM(int(STN['ID']))

        # we consider all soundings after 1981
        wy_strm.find_first(year=1981)
        #wy_strm.find(dt.datetime(2004,10,19,6))

        c4gli = class4gl_input(debug_level=logging.INFO)
        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
        # so we continue as long as we can find a new sounding

        while wy_strm.current is not None:

            c4gli.clear()
            try:
                c4gli.get_profile_wyoming(wy_strm)
                #print(STN['ID'],c4gli.pars.datetime)
                #c4gli.get_global_input(globaldata)

                print(c4gli.pars.STNID, c4gli.pars.ldatetime)

                # quality criteria for accepting this as a morning sounding;
                # all of them must hold (checked via np.mean(...) == 1 below)
                logic = dict()
                # launched in the local morning ...
                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
                # ... at most 4 hours before daylight
                logic['daylight'] = \
                    ((c4gli.pars.ldatetime_daylight - 
                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)

                logic['springsummer'] = (c4gli.pars.theta > 278.)

                # we take 3000 because previous analysis (ie., HUMPPA) has
                # focussed towards such altitude
                le3000 = (c4gli.air_balloon.z <= 3000.)
                logic['10measurements'] = (np.sum(le3000) >= 10) 

                leh = (c4gli.air_balloon.z <= c4gli.pars.h)

                # the mixed-layer theta must represent the balloon samples
                # below h reasonably well (rmse < 1 K)
                logic['mlerrlow'] = (\
                        (len(np.where(leh)[0]) > 0) and \
                        # in cases where humidity is not defined, the mixed-layer
                        # values get corr
                        (not np.isnan(c4gli.pars.theta)) and \
                        (rmse(c4gli.air_balloon.theta[leh] , \
                              c4gli.pars.theta,filternan_actual=True) < 1.)\
                              )


                # uncertainty of the mixed-layer height must be small
                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)

                print('logic:', logic)
                # the result: equals 1.0 only when every criterion holds
                morning_ok = np.mean(list(logic.values()))
                print(morning_ok,c4gli.pars.ldatetime)

            except:
                # NOTE(review): bare except silently hides real errors;
                # consider catching specific exceptions and logging them
                morning_ok =False
                print('obtain morning not good')
            # the next sounding will be used either for an afternoon sounding
            # or for the morning sounding of the next day.
            wy_strm.find_next()
            # If the morning is ok, then we try to find a decent afternoon
            # sounding
            if morning_ok == 1.:
                print('MORNING OK!')
                # we get the current date
                current_date = dt.date(c4gli.pars.ldatetime.year, \
                                       c4gli.pars.ldatetime.month, \
                                       c4gli.pars.ldatetime.day)
                c4gli_afternoon.clear()
                print('AFTERNOON PROFILE CLEARED')
                try:
                    c4gli_afternoon.get_profile_wyoming(wy_strm)
                    print('AFTERNOON PROFILE OK')

                    if wy_strm.current is not None:
                        current_date_afternoon = \
                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
                                           c4gli_afternoon.pars.ldatetime.month, \
                                           c4gli_afternoon.pars.ldatetime.day)
                    else:
                        # a dummy date: this will be ignored anyway
                        current_date_afternoon = dt.date(1900,1,1)

                    # we will dump the latest afternoon sounding that fits the
                    # minimum criteria specified by logic_afternoon
                    print(current_date,current_date_afternoon)
                    c4gli_afternoon_for_dump = None
                    # scan all remaining soundings of the same local day
                    while ((current_date_afternoon == current_date) and \
                           (wy_strm.current is not None)):
                        logic_afternoon =dict()

                        # launched in the local afternoon ...
                        logic_afternoon['afternoon'] = \
                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
                        # ... and not after sunset
                        logic_afternoon['daylight'] = \
                          ((c4gli_afternoon.pars.ldatetime - \
                            c4gli_afternoon.pars.ldatetime_daylight \
                           ).total_seconds()/3600. <= 0.)


                        le3000_afternoon = \
                            (c4gli_afternoon.air_balloon.z <= 3000.)
                        logic_afternoon['5measurements'] = \
                            (np.sum(le3000_afternoon) >= 5) 

                        # we only store the last afternoon sounding that fits these
                        # minimum criteria

                        afternoon_ok = np.mean(list(logic_afternoon.values()))

                        print('logic_afternoon: ',logic_afternoon)
                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
                        if afternoon_ok == 1.:
                            # # doesn't work :(
                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
                            
                            # so we just create a new one from the same wyoming profile
                            c4gli_afternoon_for_dump = class4gl_input()
                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)

                        wy_strm.find_next()
                        c4gli_afternoon.clear()
                        c4gli_afternoon.get_profile_wyoming(wy_strm)

                        if wy_strm.current is not None:
                            current_date_afternoon = \
                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
                                           c4gli_afternoon.pars.ldatetime.month, \
                                           c4gli_afternoon.pars.ldatetime.day)
                        else:
                            # a dummy date: this will be ignored anyway
                            current_date_afternoon = dt.date(1900,1,1)

                        # Only in the case we have a good pair of soundings, we
                        # dump them to disk
                    if c4gli_afternoon_for_dump is not None:
                        # runtime = daylight time between the morning and the
                        # afternoon sounding, in seconds
                        c4gli.update(source='pairs',pars={'runtime' : \
                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
                                 c4gli.pars.datetime_daylight).total_seconds())})


                        print('ALMOST...')
                        if c4gli.pars.runtime > 18000.: # more than 5 hours simulation


                            c4gli.get_global_input(globaldata)
                            print('VERY CLOSE...')
                            if c4gli.check_source_globaldata() and \
                                (c4gli.check_source(source='wyoming',\
                                                   check_only_sections='pars')):
                                c4gli.dump(fileout)

                                c4gli_afternoon_for_dump.dump(fileout_afternoon)


                                # for keyEXP,dictEXP in experiments.items():
                                #     
                                #     c4gli.update(source=keyEXP,pars = dictEXP)
                                #     c4gl = class4gl(c4gli)
                                #     # c4gl.run()
                                #     
                                #     c4gl.dump(c4glfiles[key])

                                print('HIT!!!')
                                one_run = True
                except:
                    # NOTE(review): bare except; failures while scanning the
                    # afternoon soundings are silently skipped
                    print('get profile failed')

    if one_run:
        # cache the observation tables of this station for the interface
        STN.name = STN['ID']
        all_records_morning = get_records(pd.DataFrame([STN]),\
                                      odir,\
                                      subset='morning',
                                      refetch_records=True,
                                      )
        all_records_afternoon = get_records(pd.DataFrame([STN]),\
                                      odir,\
                                      subset='afternoon',
                                      refetch_records=True,
                                      )
    else:
        # no valid pair found: remove the (empty) output files
        # NOTE(review): os.remove would be safer than shelling out
        os.system('rm '+fnout)
        os.system('rm '+fnout_afternoon)

    # for c4glfile in c4glfiles:
    #     c4glfile.close()            

diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py
new file mode 100644
index 0000000..f9efe2c
--- /dev/null
+++ b/class4gl/setup/setup_goamazon.py
@@ -0,0 +1,740 @@
+# -*- coding: utf-8 -*-
+
+import xarray as xr
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+import glob
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
Rv         = 461.5                 # gas constant for water vapour [J kg-1 K-1]
epsilon = Rd/Rv # ratio of molar masses Mv/Md (~0.622)

# input directory holding the raw ARM netcdf soundings
path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
+
def replace_iter(iterable, search, replace):
    """Yield each string of *iterable* with *search* replaced by *replace*.

    Bug fix: the previous version called str.replace without using its
    return value (strings are immutable), so it yielded every string
    unchanged.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
# metadata of the GOAMAZON site (central Amazon); the numeric .name acts as
# a pseudo station ID in the output file names
current_station = pd.Series({ "latitude"  : -3.21,
                  "longitude" : -60.6,
                  "name" : "the GOAMAZON experiment"
                })
current_station.name = 90002

# we define the columns ourselves because it is a mess in the file itself.
# NOTE(review): these columns belong to the commented-out fixed-width text
# reader inside humppa_parser; the netcdf code path does not use them.
columns =\
['Time[min:sec]',
 'P[hPa]',
 'T[C]',
 'U[%]',
 'Wsp[m/s]',
 'Wdir[Grd]',
 'Lon[°]',
 'Lat[°]',
 'Altitude[m]',
 'GeoPot[m]',
 'MRI',
 'RI',    
 'DewPoint[C]',
 'Virt. Temp[C]',
 'Rs[m/min]',
 'D[kg/m3]',
 'Azimut[°]',
 'Elevation[°]',
 'Range[m]',
]
+
# Scan every day of the campaign period and pair up the ~05 UTC (morning)
# and ~17 UTC (afternoon) ARM sounding files; only days with a complete
# pair are kept.
DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)


ndays = int((DTEND - DTSTART).total_seconds()/3600./24.)
DTS = [DTSTART + dt.timedelta(days=day) for day in range(ndays)]


def _first_sounding_file(day, hour_pattern):
    """Return the first sounding file of *day* matching *hour_pattern* (glob), or None."""
    pattern = (path_soundings_in + '/maosondewnpnM1.b1.'
               + day.strftime("%Y%m%d") + '.' + hour_pattern + '.*.cdf')
    matches = glob.glob(pattern)
    return matches[0] if matches else None


HOUR_FILES = {}
for DT in DTS:
    morning_file = _first_sounding_file(DT, '05??00')
    afternoon_file = _first_sounding_file(DT, '17??00')

    # only keep days for which both launches exist; the floats are the
    # local launch hours used downstream
    if (morning_file is not None) and (afternoon_file is not None):
        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
                          'afternoon':[17.5,afternoon_file]}

print(HOUR_FILES)
+
+# HOUR_FILES = \
+# {
+#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
+#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+# }
+
+
+
+
# only include the following timeseries in the model output
# (bug fix: 'wthetae' appeared twice in the original list)
timeseries_only = \
['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'zlcl']
+
def esat(T):
    """Saturation vapour pressure [Pa] over water at temperature *T* [K]
    (Magnus/Murray-type formula)."""
    exponent = 17.2694 * (T - 273.16) / (T - 35.86)
    return 0.611e3 * np.exp(exponent)
def efrom_rh100_T(rh100,T):
    """Actual vapour pressure [Pa] from relative humidity *rh100* [%]
    and temperature *T* [K]."""
    saturation = esat(T)
    return saturation*rh100/100.
def qfrom_e_p(e,p):
    """Specific humidity [kg/kg] from vapour pressure *e* [Pa] and
    total pressure *p* [Pa]."""
    # partial pressure correction of the denominator
    denominator = p - (1.-epsilon)*e
    return epsilon * e/denominator
+
+
+
def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
    """Convert one GOAMAZON/ARM radiosounding into class4gl input and dump it.

    INPUT:
        balloon_file:  xarray Dataset of one maosondewnpnM1.b1.*.cdf sounding
        file_sounding: open (yaml) file handle the result is appended to
        ldate:         local (solar) date of the sounding
        lhour:         local (solar) launch hour, as float
        c4gli:         existing class4gl_input to update; when None, a new
                       one is created and the global ground data is attached
    OUTPUT:
        the (new or updated) class4gl_input object

    NOTE(review): the function name is inherited from the HUMPPA campaign
    scripts; here it actually parses GOAMAZON soundings.
    """
    print(balloon_file)

    xrin = balloon_file
    air_balloon = pd.DataFrame()

    # state variables converted to SI units
    air_balloon['t'] = xrin.tdry.values+273.15    # temperature [K]
    air_balloon['p'] = xrin.pres.values*100.      # pressure [Pa]

    air_balloon['u'] = xrin.u_wind.values
    air_balloon['v'] = xrin.v_wind.values
    air_balloon['WSPD'] = xrin['wspd'].values

    print(xrin.rh.values.shape)
    # specific humidity [kg/kg] from relative humidity [%]
    air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)

    # derived thermodynamic quantities; note that theta is referenced to the
    # surface (first-sample) pressure rather than to 1000 hPa
    rowmatches = {
        'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
        'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
        'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
        'rho': lambda x: x.p /x.t / x.R ,
    }
    for varname,lfunction in rowmatches.items():
        air_balloon[varname] = lfunction(air_balloon)

    print('alt in xrin?:','alt' in xrin)
    if 'alt' in xrin:
        air_balloon['z'] = xrin.alt.values
    else:
        # no altitude record in the file: integrate the hydrostatic equation
        # dz = -dp/(rho*g) with a trapezoidal average of the density.
        # (bug fix: the previous version omitted the 1/g factor, inflating
        # the heights by a factor of ~9.81)
        grav = 9.81
        air_balloon['z'] = 0.
        for irow,row in air_balloon.iloc[1:].iterrows():
            air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
                    2./(grav*(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow])) * \
                    (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])
        # (the derived quantities above do not depend on z, so no recompute
        # is needed here)

    dpars = {}
    dpars['longitude']  = current_station['longitude']
    dpars['latitude']  = current_station['latitude'] 

    dpars['STNID'] = current_station.name

    # there are issues with the lowest measurements (a steady, unrealistic
    # decrease of potential temperature), as in the HUMPPA campaign, so we
    # filter them away.  The scan is guarded so it cannot run past the end
    # of the profile (bug fix).
    ifirst = 0
    while ((ifirst+1) < len(air_balloon)) and \
          (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
        ifirst = ifirst+1
    print ('ifirst:',ifirst)
    air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)

    # a sample is valid when all fields are defined and it is above ground
    is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
    valid_indices = air_balloon.index[is_valid].values

    air_ap_mode='b'

    # mixed-layer height from the bulk-Richardson method, with an
    # uncertainty range [h_l, h_u] and a 10 m lower bound
    if len(valid_indices) > 0:
        print(air_balloon.z.shape,air_balloon.thetav.shape,)
        dpars['h'],dpars['h_u'],dpars['h_l'] =\
            blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
        dpars['h_b'] = np.max((dpars['h'],10.))
        dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
        dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
        dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
        dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
    else:
        dpars['h_u'] =np.nan
        dpars['h_l'] =np.nan
        dpars['h_e'] =np.nan
        dpars['h'] =np.nan

    # surface pressure = pressure of the lowest valid sample
    if ~np.isnan(dpars['h']):
        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
    else:
        dpars['Ps'] = np.nan

    if ~np.isnan(dpars['h']):

        # determine mixed-layer properties (moisture, potential
        # temperature, wind) as the mean of all valid samples below h,
        # falling back to the lowest sample(s) when there are fewer than 3
        is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
        valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
        if len(valid_indices) > 1:
            if len(valid_indices_below_h) >= 3.:
                ml_mean = air_balloon[is_valid_below_h].mean()
            else:
                ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
        elif len(valid_indices) == 1:
            ml_mean = (air_balloon.iloc[0:1]).mean()
        else:
            temp =  pd.DataFrame(air_balloon)
            temp.iloc[0] = np.nan
            ml_mean = temp

        dpars['theta']= ml_mean.theta
        dpars['q']    = ml_mean.q
        dpars['u']    = ml_mean.u
        dpars['v']    = ml_mean.v 
    else:
        dpars['theta'] = np.nan
        dpars['q'] = np.nan
        dpars['u'] = np.nan
        dpars['v'] = np.nan

    # profile head: three pseudo-levels (2 m, h, h) holding the mixed-layer
    # values plus the inversion jump at h
    air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
    # All other data points above the mixed-layer fit
    air_ap_tail = air_balloon[air_balloon.z > dpars['h']]

    # NOTE(review): assigning a 3-row Series to a column of an empty frame
    # relies on legacy pandas alignment behaviour — confirm on upgrade
    air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
    jump = air_ap_head.iloc[0] * np.nan

    if air_ap_tail.shape[0] > 1:

        # we originally used THTA, but that has another definition than the
        # variable theta that we need, which should be the temperature that
        # one would have if brought to surface (NOT reference) pressure.
        for column in ['theta','q','u','v']:

            # initialize the profile head with the mixed-layer values
            air_ap_head[column] = ml_mean[column]
            # jump value at the mixed-layer height: the free troposphere
            # linearly extrapolated down to h, minus the mixed-layer mean
            jump[column] = (air_ap_tail[column].iloc[1]\
                            -\
                            air_ap_tail[column].iloc[0])\
                           /\
                           (air_ap_tail.z.iloc[1]\
                            - air_ap_tail.z.iloc[0])\
                           *\
                           (dpars['h']- air_ap_tail.z.iloc[0])\
                           +\
                           air_ap_tail[column].iloc[0]\
                           -\
                           ml_mean[column] 
            if column == 'theta':
                # for potential temperature, we need to set a lower limit
                # to avoid the model to crash
                jump.theta = np.max((0.1,jump.theta))

            air_ap_head[column][2] += jump[column]

    air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)

    # only select samples monotonically increasing with height
    # (at least 10 m apart)
    air_ap_tail_orig = pd.DataFrame(air_ap_tail)
    air_ap_tail = pd.DataFrame()
    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
    for ibottom in range(1,len(air_ap_tail_orig)):
        if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
            air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)

    # vertically average tail samples until theta increases strongly enough
    # with height, to avoid numerical instability in the model
    air_ap_tail_orig = pd.DataFrame(air_ap_tail)
    air_ap_tail = pd.DataFrame()
    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
    theta_low = air_ap_head['theta'].iloc[2]
    z_low = air_ap_head['z'].iloc[2]
    ibottom = 0
    for itop in range(0,len(air_ap_tail_orig)):
        theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
        z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
        if (
            (z_mean > (z_low+10.)) and \
             (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):

            air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
            ibottom = itop+1
            theta_low = air_ap_tail.theta.iloc[-1]
            z_low =     air_ap_tail.z.iloc[-1]

    air_ap = \
        pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)

    # we copy the pressure at ground level from the balloon sounding; the
    # pressures at the mixed-layer height are rough hydrostatic estimates
    # (class determines the pressure at h internally anyway)

    rho        = 1.2                   # density of air [kg m-3]
    g          = 9.81                  # gravity acceleration [m s-2]

    air_ap['p'].iloc[0] =dpars['Ps'] 
    air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
    air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)


    dpars['lat'] = dpars['latitude']
    # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
    dpars['lon'] = 0.
    # dpars['longitude'] is the real longitude that will be used to extract ground data

    dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
    # local solar time -> UTC for the GOAMAZON site (UTC-4)
    dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
    dpars['doy'] = dpars['datetime'].timetuple().tm_yday

    dpars['SolarAltitude'] = \
                            Pysolar.GetAltitude(\
                                dpars['latitude'],\
                                dpars['longitude'],\
                                dpars['datetime']\
                            )
    dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
                                dpars['latitude'],\
                                dpars['longitude'],\
                                dpars['datetime']\
                            )

    dpars['lSunrise'], dpars['lSunset'] \
    =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
                                     0.,
                                     dpars['ldatetime'],0.)

    # Warning!!! WORKAROUND!!!! Even though we actually write local solar
    # time, we need to assign the timezone to UTC (which is WRONG!!!).
    # Otherwise ruby cannot understand it (it always converts to local
    # computer time :( ).
    dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
    dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])

    # This is the nearest datetime when the sun is up (for class)
    dpars['ldatetime_daylight'] = \
                            np.min(\
                                (np.max(\
                                    (dpars['ldatetime'],\
                                     dpars['lSunrise'])\
                                 ),\
                                 dpars['lSunset']\
                                )\
                            )
    # apply the same time shift for UTC datetime
    dpars['datetime_daylight'] = dpars['datetime'] \
                                +\
                                (dpars['ldatetime_daylight']\
                                 -\
                                 dpars['ldatetime'])

    # We set the starting time to the local sun time, since the model 
    # thinks we are always at the meridian (lon=0). This way the solar
    # radiation is calculated correctly.
    dpars['tstart'] = dpars['ldatetime_daylight'].hour \
                     + \
                     dpars['ldatetime_daylight'].minute/60.\
                     + \
                     dpars['ldatetime_daylight'].second/3600.

    dpars['sw_lit'] = False
    # convert numpy types to native python data types. This provides
    # cleaner data IO with yaml:
    for key,value in dpars.items():
        if type(value).__module__ == 'numpy':
            dpars[key] = dpars[key].item()

    # round the profiles to a sensible number of decimals
    # (bug fix: this block was accidentally nested inside the
    # numpy-conversion loop above, so it ran once per numpy-typed key)
    decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
    for column,decimal in decimals.items():
        air_balloon[column] = air_balloon[column].round(decimal)
        air_ap[column] = air_ap[column].round(decimal)

    updateglobal = False
    if c4gli is None:
        c4gli = class4gl_input()
        updateglobal = True

    print('updating...')
    c4gli.update(source='humppa',\
                pars=dpars,\
                air_balloon=air_balloon,\
                air_ap=air_ap)
    if updateglobal:
        c4gli.get_global_input(globaldata)

    c4gli.dump(file_sounding)

    return c4gli
+
+
# output directory for the converted soundings
path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'


# convert all morning soundings and append them to one yaml file
# NOTE(review): the handles are closed manually; a with-block would also
# cover the error path
file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
for date,pair  in HOUR_FILES.items(): 
    print(pair['morning'])
    humpafn =pair['morning'][1]
    print(humpafn)
    balloon_file = xr.open_dataset(humpafn)

    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
file_morning.close()

# same conversion for the afternoon soundings
file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
for date,pair  in HOUR_FILES.items(): 
    humpafn = pair['afternoon'][1]
    balloon_file = xr.open_dataset(humpafn)

    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
# build the record table (one row per dumped sounding) for the morning set
records_morning = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='morning',
                                           refetch_records=True,
                                           )
print('records_morning_ldatetime',records_morning.ldatetime)

# ... and for the afternoon set
records_afternoon = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='afternoon',
                                           refetch_records=True,
                                           )

# align afternoon records with noon records, and set same index
# (temporarily index by calendar date so the two tables can be matched)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index
# output directory of the model experiment
path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'

os.system('mkdir -p '+path_exp)
# sounding files are read back; ini/mod experiment files are written
file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
# run class4gl for every morning/afternoon sounding pair and dump both the
# initial condition ('ini') and the model output ('mod') records
for (STNID,chunk,index),record_morning in records_morning.iterrows():
    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]

    # read the full yaml record of the morning sounding back from file
    c4gli_morning = get_record_yaml(file_morning, 
                                    record_morning.index_start, 
                                    record_morning.index_end,
                                    mode='ini')
    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
    
    
    # the afternoon sounding only serves to determine the simulation length
    c4gli_afternoon = get_record_yaml(file_afternoon, 
                                      record_afternoon.index_start, 
                                      record_afternoon.index_end,
                                    mode='ini')

    # runtime = daylight-clipped interval between the two soundings [s]
    c4gli_morning.update(source='pairs',pars={'runtime' : \
                        int((c4gli_afternoon.pars.datetime_daylight - 
                             c4gli_morning.pars.datetime_daylight).total_seconds())})
    # NOTE(review): sw_ac/sw_ap/sw_lit appear to be model switches — confirm
    # their meaning against the class4gl documentation before changing.
    c4gli_morning.update(source='manual',
                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
    c4gli_morning.dump(file_ini)
    
    c4gl = class4gl(c4gli_morning)
    c4gl.run()
    
    # dump only the selected timeseries, without repeating the input
    c4gl.dump(file_mod,\
              include_input=False,\
              timeseries_only=timeseries_only)
file_ini.close()
file_mod.close()
file_morning.close()
file_afternoon.close()
+
# re-scan the experiment directory to build record tables for the dumped
# initial ('ini') and model ('mod') yaml files
records_ini = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='ini',
                                           refetch_records=True,
                                           )
records_mod = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='mod',
                                           refetch_records=True,
                                           )

# ini and mod records were dumped pairwise above, so they share one ordering
records_mod.index = records_ini.index

# align afternoon records with initial records, and set same index
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
records_afternoon.index = records_ini.index
+
+
+"""
+stations_for_iter = stations(path_exp)
+for STNID,station in stations_iterator(stations_for_iter):
+    records_current_station_index = \
+            (records_ini.index.get_level_values('STNID') == STNID)
+    file_current_station_mod = STNID
+
+    with \
+    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+        for (STNID,index),record_ini in records_iterator(records_ini):
+            c4gli_ini = get_record_yaml(file_station_ini, 
+                                        record_ini.index_start, 
+                                        record_ini.index_end,
+                                        mode='ini')
+            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+
+            record_mod = records_mod.loc[(STNID,index)]
+            c4gl_mod = get_record_yaml(file_station_mod, 
+                                        record_mod.index_start, 
+                                        record_mod.index_end,
+                                        mode='mod')
+            record_afternoon = records_afternoon.loc[(STNID,index)]
+            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+                                        record_afternoon.index_start, 
+                                        record_afternoon.index_end,
+                                        mode='ini')
+"""
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersectino index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/setup_humppa.py b/class4gl/setup/setup_humppa.py
new file mode 100644
index 0000000..ff37628
--- /dev/null
+++ b/class4gl/setup/setup_humppa.py
@@ -0,0 +1,732 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
# load the global (gridded) ground/surface datasets once at module level;
# they are reused for every sounding parsed below
globaldata = data_global()
globaldata.load_datasets(recalc=0)

# thermodynamic constants
Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
epsilon = Rd/Rv # or mv/md
+
+
def replace_iter(iterable, search, replace):
    """Yield each string from *iterable* with *search* replaced by *replace*.

    Bug fix: ``str.replace`` returns a new string (strings are immutable);
    the original code discarded that return value and yielded the
    unmodified input string.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
# Metadata of the HUMPPA measurement site (station id 90000).
_station_info = {
    "latitude": 61.8448,
    "longitude": 24.2882,
    "name": "the HUMMPA experiment",
}
current_station = pd.Series(_station_info)
# the pandas Series .name attribute doubles as the numeric station identifier
current_station.name = 90000
+
# we define the columns ourselves because it is a mess in the file itself.
# NOTE: the order and count (19 names) must match the fixed-width layout
# passed to pd.read_fwf in humppa_parser (widths=[14]*19).
columns =\
['Time[min:sec]',
 'P[hPa]',
 'T[C]',
 'U[%]',
 'Wsp[m/s]',
 'Wdir[Grd]',
 'Lon[°]',
 'Lat[°]',
 'Altitude[m]',
 'GeoPot[m]',
 'MRI',
 'RI',    
 'DewPoint[C]',
 'Virt. Temp[C]',
 'Rs[m/min]',
 'D[kg/m3]',
 'Azimut[°]',
 'Elevation[°]',
 'Range[m]',
]
+
+
# Mapping of campaign day (UTC midnight) -> morning/afternoon sounding:
# each entry is [local launch hour, sounding filename].
# Bug fix: the entry for the humppa_080410_* files was keyed with
# dt.datetime(2010,8,3,...) — a duplicate of the previous key — which
# silently overwrote the Aug 3 entry and dropped Aug 4 entirely. The
# filenames (080410) show the key must be Aug 4.
HOUR_FILES = \
{ dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[18,'humppa_071310_1800.txt']},
  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
  dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
  dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
  dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
  dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[21,'humppa_071910_2100.txt']},
#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[18,'humppa_072210_1800.txt']},
 # something is wrong with ths profile
 # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[18,'humppa_072910_1800.txt']},
  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[15,'humppa_073110_1500.txt']},
  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[18,'humppa_080210_1800.txt']},
  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
  dt.datetime(2010,8, 4,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[18,'humppa_081010_1800.txt']},
}
+
+
+
+
+
+
# only include the following timeseries in the model output.
# NOTE: the original list contained 'wthetae' twice; the duplicate added no
# information to the output selection, so only one occurrence is kept.
timeseries_only = \
['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'zlcl']
+
+
def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
        """Parse one HUMPPA balloon sounding into a class4gl input record.

        Parameters
        ----------
        balloon_file : file-like
            Fixed-width sounding file (layout given by the module-level
            ``columns`` list, 19 fields of 14 characters).
        file_sounding : file object
            Open yaml file to which the resulting input record is dumped.
        ldate : datetime.datetime
            Campaign day (local solar time, midnight).
        hour : int
            Local solar hour of the balloon launch.
        c4gli : class4gl_input, optional
            Existing input object to update.  When None, a fresh object is
            created and the global (ground) datasets are sampled for it.

        Returns
        -------
        class4gl_input
            The updated input object (also dumped to ``file_sounding``).

        Bug fix: the profile-rounding block (``decimals``) was nested inside
        the numpy->python conversion loop, so it re-ran once per numpy-typed
        parameter and was skipped entirely when there was none; it now runs
        exactly once.  A debug ``print`` of a stale loop variable was removed.
        """
        air_balloon_in = pd.read_fwf(balloon_file,
                                     widths=[14]*19,
                                     skiprows=9,
                                     skipfooter=15,
                                     decimal=',',
                                     header=None,
                                     names = columns,
                                     na_values='-----')
    
        # derive basic quantities (SI units) from the raw sounding columns
        rowmatches = {
            't':      lambda x: x['T[C]']+273.15,
            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
            'p':      lambda x: x['P[hPa]']*100.,
            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
            'z':      lambda x: x['Altitude[m]'],
            # specific humidity from the virtual-temperature excess
            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
        }
        
        air_balloon = pd.DataFrame()
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon_in)
        
        # quantities that depend on the ones derived above
        rowmatches = {
            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
        }
        
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon)
        
        dpars = {}
        dpars['longitude']  = current_station['longitude']
        dpars['latitude']  = current_station['latitude'] 
        
        dpars['STNID'] = current_station.name
        
        # there are issues with the lower measurements in the HUMPPA campaign,
        # for which a steady decrease of potential temperature is found, which
        # is unrealistic.  Here I filter them away
        ifirst = 0
        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
            ifirst = ifirst+1
        print ('ifirst:',ifirst)
        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
        
        # rows with any NaN or a negative altitude are unusable
        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
        valid_indices = air_balloon.index[is_valid].values
        
        air_ap_mode='b'
        
        # mixed-layer height (best guess + upper/lower bounds) via the
        # bulk-Richardson method of blh()
        if len(valid_indices) > 0:
            dpars['h'],dpars['h_u'],dpars['h_l'] =\
                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
            dpars['h_b'] = np.max((dpars['h'],10.))
            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
        else:
            dpars['h_u'] =np.nan
            dpars['h_l'] =np.nan
            dpars['h_e'] =np.nan
            dpars['h'] =np.nan
        
        # surface pressure = pressure of the lowest valid sample
        if ~np.isnan(dpars['h']):
            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
        else:
            dpars['Ps'] = np.nan
        
        if ~np.isnan(dpars['h']):
        
            # determine mixed-layer properties (moisture, potential temperature...) from profile
            
            # ... and those of the mixed layer
            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
            if len(valid_indices) > 1:
                if len(valid_indices_below_h) >= 3.:
                    ml_mean = air_balloon[is_valid_below_h].mean()
                else:
                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
            elif len(valid_indices) == 1:
                ml_mean = (air_balloon.iloc[0:1]).mean()
            else:
                temp =  pd.DataFrame(air_balloon)
                temp.iloc[0] = np.nan
                ml_mean = temp
                       
            dpars['theta']= ml_mean.theta
            dpars['q']    = ml_mean.q
            dpars['u']    = ml_mean.u
            dpars['v']    = ml_mean.v 
        else:
            dpars['theta'] = np.nan
            dpars['q'] = np.nan
            dpars['u'] = np.nan
            dpars['v'] = np.nan
        
        # profile head: 3 synthetic levels (surface, top of mixed layer, and
        # just above it carrying the inversion jump)
        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
        # All other  data points above the mixed-layer fit
        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]

        # NOTE(review): attribute assignment of a 3-element Series to a 0-row
        # frame relies on legacy pandas semantics — verify with the pandas
        # version pinned for this project.
        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
        jump = air_ap_head.iloc[0] * np.nan
        
        if air_ap_tail.shape[0] > 1:
        
            # we originally used THTA, but that has another definition than the
            # variable theta that we need which should be the temperature that
            # one would have if brought to surface (NOT reference) pressure.
            for column in ['theta','q','u','v']:
               
               # initialize the profile head with the mixed-layer values
               air_ap_head[column] = ml_mean[column]
               # calculate jump values at mixed-layer height, which will be
               # added to the third datapoint of the profile head
               jump[column] = (air_ap_tail[column].iloc[1]\
                               -\
                               air_ap_tail[column].iloc[0])\
                              /\
                              (air_ap_tail.z.iloc[1]\
                               - air_ap_tail.z.iloc[0])\
                              *\
                              (dpars['h']- air_ap_tail.z.iloc[0])\
                              +\
                              air_ap_tail[column].iloc[0]\
                              -\
                              ml_mean[column] 
               if column == 'theta':
                  # for potential temperature, we need to set a lower limit to
                  # avoid the model to crash
                  jump.theta = np.max((0.1,jump.theta))
        
               # chained assignment — relies on legacy pandas behaviour
               air_ap_head[column][2] += jump[column]
        
        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)

        # only select samples monotonically increasing with height
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        for ibottom in range(1,len(air_ap_tail_orig)):
            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)

        # make theta increase strong enough to avoid numerical
        # instability
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        theta_low = air_ap_head['theta'].iloc[2]
        z_low = air_ap_head['z'].iloc[2]
        ibottom = 0
        for itop in range(0,len(air_ap_tail_orig)):
            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
            if (
                (z_mean > (z_low+10.)) and \
                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):

                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
                ibottom = itop+1
                theta_low = air_ap_tail.theta.iloc[-1]
                z_low =     air_ap_tail.z.iloc[-1]
        
        air_ap = \
            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
        
        # we copy the pressure at ground level from balloon sounding. The
        # pressure at mixed-layer height will be determined internally by class
        
        rho        = 1.2                   # density of air [kg m-3]
        g          = 9.81                  # gravity acceleration [m s-2]
        
        air_ap['p'].iloc[0] =dpars['Ps'] 
        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
        
        
        dpars['lat'] = dpars['latitude']
        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
        dpars['lon'] = 0.
        # this is the real longitude that will be used to extract ground data
        
        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
        # local solar time is UTC+3 at this longitude band
        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
        
        dpars['SolarAltitude'] = \
                                Pysolar.GetAltitude(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )
        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )
        
        
        dpars['lSunrise'], dpars['lSunset'] \
        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
                                         0.,
                                         dpars['ldatetime'],0.)
        
        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
        
        # This is the nearest datetime when the sun is up (for class)
        dpars['ldatetime_daylight'] = \
                                np.min(\
                                    (np.max(\
                                        (dpars['ldatetime'],\
                                         dpars['lSunrise'])\
                                     ),\
                                     dpars['lSunset']\
                                    )\
                                )
        # apply the same time shift for UTC datetime
        dpars['datetime_daylight'] = dpars['datetime'] \
                                    +\
                                    (dpars['ldatetime_daylight']\
                                     -\
                                     dpars['ldatetime'])
        
        
        # We set the starting time to the local sun time, since the model 
        # thinks we are always at the meridian (lon=0). This way the solar
        # radiation is calculated correctly.
        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
                         + \
                         dpars['ldatetime_daylight'].minute/60.\
                         + \
                         dpars['ldatetime_daylight'].second/3600.
        
        dpars['sw_lit'] = False
        # convert numpy types to native python data types. This provides
        # cleaner data IO with yaml:
        for key,value in dpars.items():
            if type(value).__module__ == 'numpy':
                dpars[key] = dpars[key].item()
        
        # round the profiles for cleaner yaml output.  This runs exactly once
        # (it was previously — and incorrectly — nested in the loop above).
        decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
        for column,decimal in decimals.items():
            air_balloon[column] = air_balloon[column].round(decimal)
            air_ap[column] = air_ap[column].round(decimal)
        
        updateglobal = False
        if c4gli is None:
            c4gli = class4gl_input()
            updateglobal = True
        
        print('updating...')
        c4gli.update(source='humppa',\
                    pars=dpars,\
                    air_balloon=air_balloon,\
                    air_ap=air_ap)
        if updateglobal:
            # sample the gridded ground datasets at this station/time
            c4gli.get_global_input(globaldata)

        c4gli.dump(file_sounding)
        
        return c4gli
+
+
# All parsed IOPS soundings are appended to one YAML archive per station.
path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'


# Parse every HUMPPA morning sounding and append it to the station's
# "morning" YAML archive.  Context managers guarantee that the archive and
# each raw balloon file are closed even on errors; previously the raw
# balloon files were opened and never closed (a file-handle leak).
with open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') as file_morning:
    for date, pair in HOUR_FILES.items():
        print(pair['morning'])
        humpafn = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
        print(humpafn)
        # raw sounding files are encoded in latin-1
        with open(humpafn, 'r', encoding='latin-1') as balloon_file:
            c4gli_morning = humppa_parser(balloon_file, file_morning, date, pair['morning'][0])
        print('c4gli_morning_ldatetime 0', c4gli_morning.pars.ldatetime)
+
# Same procedure for the afternoon sounding of each day; context managers
# replace the old explicit close() and fix the leaked balloon files.
with open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') as file_afternoon:
    for date, pair in HOUR_FILES.items():
        humpafn = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
        # raw sounding files are encoded in latin-1
        with open(humpafn, 'r', encoding='latin-1') as balloon_file:
            c4gli_afternoon = humppa_parser(balloon_file, file_afternoon, date, pair['afternoon'][0])
        print('c4gli_afternoon_ldatetime 0', c4gli_afternoon.pars.ldatetime)
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
# Build the table of morning sounding records for this station; rows are
# keyed by (STNID, chunk, index) and carry byte offsets (index_start /
# index_end) into the station YAML archive written above.
records_morning = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='morning',
                                           refetch_records=True,
                                           )
print('records_morning_ldatetime',records_morning.ldatetime)

records_afternoon = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='afternoon',
                                           refetch_records=True,
                                           )

# align afternoon records with noon records, and set same index:
# temporarily key the afternoon table by local calendar date, select the
# dates present in the morning table (same row order), then restore the
# morning table's (STNID, chunk, index) index so that both tables can be
# matched row-by-row below.
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index
# Output location for the IOPS model experiments (initial profiles + runs).
path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'

# create the target directory without spawning a shell
os.makedirs(path_exp, exist_ok=True)

# Context managers guarantee that all four YAML streams are closed even
# when a simulation raises part-way through the loop; the old explicit
# close() calls were skipped in that case (file-handle leak).
with open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') as file_morning, \
     open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') as file_afternoon, \
     open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') as file_ini, \
     open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') as file_mod:

    for (STNID,chunk,index),record_morning in records_morning.iterrows():
        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]

        # seek/parse the morning profile out of the station YAML archive
        c4gli_morning = get_record_yaml(file_morning,
                                        record_morning.index_start,
                                        record_morning.index_end,
                                        mode='ini')

        c4gli_afternoon = get_record_yaml(file_afternoon,
                                          record_afternoon.index_start,
                                          record_afternoon.index_end,
                                          mode='ini')

        # the model runtime spans the (daylight-clipped) morning sounding
        # up to the afternoon sounding, in seconds
        c4gli_morning.update(source='pairs',pars={'runtime' : \
                            int((c4gli_afternoon.pars.datetime_daylight -
                                 c4gli_morning.pars.datetime_daylight).total_seconds())})
        c4gli_morning.update(source='manual',
                             pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
        c4gli_morning.dump(file_ini)

        c4gl = class4gl(c4gli_morning)
        c4gl.run()

        c4gl.dump(file_mod,\
                  include_input=False,\
                  timeseries_only=timeseries_only)
+
# Re-scan the freshly written experiment files: 'ini' holds the model
# initial conditions, 'mod' the corresponding model output records.
records_ini = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='ini',
                                           refetch_records=True,
                                           )
records_mod = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='mod',
                                           refetch_records=True,
                                           )

# ini and mod records were written pairwise, so they share row order
records_mod.index = records_ini.index

# align afternoon records with initial records, and set same index
# (same date-keyed matching trick as used for the morning records)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
records_afternoon.index = records_ini.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/trash/setup_global_old.py b/class4gl/setup/trash/setup_global_old.py
new file mode 100644
index 0000000..d812684
--- /dev/null
+++ b/class4gl/setup/trash/setup_global_old.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
    python setup_global.py <row_index>
    where <row_index> is an integer indicating the row index of the station
    list file fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+
+
+#calculate the root mean square error
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    
+    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+
+
# make the class4gl package importable
sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
from class4gl import class4gl_input, data_global,class4gl
from data_soundings import wyoming
#from data_global import data_global

# initialize global data
globaldata = data_global()
# ...  and load initial data pages
globaldata.load_datasets(recalc=0)

# read the list of stations with valid ground data (list generated with
# get_valid_stations.py)
idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"

df_stations = pd.read_csv(fn_stations)


STNlist = list(df_stations.iterrows())
NUMSTNS = len(STNlist)
PROCS = 100 # number of parallel (PBS) worker processes
BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))


# worker index, passed on the command line by the PBS job script; together
# with BATCHSIZE it selects the slice of stations this process handles
iPROC = int(sys.argv[1])
+
+
# Main per-station loop: stream through a station's full Wyoming sounding
# archive, select "good" morning soundings, pair each with the last usable
# afternoon sounding of the same local day, and dump accepted pairs to the
# two station YAML files.
for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
# for iSTN,STN in STNlist[5:]:  
    
    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
    

    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
    #                   for EXP in experiments.keys()])
        
    with open(fnout,'w') as fileout, \
         open(fnout_afternoon,'w') as fileout_afternoon:
        # NOTE(review): keyword arguments PATH/STNM presume the imported
        # data_soundings.wyoming constructor accepts them — confirm, since
        # set_STNM() is called right afterwards anyway.
        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
        wy_strm.set_STNM(int(STN['ID']))

        # we consider all soundings after 1981
        wy_strm.find_first(year=1981)
        #wy_strm.find(dt.datetime(2004,10,19,6))
        
        c4gli = class4gl_input(debug_level=logging.INFO)
        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
        # so we continue as long as we can find a new sounding
        while wy_strm.current is not None:
            
            c4gli.clear()
            c4gli.get_profile_wyoming(wy_strm)
            #print(STN['ID'],c4gli.pars.datetime)
            #c4gli.get_global_input(globaldata)

            print(c4gli.pars.STNID, c4gli.pars.ldatetime)

            # quality criteria for the morning sounding; each entry is a
            # boolean, and the sounding is accepted only when ALL hold
            logic = dict()
            logic['morning'] =  (c4gli.pars.ldatetime.hour < 12.)
            logic['daylight'] = \
                ((c4gli.pars.ldatetime_daylight - 
                  c4gli.pars.ldatetime).total_seconds()/3600. <= 5.)
            
            logic['springsummer'] = (c4gli.pars.theta > 278.)
            
            # we take 3000 because previous analysis (ie., HUMPPA) has
            # focussed towards such altitude
            le3000 = (c4gli.air_balloon.z <= 3000.)
            logic['10measurements'] = (np.sum(le3000) >= 10) 

            leh = (c4gli.air_balloon.z <= c4gli.pars.h)

            try:
                logic['mlerrlow'] = (\
                        (len(np.where(leh)[0]) > 0) and \
                        # in cases where humidity is not defined, the mixed-layer
                        # values get corr
                        (not np.isnan(c4gli.pars.theta)) and \
                        (rmse(c4gli.air_balloon.theta[leh] , \
                              c4gli.pars.theta,filternan_actual=True) < 1.)\
                              )
    
            # NOTE(review): bare except silently hides any error here, not
            # just rmse failures — consider narrowing to Exception
            except:
                logic['mlerrlow'] = False
                print('rmse probably failed')

            logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
            
            print('logic:', logic)
            # the result: mean of booleans equals 1.0 only if all criteria hold
            morning_ok = np.mean(list(logic.values()))
            print(morning_ok,c4gli.pars.ldatetime)
            
            # the next sounding will be used either for an afternoon sounding
            # or for the morning sounding of the next day.
            wy_strm.find_next()

            # If the morning is ok, then we try to find a decent afternoon
            # sounding
            if morning_ok == 1.:
                # we get the current date
                current_date = dt.date(c4gli.pars.ldatetime.year, \
                                       c4gli.pars.ldatetime.month, \
                                       c4gli.pars.ldatetime.day)
                c4gli_afternoon.clear()
                c4gli_afternoon.get_profile_wyoming(wy_strm)

                if wy_strm.current is not None:
                    current_date_afternoon = \
                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
                                       c4gli_afternoon.pars.ldatetime.month, \
                                       c4gli_afternoon.pars.ldatetime.day)
                else:
                    # a dummy date: this will be ignored anyway
                    current_date_afternoon = dt.date(1900,1,1)

                # we will dump the latest afternoon sounding that fits the
                # minimum criteria specified by logic_afternoon
                c4gli_afternoon_for_dump = None
                while ((current_date_afternoon == current_date) and \
                       (wy_strm.current is not None)):
                    logic_afternoon =dict()

                    logic_afternoon['afternoon'] = \
                        (c4gli_afternoon.pars.ldatetime.hour >= 12.)
                    logic_afternoon['daylight'] = \
                      ((c4gli_afternoon.pars.ldatetime - \
                        c4gli_afternoon.pars.ldatetime_daylight \
                       ).total_seconds()/3600. <= 2.)


                    le3000_afternoon = \
                        (c4gli_afternoon.air_balloon.z <= 3000.)
                    logic_afternoon['5measurements'] = \
                        (np.sum(le3000_afternoon) >= 5) 

                    # we only store the last afternoon sounding that fits these
                    # minimum criteria

                    afternoon_ok = np.mean(list(logic_afternoon.values()))

                    print('logic_afternoon: ',logic_afternoon)
                    print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
                    if afternoon_ok == 1.:
                        # # doesn't work :(
                        # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
                        
                        # so we just create a new one from the same wyoming profile
                        c4gli_afternoon_for_dump = class4gl_input()
                        c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)

                    wy_strm.find_next()
                    c4gli_afternoon.clear()
                    c4gli_afternoon.get_profile_wyoming(wy_strm)

                    if wy_strm.current is not None:
                        current_date_afternoon = \
                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
                                       c4gli_afternoon.pars.ldatetime.month, \
                                       c4gli_afternoon.pars.ldatetime.day)
                    else:
                        # a dummy date: this will be ignored anyway
                        current_date_afternoon = dt.date(1900,1,1)

                    # Only in the case we have a good pair of soundings, we
                    # dump them to disk
                # NOTE(review): the comment above belongs to the if-block
                # below, which runs AFTER the while loop has finished
                if c4gli_afternoon_for_dump is not None:
                    c4gli.update(source='pairs',pars={'runtime' : \
                        int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
                             c4gli.pars.datetime_daylight).total_seconds())})
    
    
                    print('ALMOST...')
                    if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
                            
        
                        c4gli.get_global_input(globaldata)
                        print('VERY CLOSE...')
                        if c4gli.check_source_globaldata() and \
                            (c4gli.check_source(source='wyoming',\
                                               check_only_sections='pars')):
                            c4gli.dump(fileout)
                            
                            c4gli_afternoon_for_dump.dump(fileout_afternoon)
                            
                            
                            # for keyEXP,dictEXP in experiments.items():
                            #     
                            #     c4gli.update(source=keyEXP,pars = dictEXP)
                            #     c4gl = class4gl(c4gli)
                            #     # c4gl.run()
                            #     
                            #     c4gl.dump(c4glfiles[key])
                            
                            print('HIT!!!')
                
                
    # for c4glfile in c4glfiles:
    #     c4glfile.close()            
+
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
new file mode 100644
index 0000000..b5d4cc3
--- /dev/null
+++ b/class4gl/simulations/batch_simulations.py
@@ -0,0 +1,77 @@
+
+import argparse
+
+import pandas as pd
+import os
+import math
+import numpy as np
+import sys
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
# default sounding archive and station list locations
odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
fn_stations = odir+'/igra-stations_sel.txt'
# NOTE(review): df_stations is only referenced by the commented-out wsub
# branch below; the active code derives the station set from stations()
df_stations = pd.read_csv(fn_stations)

# if 'path-soundings' in args.__dict__.keys():
#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
# else:
+
+
+
if __name__ == '__main__':
    # All batch logic lives under the main guard: previously only the
    # argument parsing was guarded while its results ("args") were used at
    # module level, so importing this module raised a NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset')
    parser.add_argument('--path-soundings')
    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
    parser.add_argument('--exec')
    parser.add_argument('--experiments')#should be ';'-separated list
    parser.add_argument('--split-by',default=-1)
    args = parser.parse_args()

    experiments = args.experiments.split(';')
    #SET = 'GLOBAL'
    SET = args.dataset
    print(args.experiments)

    # argparse exposes "--path-soundings" as attribute "path_soundings";
    # the old test for the literal key 'path-soundings' could never match,
    # so a user-supplied path was silently ignored.
    if args.path_soundings is not None:
        path_soundingsSET = args.path_soundings+'/'+SET+'/'
    else:
        path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'

    all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
    records_morning = get_records(all_stations,\
                                  path_soundingsSET,\
                                  subset='morning',
                                  refetch_records=False,
                                  )

    # wipe any previous output of the requested experiments
    for expname in experiments:
        #exp = EXP_DEFS[expname]
        path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
        os.system('rm -R '+path_exp)

    # one PBS array task per chunk of (at most) split_by morning records
    totalchunks = 0
    for istation,current_station in all_stations.iterrows():
        records_morning_query = records_morning.query('STNID == '+str(current_station.name))
        chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
        totalchunks += chunks_current_station

    # submit the whole array job at once
    os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
                                           ',split_by='+str(args.split_by)+\
                                           ',exec='+str(args.exec)+\
                                           ',experiments='+str(args.experiments))
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/class4gl/simulations/runmodel.py b/class4gl/simulations/runmodel.py
new file mode 100644
index 0000000..fc4fd19
--- /dev/null
+++ b/class4gl/simulations/runmodel.py
@@ -0,0 +1,130 @@
+#
+# Example of how to run the Python code, and access the output
+# This case is identical to the default setup of CLASS (the version with interface) 
+#
+
+from pylab import *
+from model import *
+
+""" 
+Create empty model_input and set up case
+"""
+run1input = model_input()
+
+run1input.dt         = 60.       # time step [s]
+run1input.runtime    = 12*3600    # total run time [s]
+
+# mixed-layer input
+run1input.sw_ml      = True      # mixed-layer model switch
+run1input.sw_shearwe = False     # shear growth mixed-layer switch
+run1input.sw_fixft   = False     # Fix the free-troposphere switch
+run1input.h          = 200.      # initial ABL height [m]
+run1input.Ps         = 101300.   # surface pressure [Pa]
+run1input.divU       = 0.        # horizontal large-scale divergence of wind [s-1]
+run1input.fc         = 1.e-4     # Coriolis parameter [s-1]
+
+run1input.theta      = 288.      # initial mixed-layer potential temperature [K]
+run1input.dtheta     = 1.        # initial temperature jump at h [K]
+run1input.gammatheta = 0.006     # free atmosphere potential temperature lapse rate [K m-1]
+run1input.advtheta   = 0.        # advection of heat [K s-1]
+run1input.beta       = 0.2       # entrainment ratio for virtual heat [-]
+run1input.wtheta     = 0.1       # surface kinematic heat flux [K m s-1]
+
+run1input.q          = 0.008     # initial mixed-layer specific humidity [kg kg-1]
+run1input.dq         = -0.001    # initial specific humidity jump at h [kg kg-1]
+run1input.gammaq     = 0.        # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
+run1input.advq       = 0.        # advection of moisture [kg kg-1 s-1]
+run1input.wq         = 0.1e-3    # surface kinematic moisture flux [kg kg-1 m s-1]
+
+run1input.CO2        = 422.      # initial mixed-layer CO2 [ppm]
+run1input.dCO2       = -44.      # initial CO2 jump at h [ppm]
+run1input.gammaCO2   = 0.        # free atmosphere CO2 lapse rate [ppm m-1]
+run1input.advCO2     = 0.        # advection of CO2 [ppm s-1]
+run1input.wCO2       = 0.        # surface kinematic CO2 flux [ppm m s-1]
+
+run1input.sw_wind    = False     # prognostic wind switch
+run1input.u          = 6.        # initial mixed-layer u-wind speed [m s-1]
+run1input.du         = 4.        # initial u-wind jump at h [m s-1]
+run1input.gammau     = 0.        # free atmosphere u-wind speed lapse rate [s-1]
+run1input.advu       = 0.        # advection of u-wind [m s-2]
+
+run1input.v          = -4.0      # initial mixed-layer v-wind speed [m s-1]
+run1input.dv         = 4.0       # initial v-wind jump at h [m s-1]
+run1input.gammav     = 0.        # free atmosphere v-wind speed lapse rate [s-1]
+run1input.advv       = 0.        # advection of v-wind [m s-2]
+
+run1input.sw_sl      = False     # surface layer switch
+run1input.ustar      = 0.3       # surface friction velocity [m s-1]
+run1input.z0m        = 0.02      # roughness length for momentum [m]
+run1input.z0h        = 0.002     # roughness length for scalars [m]
+
+run1input.sw_rad     = False     # radiation switch
+run1input.lat        = 51.97     # latitude [deg]
+run1input.lon        = -4.93     # longitude [deg]
+run1input.doy        = 268.      # day of the year [-]
+run1input.tstart     = 6.8       # time of the day [h UTC]
+run1input.cc         = 0.0       # cloud cover fraction [-]
+run1input.Q          = 400.      # net radiation [W m-2] 
+run1input.dFz        = 0.        # cloud top radiative divergence [W m-2] 
+
+run1input.sw_ls      = False     # land surface switch
+run1input.ls_type    = 'js'      # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
+run1input.wg         = 0.21      # volumetric water content top soil layer [m3 m-3]
+run1input.w2         = 0.21      # volumetric water content deeper soil layer [m3 m-3]
+run1input.cveg       = 0.85      # vegetation fraction [-]
+run1input.Tsoil      = 285.      # temperature top soil layer [K]
+run1input.T2         = 286.      # temperature deeper soil layer [K]
+run1input.a          = 0.219     # Clapp and Hornberger retention curve parameter a
+run1input.b          = 4.90      # Clapp and Hornberger retention curve parameter b
+run1input.p          = 4.        # Clapp and Hornberger retention curve parameter c
+run1input.CGsat      = 3.56e-6   # saturated soil conductivity for heat
+
+run1input.wsat       = 0.472     # saturated volumetric water content ECMWF config [-]
+run1input.wfc        = 0.323     # volumetric water content field capacity [-]
+run1input.wwilt      = 0.171     # volumetric water content wilting point [-]
+
+run1input.C1sat      = 0.132     
+run1input.C2ref      = 1.8
+
+run1input.LAI        = 2.        # leaf area index [-]
+run1input.gD         = 0.0       # correction factor transpiration for VPD [-]
+run1input.rsmin      = 110.      # minimum resistance transpiration [s m-1]
+run1input.rssoilmin  = 50.       # minimum resistance soil evaporation [s m-1]
+run1input.alpha      = 0.25      # surface albedo [-]
+
+run1input.Ts         = 290.      # initial surface temperature [K]
+
+run1input.Wmax       = 0.0002    # thickness of water layer on wet vegetation [m]
+run1input.Wl         = 0.0000    # equivalent water layer depth for wet vegetation [m]
+
+run1input.Lambda     = 5.9       # thermal diffusivity skin layer [-]
+
+run1input.c3c4       = 'c3'      # Plant type ('c3' or 'c4')
+
+run1input.sw_cu      = False     # Cumulus parameterization switch
+run1input.dz_h       = 150.      # Transition layer thickness [m]
+
+"""
+Init and run the model
+"""
+r1 = model(run1input)
+r1.run()
+
+"""
+Plot output
+"""
+figure()
+subplot(131)
+plot(r1.out.t, r1.out.h)
+xlabel('time [h]')
+ylabel('h [m]')
+
+subplot(132)
+plot(r1.out.t, r1.out.theta)
+xlabel('time [h]')
+ylabel('theta [K]')
+
+subplot(133)
+plot(r1.out.t, r1.out.q*1000.)
+xlabel('time [h]')
+ylabel('q [g kg-1]')
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
new file mode 100644
index 0000000..719f9a5
--- /dev/null
+++ b/class4gl/simulations/simulations.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--first-station-row')
+parser.add_argument('--last-station-row')
+parser.add_argument('--station-id') # run a specific station id
+parser.add_argument('--dataset')
+parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--error-handling',default='dump_on_success')
+parser.add_argument('--experiments')
+parser.add_argument('--split-by',default=-1)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+path_soundingsSET = args.path_soundings+'/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iter = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iter.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    if args.station_id is not None:
+        stations_iter = stations_iterator(all_stations)
+        STNID,run_station = stations_iter.set_STNID(int(args.station_id))
+        run_stations = pd.DataFrame(run_station)
+    else:
+        run_stations = pd.DataFrame(all_stations)
+        if args.last_station_row is not None:
+            run_stations = run_stations.iloc[:(int(args.last_station_row)+1)]
+        if args.first_station_row is not None:
+            run_stations = run_stations.iloc[int(args.first_station_row):]
+        run_station_chunk = int(args.station_chunk)
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        try:
+                            c4gl.run()
+                        except:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_success':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                            onerun = True
+
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except:
+                            print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py
new file mode 100644
index 0000000..5dfbaff
--- /dev/null
+++ b/class4gl/simulations/simulations_iter.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk',default=0)
+    args = parser.parse_args()
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if args.path_soundings is not None:
+    path_soundingsSET = args.path_soundings+'/'+SET+'/'
+else:
+    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.last_station)+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.first_station):]
+    if args.station_chunk is not None:
+        run_station_chunk = int(args.station_chunk)
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                #if iexp == 11:
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+
+                        c4gli_morning.pars.itersteps = i
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                   #   timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    except:
+                        print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/class4gl/simulations/simulations_iter_test.py b/class4gl/simulations/simulations_iter_test.py
new file mode 100644
index 0000000..eefd475
--- /dev/null
+++ b/class4gl/simulations/simulations_iter_test.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk')
+    args = parser.parse_args()
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    run_station_chunk = 0
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+                #if iexp == 11:
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+                        onerun = True
+
+                        c4gli_morning.pars.itersteps = i
+                    except:
+                        print('run not succesfull')
+                    c4gli_morning.dump(file_ini)
+                    
+                    
+                    c4gl.dump(file_mod,\
+                                  include_input=False,\
+                               #   timeseries_only=timeseries_only,\
+                             )
+                    onerun = True
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/class4gl/simulations/trash/run_test.py b/class4gl/simulations/trash/run_test.py
new file mode 100644
index 0000000..767d960
--- /dev/null
+++ b/class4gl/simulations/trash/run_test.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--global-chunk')
+    parser.add_argument('--first-station')
+    parser.add_argument('--last-station')
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    parser.add_argument('--experiments')
+    parser.add_argument('--split-by',default=-1)# station soundings are split
+                                                # up in chunks
+    parser.add_argument('--station-chunk')
+    parser.add_argument('--c4gl-path',default='')
+    args = parser.parse_args()
+
+if args.c4gl_path == '': 
+    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+else:
+    sys.path.insert(0, args.c4gl_path)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+#SET = 'GLOBAL'
+SET = args.dataset
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+
+all_records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+if args.global_chunk is not None:
+    totalchunks = 0
+    stations_iterator = all_stations.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iterator.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    run_stations = pd.DataFrame(all_stations)
+    if args.last_station is not None:
+        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
+    if args.first_station is not None:
+        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
+    run_station_chunk = 0
+    if args.station_chunk is not None:
+        run_station_chunk = args.station_chunk
+
+#print(all_stations)
+print(run_stations)
+print(args.__dict__.keys())
+records_morning = get_records(run_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                path_soundingsSET,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            print(records_morning_station_chunk)
+
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    try:
+                        c4gl.run()
+                    except:
+                        print('run not succesfull')
+                    onerun = True
+
+                    c4gli_morning.dump(file_ini)
+                    
+                    
+                    c4gl.dump(file_mod,\
+                              include_input=False,\
+                              #timeseries_only=timeseries_only,\
+                             )
+                    onerun = True
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/setup.py b/setup.py
index a806fa0..4dcb51d 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 setup(
         name='class4gl',
         version='0.1dev',
-        packages=['lib','bin'],
+        packages=['class4gl','bin'],
         license='GPLv3 licence',
         long_description=open('README.md').read(),
 )

From 919a737f8fb12a261e8ebe511d45dbcf91847b11 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 21 Aug 2018 22:00:36 +0200
Subject: [PATCH 009/129] Delete class4gl.py

---
 class4gl.py | 1611 ---------------------------------------------------
 1 file changed, 1611 deletions(-)
 delete mode 100644 class4gl.py

diff --git a/class4gl.py b/class4gl.py
deleted file mode 100644
index 7baaa51..0000000
--- a/class4gl.py
+++ /dev/null
@@ -1,1611 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-
-Created on Mon Jan 29 12:33:51 2018
-
-Module file for class4gl, which  extents the class-model to be able to take
-global air profiles as input. It exists of:
-
-CLASSES:
-    - an input object, namely class4gl_input. It includes:
-        - a function to read Wyoming sounding data from a yyoming stream object
-        - a function to read global data from a globaldata library object 
-    - the model object: class4gl
-    - ....    
-
-DEPENDENCIES:
-    - xarray
-    - numpy
-    - data_global
-    - Pysolar
-    - yaml
-
-@author: Hendrik Wouters
-
-"""
-
-
-
-""" Setup of envirnoment """
-
-# Standard modules of the stand class-boundary-layer model
-from model import model
-from model import model_output as class4gl_output
-from model import model_input
-from model import qsat
-#from data_soundings import wyoming 
-import Pysolar
-import yaml
-import logging
-import warnings
-import pytz
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-
-# Generic Python Packages
-import numpy as np
-import datetime as dt
-import pandas as pd
-import xarray as xr
-import io
-#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
-from data_global import data_global
-grav = 9.81
-
-# this is just a generic input object
-class generic_input(object):
-    def __init__(self):
-        self.init = True
-
-
-# all units from all variables in CLASS(4GL) should be defined here!
-units = {
-         'h':'m',
-         'theta':'K', 
-         'q':'kg/kg',
-         'cc': '-',
-         'cveg': '-',
-         'wg': 'm3 m-3',
-         'w2': 'm3 m-3',
-         #'wg': 'kg/kg',
-         'Tsoil': 'K',
-         'T2': 'K',
-         'z0m': 'm',
-         'alpha': '-',
-         'LAI': '-',
-         'dhdt':'m/h',
-         'dthetadt':'K/h',
-         'dqdt':'kg/kg/h',
-         'BR': '-',
-         'EF': '-',
-}
-
-class class4gl_input(object):
-# this was the way it was defined previously.
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
-
-    def __init__(self,set_pars_defaults=True,debug_level=None):
-
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        print('hello')
-        self.logger = logging.getLogger('class4gl_input')
-        print(self.logger)
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # # create logger
-        # self.logger = logging.getLogger('class4gl_input')
-        # self.logger.setLevel(debug_level)
-
-        # # create console handler and set level to debug
-        # ch = logging.StreamHandler()
-        # ch.setLevel(debug_level)
-
-        # # create formatter
-        # formatter = logging.Formatter('%(asctime)s - \
-        #                                %(name)s - \
-        #                                %(levelname)s - \
-        #                                %(message)s')
-        # add formatter to ch
-        # ch.setFormatter(formatter)
-     
-        # # add ch to logger
-        # self.logger.addHandler(ch)
-
-        # """ end set up logger """
-
-
-
-        # these are the standard model input single-value parameters for class
-        self.pars = model_input()
-
-        # diagnostic parameters of the initial profile
-        self.diag = dict()
-
-        # In this variable, we keep track of the different parameters from where it originates from. 
-        self.sources = {}
-
-        if set_pars_defaults:
-            self.set_pars_defaults()
-
-    def set_pars_defaults(self):
-
-        """ 
-        Create empty model_input and set up case
-        """
-        defaults = dict( 
-        dt         = 60.    , # time step [s] 
-        runtime    = 6*3600 ,  # total run time [s]
-        
-        # mixed-layer input
-        sw_ml      = True   ,  # mixed-layer model switch
-        sw_shearwe = False  ,  # shear growth mixed-layer switch
-        sw_fixft   = False  ,  # Fix the free-troposphere switch
-        h          = 200.   ,  # initial ABL height [m]
-        Ps         = 101300.,  # surface pressure [Pa]
-        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
-        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
-        
-        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
-        dtheta     = 1.     ,  # initial temperature jump at h [K]
-        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
-        advtheta   = 0.     ,  # advection of heat [K s-1]
-        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
-        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
-        
-        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
-        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
-        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
-        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
-        
-        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
-        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
-        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
-        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
-        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
-        sw_wind    = True  ,  # prognostic wind switch
-        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
-        du         = 0.     ,  # initial u-wind jump at h [m s-1]
-        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
-        advu       = 0.     ,  # advection of u-wind [m s-2]
-        v          = 0.0    , # initial mixed-layer u-wind speed [m s-1]
-        dv         = 0.0    ,  # initial u-wind jump at h [m s-1]
-        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
-        advv       = 0.     ,  # advection of v-wind [m s-2]
-        sw_sl      = True   , # surface layer switch
-        ustar      = 0.3    ,  # surface friction velocity [m s-1]
-        z0m        = 0.02   ,  # roughness length for momentum [m]
-        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
-        sw_rad     = True   , # radiation switch
-        lat        = 51.97  ,  # latitude [deg]
-        lon        = -4.93  ,  # longitude [deg]
-        doy        = 268.   ,  # day of the year [-]
-        tstart     = 6.8    ,  # time of the day [h UTC]
-        cc         = 0.0    ,  # cloud cover fraction [-]
-        Q          = 400.   ,  # net radiation [W m-2] 
-        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
-        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
-        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
-        cveg       = 0.85   ,  # vegetation fraction [-]
-        Tsoil      = 295.   ,  # temperature top soil layer [K]
-        Ts         = 295.   ,    # initial surface temperature [K]
-        T2         = 296.   ,  # temperature deeper soil layer [K]
-        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
-        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
-        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
-        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
-        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
-        wfc        = 0.323  ,  # volumetric water content field capacity [-]
-        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
-        C1sat      = 0.132  ,  
-        C2ref      = 1.8    ,
-        LAI        = 2.     ,  # leaf area index [-]
-        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
-        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
-        rssoilmin  = 50.    ,  # minimun resistance soil evaporation [s m-1]
-        alpha      = 0.25   ,  # surface albedo [-]
-        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
-        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
-        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
-        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
-        sw_cu      = False  ,  # Cumulus parameterization switch
-        dz_h       = 150.   ,  # Transition layer thickness [m]
-        cala       = None   ,  # soil heat conductivity [W/(K*m)]
-        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
-        sw_ls      = True   ,
-        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields  as input from eg., ERA-INTERIM
-        sw_lit     = False,
-        )
-        pars = model_input()
-        for key in defaults:
-            pars.__dict__[key] = defaults[key]
-        
-        self.update(source='defaults',pars=pars)
-        
-    def clear(self):
-        """ this procudure clears the class4gl_input """
-
-        for key in list(self.__dict__.keys()):
-            del(self.__dict__[key])
-        self.__init__()
-
-    def dump(self,file):
-        """ this procedure dumps the class4gl_input object into a yaml file
-            
-            Input: 
-                - self.__dict__ (internal): the dictionary from which we read 
-            Output:
-                - file: All the parameters in self.__init__() are written to
-                the yaml file, including pars, air_ap, sources etc.
-        """
-        file.write('---\n')
-        index = file.tell()
-        file.write('# CLASS4GL input; format version: 0.1\n')
-
-        # write out the position of the current record
-        yaml.dump({'index':index}, file, default_flow_style=False)
-
-        # we do not include the none values
-        for key,data in self.__dict__.items():
-            #if ((type(data) == model_input) or (type(class4gl_input):
-            if key == 'pars':
-
-                pars = {'pars' : self.__dict__['pars'].__dict__}
-                parsout = {}
-                for key in pars.keys():
-                    if pars[key] is not None:
-                        parsout[key] = pars[key]
-
-                yaml.dump(parsout, file, default_flow_style=False)
-            elif type(data) == dict:
-                if key == 'sources':
-                    # in case of sources, we want to have a
-                    # condensed list format as well, so we leave out
-                    # 'default_flow_style=False'
-                    yaml.dump({key : data}, file)
-                else: 
-                    yaml.dump({key : data}, file,
-                              default_flow_style=False)
-            elif type(data) == pd.DataFrame:
-                # in case of dataframes (for profiles), we want to have a
-                # condensed list format as well, so we leave out
-                # 'default_flow_style=False'
-                yaml.dump({key: data.to_dict(orient='list')},file)
-
-                # # these are trials to get it into a more human-readable
-                # fixed-width format, but it is too complex
-                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
-                #file.write(stream)
-                
-                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
-                #file.write(key+': !!str |\n')
-                #file.write(str(data)+'\n')
-       
-    def load_yaml_dict(self,yaml_dict,reset=True):
-        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
-            
-            Input: 
-                - yaml_dict: the dictionary from which we read 
-                - reset: reset data before reading        
-            Output:
-                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
-        """
-        
-        if reset:
-            for key in list(self.__dict__.keys()):
-                del(self.__dict__[key])
-            self.__init__()
-
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                self.__dict__[key] = model_input()
-                self.__dict__[key].__dict__ = data
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            elif key == 'sources':
-                self.__dict__[key] = data
-            elif key == 'diag':
-                self.__dict__[key] = data
-            else: 
-                warnings.warn("Key '"+key+"' may not be implemented.")
-                self.__dict__[key] = data
-
-    def update(self,source,**kwargs):
-        """ this procedure is to make updates of input parameters and tracking
-        of their source more convenient. It implements the assignment of
-        parameter source/sensitivity experiment IDs ('eg.,
-        'defaults', 'sounding balloon', any satellite information, climate
-        models, sensitivity tests etc.). These are all stored in a convenient
-        way with as class4gl_input.sources.  This way, the user can always consult with
-        from where parameters data originates from.  
-        
-        Input:
-            - source:    name of the underlying dataset
-            - **kwargs: a dictionary of data input, for which the key values
-            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
-            the values is a again a dictionary/dataframe of datakeys/columns
-            ('wg','PRES','datetime', ...) and datavalues (either single values,
-            profiles ...), eg., 
-
-                pars = {'wg': 0.007  , 'w2', 0.005}
-                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
-                                     300.,...]}
-            
-        Output:
-            - self.__dict__[datatype] : object to which the parameters are
-                                        assigned. They can be consulted with
-                                        self.pars, self.profiles, etc.
-                                        
-            - self.sources[source] : It supplements the overview overview of
-                                     data sources can be consulted with
-                                     self.sources. The structure is as follows:
-                                     as:
-                self.sources = { 
-                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
-                'GLEAM' :  ['pars:wg','pars:w2', ...],
-                 ...
-                }
-        
-        """
-
-        #print(source,kwargs)
-
-        for key,data in kwargs.items():
-
-            #print(key)
-            # if the key is not in class4gl_input object, then just add it. In
-            # that case, the update procedures below will just overwrite it 
-            if key not in self.__dict__:
-                self.__dict__[key] = data
-
-
-            
-
-            #... we do an additional check to see whether there is a type
-            # match. I not then raise a key error
-            if (type(data) != type(self.__dict__[key]) \
-                # we allow dict input for model_input pars
-                and not ((key == 'pars') and (type(data) == dict) and \
-                (type(self.__dict__[key]) == model_input))):
-
-                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
-
-
-            # This variable keeps track of the added data that is supplemented
-            # by the current source. We add this to class4gl_input.sources
-            datakeys = []
-
-            #... and we update the class4gl_input data, and this depends on the
-            # data type
-
-            if type(self.__dict__[key]) == pd.DataFrame:
-                # If the data type is a dataframe, then we update the columns
-                for column in list(data.columns):
-                    #print(column)
-                    self.__dict__[key][column] = data[column]
-                    datakeys.append(column)
-                    
-
-            elif type(self.__dict__[key]) == model_input:
-                # if the data type is a model_input, then we update its internal
-                # dictionary of parameters
-                if type(data) == model_input:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data.__dict__}
-                    datakeys = list(data.__dict__.keys())
-                elif type(data) == dict:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data}
-                    datakeys = list(data.keys())
-                else:
-                    raise TypeError('input key '+key+' is not of the same type\
-                                    as the one in the class4gl_object')
-
-            elif type(self.__dict__[key]) == dict:
-                # if the data type is a dictionary, we update the
-                # dictionary 
-                self.__dict__[key] = {self.__dict__[key] , data}
-                datakeys = list(data.keys())
-
-
-            # if source entry is not existing yet, we add it
-            if source not in self.sources.keys():
-                self.sources[source] = []
-
-
-            # self.logger.debug('updating section "'+\
-            #                  key+' ('+' '.join(datakeys)+')'\
-            #                  '" from source \
-            #                  "'+source+'"')
-
-            # Update the source dictionary: add the provided data keys to the
-            # specified source list
-            for datakey in datakeys:
-                # At first, remove the occurences of the keys in the other
-                # source lists
-                for sourcekey,sourcelist in self.sources.items():
-                    if key+':'+datakey in sourcelist:
-                        self.sources[sourcekey].remove(key+':'+datakey)
-                # Afterwards, add it to the current source list
-                self.sources[source].append(key+':'+datakey)
-
-
-        # # in case the datatype is a class4gl_input_pars, we update its keys
-        # # according to **kwargs dictionary
-        # if type(self.__dict__[datatype]) == class4gl_input_pars:
-        #     # add the data parameters to the datatype object dictionary of the
-        #     # datatype
-        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
-        #                                        **kwargs}
-        # # in case, the datatype reflects a dataframe, we update the columns according
-        # # to the *args list
-        # elif type(self.__dict__[datatype]) == pd.DataFrame:
-        #     for dataframe in args:
-        #         for column in list(dataframe.columns):
-        #             self.__dict__[datatype][column] = dataframe[column]
-        
-
-    def get_profile(self,IOBJ, *args, **argv):
-        # if type(IOBJ) == wyoming:
-        self.get_profile_wyoming(IOBJ,*args,**argv)
-        # else:
-        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
-        
-    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
-        """ 
-            Purpose: 
-                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
-
-            Input:
-                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
-                function will take the profile at the stream's current
-                position. 
-                2. air_ap_mode: which air profile do we take? 
-                    - b : best
-                    - l : according to lower limit for the mixed-layer height
-                            estimate
-                    - u : according to upper limit for the mixed-layer height
-                            estimate
-
-
-            Output:
-                1. all single-value parameters are stored in the
-                   class4gl_input.pars object
-                2. the souding profiles are stored in the in the
-                   class4gl_input.air_balloon dataframe
-                3. modified sounding profiles for which the mixed layer height
-                   is fitted
-                4. ...
-
-        """
-
-
-        # Raise an error in case the input stream is not the correct object
-        # if type(wy_strm) is not wyoming:
-        #    raise TypeError('Not a wyoming type input stream')
-
-        # Let's tell the class_input object that it is a Wyoming fit type
-        self.air_ap_type = 'wyoming'
-        # ... and which mode of fitting we apply
-        self.air_ap_mode = air_ap_mode
-
-        """ Temporary variables used for output """
-        # single value parameters derived from the sounding profile
-        dpars = dict()
-        # profile values
-        air_balloon = pd.DataFrame()
-        # fitted profile values
-        air_ap = pd.DataFrame()
-        
-        string = wy_strm.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = wy_strm.current.find_next('pre').find_next('pre').text
-        
-        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
-        dpars = {**dpars,
-                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
-               }
-        
-        # we get weird output when it's a numpy Timestamp, so we convert it to
-        # pd.datetime type
-
-        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
-        dpars['STNID'] = dpars['Station number']
-
-        # altitude above ground level
-        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
-        # absolute humidity in g/kg
-        air_balloon['q']= (air_balloon.MIXR/1000.) \
-                              / \
-                             (air_balloon.MIXR/1000.+1.)
-        # convert wind speed from knots to m/s
-        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
-        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-        
-        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
-        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
-
-        
-
-        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-
-        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
-        air_balloon['p'] = air_balloon.PRES*100.
-
-
-        # Therefore, determine the sounding that are valid for 'any' column 
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        #is_valid = (air_balloon.z >= 0)
-        # # this is an alternative pipe/numpy method
-        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
-        valid_indices = air_balloon.index[is_valid].values
-        print(valid_indices)
-
-        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-
-        air_balloon['t'] = air_balloon['TEMP']+273.15
-        air_balloon['theta'] = (air_balloon.t) * \
-                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
-        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
-
-        if len(valid_indices) > 0:
-            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
-            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            
-            # the final mixed-layer height that will be used by class. We round it
-            # to 1 decimal so that we get a clean yaml output format
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-
-
-        if np.isnan(dpars['h']):
-            dpars['Ps'] = np.nan
-
-
-
-
-        if ~np.isnan(dpars['h']):
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u 
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-            
-
-
-
-        # First 3 data points of the mixed-layer fit. We create a empty head
-        # first
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-        
-        #calculate mixed-layer jump ( this should be larger than 0.1)
-        
-        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        air_ap_head['HGHT'] = air_ap_head['z'] \
-                                + \
-                                np.round(dpars[ 'Station elevation'],1)
-        
-        # make a row object for defining the jump
-        jump = air_ap_head.iloc[0] * np.nan
-            
-        if air_ap_tail.shape[0] > 1:
-
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = dpars['theta']
-        z_low =     dpars['h']
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                (z_mean > (z_low+10.)) and \
-                (theta_mean > (theta_low+0.2) ) and \
-                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-
-
-
-
-
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        #print(air_ap['PRES'].iloc[0])
-
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-
-        
-        dpars['lat'] = dpars['Station latitude']
-        dpars['latitude'] = dpars['lat']
-        
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        dpars['longitude'] = dpars['Station longitude']
-        
-        dpars['ldatetime'] = dpars['datetime'] \
-                            + \
-                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-
-        # # we make a pars object that is similar to the destination object
-        # pars = model_input()
-        # for key,value in dpars.items():
-        #     pars.__dict__[key] = value
-
-
-        # we round the columns to a specified decimal, so that we get a clean
-        # output format for yaml
-        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
-                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
-                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
-# 
-        for column,decimal in decimals.items():
-            air_balloon[column] = air_balloon[column].round(decimal)
-            air_ap[column] = air_ap[column].round(decimal)
-
-        self.update(source='wyoming',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-
-        
-    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
-    
-        """
-        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
-                 according to the position (lat lon) and the class datetime and timespan
-                 globaldata should be a globaldata multifile object
-        
-        Input: 
-            - globaldata: this is the library object
-            - only_keys: only extract specified keys
-            - exclude_keys: do not inherit specified keys
-        """
-        classdatetime      = np.datetime64(self.pars.datetime_daylight)
-        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
-                                           + \
-                                           dt.timedelta(seconds=self.pars.runtime)\
-                                          )
-
-
-        # # list of variables that we get from global ground data
-        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
-        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
-        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
-        #                 'texture', 'itex', 'isoil', 'BR',
-        #                 'b', 'cveg',
-        #                 'C1sat', 
-        #                 'C2ref', 'p', 'a',
-        #                 ] #globaldata.datasets.keys():
-
-        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
-        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
-
-
-        if type(globaldata) is not data_global:
-            raise TypeError("Wrong type of input library") 
-
-        # by default, we get all dataset keys
-        keys = list(globaldata.datasets.keys())
-
-        # We add LAI manually, because it is not listed in the datasets and
-        #they its retreival is hard coded below based on LAIpixel and cveg
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            keys.append('LAI')
-
-        # # In case there is surface pressure, we also calculate the half-level
-        # # and full-level pressure fields
-        # if ('sp' in keys):
-        #     keys.append('pfull')
-        #     keys.append('phalf')
-
-        # If specified, we only take the keys that are in only_keys
-        if only_keys is not None:
-            for key in keys:
-                if key not in only_keys:
-                    keys.remove(key)
-                
-        # If specified, we take out keys that are in exclude keys
-        if exclude_keys is not None:
-            for key in keys:
-                if key in exclude_keys:
-                    keys.remove(key)
-
-        # we set everything to nan first in the pars section (non-profile parameters
-        # without lev argument), so that we can check afterwards whether the
-        # data is well-fetched or not.
-        for key in keys:
-            if not ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None) and \
-                ('lev' in globaldata.datasets[key].page[key].dims)):
-                self.update(source='globaldata',pars={key:np.nan})
-            # # we do not check profile input for now. We assume it is
-            # # available
-            #else:
-            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
-
-        self.logger.debug('getting keys "'+', '.join(keys)+'\
-                          from global data')
-
-        for key in keys:
-            # If we find it, then we obtain the variables
-            if ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None)):
-
-                # check first whether the dataset has a height coordinate (3d space)
-                if 'lev' in globaldata.datasets[key].page[key].dims:
-
-                    # first, we browse to the correct file that has the current time
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-                        globaldata.datasets[key].browse_page(time=classdatetime)
-
-                    
-                    if (globaldata.datasets[key].page is not None):
-                        # find longitude and latitude coordinates
-                        ilats = (np.abs(globaldata.datasets[key].page.lat -
-                                        self.pars.latitude) < 0.5)
-                        ilons = (np.abs(globaldata.datasets[key].page.lon -
-                                        self.pars.longitude) < 0.5)
-                        
-                        # if we have a time dimension, then we look up the required timesteps during the class simulation
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            itimes = ((globaldata.datasets[key].page.time >= \
-                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
-
-                            # In case we didn't find any correct time, we take the
-                            # closest one.
-                            if np.sum(itimes) == 0.:
-
-
-                                classdatetimemean = \
-                                    np.datetime64(self.pars.datetime_daylight + \
-                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
-                                                ))
-
-                                dstimes = globaldata.datasets[key].page.time
-                                time = dstimes.sel(time=classdatetimemean,method='nearest')
-                                itimes = (globaldata.datasets[key].page.time ==
-                                          time)
-                                
-                        else:
-                            # we don't have a time coordinate so it doesn't matter
-                            # what itimes is
-                            itimes = 0
-
-                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
-
-                        # over which dimensions we take a mean:
-                        dims = globaldata.datasets[key].page[key].dims
-                        namesmean = list(dims)
-                        namesmean.remove('lev')
-                        idxmean = [dims.index(namemean) for namemean in namesmean]
-                        
-                        value = \
-                        globaldata.datasets[key].page[key].isel(time=itimes,
-                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
-
-                        # Ideally, source should be equal to the datakey of globaldata.library 
-                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
-                        #  but therefore the globaldata class requires a revision to make this work
-                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
-
-                else:
-                    # this procedure is for reading the ground fields (2d space). 
-                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
-
-    
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-    
-                       # first, we browse to the correct file
-                       #print(key)
-                       globaldata.datasets[key].browse_page(time=classdatetime)
-    
-                    if globaldata.datasets[key].page is not None:
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - self.pars.latitude))
-                        ilat = np.where((DIST) == np.min(DIST))[0][0]
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - self.pars.longitude))
-                        ilon = np.where((DIST) == np.min(DIST))[0][0]
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - (self.pars.latitude + 0.5)))
-                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmax = ilat
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - (self.pars.longitude  + 0.5)))
-                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmax = ilon
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lat.values\
-                                - (self.pars.latitude - 0.5)))
-                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmin = ilat
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lon.values\
-                                - (self.pars.longitude  - 0.5)))
-                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmin = ilon        
-                        
-                        if ilatmin < ilatmax:
-                            ilatrange = range(ilatmin,ilatmax+1)
-                        else:
-                            ilatrange = range(ilatmax,ilatmin+1)
-                            
-                        if ilonmin < ilonmax:
-                            ilonrange = range(ilonmin,ilonmax+1)
-                        else:
-                            ilonrange = range(ilonmax,ilonmin+1)     
-                            
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                            
-                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
-                                idatetime += 1
-                            
-                            classdatetimeend = np.datetime64(\
-                                                             self.pars.datetime +\
-                                                             dt.timedelta(seconds=self.pars.runtime)\
-                                                            ) 
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
-                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
-                                idatetimeend -= 1
-                            idatetime = np.min((idatetime,idatetimeend))
-                            #for gleam, we take the previous day values
-                            if key in ['wg', 'w2']:
-                                idatetime = idatetime - 1
-                                idatetimeend = idatetimeend - 1
-
-                            # in case of soil temperature, we take the exact
-                            # timing (which is the morning)
-                            if key in ['Tsoil','T2']:
-                                idatetimeend = idatetime
-                            
-                            idts = range(idatetime,idatetimeend+1)
-                            
-                            count = 0
-                            self.__dict__[key] = 0.
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    for iidts in idts:
-                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
-                                        count += 1
-                            value = value/count
-                            self.update(source='globaldata',pars={key:value.item()})
-                                
-                        else:
-                                
-                            count = 0
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
-                                    count += 1
-                            value = value/count                        
-
-                            self.update(source='globaldata',pars={key:value.item()})
-
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            self.logger.debug('also update LAI based on LAIpixel and cveg') 
-            # I suppose LAI pixel is already determined in the previous
-            # procedure. Anyway...
-            key = 'LAIpixel'
-
-            if globaldata.datasets[key].page is not None:
-                # first, we browse to the correct file that has the current time
-                if 'time' in list(globaldata.datasets[key].page[key].dims):
-                    globaldata.datasets[key].browse_page(time=classdatetime)
-            
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - self.pars.latitude))
-                ilat = np.where((DIST) == np.min(DIST))[0][0]
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - self.pars.longitude))
-                ilon = np.where((DIST) == np.min(DIST))[0][0]
-                 
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude + 0.5)))
-                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmax = ilat
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values \
-                        - (self.pars.longitude  + 0.5)))
-                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmax = ilon
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude - 0.5)))
-                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmin = ilat
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - (self.pars.longitude  - 0.5)))
-                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmin = ilon        
-                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                
-                
-                if ilatmin < ilatmax:
-                    ilatrange = range(ilatmin,ilatmax+1)
-                else:
-                    ilatrange = range(ilatmax,ilatmin+1)
-                    
-                if ilonmin < ilonmax:
-                    ilonrange = range(ilonmin,ilonmax+1)
-                else:
-                    ilonrange = range(ilonmax,ilonmin+1)           
-                
-                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
-                LAIpixel = 0.
-                count = 0
-                for iilat in [ilat]: #ilatrange
-                    for iilon in [ilon]: #ilonrange
-                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
-                        
-                                        
-                        # if np.isnan(tarray[idatetime]):
-                        #     print("interpolating GIMMS LAIpixel nan value")
-                        #     
-                        #     mask = np.isnan(tarray)
-                        #     
-                        #     #replace each nan value with a interpolated value
-                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-                        #         
-                        #     else:
-                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
-                    
-                        #         tarray *= np.nan 
-                        
-                        count += 1
-                        #tarray_res += tarray
-                LAIpixel = LAIpixel/count
-                
-                count = 0
-                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
-  
-                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
-                #print('LAIpixel:',self.__dict__['LAIpixel'])
-                #print('cveg:',self.__dict__['cveg'])
-                
-                # finally, we rescale the LAI according to the vegetation
-                # fraction
-                value = 0. 
-                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
-                   value =self.pars.LAIpixel/self.pars.cveg
-                else:
-                    # in case of small vegetation fraction, we take just a standard 
-                    # LAI value. It doesn't have a big influence anyway for
-                    # small vegetation
-                    value = 2.
-                #print('LAI:',self.__dict__['LAI'])
-                self.update(source='globaldata',pars={'LAI':value}) 
-
-
-        # in case we have 'sp', we also calculate the 3d pressure fields at
-        # full level and half level
-        if ('sp' in keys) and ('sp' in self.pars.__dict__):
-            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
-
-            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # hydrostatic thickness of each model layer
-            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
-            # # dz = rhodz/(R * T / pfull)
-
-
-            # # subsidence multiplied by density. We calculate the subsidence of
-            # # the in class itself
-            # wrho = np.zeros_like(phalf)
-            # wrho[-1] = 0. 
-
-            # for ihlev in range(0,wrho.shape[0]-1):
-            #     # subsidence multiplied by density is the integral of
-            #     # divergences multiplied by the layer thicknessies
-            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
-            #                     self.air_ac['divU_y'][ihlev:]) * \
-            #                    delpdgrav[ihlev:]).sum()
-
-
-            
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'p':list(pfull)}))
-            self.update(source='globaldata',\
-                        air_ach=pd.DataFrame({'p':list(phalf)}))
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
-            # self.update(source='globaldata',\
-            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
-
-    def check_source(self,source,check_only_sections=None):
-        """ this procedure checks whether data of a specified source is valid.
-
-        INPUT:
-            source: the data source we want to check
-            check_only_sections: a string or list with sections to be checked
-        OUTPUT:
-            returns True or False
-        """
-
-        # we set source ok to false as soon as we find a invalid input
-        source_ok = True
-
-        # convert to a single-item list in case of a string
-        check_only_sections_def = (([check_only_sections]) if \
-                                   type(check_only_sections) is str else \
-                                    check_only_sections)
-                                  
-        if source not in self.sources.keys():
-            self.logger.info('Source '+source+' does not exist')
-            source_ok = False
-
-        for sectiondatakey in self.sources[source]:                             
-            section,datakey = sectiondatakey.split(':')                         
-            if ((check_only_sections_def is None) or \
-                (section in check_only_sections_def)):                          
-                checkdatakeys = []
-                if type(self.__dict__[section]) is pd.DataFrame:
-                    checkdata = self.__dict__[section]
-                elif type(self.__dict__[section]) is model_input:
-                    checkdata = self.__dict__[section].__dict__
-
-                if (datakey not in checkdata):                              
-                    # self.logger.info('Expected key '+datakey+\
-                    #                  ' is not in parameter input')                        
-                    source_ok = False                                           
-                elif (checkdata[datakey] is None) or \
-                     (pd.isnull(checkdata[datakey]) is True):                    
-        
-                    # self.logger.info('Key value of "'+datakey+\
-                    #                  '" is invalid: ('+ \
-                    # str(self.__dict__[section].__dict__[datakey])+')')         
-                    source_ok = False
-
-        return source_ok
-
-    def check_source_globaldata(self):
-        """ this procedure checks whether all global parameter data is
-        available, according to the keys in the self.sources"""
-
-        source_globaldata_ok = True
-
-        #self.get_values_air_input()
-
-        # and now we can get the surface values
-        #class_settings = class4gl_input()
-        #class_settings.set_air_input(input_atm)
-        
-        # we only allow non-polar stations
-        if not (self.pars.lat <= 60.):
-            source_globaldata_ok = False
-            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-        
-        # check lat and lon
-        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
-            source_globaldata_ok = False
-            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
-            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
-        else:
-            # we only check the ground parameter data (pars section). The 
-            # profile data (air_ap section) are supposed to be valid in any 
-            # case.
-            source_ok = self.check_source(source='globaldata',\
-                                          check_only_sections=['air_ac',\
-                                                               'air_ap',\
-                                                               'pars'])
-            if not source_ok:
-                source_globaldata_ok = False
-        
-            # Additional check: we exclude desert-like
-            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
-                source_globaldata_ok = False
-                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
-                source_globaldata_ok = False
-                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
-            elif self.pars.cveg < 0.02:
-                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
-                source_globaldata_ok = False
-
-        return source_globaldata_ok
-
-
class c4gli_iterator():
    """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
    
        for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
    """
    def __init__(self,file):
        # take file as IO stream
        self.file = file
        # lazy generator over the yaml documents in the stream.
        # NOTE(review): yaml.load_all without an explicit Loader is
        # deprecated/unsafe on untrusted input -- confirm whether
        # safe_load_all would work with these records.
        self.yaml_generator = yaml.load_all(file)
        # last raw document read, and a reusable class4gl_input that is
        # re-filled on every __next__ call
        self.current_dict = {}
        self.current_class4gl_input = class4gl_input()
        # consume the leading document separator and the format header
        # before the generator starts parsing; the reads below rely on the
        # generator not having touched the stream yet.
        separator = self.file.readline() # this is just dummy
        self.header = file.readline()
        if self.header != '# CLASS4GL record; format version: 0.1\n':
            raise NotImplementedError("Wrong format version: '"+self.header+"'")
    def __iter__(self):
        # standard iterator protocol: the object is its own iterator
        return self
    def __next__(self):
        # parse the next yaml document and load it into the (shared)
        # class4gl_input instance; StopIteration propagates from the
        # generator at end of file
        self.current_dict = self.yaml_generator.__next__()
        self.current_class4gl_input.load_yaml_dict(self.current_dict)
        return self.current_class4gl_input
-
-
-
#get_cape and lift_parcel are adapted from the SkewT package
    
class gl_dia(object):
    """Diagnostics derived from a class model run (input/output objects)."""
    def get_lifted_index(self,timestep=-1):
        # Compute the lifted index from surface pressure, 2m temperature and
        # humidity at the given output timestep (default: last) together with
        # the pressure/potential-temperature profiles, lifting up to 500 hPa.
        # NOTE(review): relies on module-level get_lifted_index and on
        # self.input/self.out/self.p_pro/self.theta_pro being set elsewhere.
        self.LI = get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.)
-    
-#from SkewT
-#def get_lcl(startp,startt,startdp,nsteps=101):
-#    from numpy import interp
-#    #--------------------------------------------------------------------
-#    # Lift a parcel dry adiabatically from startp to LCL.
-#    # Init temp is startt in K, Init dew point is stwrtdp,
-#    # pressure levels are in Pa    
-#    #--------------------------------------------------------------------
-#
-#    assert startdp<=startt
-#
-#    if startdp==startt:
-#        return np.array([startp]),np.array([startt]),np.array([startdp]),
-#
-#    # Pres=linspace(startp,60000.,nsteps)
-#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
-#
-#    # Lift the dry parcel
-#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
-#    # Mixing ratio isopleth
-#    starte=VaporPressure(startdp)
-#    startw=MixRatio(starte,startp)
-#    e=Pres*startw/(.622+startw)
-#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
-#
-#    # Solve for the intersection of these lines (LCL).
-#    # interp requires the x argument (argument 2)
-#    # to be ascending in order!
-#    P_lcl=interp(0.,T_iso-T_dry,Pres)
-#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
-#
-#    # # presdry=linspace(startp,P_lcl)
-#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
-#
-#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
-#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
-#
-#    return P_lcl,T_lcl
-
-
-
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    """ Calculate mixed-layer height from temperature and wind speed profile
-
-        Input:
-            HAGL: height coordinates [m]
-            THTV: virtual potential temperature profile [K]
-            WSPD: wind speed profile [m/s]
-
-        Output:
-            BLH: best-guess mixed-layer height
-            BLHu: upper limit of mixed-layer height
-            BLHl: lower limit of mixed-layer height
-
-    """
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHl
-
-
-
-#from class
-def get_lcl(startp,startt,startqv):
-        # Find lifting condensation level iteratively
-    lcl = 20.
-    RHlcl = 0.5
-    
-    itmax = 30
-    it = 0
-    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it
Date: Tue, 21 Aug 2018 22:01:12 +0200
Subject: [PATCH 010/129] restructure again

---
 bin/__init__.py                           |    7 -
 bin/setup/batch_setup_global.py           |   42 -
 bin/setup/setup_bllast.py                 |  719 -------
 bin/setup/setup_global.py                 |  310 ---
 bin/setup/setup_goamazon.py               |  740 -------
 bin/setup/setup_humppa.py                 |  732 -------
 bin/setup/trash/setup_global_old.py       |  284 ---
 bin/simulations/batch_simulations.py      |   77 -
 bin/simulations/runmodel.py               |  130 --
 bin/simulations/simulations.py            |  260 ---
 bin/simulations/simulations_iter.py       |  364 ----
 bin/simulations/simulations_iter_test.py  |  367 ----
 bin/simulations/trash/run_test.py         |  241 ---
 build/lib/bin/__init__.py                 |    6 -
 build/lib/class4gl/__init__.py            |    7 -
 build/lib/class4gl/class4gl.py            | 1611 ---------------
 build/lib/class4gl/data_air.py            |  473 -----
 build/lib/class4gl/data_global.py         |  936 ---------
 build/lib/class4gl/interface_functions.py |  506 -----
 build/lib/class4gl/interface_multi.py     | 2061 -------------------
 build/lib/class4gl/model.py               | 2214 ---------------------
 class4gl.py                               | 1611 ---------------
 22 files changed, 13698 deletions(-)
 delete mode 100644 bin/__init__.py
 delete mode 100644 bin/setup/batch_setup_global.py
 delete mode 100644 bin/setup/setup_bllast.py
 delete mode 100644 bin/setup/setup_global.py
 delete mode 100644 bin/setup/setup_goamazon.py
 delete mode 100644 bin/setup/setup_humppa.py
 delete mode 100644 bin/setup/trash/setup_global_old.py
 delete mode 100644 bin/simulations/batch_simulations.py
 delete mode 100644 bin/simulations/runmodel.py
 delete mode 100644 bin/simulations/simulations.py
 delete mode 100644 bin/simulations/simulations_iter.py
 delete mode 100644 bin/simulations/simulations_iter_test.py
 delete mode 100644 bin/simulations/trash/run_test.py
 delete mode 100644 build/lib/bin/__init__.py
 delete mode 100644 build/lib/class4gl/__init__.py
 delete mode 100644 build/lib/class4gl/class4gl.py
 delete mode 100644 build/lib/class4gl/data_air.py
 delete mode 100644 build/lib/class4gl/data_global.py
 delete mode 100644 build/lib/class4gl/interface_functions.py
 delete mode 100644 build/lib/class4gl/interface_multi.py
 delete mode 100644 build/lib/class4gl/model.py
 delete mode 100644 class4gl.py

diff --git a/bin/__init__.py b/bin/__init__.py
deleted file mode 100644
index a21583b..0000000
--- a/bin/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from . import model,class4gl,interface_multi,data_air,data_global
-
-__version__ = '0.1.0'
-
-__author__ = 'Hendrik Wouters '
-
-__all__ = []
diff --git a/bin/setup/batch_setup_global.py b/bin/setup/batch_setup_global.py
deleted file mode 100644
index 4a3f623..0000000
--- a/bin/setup/batch_setup_global.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-''' 
-Purpose: 
-    launch array job to get sounding and other global forcing data in class4gl input format"
-Usage:
-    python start_setup_global.py
-
-Author:
-    Hendrik Wouters 
-
-'''
-
-import pandas as pd
-import os
-import math
-import numpy as np
-import sys
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-df_stations = pd.read_csv(fn_stations)
-
-# if sys.argv[1] == 'qsub':
-# with qsub
-STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = len(STNlist) 
-print(PROCS)
-BATCHSIZE = math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))
-# elif sys.argv[1] == 'wsub':
-#     
-#     
-#     # with wsub
-#     STNlist = list(df_stations.iterrows())
-#     NUMSTNS = len(STNlist)
-#     PROCS = NUMSTNS 
-#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-# 
-#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/SOUNDINGS/setup_global.pbs -t 0-'+str(PROCS-1))
-
diff --git a/bin/setup/setup_bllast.py b/bin/setup/setup_bllast.py
deleted file mode 100644
index af8c8bb..0000000
--- a/bin/setup/setup_bllast.py
+++ /dev/null
@@ -1,719 +0,0 @@
-# -*- coding: utf-8 -*-
-# Read data from BLLAST campaing and convert it to class4gl input
-
-# WARNING!! stupid tab versus space formatting, grrrmmmlmlmlll!  the following command needs to be executed first: 
-#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : 42.971834,
-                  "longitude" : 0.3671169,
-                  "name" : "the BLLAST experiment"
-                })
-current_station.name = 90001
-
-
-
-
-
-# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
-# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
-# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
-# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
-
-HOUR_FILES = \
-{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110619_1750_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110620_1750_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110625_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110702_1655_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
-}
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-def efrom_rh100_T(rh100,T):
-    return esat(T)*rh100/100.
-def qfrom_e_p(e,p):
-    return epsilon * e/(p - (1.-epsilon)*e)
-
-def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
-                                     #widths=[14]*19,
-                                     #skiprows=9,
-                                     #skipfooter=15,
-                                     #decimal='.',
-                                     #header=None,
-                                     #names = columns,
-                                     #na_values='-----')
-        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
-        print(air_balloon_in.columns)
-        rowmatches = {
-            't':      lambda x: x['TaRad']+273.15,
-            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
-            'p':      lambda x: x['Press']*100.,
-            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
-            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
-            'z':      lambda x: x['Altitude'] -582.,
-            # from virtual temperature to absolute humidity
-            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
-        }
-        
-        air_balloon = pd.DataFrame()
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon_in)
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
-        }
-        
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-        # filter data so that potential temperature always increases with
-        # height 
-        cols = []
-        for column in air_ap_tail.columns:
-            #if column != 'z':
-                cols.append(column)
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-
-
-
-        # # make theta increase strong enough to avoid numerical
-        # # instability
-        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        # air_ap_tail = pd.DataFrame()
-        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # theta_low = air_ap_head['theta'].iloc[2]
-        # z_low = air_ap_head['z'].iloc[2]
-        # ibottom = 0
-        # for itop in range(0,len(air_ap_tail_orig)):
-        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-        #     if ((theta_mean > (theta_low+0.2) ) and \
-        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
-
-        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-        #         ibottom = itop+1
-        #         theta_low = air_ap_tail.theta.iloc[-1]
-        #         z_low =     air_ap_tail.z.iloc[-1]
-        #     # elif  (itop > len(air_ap_tail_orig)-10):
-        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        # air_ap = \
-        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        # 
-        # # we copy the pressure at ground level from balloon sounding. The
-        # # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually
-        # write local solar time, we need to assign the timezone to UTC (which
-        # is WRONG!!!). Otherwise ruby cannot understand it (it always converts
-        # tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise']+dt.timedelta(hours=2))\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        print('ldatetime_daylight',dpars['ldatetime_daylight'])
-        print('ldatetime',dpars['ldatetime'])
-        print('lSunrise',dpars['lSunrise'])
-        dpars['day'] = dpars['ldatetime'].day
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        print('tstart',dpars['tstart'])
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='bllast',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
-    
-    print(humpafn)
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-
-    
-    c4gli_morning.pars.sw_ac = []
-    c4gli_morning.pars.sw_ap = True
-    c4gli_morning.pars.sw_lit = False
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-
-
-# stations_for_iter = stations(path_exp)
-# for STNID,station in stations_iterator(stations_for_iter):
-#     records_current_station_index = \
-#             (records_ini.index.get_level_values('STNID') == STNID)
-#     file_current_station_mod = STNID
-# 
-#     with \
-#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-#         for (STNID,index),record_ini in records_iterator(records_ini):
-#             c4gli_ini = get_record_yaml(file_station_ini, 
-#                                         record_ini.index_start, 
-#                                         record_ini.index_end,
-#                                         mode='ini')
-#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-# 
-#             record_mod = records_mod.loc[(STNID,index)]
-#             c4gl_mod = get_record_yaml(file_station_mod, 
-#                                         record_mod.index_start, 
-#                                         record_mod.index_end,
-#                                         mode='mod')
-#             record_afternoon = records_afternoon.loc[(STNID,index)]
-#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-#                                         record_afternoon.index_start, 
-#                                         record_afternoon.index_end,
-#                                         mode='ini')
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/bin/setup/setup_global.py b/bin/setup/setup_global.py
deleted file mode 100644
index 79224d9..0000000
--- a/bin/setup/setup_global.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thursday, March 29, 11:30 AM
-
-@author: Hendrik Wouters
-
-The dry-2-dry global radio sounding experiment.
-
-usage:
-    python setup_global.py 
-    where  is an integer indicating the row index of the station list
-    under odir+'/'+fn_stations (see below)
-
-this scripts should be called from the pbs script setup_global.pbs
-
-
-
-dependencies:
-    - pandas
-    - class4gl
-    - data_soundings
-
-
-"""
-
-""" import libraries """
-import pandas as pd
-import sys
-#import copy as cp
-import numpy as np
-from sklearn.metrics import mean_squared_error
-import logging
-import datetime as dt
-import os
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-
-
-#calculate the root mean square error
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from data_soundings import wyoming
-#from data_global import data_global
-
-# iniitialize global data
-globaldata = data_global()
-# ...  and load initial data pages
-globaldata.load_datasets(recalc=0)
-
-# read the list of stations with valid ground data (list generated with
-# get_valid_stations.py)
-idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-
-df_stations = pd.read_csv(fn_stations)
-
-
-STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = 100
-BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-
-
-iPROC = int(sys.argv[1])
-
-
-for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
-    one_run = False
-# for iSTN,STN in STNlist[5:]:  
-    
-    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
-    
-
-    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
-    #                   for EXP in experiments.keys()])
-        
-    with open(fnout,'w') as fileout, \
-         open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
-        wy_strm.set_STNM(int(STN['ID']))
-
-        # we consider all soundings after 1981
-        wy_strm.find_first(year=1981)
-        #wy_strm.find(dt.datetime(2004,10,19,6))
-        
-        c4gli = class4gl_input(debug_level=logging.INFO)
-        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
-        # so we continue as long as we can find a new sounding
-                
-        while wy_strm.current is not None:
-            
-            c4gli.clear()
-            try:
-                c4gli.get_profile_wyoming(wy_strm)
-                #print(STN['ID'],c4gli.pars.datetime)
-                #c4gli.get_global_input(globaldata)
-
-                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
-
-                logic = dict()
-                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
-                logic['daylight'] = \
-                    ((c4gli.pars.ldatetime_daylight - 
-                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
-                
-                logic['springsummer'] = (c4gli.pars.theta > 278.)
-                
-                # we take 3000 because previous analysis (ie., HUMPPA) has
-                # focussed towards such altitude
-                le3000 = (c4gli.air_balloon.z <= 3000.)
-                logic['10measurements'] = (np.sum(le3000) >= 10) 
-
-                leh = (c4gli.air_balloon.z <= c4gli.pars.h)
-
-                logic['mlerrlow'] = (\
-                        (len(np.where(leh)[0]) > 0) and \
-                        # in cases where humidity is not defined, the mixed-layer
-                        # values get corr
-                        (not np.isnan(c4gli.pars.theta)) and \
-                        (rmse(c4gli.air_balloon.theta[leh] , \
-                              c4gli.pars.theta,filternan_actual=True) < 1.)\
-                              )
-    
-
-                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
-                
-                print('logic:', logic)
-                # the result
-                morning_ok = np.mean(list(logic.values()))
-                print(morning_ok,c4gli.pars.ldatetime)
-
-            except:
-                morning_ok =False
-                print('obtain morning not good')
-            # the next sounding will be used either for an afternoon sounding
-            # or for the morning sounding of the next day.
-            wy_strm.find_next()
-            # If the morning is ok, then we try to find a decent afternoon
-            # sounding
-            if morning_ok == 1.:
-                print('MORNING OK!')
-                # we get the current date
-                current_date = dt.date(c4gli.pars.ldatetime.year, \
-                                       c4gli.pars.ldatetime.month, \
-                                       c4gli.pars.ldatetime.day)
-                c4gli_afternoon.clear()
-                print('AFTERNOON PROFILE CLEARED')
-                try:
-                    c4gli_afternoon.get_profile_wyoming(wy_strm)
-                    print('AFTERNOON PROFILE OK')
-
-                    if wy_strm.current is not None:
-                        current_date_afternoon = \
-                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                           c4gli_afternoon.pars.ldatetime.month, \
-                                           c4gli_afternoon.pars.ldatetime.day)
-                    else:
-                        # a dummy date: this will be ignored anyway
-                        current_date_afternoon = dt.date(1900,1,1)
-
-                    # we will dump the latest afternoon sounding that fits the
-                    # minimum criteria specified by logic_afternoon
-                    print(current_date,current_date_afternoon)
-                    c4gli_afternoon_for_dump = None
-                    while ((current_date_afternoon == current_date) and \
-                           (wy_strm.current is not None)):
-                        logic_afternoon =dict()
-
-                        logic_afternoon['afternoon'] = \
-                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
-                        logic_afternoon['daylight'] = \
-                          ((c4gli_afternoon.pars.ldatetime - \
-                            c4gli_afternoon.pars.ldatetime_daylight \
-                           ).total_seconds()/3600. <= 0.)
-
-
-                        le3000_afternoon = \
-                            (c4gli_afternoon.air_balloon.z <= 3000.)
-                        logic_afternoon['5measurements'] = \
-                            (np.sum(le3000_afternoon) >= 5) 
-
-                        # we only store the last afternoon sounding that fits these
-                        # minimum criteria
-
-                        afternoon_ok = np.mean(list(logic_afternoon.values()))
-
-                        print('logic_afternoon: ',logic_afternoon)
-                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
-                        if afternoon_ok == 1.:
-                            # # doesn't work :(
-                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
-                            
-                            # so we just create a new one from the same wyoming profile
-                            c4gli_afternoon_for_dump = class4gl_input()
-                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
-
-                        wy_strm.find_next()
-                        c4gli_afternoon.clear()
-                        c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                        if wy_strm.current is not None:
-                            current_date_afternoon = \
-                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                           c4gli_afternoon.pars.ldatetime.month, \
-                                           c4gli_afternoon.pars.ldatetime.day)
-                        else:
-                            # a dummy date: this will be ignored anyway
-                            current_date_afternoon = dt.date(1900,1,1)
-
-                        # Only in the case we have a good pair of soundings, we
-                        # dump them to disk
-                    if c4gli_afternoon_for_dump is not None:
-                        c4gli.update(source='pairs',pars={'runtime' : \
-                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
-                                 c4gli.pars.datetime_daylight).total_seconds())})
-    
-    
-                        print('ALMOST...')
-                        if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
-                                
-        
-                            c4gli.get_global_input(globaldata)
-                            print('VERY CLOSE...')
-                            if c4gli.check_source_globaldata() and \
-                                (c4gli.check_source(source='wyoming',\
-                                                   check_only_sections='pars')):
-                                c4gli.dump(fileout)
-                                
-                                c4gli_afternoon_for_dump.dump(fileout_afternoon)
-                                
-                                
-                                # for keyEXP,dictEXP in experiments.items():
-                                #     
-                                #     c4gli.update(source=keyEXP,pars = dictEXP)
-                                #     c4gl = class4gl(c4gli)
-                                #     # c4gl.run()
-                                #     
-                                #     c4gl.dump(c4glfiles[key])
-                                
-                                print('HIT!!!')
-                                one_run = True
-                except:
-                    print('get profile failed')
-                
-    if one_run:
-        STN.name = STN['ID']
-        all_records_morning = get_records(pd.DataFrame([STN]),\
-                                      odir,\
-                                      subset='morning',
-                                      refetch_records=True,
-                                      )
-        all_records_afternoon = get_records(pd.DataFrame([STN]),\
-                                      odir,\
-                                      subset='afternoon',
-                                      refetch_records=True,
-                                      )
-    else:
-        os.system('rm '+fnout)
-        os.system('rm '+fnout_afternoon)
-
-    # for c4glfile in c4glfiles:
-    #     c4glfile.close()            
-
diff --git a/bin/setup/setup_goamazon.py b/bin/setup/setup_goamazon.py
deleted file mode 100644
index f9efe2c..0000000
--- a/bin/setup/setup_goamazon.py
+++ /dev/null
@@ -1,740 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import xarray as xr
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-import glob
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : -3.21,
-                  "longitude" : -60.6,
-                  "name" : "the GOAMAZON experiment"
-                })
-current_station.name = 90002
-
-# we define the columns ourselves because it is a mess in the file itself.
-columns =\
-['Time[min:sec]',
- 'P[hPa]',
- 'T[C]',
- 'U[%]',
- 'Wsp[m/s]',
- 'Wdir[Grd]',
- 'Lon[°]',
- 'Lat[°]',
- 'Altitude[m]',
- 'GeoPot[m]',
- 'MRI',
- 'RI',    
- 'DewPoint[C]',
- 'Virt. Temp[C]',
- 'Rs[m/min]',
- 'D[kg/m3]',
- 'Azimut[°]',
- 'Elevation[°]',
- 'Range[m]',
-]
-
-DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
-DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)
-
-
-DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))]
-HOUR_FILES = {}
-for iDT, DT in enumerate(DTS):
-    morning_file = None
-    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf')
-    if len(possible_files)>0:
-        morning_file= possible_files[0]
-    afternoon_file = None
-    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*.cdf')
-    if len(possible_files)>0:
-        afternoon_file= possible_files[0]
-
-    if (morning_file is not None) and (afternoon_file is not None):
-        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
-                          'afternoon':[17.5,afternoon_file]}
-
-print(HOUR_FILES)
-
-# HOUR_FILES = \
-# {
-#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
-#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-# }
-
-
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-def efrom_rh100_T(rh100,T):
-    return esat(T)*rh100/100.
-def qfrom_e_p(e,p):
-    return epsilon * e/(p - (1.-epsilon)*e)
-
-
-
-def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
-        print(balloon_file)
-        
-        xrin = balloon_file
-        air_balloon = pd.DataFrame()
-
-        air_balloon['t'] = xrin.tdry.values+273.15
-        air_balloon['p'] = xrin.pres.values*100.
-        
-        air_balloon['u'] = xrin.u_wind.values
-        air_balloon['v'] = xrin.v_wind.values
-        air_balloon['WSPD'] = xrin['wspd'].values
-        
-        print(xrin.rh.values.shape)
-        air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)
-        
-
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        # air_balloon_in = pd.read_fwf(balloon_file,
-        #                              widths=[14]*19,
-        #                              skiprows=9,
-        #                              skipfooter=15,
-        #                              decimal=',',
-        #                              header=None,
-        #                              names = columns,
-        #                              na_values='-----')
-    
-
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
-            'rho': lambda x: x.p /x.t / x.R ,
-        }
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        print('alt in xrin?:','alt' in xrin)
-        if 'alt' in xrin:
-            air_balloon['z'] = xrin.alt.values
-        else:
-            air_balloon['z'] = 0.
-            for irow,row in air_balloon.iloc[1:].iterrows():
-                air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
-                        2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \
-                        (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])
-                        
-             
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-
-        # there are issues with the lower measurements in the HUMPPA campaign,
-        # for which a steady decrease of potential temperature is found, which
-        # is unrealistic.  Here I filter them away
-        ifirst = 0
-        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
-            ifirst = ifirst+1
-        print ('ifirst:',ifirst)
-        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            print(air_balloon.z.shape,air_balloon.thetav.shape,)
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
-        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='humppa',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn =pair['morning'][1]
-    print(humpafn)
-    balloon_file = xr.open_dataset(humpafn)
-
-    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn = pair['afternoon'][1]
-    balloon_file = xr.open_dataset(humpafn)
-
-    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-    c4gli_morning.update(source='manual',
-                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-
-"""
-stations_for_iter = stations(path_exp)
-for STNID,station in stations_iterator(stations_for_iter):
-    records_current_station_index = \
-            (records_ini.index.get_level_values('STNID') == STNID)
-    file_current_station_mod = STNID
-
-    with \
-    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-        for (STNID,index),record_ini in records_iterator(records_ini):
-            c4gli_ini = get_record_yaml(file_station_ini, 
-                                        record_ini.index_start, 
-                                        record_ini.index_end,
-                                        mode='ini')
-            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-
-            record_mod = records_mod.loc[(STNID,index)]
-            c4gl_mod = get_record_yaml(file_station_mod, 
-                                        record_mod.index_start, 
-                                        record_mod.index_end,
-                                        mode='mod')
-            record_afternoon = records_afternoon.loc[(STNID,index)]
-            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-                                        record_afternoon.index_start, 
-                                        record_afternoon.index_end,
-                                        mode='ini')
-"""
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/bin/setup/setup_humppa.py b/bin/setup/setup_humppa.py
deleted file mode 100644
index ff37628..0000000
--- a/bin/setup/setup_humppa.py
+++ /dev/null
@@ -1,732 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : 61.8448,
-                  "longitude" : 24.2882,
-                  "name" : "the HUMMPA experiment"
-                })
-current_station.name = 90000
-
-# we define the columns ourselves because it is a mess in the file itself.
-columns =\
-['Time[min:sec]',
- 'P[hPa]',
- 'T[C]',
- 'U[%]',
- 'Wsp[m/s]',
- 'Wdir[Grd]',
- 'Lon[°]',
- 'Lat[°]',
- 'Altitude[m]',
- 'GeoPot[m]',
- 'MRI',
- 'RI',    
- 'DewPoint[C]',
- 'Virt. Temp[C]',
- 'Rs[m/min]',
- 'D[kg/m3]',
- 'Azimut[°]',
- 'Elevation[°]',
- 'Range[m]',
-]
-
-
-HOUR_FILES = \
-{ dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
-  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[18,'humppa_071310_1800.txt']},
-  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
-  dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
-  dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
-  dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
-  dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
-  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[21,'humppa_071910_2100.txt']},
-#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
-  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
-  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[18,'humppa_072210_1800.txt']},
- # something is wrong with ths profile
- # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
-  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
-  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
-  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
-  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
-  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
-  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[18,'humppa_072910_1800.txt']},
-  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
-  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[15,'humppa_073110_1500.txt']},
-  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
-  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[18,'humppa_080210_1800.txt']},
-  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
-  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
-  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
-  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
-  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
-  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
-  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[18,'humppa_081010_1800.txt']},
-}
-
-
-
-
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        air_balloon_in = pd.read_fwf(balloon_file,
-                                     widths=[14]*19,
-                                     skiprows=9,
-                                     skipfooter=15,
-                                     decimal=',',
-                                     header=None,
-                                     names = columns,
-                                     na_values='-----')
-    
-        rowmatches = {
-            't':      lambda x: x['T[C]']+273.15,
-            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
-            'p':      lambda x: x['P[hPa]']*100.,
-            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
-            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
-            'z':      lambda x: x['Altitude[m]'],
-            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
-        }
-        
-        air_balloon = pd.DataFrame()
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon_in)
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
-        }
-        
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-
-        # there are issues with the lower measurements in the HUMPPA campaign,
-        # for which a steady decrease of potential temperature is found, which
-        # is unrealistic.  Here I filter them away
-        ifirst = 0
-        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
-            ifirst = ifirst+1
-        print ('ifirst:',ifirst)
-        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-
-        # # make theta increase strong enough to avoid numerical
-        # # instability
-        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        # air_ap_tail = pd.DataFrame()
-        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # theta_low = air_ap_head['theta'].iloc[2]
-        # z_low = air_ap_head['z'].iloc[2]
-        # ibottom = 0
-        # for itop in range(0,len(air_ap_tail_orig)):
-        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-        #     if ((theta_mean > (theta_low+0.2) ) and \
-        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
-
-        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-        #         ibottom = itop+1
-        #         theta_low = air_ap_tail.theta.iloc[-1]
-        #         z_low =     air_ap_tail.z.iloc[-1]
-        #     # elif  (itop > len(air_ap_tail_orig)-10):
-        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        # 
-        # air_ap = \
-        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='humppa',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
-    print(humpafn)
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-    c4gli_morning.update(source='manual',
-                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-# stations_for_iter = stations(path_exp)
-# for STNID,station in stations_iterator(stations_for_iter):
-#     records_current_station_index = \
-#             (records_ini.index.get_level_values('STNID') == STNID)
-#     file_current_station_mod = STNID
-# 
-#     with \
-#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-#         for (STNID,index),record_ini in records_iterator(records_ini):
-#             c4gli_ini = get_record_yaml(file_station_ini, 
-#                                         record_ini.index_start, 
-#                                         record_ini.index_end,
-#                                         mode='ini')
-#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-# 
-#             record_mod = records_mod.loc[(STNID,index)]
-#             c4gl_mod = get_record_yaml(file_station_mod, 
-#                                         record_mod.index_start, 
-#                                         record_mod.index_end,
-#                                         mode='mod')
-#             record_afternoon = records_afternoon.loc[(STNID,index)]
-#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-#                                         record_afternoon.index_start, 
-#                                         record_afternoon.index_end,
-#                                         mode='ini')
-
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/bin/setup/trash/setup_global_old.py b/bin/setup/trash/setup_global_old.py
deleted file mode 100644
index d812684..0000000
--- a/bin/setup/trash/setup_global_old.py
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thursday, March 29, 11:30 AM
-
-@author: Hendrik Wouters
-
-The dry-2-dry global radio sounding experiment.
-
-usage:
-    python setup_global.py 
-    where  is an integer indicating the row index of the station list
-    under odir+'/'+fn_stations (see below)
-
-this scripts should be called from the pbs script setup_global.pbs
-
-
-
-dependencies:
-    - pandas
-    - class4gl
-    - data_soundings
-
-
-"""
-
-""" import libraries """
-import pandas as pd
-import sys
-#import copy as cp
-import numpy as np
-from sklearn.metrics import mean_squared_error
-import logging
-import datetime as dt
-import os
-import math
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-
-
-#calculate the root mean square error
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from data_soundings import wyoming
-#from data_global import data_global
-
-# iniitialize global data
-globaldata = data_global()
-# ...  and load initial data pages
-globaldata.load_datasets(recalc=0)
-
-# read the list of stations with valid ground data (list generated with
-# get_valid_stations.py)
-idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-
-df_stations = pd.read_csv(fn_stations)
-
-
-STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = 100
-BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-
-
-iPROC = int(sys.argv[1])
-
-
-for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
-# for iSTN,STN in STNlist[5:]:  
-    
-    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
-    
-
-    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
-    #                   for EXP in experiments.keys()])
-        
-    with open(fnout,'w') as fileout, \
-         open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
-        wy_strm.set_STNM(int(STN['ID']))
-
-        # we consider all soundings after 1981
-        wy_strm.find_first(year=1981)
-        #wy_strm.find(dt.datetime(2004,10,19,6))
-        
-        c4gli = class4gl_input(debug_level=logging.INFO)
-        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
-        # so we continue as long as we can find a new sounding
-        while wy_strm.current is not None:
-            
-            c4gli.clear()
-            c4gli.get_profile_wyoming(wy_strm)
-            #print(STN['ID'],c4gli.pars.datetime)
-            #c4gli.get_global_input(globaldata)
-
-            print(c4gli.pars.STNID, c4gli.pars.ldatetime)
-
-            logic = dict()
-            logic['morning'] =  (c4gli.pars.ldatetime.hour < 12.)
-            logic['daylight'] = \
-                ((c4gli.pars.ldatetime_daylight - 
-                  c4gli.pars.ldatetime).total_seconds()/3600. <= 5.)
-            
-            logic['springsummer'] = (c4gli.pars.theta > 278.)
-            
-            # we take 3000 because previous analysis (ie., HUMPPA) has
-            # focussed towards such altitude
-            le3000 = (c4gli.air_balloon.z <= 3000.)
-            logic['10measurements'] = (np.sum(le3000) >= 10) 
-
-            leh = (c4gli.air_balloon.z <= c4gli.pars.h)
-
-            try:
-                logic['mlerrlow'] = (\
-                        (len(np.where(leh)[0]) > 0) and \
-                        # in cases where humidity is not defined, the mixed-layer
-                        # values get corr
-                        (not np.isnan(c4gli.pars.theta)) and \
-                        (rmse(c4gli.air_balloon.theta[leh] , \
-                              c4gli.pars.theta,filternan_actual=True) < 1.)\
-                              )
-    
-            except:
-                logic['mlerrlow'] = False
-                print('rmse probably failed')
-
-            logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
-            
-            print('logic:', logic)
-            # the result
-            morning_ok = np.mean(list(logic.values()))
-            print(morning_ok,c4gli.pars.ldatetime)
-            
-            # the next sounding will be used either for an afternoon sounding
-            # or for the morning sounding of the next day.
-            wy_strm.find_next()
-
-            # If the morning is ok, then we try to find a decent afternoon
-            # sounding
-            if morning_ok == 1.:
-                # we get the current date
-                current_date = dt.date(c4gli.pars.ldatetime.year, \
-                                       c4gli.pars.ldatetime.month, \
-                                       c4gli.pars.ldatetime.day)
-                c4gli_afternoon.clear()
-                c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                if wy_strm.current is not None:
-                    current_date_afternoon = \
-                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                       c4gli_afternoon.pars.ldatetime.month, \
-                                       c4gli_afternoon.pars.ldatetime.day)
-                else:
-                    # a dummy date: this will be ignored anyway
-                    current_date_afternoon = dt.date(1900,1,1)
-
-                # we will dump the latest afternoon sounding that fits the
-                # minimum criteria specified by logic_afternoon
-                c4gli_afternoon_for_dump = None
-                while ((current_date_afternoon == current_date) and \
-                       (wy_strm.current is not None)):
-                    logic_afternoon =dict()
-
-                    logic_afternoon['afternoon'] = \
-                        (c4gli_afternoon.pars.ldatetime.hour >= 12.)
-                    logic_afternoon['daylight'] = \
-                      ((c4gli_afternoon.pars.ldatetime - \
-                        c4gli_afternoon.pars.ldatetime_daylight \
-                       ).total_seconds()/3600. <= 2.)
-
-
-                    le3000_afternoon = \
-                        (c4gli_afternoon.air_balloon.z <= 3000.)
-                    logic_afternoon['5measurements'] = \
-                        (np.sum(le3000_afternoon) >= 5) 
-
-                    # we only store the last afternoon sounding that fits these
-                    # minimum criteria
-
-                    afternoon_ok = np.mean(list(logic_afternoon.values()))
-
-                    print('logic_afternoon: ',logic_afternoon)
-                    print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
-                    if afternoon_ok == 1.:
-                        # # doesn't work :(
-                        # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
-                        
-                        # so we just create a new one from the same wyoming profile
-                        c4gli_afternoon_for_dump = class4gl_input()
-                        c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
-
-                    wy_strm.find_next()
-                    c4gli_afternoon.clear()
-                    c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                    if wy_strm.current is not None:
-                        current_date_afternoon = \
-                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                       c4gli_afternoon.pars.ldatetime.month, \
-                                       c4gli_afternoon.pars.ldatetime.day)
-                    else:
-                        # a dummy date: this will be ignored anyway
-                        current_date_afternoon = dt.date(1900,1,1)
-
-                    # Only in the case we have a good pair of soundings, we
-                    # dump them to disk
-                if c4gli_afternoon_for_dump is not None:
-                    c4gli.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
-                             c4gli.pars.datetime_daylight).total_seconds())})
-    
-    
-                    print('ALMOST...')
-                    if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
-                            
-        
-                        c4gli.get_global_input(globaldata)
-                        print('VERY CLOSE...')
-                        if c4gli.check_source_globaldata() and \
-                            (c4gli.check_source(source='wyoming',\
-                                               check_only_sections='pars')):
-                            c4gli.dump(fileout)
-                            
-                            c4gli_afternoon_for_dump.dump(fileout_afternoon)
-                            
-                            
-                            # for keyEXP,dictEXP in experiments.items():
-                            #     
-                            #     c4gli.update(source=keyEXP,pars = dictEXP)
-                            #     c4gl = class4gl(c4gli)
-                            #     # c4gl.run()
-                            #     
-                            #     c4gl.dump(c4glfiles[key])
-                            
-                            print('HIT!!!')
-                
-                
-    # for c4glfile in c4glfiles:
-    #     c4glfile.close()            
-
diff --git a/bin/simulations/batch_simulations.py b/bin/simulations/batch_simulations.py
deleted file mode 100644
index b5d4cc3..0000000
--- a/bin/simulations/batch_simulations.py
+++ /dev/null
@@ -1,77 +0,0 @@
-
-import argparse
-
-import pandas as pd
-import os
-import math
-import numpy as np
-import sys
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-df_stations = pd.read_csv(fn_stations)
-
-# if 'path-soundings' in args.__dict__.keys():
-#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-# else:
-
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
-    parser.add_argument('--exec')
-    parser.add_argument('--experiments')#should be ';'-seperated list
-    parser.add_argument('--split-by',default=-1)
-    args = parser.parse_args()
-
-experiments = args.experiments.split(';')
-#SET = 'GLOBAL'
-SET = args.dataset
-print(args.experiments)
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-for expname in experiments:
-    #exp = EXP_DEFS[expname]
-    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-    os.system('rm -R '+path_exp)
-
-totalchunks = 0
-for istation,current_station in all_stations.iterrows():
-    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
-    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
-    totalchunks +=chunks_current_station
-
-#if sys.argv[1] == 'qsub':
-# with qsub
-os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
-                                       ',split_by='+str(args.split_by)+\
-                                       ',exec='+str(args.exec)+\
-                                       ',experiments='+str(args.experiments))
-# elif sys.argv[1] == 'wsub':
-#     
-#     # with wsub
-#     STNlist = list(df_stations.iterrows())
-#     NUMSTNS = len(STNlist)
-#     PROCS = NUMSTNS 
-#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-# 
-#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
-
diff --git a/bin/simulations/runmodel.py b/bin/simulations/runmodel.py
deleted file mode 100644
index fc4fd19..0000000
--- a/bin/simulations/runmodel.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Example of how to run the Python code, and access the output
-# This case is identical to the default setup of CLASS (the version with interface) 
-#
-
-from pylab import *
-from model import *
-
-""" 
-Create empty model_input and set up case
-"""
-run1input = model_input()
-
-run1input.dt         = 60.       # time step [s]
-run1input.runtime    = 12*3600    # total run time [s]
-
-# mixed-layer input
-run1input.sw_ml      = True      # mixed-layer model switch
-run1input.sw_shearwe = False     # shear growth mixed-layer switch
-run1input.sw_fixft   = False     # Fix the free-troposphere switch
-run1input.h          = 200.      # initial ABL height [m]
-run1input.Ps         = 101300.   # surface pressure [Pa]
-run1input.divU       = 0.        # horizontal large-scale divergence of wind [s-1]
-run1input.fc         = 1.e-4     # Coriolis parameter [m s-1]
-
-run1input.theta      = 288.      # initial mixed-layer potential temperature [K]
-run1input.dtheta     = 1.        # initial temperature jump at h [K]
-run1input.gammatheta = 0.006     # free atmosphere potential temperature lapse rate [K m-1]
-run1input.advtheta   = 0.        # advection of heat [K s-1]
-run1input.beta       = 0.2       # entrainment ratio for virtual heat [-]
-run1input.wtheta     = 0.1       # surface kinematic heat flux [K m s-1]
-
-run1input.q          = 0.008     # initial mixed-layer specific humidity [kg kg-1]
-run1input.dq         = -0.001    # initial specific humidity jump at h [kg kg-1]
-run1input.gammaq     = 0.        # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-run1input.advq       = 0.        # advection of moisture [kg kg-1 s-1]
-run1input.wq         = 0.1e-3    # surface kinematic moisture flux [kg kg-1 m s-1]
-
-run1input.CO2        = 422.      # initial mixed-layer CO2 [ppm]
-run1input.dCO2       = -44.      # initial CO2 jump at h [ppm]
-run1input.gammaCO2   = 0.        # free atmosphere CO2 lapse rate [ppm m-1]
-run1input.advCO2     = 0.        # advection of CO2 [ppm s-1]
-run1input.wCO2       = 0.        # surface kinematic CO2 flux [ppm m s-1]
-
-run1input.sw_wind    = False     # prognostic wind switch
-run1input.u          = 6.        # initial mixed-layer u-wind speed [m s-1]
-run1input.du         = 4.        # initial u-wind jump at h [m s-1]
-run1input.gammau     = 0.        # free atmosphere u-wind speed lapse rate [s-1]
-run1input.advu       = 0.        # advection of u-wind [m s-2]
-
-run1input.v          = -4.0      # initial mixed-layer u-wind speed [m s-1]
-run1input.dv         = 4.0       # initial u-wind jump at h [m s-1]
-run1input.gammav     = 0.        # free atmosphere v-wind speed lapse rate [s-1]
-run1input.advv       = 0.        # advection of v-wind [m s-2]
-
-run1input.sw_sl      = False     # surface layer switch
-run1input.ustar      = 0.3       # surface friction velocity [m s-1]
-run1input.z0m        = 0.02      # roughness length for momentum [m]
-run1input.z0h        = 0.002     # roughness length for scalars [m]
-
-run1input.sw_rad     = False     # radiation switch
-run1input.lat        = 51.97     # latitude [deg]
-run1input.lon        = -4.93     # longitude [deg]
-run1input.doy        = 268.      # day of the year [-]
-run1input.tstart     = 6.8       # time of the day [h UTC]
-run1input.cc         = 0.0       # cloud cover fraction [-]
-run1input.Q          = 400.      # net radiation [W m-2] 
-run1input.dFz        = 0.        # cloud top radiative divergence [W m-2] 
-
-run1input.sw_ls      = False     # land surface switch
-run1input.ls_type    = 'js'      # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-run1input.wg         = 0.21      # volumetric water content top soil layer [m3 m-3]
-run1input.w2         = 0.21      # volumetric water content deeper soil layer [m3 m-3]
-run1input.cveg       = 0.85      # vegetation fraction [-]
-run1input.Tsoil      = 285.      # temperature top soil layer [K]
-run1input.T2         = 286.      # temperature deeper soil layer [K]
-run1input.a          = 0.219     # Clapp and Hornberger retention curve parameter a
-run1input.b          = 4.90      # Clapp and Hornberger retention curve parameter b
-run1input.p          = 4.        # Clapp and Hornberger retention curve parameter c
-run1input.CGsat      = 3.56e-6   # saturated soil conductivity for heat
-
-run1input.wsat       = 0.472     # saturated volumetric water content ECMWF config [-]
-run1input.wfc        = 0.323     # volumetric water content field capacity [-]
-run1input.wwilt      = 0.171     # volumetric water content wilting point [-]
-
-run1input.C1sat      = 0.132     
-run1input.C2ref      = 1.8
-
-run1input.LAI        = 2.        # leaf area index [-]
-run1input.gD         = 0.0       # correction factor transpiration for VPD [-]
-run1input.rsmin      = 110.      # minimum resistance transpiration [s m-1]
-run1input.rssoilmin  = 50.       # minimun resistance soil evaporation [s m-1]
-run1input.alpha      = 0.25      # surface albedo [-]
-
-run1input.Ts         = 290.      # initial surface temperature [K]
-
-run1input.Wmax       = 0.0002    # thickness of water layer on wet vegetation [m]
-run1input.Wl         = 0.0000    # equivalent water layer depth for wet vegetation [m]
-
-run1input.Lambda     = 5.9       # thermal diffusivity skin layer [-]
-
-run1input.c3c4       = 'c3'      # Plant type ('c3' or 'c4')
-
-run1input.sw_cu      = False     # Cumulus parameterization switch
-run1input.dz_h       = 150.      # Transition layer thickness [m]
-
-"""
-Init and run the model
-"""
-r1 = model(run1input)
-r1.run()
-
-"""
-Plot output
-"""
-figure()
-subplot(131)
-plot(r1.out.t, r1.out.h)
-xlabel('time [h]')
-ylabel('h [m]')
-
-subplot(132)
-plot(r1.out.t, r1.out.theta)
-xlabel('time [h]')
-ylabel('theta [K]')
-
-subplot(133)
-plot(r1.out.t, r1.out.q*1000.)
-xlabel('time [h]')
-ylabel('q [g kg-1]')
diff --git a/bin/simulations/simulations.py b/bin/simulations/simulations.py
deleted file mode 100644
index 719f9a5..0000000
--- a/bin/simulations/simulations.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-
-import argparse
-
-#if __name__ == '__main__':
-parser = argparse.ArgumentParser()
-parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--first-station-row')
-parser.add_argument('--last-station-row')
-parser.add_argument('--station-id') # run a specific station id
-parser.add_argument('--dataset')
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--error-handling',default='dump_on_success')
-parser.add_argument('--experiments')
-parser.add_argument('--split-by',default=-1)# station soundings are split
-                                            # up in chunks
-
-parser.add_argument('--station-chunk',default=0)
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-args = parser.parse_args()
-
-sys.path.insert(0, args.c4gl_path_lib)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-EXP_DEFS  =\
-{
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-path_soundingsSET = args.path_soundings+'/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iter = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iter.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    if args.station_id is not None:
-        stations_iter = stations_iterator(all_stations)
-        STNID,run_station = stations_iterator.set_STNID(STNID)
-        run_stations = pd.DataFrame(run_station)
-    else:
-        run_stations = pd.DataFrame(all_stations)
-        if args.last_station_row is not None:
-            run_stations = run_stations.iloc[:(int(args.last_station)+1)]
-        if args.first_station_row is not None:
-            run_stations = run_stations.iloc[int(args.first_station):]
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = args.path_experiments+'/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            print(records_morning_station_chunk)
-
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-
-                    if args.error_handling == 'dump_always':
-                        try:
-                            c4gl.run()
-                        except:
-                            print('run not succesfull')
-                        onerun = True
-
-                        c4gli_morning.dump(file_ini)
-                        
-                        
-                        c4gl.dump(file_mod,\
-                                  include_input=False,\
-                                  #timeseries_only=timeseries_only,\
-                                 )
-                        onerun = True
-                    # in this case, only the file will dumped if the runs were
-                    # successful
-                    elif args.error_handling == 'dump_on_succes':
-                        try:
-                            c4gl.run()
-                            print('run not succesfull')
-                            onerun = True
-
-                            c4gli_morning.dump(file_ini)
-                            
-                            
-                            c4gl.dump(file_mod,\
-                                      include_input=False,\
-                                      #timeseries_only=timeseries_only,\
-                                     )
-                            onerun = True
-                        except:
-                            print('run not succesfull')
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/bin/simulations/simulations_iter.py b/bin/simulations/simulations_iter.py
deleted file mode 100644
index 5dfbaff..0000000
--- a/bin/simulations/simulations_iter.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk',default=0)
-    args = parser.parse_args()
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if 'global_chunk' in args.__dict__.keys():
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    if 'last_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    
-    if 'first_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    if 'station_chunk' in args.__dict__.keys():
-        run_station_chunk = args.station_chunk
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-    for istation,current_station in run_stations.iterrows():
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                #if iexp == 11:
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    
-                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
-                    EFobs = c4gli_morning.pars.EF
-                    
-                    b = c4gli_morning.pars.wwilt
-                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
-                    
-                    
-                    try:
-                        #fb = f(b)
-                        c4gli_morning.pars.wg = b
-                        c4gli_morning.pars.w2 = b
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fb = EFmod - EFobs
-                        EFmodb = EFmod
-                        c4glb = c4gl
-                        c4gli_morningb = c4gli_morning
-                        
-                        #fc = f(c)
-                        c4gli_morning.pars.wg = c
-                        c4gli_morning.pars.w2 = c
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fc = EFmod - EFobs
-                        print (EFmodb,EFobs,fb)
-                        print (EFmod,EFobs,fc)
-                        c4glc = c4gl
-                        c4gli_morningc = c4gli_morning
-                        i=0
-                        
-
-                        if fc*fb > 0.:
-                            if abs(fb) < abs(fc):
-                                c4gl = c4glb
-                                c4gli_morning = c4gli_morningb
-                            else:
-                                c4gl = c4glc
-                                c4gli_morning = c4gli_morningc
-                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
-                        
-                        else:
-                            print('starting ITERATION!!!')
-                            cn  = c - fc/(fc-fb)*(c-b)
-                            
-                            
-                            #fcn = f(cn)
-                            c4gli_morning.pars.wg = np.asscalar(cn)
-                            c4gli_morning.pars.w2 = np.asscalar(cn)
-                            c4gl = class4gl(c4gli_morning)
-                            c4gl.run()
-                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                            
-                            tol = 0.02
-                            ftol = 10.
-                            maxiter = 10
-                            
-                            is1=0
-                            is1max=1
-                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
-                                if fc * fcn > 0:
-                                    temp = c
-                                    c = b
-                                    b = temp
-                                
-                                a = b
-                                fa = fb
-                                b = c
-                                fb = fc
-                                c = cn
-                                fc = fcn
-                                              
-                                print(i,a,b,c,fcn)
-                                
-                                s1 = c - fc/(fc-fb)*(c-b) 
-                                s2 = c - fc/(fc-fa)*(c-a)
-                                
-                                
-                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
-                                
-                                
-                                if (abs(s1-b) < abs(s2-b)):
-                                    is1 = 0
-                                else:
-                                    is1 +=1
-                                    
-                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
-                                if is1 < is1max:           
-                                    s = s1
-                                    print('s1')
-                                else:
-                                    is1 = 0
-                                    s = s2
-                                    print('s2')
-                                
-                                if c > b:
-                                    l = b
-                                    r = c
-                                else:
-                                    l = c
-                                    r = b
-                                
-                                m = (b+c)/2.
-                                     
-                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
-                                    cn = s
-                                    print('midpoint')
-                                else:
-                                    cn = m
-                                    print('bissection')
-                                    
-                                
-                                #fcn = f(cn)
-                                c4gli_morning.pars.wg = np.asscalar(cn)
-                                c4gli_morning.pars.w2 = np.asscalar(cn)
-                                c4gl = class4gl(c4gli_morning)
-                                c4gl.run()
-                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                                
-                            
-                                i+=1
-                                
-                            if i == maxiter:
-                                raise StopIteration('did not converge')
-
-
-
-
-                        #c4gl = class4gl(c4gli_morning)
-                        #c4gl.run()
-
-                        c4gli_morning.pars.itersteps = i
-                        c4gli_morning.dump(file_ini)
-                        
-                        
-                        c4gl.dump(file_mod,\
-                                      include_input=False,\
-                                   #   timeseries_only=timeseries_only,\
-                                 )
-                        onerun = True
-                    except:
-                        print('run not succesfull')
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/bin/simulations/simulations_iter_test.py b/bin/simulations/simulations_iter_test.py
deleted file mode 100644
index eefd475..0000000
--- a/bin/simulations/simulations_iter_test.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk')
-    args = parser.parse_args()
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    run_stations = pd.DataFrame(all_stations)
-    if args.last_station is not None:
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    if args.first_station is not None:
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    run_station_chunk = 0
-    if args.station_chunk is not None:
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-                #if iexp == 11:
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    
-                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
-                    EFobs = c4gli_morning.pars.EF
-                    
-                    b = c4gli_morning.pars.wwilt
-                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
-                    
-                    
-                    try:
-                        #fb = f(b)
-                        c4gli_morning.pars.wg = b
-                        c4gli_morning.pars.w2 = b
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fb = EFmod - EFobs
-                        EFmodb = EFmod
-                        c4glb = c4gl
-                        c4gli_morningb = c4gli_morning
-                        
-                        #fc = f(c)
-                        c4gli_morning.pars.wg = c
-                        c4gli_morning.pars.w2 = c
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fc = EFmod - EFobs
-                        print (EFmodb,EFobs,fb)
-                        print (EFmod,EFobs,fc)
-                        c4glc = c4gl
-                        c4gli_morningc = c4gli_morning
-                        i=0
-                        
-
-                        if fc*fb > 0.:
-                            if abs(fb) < abs(fc):
-                                c4gl = c4glb
-                                c4gli_morning = c4gli_morningb
-                            else:
-                                c4gl = c4glc
-                                c4gli_morning = c4gli_morningc
-                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
-                        
-                        else:
-                            print('starting ITERATION!!!')
-                            cn  = c - fc/(fc-fb)*(c-b)
-                            
-                            
-                            #fcn = f(cn)
-                            c4gli_morning.pars.wg = np.asscalar(cn)
-                            c4gli_morning.pars.w2 = np.asscalar(cn)
-                            c4gl = class4gl(c4gli_morning)
-                            c4gl.run()
-                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                            
-                            tol = 0.02
-                            ftol = 10.
-                            maxiter = 10
-                            
-                            is1=0
-                            is1max=1
-                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
-                                if fc * fcn > 0:
-                                    temp = c
-                                    c = b
-                                    b = temp
-                                
-                                a = b
-                                fa = fb
-                                b = c
-                                fb = fc
-                                c = cn
-                                fc = fcn
-                                              
-                                print(i,a,b,c,fcn)
-                                
-                                s1 = c - fc/(fc-fb)*(c-b) 
-                                s2 = c - fc/(fc-fa)*(c-a)
-                                
-                                
-                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
-                                
-                                
-                                if (abs(s1-b) < abs(s2-b)):
-                                    is1 = 0
-                                else:
-                                    is1 +=1
-                                    
-                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
-                                if is1 < is1max:           
-                                    s = s1
-                                    print('s1')
-                                else:
-                                    is1 = 0
-                                    s = s2
-                                    print('s2')
-                                
-                                if c > b:
-                                    l = b
-                                    r = c
-                                else:
-                                    l = c
-                                    r = b
-                                
-                                m = (b+c)/2.
-                                     
-                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
-                                    cn = s
-                                    print('midpoint')
-                                else:
-                                    cn = m
-                                    print('bissection')
-                                    
-                                
-                                #fcn = f(cn)
-                                c4gli_morning.pars.wg = np.asscalar(cn)
-                                c4gli_morning.pars.w2 = np.asscalar(cn)
-                                c4gl = class4gl(c4gli_morning)
-                                c4gl.run()
-                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                                
-                            
-                                i+=1
-                                
-                            if i == maxiter:
-                                raise StopIteration('did not converge')
-
-
-
-
-                        #c4gl = class4gl(c4gli_morning)
-                        #c4gl.run()
-                        onerun = True
-
-                        c4gli_morning.pars.itersteps = i
-                    except:
-                        print('run not succesfull')
-                    c4gli_morning.dump(file_ini)
-                    
-                    
-                    c4gl.dump(file_mod,\
-                                  include_input=False,\
-                               #   timeseries_only=timeseries_only,\
-                             )
-                    onerun = True
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/bin/simulations/trash/run_test.py b/bin/simulations/trash/run_test.py
deleted file mode 100644
index 767d960..0000000
--- a/bin/simulations/trash/run_test.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk')
-    parser.add_argument('--c4gl-path',default='')
-    args = parser.parse_args()
-
-if args.c4gl_path == '': 
-    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-else:
-    sys.path.insert(0, args.c4gl_path)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    run_stations = pd.DataFrame(all_stations)
-    if args.last_station is not None:
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    if args.first_station is not None:
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    run_station_chunk = 0
-    if args.station_chunk is not None:
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            print(records_morning_station_chunk)
-
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    try:
-                        c4gl.run()
-                    except:
-                        print('run not succesfull')
-                    onerun = True
-
-                    c4gli_morning.dump(file_ini)
-                    
-                    
-                    c4gl.dump(file_mod,\
-                              include_input=False,\
-                              #timeseries_only=timeseries_only,\
-                             )
-                    onerun = True
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/build/lib/bin/__init__.py b/build/lib/bin/__init__.py
deleted file mode 100644
index 58f6cca..0000000
--- a/build/lib/bin/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-
-__version__ = '0.1.0'
-
-__author__ = 'Hendrik Wouters '
-
-__all__ = []
diff --git a/build/lib/class4gl/__init__.py b/build/lib/class4gl/__init__.py
deleted file mode 100644
index a21583b..0000000
--- a/build/lib/class4gl/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from . import model,class4gl,interface_multi,data_air,data_global
-
-__version__ = '0.1.0'
-
-__author__ = 'Hendrik Wouters '
-
-__all__ = []
diff --git a/build/lib/class4gl/class4gl.py b/build/lib/class4gl/class4gl.py
deleted file mode 100644
index 7baaa51..0000000
--- a/build/lib/class4gl/class4gl.py
+++ /dev/null
@@ -1,1611 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-
-Created on Mon Jan 29 12:33:51 2018
-
-Module file for class4gl, which  extents the class-model to be able to take
-global air profiles as input. It exists of:
-
-CLASSES:
-    - an input object, namely class4gl_input. It includes:
-        - a function to read Wyoming sounding data from a yyoming stream object
-        - a function to read global data from a globaldata library object 
-    - the model object: class4gl
-    - ....    
-
-DEPENDENCIES:
-    - xarray
-    - numpy
-    - data_global
-    - Pysolar
-    - yaml
-
-@author: Hendrik Wouters
-
-"""
-
-
-
-""" Setup of envirnoment """
-
-# Standard modules of the stand class-boundary-layer model
-from model import model
-from model import model_output as class4gl_output
-from model import model_input
-from model import qsat
-#from data_soundings import wyoming 
-import Pysolar
-import yaml
-import logging
-import warnings
-import pytz
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-
-# Generic Python Packages
-import numpy as np
-import datetime as dt
-import pandas as pd
-import xarray as xr
-import io
-#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
-from data_global import data_global
-grav = 9.81
-
-# this is just a generic input object
-class generic_input(object):
-    def __init__(self):
-        self.init = True
-
-
-# all units from all variables in CLASS(4GL) should be defined here!
-units = {
-         'h':'m',
-         'theta':'K', 
-         'q':'kg/kg',
-         'cc': '-',
-         'cveg': '-',
-         'wg': 'm3 m-3',
-         'w2': 'm3 m-3',
-         #'wg': 'kg/kg',
-         'Tsoil': 'K',
-         'T2': 'K',
-         'z0m': 'm',
-         'alpha': '-',
-         'LAI': '-',
-         'dhdt':'m/h',
-         'dthetadt':'K/h',
-         'dqdt':'kg/kg/h',
-         'BR': '-',
-         'EF': '-',
-}
-
-class class4gl_input(object):
-# this was the way it was defined previously.
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
-
-    def __init__(self,set_pars_defaults=True,debug_level=None):
-
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        print('hello')
-        self.logger = logging.getLogger('class4gl_input')
-        print(self.logger)
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # # create logger
-        # self.logger = logging.getLogger('class4gl_input')
-        # self.logger.setLevel(debug_level)
-
-        # # create console handler and set level to debug
-        # ch = logging.StreamHandler()
-        # ch.setLevel(debug_level)
-
-        # # create formatter
-        # formatter = logging.Formatter('%(asctime)s - \
-        #                                %(name)s - \
-        #                                %(levelname)s - \
-        #                                %(message)s')
-        # add formatter to ch
-        # ch.setFormatter(formatter)
-     
-        # # add ch to logger
-        # self.logger.addHandler(ch)
-
-        # """ end set up logger """
-
-
-
-        # these are the standard model input single-value parameters for class
-        self.pars = model_input()
-
-        # diagnostic parameters of the initial profile
-        self.diag = dict()
-
-        # In this variable, we keep track of the different parameters from where it originates from. 
-        self.sources = {}
-
-        if set_pars_defaults:
-            self.set_pars_defaults()
-
-    def set_pars_defaults(self):
-
-        """ 
-        Create empty model_input and set up case
-        """
-        defaults = dict( 
-        dt         = 60.    , # time step [s] 
-        runtime    = 6*3600 ,  # total run time [s]
-        
-        # mixed-layer input
-        sw_ml      = True   ,  # mixed-layer model switch
-        sw_shearwe = False  ,  # shear growth mixed-layer switch
-        sw_fixft   = False  ,  # Fix the free-troposphere switch
-        h          = 200.   ,  # initial ABL height [m]
-        Ps         = 101300.,  # surface pressure [Pa]
-        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
-        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
-        
-        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
-        dtheta     = 1.     ,  # initial temperature jump at h [K]
-        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
-        advtheta   = 0.     ,  # advection of heat [K s-1]
-        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
-        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
-        
-        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
-        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
-        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
-        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
-        
-        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
-        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
-        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
-        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
-        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
-        sw_wind    = True  ,  # prognostic wind switch
-        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
-        du         = 0.     ,  # initial u-wind jump at h [m s-1]
-        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
-        advu       = 0.     ,  # advection of u-wind [m s-2]
-        v          = 0.0    , # initial mixed-layer u-wind speed [m s-1]
-        dv         = 0.0    ,  # initial u-wind jump at h [m s-1]
-        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
-        advv       = 0.     ,  # advection of v-wind [m s-2]
-        sw_sl      = True   , # surface layer switch
-        ustar      = 0.3    ,  # surface friction velocity [m s-1]
-        z0m        = 0.02   ,  # roughness length for momentum [m]
-        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
-        sw_rad     = True   , # radiation switch
-        lat        = 51.97  ,  # latitude [deg]
-        lon        = -4.93  ,  # longitude [deg]
-        doy        = 268.   ,  # day of the year [-]
-        tstart     = 6.8    ,  # time of the day [h UTC]
-        cc         = 0.0    ,  # cloud cover fraction [-]
-        Q          = 400.   ,  # net radiation [W m-2] 
-        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
-        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
-        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
-        cveg       = 0.85   ,  # vegetation fraction [-]
-        Tsoil      = 295.   ,  # temperature top soil layer [K]
-        Ts         = 295.   ,    # initial surface temperature [K]
-        T2         = 296.   ,  # temperature deeper soil layer [K]
-        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
-        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
-        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
-        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
-        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
-        wfc        = 0.323  ,  # volumetric water content field capacity [-]
-        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
-        C1sat      = 0.132  ,  
-        C2ref      = 1.8    ,
-        LAI        = 2.     ,  # leaf area index [-]
-        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
-        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
-        rssoilmin  = 50.    ,  # minimun resistance soil evaporation [s m-1]
-        alpha      = 0.25   ,  # surface albedo [-]
-        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
-        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
-        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
-        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
-        sw_cu      = False  ,  # Cumulus parameterization switch
-        dz_h       = 150.   ,  # Transition layer thickness [m]
-        cala       = None   ,  # soil heat conductivity [W/(K*m)]
-        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
-        sw_ls      = True   ,
-        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields  as input from eg., ERA-INTERIM
-        sw_lit     = False,
-        )
-        pars = model_input()
-        for key in defaults:
-            pars.__dict__[key] = defaults[key]
-        
-        self.update(source='defaults',pars=pars)
-        
-    def clear(self):
-        """ this procudure clears the class4gl_input """
-
-        for key in list(self.__dict__.keys()):
-            del(self.__dict__[key])
-        self.__init__()
-
-    def dump(self,file):
-        """ this procedure dumps the class4gl_input object into a yaml file
-            
-            Input: 
-                - self.__dict__ (internal): the dictionary from which we read 
-            Output:
-                - file: All the parameters in self.__init__() are written to
-                the yaml file, including pars, air_ap, sources etc.
-        """
-        file.write('---\n')
-        index = file.tell()
-        file.write('# CLASS4GL input; format version: 0.1\n')
-
-        # write out the position of the current record
-        yaml.dump({'index':index}, file, default_flow_style=False)
-
-        # we do not include the none values
-        for key,data in self.__dict__.items():
-            #if ((type(data) == model_input) or (type(class4gl_input):
-            if key == 'pars':
-
-                pars = {'pars' : self.__dict__['pars'].__dict__}
-                parsout = {}
-                for key in pars.keys():
-                    if pars[key] is not None:
-                        parsout[key] = pars[key]
-
-                yaml.dump(parsout, file, default_flow_style=False)
-            elif type(data) == dict:
-                if key == 'sources':
-                    # in case of sources, we want to have a
-                    # condensed list format as well, so we leave out
-                    # 'default_flow_style=False'
-                    yaml.dump({key : data}, file)
-                else: 
-                    yaml.dump({key : data}, file,
-                              default_flow_style=False)
-            elif type(data) == pd.DataFrame:
-                # in case of dataframes (for profiles), we want to have a
-                # condensed list format as well, so we leave out
-                # 'default_flow_style=False'
-                yaml.dump({key: data.to_dict(orient='list')},file)
-
-                # # these are trials to get it into a more human-readable
-                # fixed-width format, but it is too complex
-                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
-                #file.write(stream)
-                
-                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
-                #file.write(key+': !!str |\n')
-                #file.write(str(data)+'\n')
-       
-    def load_yaml_dict(self,yaml_dict,reset=True):
-        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
-            
-            Input: 
-                - yaml_dict: the dictionary from which we read 
-                - reset: reset data before reading        
-            Output:
-                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
-        """
-        
-        if reset:
-            for key in list(self.__dict__.keys()):
-                del(self.__dict__[key])
-            self.__init__()
-
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                self.__dict__[key] = model_input()
-                self.__dict__[key].__dict__ = data
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            elif key == 'sources':
-                self.__dict__[key] = data
-            elif key == 'diag':
-                self.__dict__[key] = data
-            else: 
-                warnings.warn("Key '"+key+"' may not be implemented.")
-                self.__dict__[key] = data
-
-    def update(self,source,**kwargs):
-        """ this procedure is to make updates of input parameters and tracking
-        of their source more convenient. It implements the assignment of
-        parameter source/sensitivity experiment IDs ('eg.,
-        'defaults', 'sounding balloon', any satellite information, climate
-        models, sensitivity tests etc.). These are all stored in a convenient
-        way with as class4gl_input.sources.  This way, the user can always consult with
-        from where parameters data originates from.  
-        
-        Input:
-            - source:    name of the underlying dataset
-            - **kwargs: a dictionary of data input, for which the key values
-            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
-            the values is a again a dictionary/dataframe of datakeys/columns
-            ('wg','PRES','datetime', ...) and datavalues (either single values,
-            profiles ...), eg., 
-
-                pars = {'wg': 0.007  , 'w2', 0.005}
-                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
-                                     300.,...]}
-            
-        Output:
-            - self.__dict__[datatype] : object to which the parameters are
-                                        assigned. They can be consulted with
-                                        self.pars, self.profiles, etc.
-                                        
-            - self.sources[source] : It supplements the overview overview of
-                                     data sources can be consulted with
-                                     self.sources. The structure is as follows:
-                                     as:
-                self.sources = { 
-                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
-                'GLEAM' :  ['pars:wg','pars:w2', ...],
-                 ...
-                }
-        
-        """
-
-        #print(source,kwargs)
-
-        for key,data in kwargs.items():
-
-            #print(key)
-            # if the key is not in class4gl_input object, then just add it. In
-            # that case, the update procedures below will just overwrite it 
-            if key not in self.__dict__:
-                self.__dict__[key] = data
-
-
-            
-
-            #... we do an additional check to see whether there is a type
-            # match. I not then raise a key error
-            if (type(data) != type(self.__dict__[key]) \
-                # we allow dict input for model_input pars
-                and not ((key == 'pars') and (type(data) == dict) and \
-                (type(self.__dict__[key]) == model_input))):
-
-                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
-
-
-            # This variable keeps track of the added data that is supplemented
-            # by the current source. We add this to class4gl_input.sources
-            datakeys = []
-
-            #... and we update the class4gl_input data, and this depends on the
-            # data type
-
-            if type(self.__dict__[key]) == pd.DataFrame:
-                # If the data type is a dataframe, then we update the columns
-                for column in list(data.columns):
-                    #print(column)
-                    self.__dict__[key][column] = data[column]
-                    datakeys.append(column)
-                    
-
-            elif type(self.__dict__[key]) == model_input:
-                # if the data type is a model_input, then we update its internal
-                # dictionary of parameters
-                if type(data) == model_input:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data.__dict__}
-                    datakeys = list(data.__dict__.keys())
-                elif type(data) == dict:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data}
-                    datakeys = list(data.keys())
-                else:
-                    raise TypeError('input key '+key+' is not of the same type\
-                                    as the one in the class4gl_object')
-
-            elif type(self.__dict__[key]) == dict:
-                # if the data type is a dictionary, we update the
-                # dictionary 
-                self.__dict__[key] = {self.__dict__[key] , data}
-                datakeys = list(data.keys())
-
-
-            # if source entry is not existing yet, we add it
-            if source not in self.sources.keys():
-                self.sources[source] = []
-
-
-            # self.logger.debug('updating section "'+\
-            #                  key+' ('+' '.join(datakeys)+')'\
-            #                  '" from source \
-            #                  "'+source+'"')
-
-            # Update the source dictionary: add the provided data keys to the
-            # specified source list
-            for datakey in datakeys:
-                # At first, remove the occurences of the keys in the other
-                # source lists
-                for sourcekey,sourcelist in self.sources.items():
-                    if key+':'+datakey in sourcelist:
-                        self.sources[sourcekey].remove(key+':'+datakey)
-                # Afterwards, add it to the current source list
-                self.sources[source].append(key+':'+datakey)
-
-
-        # # in case the datatype is a class4gl_input_pars, we update its keys
-        # # according to **kwargs dictionary
-        # if type(self.__dict__[datatype]) == class4gl_input_pars:
-        #     # add the data parameters to the datatype object dictionary of the
-        #     # datatype
-        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
-        #                                        **kwargs}
-        # # in case, the datatype reflects a dataframe, we update the columns according
-        # # to the *args list
-        # elif type(self.__dict__[datatype]) == pd.DataFrame:
-        #     for dataframe in args:
-        #         for column in list(dataframe.columns):
-        #             self.__dict__[datatype][column] = dataframe[column]
-        
-
-    def get_profile(self,IOBJ, *args, **argv):
-        # if type(IOBJ) == wyoming:
-        self.get_profile_wyoming(IOBJ,*args,**argv)
-        # else:
-        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
-        
-    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
-        """ 
-            Purpose: 
-                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
-
-            Input:
-                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
-                function will take the profile at the stream's current
-                position. 
-                2. air_ap_mode: which air profile do we take? 
-                    - b : best
-                    - l : according to lower limit for the mixed-layer height
-                            estimate
-                    - u : according to upper limit for the mixed-layer height
-                            estimate
-
-
-            Output:
-                1. all single-value parameters are stored in the
-                   class4gl_input.pars object
-                2. the souding profiles are stored in the in the
-                   class4gl_input.air_balloon dataframe
-                3. modified sounding profiles for which the mixed layer height
-                   is fitted
-                4. ...
-
-        """
-
-
-        # Raise an error in case the input stream is not the correct object
-        # if type(wy_strm) is not wyoming:
-        #    raise TypeError('Not a wyoming type input stream')
-
-        # Let's tell the class_input object that it is a Wyoming fit type
-        self.air_ap_type = 'wyoming'
-        # ... and which mode of fitting we apply
-        self.air_ap_mode = air_ap_mode
-
-        """ Temporary variables used for output """
-        # single value parameters derived from the sounding profile
-        dpars = dict()
-        # profile values
-        air_balloon = pd.DataFrame()
-        # fitted profile values
-        air_ap = pd.DataFrame()
-        
-        string = wy_strm.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = wy_strm.current.find_next('pre').find_next('pre').text
-        
-        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
-        dpars = {**dpars,
-                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
-               }
-        
-        # we get weird output when it's a numpy Timestamp, so we convert it to
-        # pd.datetime type
-
-        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
-        dpars['STNID'] = dpars['Station number']
-
-        # altitude above ground level
-        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
-        # absolute humidity in g/kg
-        air_balloon['q']= (air_balloon.MIXR/1000.) \
-                              / \
-                             (air_balloon.MIXR/1000.+1.)
-        # convert wind speed from knots to m/s
-        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
-        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-        
-        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
-        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
-
-        
-
-        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-
-        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
-        air_balloon['p'] = air_balloon.PRES*100.
-
-
-        # Therefore, determine the sounding that are valid for 'any' column 
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        #is_valid = (air_balloon.z >= 0)
-        # # this is an alternative pipe/numpy method
-        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
-        valid_indices = air_balloon.index[is_valid].values
-        print(valid_indices)
-
-        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-
-        air_balloon['t'] = air_balloon['TEMP']+273.15
-        air_balloon['theta'] = (air_balloon.t) * \
-                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
-        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
-
-        if len(valid_indices) > 0:
-            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
-            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            
-            # the final mixed-layer height that will be used by class. We round it
-            # to 1 decimal so that we get a clean yaml output format
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-
-
-        if np.isnan(dpars['h']):
-            dpars['Ps'] = np.nan
-
-
-
-
-        if ~np.isnan(dpars['h']):
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u 
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-            
-
-
-
-        # First 3 data points of the mixed-layer fit. We create a empty head
-        # first
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-        
-        #calculate mixed-layer jump ( this should be larger than 0.1)
-        
-        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        air_ap_head['HGHT'] = air_ap_head['z'] \
-                                + \
-                                np.round(dpars[ 'Station elevation'],1)
-        
-        # make a row object for defining the jump
-        jump = air_ap_head.iloc[0] * np.nan
-            
-        if air_ap_tail.shape[0] > 1:
-
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = dpars['theta']
-        z_low =     dpars['h']
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                (z_mean > (z_low+10.)) and \
-                (theta_mean > (theta_low+0.2) ) and \
-                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-
-
-
-
-
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        #print(air_ap['PRES'].iloc[0])
-
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-
-        
-        dpars['lat'] = dpars['Station latitude']
-        dpars['latitude'] = dpars['lat']
-        
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        dpars['longitude'] = dpars['Station longitude']
-        
-        dpars['ldatetime'] = dpars['datetime'] \
-                            + \
-                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-
-        # # we make a pars object that is similar to the destination object
-        # pars = model_input()
-        # for key,value in dpars.items():
-        #     pars.__dict__[key] = value
-
-
-        # we round the columns to a specified decimal, so that we get a clean
-        # output format for yaml
-        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
-                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
-                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
-# 
-        for column,decimal in decimals.items():
-            air_balloon[column] = air_balloon[column].round(decimal)
-            air_ap[column] = air_ap[column].round(decimal)
-
-        self.update(source='wyoming',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-
-        
-    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
-    
-        """
-        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
-                 according to the position (lat lon) and the class datetime and timespan
-                 globaldata should be a globaldata multifile object
-        
-        Input: 
-            - globaldata: this is the library object
-            - only_keys: only extract specified keys
-            - exclude_keys: do not inherit specified keys
-        """
-        classdatetime      = np.datetime64(self.pars.datetime_daylight)
-        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
-                                           + \
-                                           dt.timedelta(seconds=self.pars.runtime)\
-                                          )
-
-
-        # # list of variables that we get from global ground data
-        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
-        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
-        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
-        #                 'texture', 'itex', 'isoil', 'BR',
-        #                 'b', 'cveg',
-        #                 'C1sat', 
-        #                 'C2ref', 'p', 'a',
-        #                 ] #globaldata.datasets.keys():
-
-        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
-        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
-
-
-        if type(globaldata) is not data_global:
-            raise TypeError("Wrong type of input library") 
-
-        # by default, we get all dataset keys
-        keys = list(globaldata.datasets.keys())
-
-        # We add LAI manually, because it is not listed in the datasets and
-        #they its retreival is hard coded below based on LAIpixel and cveg
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            keys.append('LAI')
-
-        # # In case there is surface pressure, we also calculate the half-level
-        # # and full-level pressure fields
-        # if ('sp' in keys):
-        #     keys.append('pfull')
-        #     keys.append('phalf')
-
-        # If specified, we only take the keys that are in only_keys
-        if only_keys is not None:
-            for key in keys:
-                if key not in only_keys:
-                    keys.remove(key)
-                
-        # If specified, we take out keys that are in exclude keys
-        if exclude_keys is not None:
-            for key in keys:
-                if key in exclude_keys:
-                    keys.remove(key)
-
-        # we set everything to nan first in the pars section (non-profile parameters
-        # without lev argument), so that we can check afterwards whether the
-        # data is well-fetched or not.
-        for key in keys:
-            if not ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None) and \
-                ('lev' in globaldata.datasets[key].page[key].dims)):
-                self.update(source='globaldata',pars={key:np.nan})
-            # # we do not check profile input for now. We assume it is
-            # # available
-            #else:
-            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
-
-        self.logger.debug('getting keys "'+', '.join(keys)+'\
-                          from global data')
-
-        for key in keys:
-            # If we find it, then we obtain the variables
-            if ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None)):
-
-                # check first whether the dataset has a height coordinate (3d space)
-                if 'lev' in globaldata.datasets[key].page[key].dims:
-
-                    # first, we browse to the correct file that has the current time
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-                        globaldata.datasets[key].browse_page(time=classdatetime)
-
-                    
-                    if (globaldata.datasets[key].page is not None):
-                        # find longitude and latitude coordinates
-                        ilats = (np.abs(globaldata.datasets[key].page.lat -
-                                        self.pars.latitude) < 0.5)
-                        ilons = (np.abs(globaldata.datasets[key].page.lon -
-                                        self.pars.longitude) < 0.5)
-                        
-                        # if we have a time dimension, then we look up the required timesteps during the class simulation
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            itimes = ((globaldata.datasets[key].page.time >= \
-                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
-
-                            # In case we didn't find any correct time, we take the
-                            # closest one.
-                            if np.sum(itimes) == 0.:
-
-
-                                classdatetimemean = \
-                                    np.datetime64(self.pars.datetime_daylight + \
-                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
-                                                ))
-
-                                dstimes = globaldata.datasets[key].page.time
-                                time = dstimes.sel(time=classdatetimemean,method='nearest')
-                                itimes = (globaldata.datasets[key].page.time ==
-                                          time)
-                                
-                        else:
-                            # we don't have a time coordinate so it doesn't matter
-                            # what itimes is
-                            itimes = 0
-
-                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
-
-                        # over which dimensions we take a mean:
-                        dims = globaldata.datasets[key].page[key].dims
-                        namesmean = list(dims)
-                        namesmean.remove('lev')
-                        idxmean = [dims.index(namemean) for namemean in namesmean]
-                        
-                        value = \
-                        globaldata.datasets[key].page[key].isel(time=itimes,
-                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
-
-                        # Ideally, source should be equal to the datakey of globaldata.library 
-                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
-                        #  but therefore the globaldata class requires a revision to make this work
-                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
-
-                else:
-                    # this procedure is for reading the ground fields (2d space). 
-                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
-
-    
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-    
-                       # first, we browse to the correct file
-                       #print(key)
-                       globaldata.datasets[key].browse_page(time=classdatetime)
-    
-                    if globaldata.datasets[key].page is not None:
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - self.pars.latitude))
-                        ilat = np.where((DIST) == np.min(DIST))[0][0]
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - self.pars.longitude))
-                        ilon = np.where((DIST) == np.min(DIST))[0][0]
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - (self.pars.latitude + 0.5)))
-                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmax = ilat
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - (self.pars.longitude  + 0.5)))
-                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmax = ilon
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lat.values\
-                                - (self.pars.latitude - 0.5)))
-                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmin = ilat
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lon.values\
-                                - (self.pars.longitude  - 0.5)))
-                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmin = ilon        
-                        
-                        if ilatmin < ilatmax:
-                            ilatrange = range(ilatmin,ilatmax+1)
-                        else:
-                            ilatrange = range(ilatmax,ilatmin+1)
-                            
-                        if ilonmin < ilonmax:
-                            ilonrange = range(ilonmin,ilonmax+1)
-                        else:
-                            ilonrange = range(ilonmax,ilonmin+1)     
-                            
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                            
-                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
-                                idatetime += 1
-                            
-                            classdatetimeend = np.datetime64(\
-                                                             self.pars.datetime +\
-                                                             dt.timedelta(seconds=self.pars.runtime)\
-                                                            ) 
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
-                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
-                                idatetimeend -= 1
-                            idatetime = np.min((idatetime,idatetimeend))
-                            #for gleam, we take the previous day values
-                            if key in ['wg', 'w2']:
-                                idatetime = idatetime - 1
-                                idatetimeend = idatetimeend - 1
-
-                            # in case of soil temperature, we take the exact
-                            # timing (which is the morning)
-                            if key in ['Tsoil','T2']:
-                                idatetimeend = idatetime
-                            
-                            idts = range(idatetime,idatetimeend+1)
-                            
-                            count = 0
-                            self.__dict__[key] = 0.
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    for iidts in idts:
-                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
-                                        count += 1
-                            value = value/count
-                            self.update(source='globaldata',pars={key:value.item()})
-                                
-                        else:
-                                
-                            count = 0
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
-                                    count += 1
-                            value = value/count                        
-
-                            self.update(source='globaldata',pars={key:value.item()})
-
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            self.logger.debug('also update LAI based on LAIpixel and cveg') 
-            # I suppose LAI pixel is already determined in the previous
-            # procedure. Anyway...
-            key = 'LAIpixel'
-
-            if globaldata.datasets[key].page is not None:
-                # first, we browse to the correct file that has the current time
-                if 'time' in list(globaldata.datasets[key].page[key].dims):
-                    globaldata.datasets[key].browse_page(time=classdatetime)
-            
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - self.pars.latitude))
-                ilat = np.where((DIST) == np.min(DIST))[0][0]
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - self.pars.longitude))
-                ilon = np.where((DIST) == np.min(DIST))[0][0]
-                 
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude + 0.5)))
-                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmax = ilat
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values \
-                        - (self.pars.longitude  + 0.5)))
-                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmax = ilon
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude - 0.5)))
-                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmin = ilat
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - (self.pars.longitude  - 0.5)))
-                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmin = ilon        
-                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                
-                
-                if ilatmin < ilatmax:
-                    ilatrange = range(ilatmin,ilatmax+1)
-                else:
-                    ilatrange = range(ilatmax,ilatmin+1)
-                    
-                if ilonmin < ilonmax:
-                    ilonrange = range(ilonmin,ilonmax+1)
-                else:
-                    ilonrange = range(ilonmax,ilonmin+1)           
-                
-                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
-                LAIpixel = 0.
-                count = 0
-                for iilat in [ilat]: #ilatrange
-                    for iilon in [ilon]: #ilonrange
-                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
-                        
-                                        
-                        # if np.isnan(tarray[idatetime]):
-                        #     print("interpolating GIMMS LAIpixel nan value")
-                        #     
-                        #     mask = np.isnan(tarray)
-                        #     
-                        #     #replace each nan value with a interpolated value
-                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-                        #         
-                        #     else:
-                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
-                    
-                        #         tarray *= np.nan 
-                        
-                        count += 1
-                        #tarray_res += tarray
-                LAIpixel = LAIpixel/count
-                
-                count = 0
-                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
-  
-                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
-                #print('LAIpixel:',self.__dict__['LAIpixel'])
-                #print('cveg:',self.__dict__['cveg'])
-                
-                # finally, we rescale the LAI according to the vegetation
-                # fraction
-                value = 0. 
-                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
-                   value =self.pars.LAIpixel/self.pars.cveg
-                else:
-                    # in case of small vegetation fraction, we take just a standard 
-                    # LAI value. It doesn't have a big influence anyway for
-                    # small vegetation
-                    value = 2.
-                #print('LAI:',self.__dict__['LAI'])
-                self.update(source='globaldata',pars={'LAI':value}) 
-
-
-        # in case we have 'sp', we also calculate the 3d pressure fields at
-        # full level and half level
-        if ('sp' in keys) and ('sp' in self.pars.__dict__):
-            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
-
-            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # hydrostatic thickness of each model layer
-            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
-            # # dz = rhodz/(R * T / pfull)
-
-
-            # # subsidence multiplied by density. We calculate the subsidence of
-            # # the in class itself
-            # wrho = np.zeros_like(phalf)
-            # wrho[-1] = 0. 
-
-            # for ihlev in range(0,wrho.shape[0]-1):
-            #     # subsidence multiplied by density is the integral of
-            #     # divergences multiplied by the layer thicknessies
-            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
-            #                     self.air_ac['divU_y'][ihlev:]) * \
-            #                    delpdgrav[ihlev:]).sum()
-
-
-            
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'p':list(pfull)}))
-            self.update(source='globaldata',\
-                        air_ach=pd.DataFrame({'p':list(phalf)}))
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
-            # self.update(source='globaldata',\
-            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
-
-    def check_source(self,source,check_only_sections=None):
-        """ this procedure checks whether data of a specified source is valid.
-
-        INPUT:
-            source: the data source we want to check
-            check_only_sections: a string or list with sections to be checked
-        OUTPUT:
-            returns True or False
-        """
-
-        # we set source ok to false as soon as we find a invalid input
-        source_ok = True
-
-        # convert to a single-item list in case of a string
-        check_only_sections_def = (([check_only_sections]) if \
-                                   type(check_only_sections) is str else \
-                                    check_only_sections)
-                                  
-        if source not in self.sources.keys():
-            self.logger.info('Source '+source+' does not exist')
-            source_ok = False
-
-        for sectiondatakey in self.sources[source]:                             
-            section,datakey = sectiondatakey.split(':')                         
-            if ((check_only_sections_def is None) or \
-                (section in check_only_sections_def)):                          
-                checkdatakeys = []
-                if type(self.__dict__[section]) is pd.DataFrame:
-                    checkdata = self.__dict__[section]
-                elif type(self.__dict__[section]) is model_input:
-                    checkdata = self.__dict__[section].__dict__
-
-                if (datakey not in checkdata):                              
-                    # self.logger.info('Expected key '+datakey+\
-                    #                  ' is not in parameter input')                        
-                    source_ok = False                                           
-                elif (checkdata[datakey] is None) or \
-                     (pd.isnull(checkdata[datakey]) is True):                    
-        
-                    # self.logger.info('Key value of "'+datakey+\
-                    #                  '" is invalid: ('+ \
-                    # str(self.__dict__[section].__dict__[datakey])+')')         
-                    source_ok = False
-
-        return source_ok
-
-    def check_source_globaldata(self):
-        """ this procedure checks whether all global parameter data is
-        available, according to the keys in the self.sources"""
-
-        source_globaldata_ok = True
-
-        #self.get_values_air_input()
-
-        # and now we can get the surface values
-        #class_settings = class4gl_input()
-        #class_settings.set_air_input(input_atm)
-        
-        # we only allow non-polar stations
-        if not (self.pars.lat <= 60.):
-            source_globaldata_ok = False
-            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-        
-        # check lat and lon
-        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
-            source_globaldata_ok = False
-            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
-            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
-        else:
-            # we only check the ground parameter data (pars section). The 
-            # profile data (air_ap section) are supposed to be valid in any 
-            # case.
-            source_ok = self.check_source(source='globaldata',\
-                                          check_only_sections=['air_ac',\
-                                                               'air_ap',\
-                                                               'pars'])
-            if not source_ok:
-                source_globaldata_ok = False
-        
-            # Additional check: we exclude desert-like
-            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
-                source_globaldata_ok = False
-                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
-                source_globaldata_ok = False
-                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
-            elif self.pars.cveg < 0.02:
-                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
-                source_globaldata_ok = False
-
-        return source_globaldata_ok
-
-
-class c4gli_iterator():
-    """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
-    
-        for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
-    """
-    def __init__(self,file):
-        # take file as IO stream
-        self.file = file
-        self.yaml_generator = yaml.load_all(file)
-        self.current_dict = {}
-        self.current_class4gl_input = class4gl_input()
-        separator = self.file.readline() # this is just dummy
-        self.header = file.readline()
-        if self.header != '# CLASS4GL record; format version: 0.1\n':
-            raise NotImplementedError("Wrong format version: '"+self.header+"'")
-    def __iter__(self):
-        return self
-    def __next__(self):
-        self.current_dict = self.yaml_generator.__next__()
-        self.current_class4gl_input.load_yaml_dict(self.current_dict)
-        return self.current_class4gl_input
-
-
-
-#get_cape and lift_parcel are adapted from the SkewT package
-    
-class gl_dia(object):
-    def get_lifted_index(self,timestep=-1):
-        self.LI = get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.)
-    
-#from SkewT
-#def get_lcl(startp,startt,startdp,nsteps=101):
-#    from numpy import interp
-#    #--------------------------------------------------------------------
-#    # Lift a parcel dry adiabatically from startp to LCL.
-#    # Init temp is startt in K, Init dew point is stwrtdp,
-#    # pressure levels are in Pa    
-#    #--------------------------------------------------------------------
-#
-#    assert startdp<=startt
-#
-#    if startdp==startt:
-#        return np.array([startp]),np.array([startt]),np.array([startdp]),
-#
-#    # Pres=linspace(startp,60000.,nsteps)
-#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
-#
-#    # Lift the dry parcel
-#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
-#    # Mixing ratio isopleth
-#    starte=VaporPressure(startdp)
-#    startw=MixRatio(starte,startp)
-#    e=Pres*startw/(.622+startw)
-#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
-#
-#    # Solve for the intersection of these lines (LCL).
-#    # interp requires the x argument (argument 2)
-#    # to be ascending in order!
-#    P_lcl=interp(0.,T_iso-T_dry,Pres)
-#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
-#
-#    # # presdry=linspace(startp,P_lcl)
-#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
-#
-#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
-#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
-#
-#    return P_lcl,T_lcl
-
-
-
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    """ Calculate mixed-layer height from temperature and wind speed profile
-
-        Input:
-            HAGL: height coordinates [m]
-            THTV: virtual potential temperature profile [K]
-            WSPD: wind speed profile [m/s]
-
-        Output:
-            BLH: best-guess mixed-layer height
-            BLHu: upper limit of mixed-layer height
-            BLHl: lower limit of mixed-layer height
-
-    """
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHl
-
-
-
-#from class
-def get_lcl(startp,startt,startqv):
-        # Find lifting condensation level iteratively
-    lcl = 20.
-    RHlcl = 0.5
-    
-    itmax = 30
-    it = 0
-    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it < itmax):
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHd
-
-def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
-    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
-    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
-
-
-#from os import listdir
-#from os.path import isfile #,join
-import glob
-
-
-class wyoming(object):
-    def __init__(self):
-       self.status = 'init'
-       self.found = False
-       self.DT = None
-       self.current = None
-       #self.mode = 'b'
-       self.profile_type = 'wyoming'  
-       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
-       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-         
-    def set_STNM(self,STNM):
-        self.__init__()
-        self.STNM = STNM
-        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
-        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
-        self.current = None
-        self.found = False
-        self.FILES.sort()
-        
-    def find_first(self,year=None,get_atm=False):
-        self.found = False    
-                
-        # check first file/year or specified year
-        if year == None:
-            self.iFN = 0
-            self.FN = self.FILES[self.iFN]
-        else:
-            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-        self.current = self.sounding_series.find('h2')
-        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
-        
-        # go through other files and find first sounding when year is not specified
-        self.iFN=self.iFN+1
-        while keepsearching:
-            self.FN = self.FILES[self.iFN]
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            self.iFN=self.iFN+1
-            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
-        self.found = (self.current is not None)
-
-        self.status = 'fetch'
-        if self.found:
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        
-        if self.found and get_atm:
-            self.get_values_air_input()
-        
-    
-    def find(self,DT,get_atm=False):
-        
-        self.found = False
-        keepsearching = True
-        #print(DT)
-        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
-        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
-            self.DT = DT
-            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            
-        keepsearching = (self.current is not None)
-        while keepsearching:
-            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            if DTcurrent == DT:
-                self.found = True
-                keepsearching = False
-                if get_atm:
-                    self.get_values_air_input()
-                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            elif DTcurrent > DT:
-                keepsearching = False
-                self.current = None
-            else:
-                self.current = self.current.find_next('h2')
-                if self.current is None:
-                    keepsearching = False
-        self.found = (self.current is not None)
-        self.status = 'fetch'
-
-    def find_next(self,get_atm=False):
-        self.found = False
-        self.DT = None
-        if self.current is None:
-            self.find_first()
-        else:                
-            self.current = self.current.find_next('h2')
-            self.found = (self.current is not None)
-            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
-            while keepsearching:
-                self.iFN=self.iFN+1
-                self.FN = self.FILES[self.iFN]
-                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-                self.current = self.sounding_series.find('h2')
-                
-                self.found = (self.current is not None)
-                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
-        if self.found:        
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        if self.found and get_atm:
-            self.get_values_air_input()
-       
-
-
-    def get_values_air_input(self,latitude=None,longitude=None):
-
-        # for iDT,DT in enumerate(DTS):
-        
-            #websource = urllib.request.urlopen(webpage)
-        #soup = BeautifulSoup(open(webpage), "html.parser")
-        
-       
-        #workaround for ...last line has a spurious (mis-encoded) character which results in stringlike first column
-        string = self.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = self.current.find_next('pre').find_next('pre').text
-
-        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
-        #PARAMS.insert(0,'date',DT)
-
-        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
-        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
-        
-        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
-        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
-        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
-        HAGL = HGHT - np.float(PARAMS['Station elevation'])
-        ONE_COLUMN.insert(0,'HAGL',HAGL)
-
-        
-        
-        
-        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
-        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
-        ONE_COLUMN.insert(0,'QABS',QABS)
-        
-        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
-
-        #mixed layer potential temperature
-        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
-
-        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
-        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
-        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
-
-        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
-        BLHV = np.max((BLHV,10.))
-        BLHVu = np.max((BLHVu,10.))
-        BLHVd = np.max((BLHVd,10.))
-        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
-
-        #security values for mixed-layer jump values dthetav, dtheta and dq
-        
-        # fit new profiles taking the above-estimated mixed-layer height
-        ONE_COLUMNNEW = []
-        for BLH in [BLHV,BLHVu,BLHVd]:
-            ONE_COLUMNNEW.append(pd.DataFrame())
-            
-            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
-            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
-            
-            listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
-                
-                # get index of lowest valid observation. This seems to vary
-                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
-                if len(idxvalid) > 0:
-                    #print('idxvalid',idxvalid)
-                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
-                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
-                    else:
-                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
-                else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
-                    #print(col,meanabl)
-               
-                
-                # if col == 'PRES':
-                #     meanabl =  
-            
-                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
-                #THTVM = np.nanmean(THTV[HAGL <= BLH])
-                #print("new_pro_h",new_pro_h)
-                # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV',]:
-                    #for moisture
-                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
-                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
-                    if len(listHAGLNEW) > 4:
-                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
-                        dtheta = np.max((0.1,dtheta_pre))
-                        #meanabl = meanabl - (dtheta - dtheta_pre)
-                        #print('dtheta_pre',dtheta_pre)
-                        #print('dtheta',dtheta)
-                        #print('meanabl',meanabl)
-                        #stop
-                        
-                    else:
-                        dtheta = np.nan
-                else:
-                    if len(listHAGLNEW) > 4:
-                        #for moisture (it can have both negative and positive slope)
-                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
-                    else:
-                        dtheta = np.nan
-                #print('dtheta',dtheta)
-                
-                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
-            
-                
-                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
-                
-            #QABSM = np.nanmean(QABS[HAGL <= BLH])
-            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
-            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
-            
-        # we just make a copy of the fields, so that it can be read correctly by CLASS 
-        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
-            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
-            
-            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
-        
-            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
-            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
-
-
-        # assign fields adopted by CLASS
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'b':
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'u':
-            PARAMS.insert(0,'h',   BLHVu)
-        elif self.mode == 'd':
-            PARAMS.insert(0,'h',   BLHVd)
-        else:
-            PARAMS.insert(0,'h',   BLHV)
-            
-
-        try:
-            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
-        except:
-            print("could not convert latitude coordinate")
-            PARAMS.insert(0,'latitude', np.nan)
-            PARAMS.insert(0,'lat', np.nan)
-        try:
-            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
-            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwhich) 
-            PARAMS.insert(0,'lon', 0.)
-        except:
-            print("could not convert longitude coordinate")
-            PARAMS.insert(0,'longitude', np.nan)
-            PARAMS.insert(0,'lon', 0.)
-
-        if latitude is not None:
-            print('overwriting latitude with specified value')
-            PARAMS['latitude'] = np.float(latitude)
-            PARAMS['lat'] = np.float(latitude)
-        if longitude is not None:
-            print('overwriting longitude with specified value')
-            PARAMS['longitude'] = np.float(longitude)
-        try:
-            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwhich hence taking lon=0)
-            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
-            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            # This is the nearest datetime when sun is up (for class)
-            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
-            # apply the same time shift for UTC datetime
-            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
-            
-        except:
-            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
-            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
-            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
-            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
-
-        
-
-        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
-        # as we are forcing lon equal to zero this is is expressed in local suntime
-        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
-
-           
-        ONE_COLUMNb = ONE_COLUMNNEW[0]
-        ONE_COLUMNu = ONE_COLUMNNEW[1]
-        ONE_COLUMNd = ONE_COLUMNNEW[2]
-        
-
-        THTVM = np.nanmean(THTV[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
-        
-        QABSM = np.nanmean(QABS[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
-        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
-        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
-
-        BLHVe = abs(BLHV - BLHVu)
-        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-
-        #PARAMS.insert(0,'dq',0.)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
-        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
-        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
-        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
-        
-        if self.mode == 'o': #original 
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
-        elif self.mode == 'b': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNb
-            BLCOLUMN = ONE_COLUMNb
-        elif self.mode == 'u': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNu
-            BLCOLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNd
-            BLCOLUMN = ONE_COLUMNd
-        else:
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb
-
-        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
-        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
-        # print(BLCOLUMN['HAGL'][lt6000])
-        # print(BLCOLUMN['HAGL'][lt2500])
-        # 
-        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
-
-        #print(BLCOLUMN['HAGL'][lt2500])
-        PARAMS.insert(0,'OK',
-                      ((BLHVe < 200.) and 
-                       ( len(np.where(lt6000)[0]) > 5) and
-                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
-                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
-                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
-                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
-                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
-                      )
-                     )
-
-        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
-        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
-        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
-        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
-        
-        
-        PARAMS = PARAMS.T
-
-        
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = USE_ONECOLUMN
-        # if self.mode == 'o': #original 
-        #     self.ONE_COLUMN = ONE_COLUMN
-        # elif self.mode == 'b': # best BLH
-        #     self.ONE_COLUMN = ONE_COLUMNb
-        # elif self.mode == 'u':# upper BLH
-        #     self.ONE_COLUMN = ONE_COLUMNu
-        # elif self.mode == 'd': # lower BLH
-        #     self.ONE_COLUMN=ONE_COLUMNd
-        # else:
-        #     self.ONE_COLUMN = ONE_COLUMN
-
diff --git a/build/lib/class4gl/data_global.py b/build/lib/class4gl/data_global.py
deleted file mode 100644
index 9c3d9b5..0000000
--- a/build/lib/class4gl/data_global.py
+++ /dev/null
@@ -1,936 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: Hendrik Wouters
-
-Purpose: provides class routines for ground and atmosphere conditions used for
-the CLASS miced-layer model
-
-Usage:
-    from data_global import data_global
-    from class4gl import class4gl_input
-    from data_soundings import wyoming
-
-    # create a data_global object and load initial data pages
-    globaldata = data_global()
-    globaldata.load_datasets()
-    # create a class4gl_input object
-    c4gli = class4gl_input()
-    # Initialize it with profile data. We need to do this first. Actually this
-    # will set the coordinate parameters (datetime, latitude, longitude) in
-    # class4gl_input.pars.__dict__, which is required to read point data from
-    # the data_global object.
-
-    # open a Wyoming stream for a specific station
-    wy_strm = wyoming(STNM=91376)
-    # load the first profile
-    wy_strm.find_first()
-    # load the profile data into the class4gl_input object
-    c4gli.get_profile_wyoming(wy_strm)
-    
-    # and finally, read the global input data for this profile
-    c4gli.get_global_input(globaldata)
-
-
-"""
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-#import pynacolada as pcd
-import pandas as pd
-import xarray as xr
-import os
-import glob
-import sys
-import errno
-import warnings
-import logging
-
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-class book(object):
-    """ this is a class for a dataset spread over multiple files. It has a
-    similar purpose  open_mfdataset, but only 1 file (called current 'page')
-    one is loaded at a time. This saves precious memory.  """
-    def __init__(self,fn,concat_dim = None,debug_level=None):
-        self.logger = logging.getLogger('book')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # filenames are expanded as a list and sorted by filename
-        self.pages = glob.glob(fn); self.pages.sort()
-        # In case length of the resulting list is zero, this means no file was found that matches fn. In that case we raise an error.
-        if len(self.pages) == 0:
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
-        self.ipage = -1; self.page = None
-        self.renames = {} # each time when opening a file, a renaming should be done.
-        self.set_page(0)
-
-        # we consider that the outer dimension is the one we concatenate
-        self.concat_dim = concat_dim
-        if self.concat_dim is None:
-            self.concat_dim = self.concat_dim=list(self.page.dims.keys())[0]
-
-    # this wraps the xarray sel-commmand
-    def sel(*args, **kwargs):
-        for dim in kwargs.keys():
-            if dim == self.concat_dim:
-                self.browse_page(**{dim: kwargs[dim]})
-        return page.sel(*args,**kwargs)
-
-
-    ## this wraps the xarray class -> some issues with that, so I just copy the sel command (which I do not use yet)
-    #def __getattr__(self,attr):
-    #    orig_attr = self.page.__getattribute__(attr)
-    #    if callable(orig_attr):
-    #        def hooked(*args, **kwargs):
-    #            for dim in kwargs.keys():
-    #                if dim == self.concat_dim:
-    #                    self.browse_page(**{dim: kwargs[dim]})
-    #
-    #            result = orig_attr(*args, **kwargs)
-    #            # prevent wrapped_class from becoming unwrapped
-    #            if result == self.page:
-    #                return self
-    #            self.post()
-    #            return result
-    #        return hooked
-    #    else:
-    #        return orig_attr
-
-    def set_renames(self,renames):
-        #first, we convert back to original names, and afterwards, we apply the update of the renames.
-        reverse_renames = dict((v,k) for k,v in self.renames.items())
-        self.renames = renames
-        if self.page is not None:
-            self.page = self.page.rename(reverse_renames)
-            self.page = self.page.rename(self.renames)
-
-    def set_page(self,ipage,page=None):
-        """ this sets the right page according to ipage:
-                - We do not switch the page if we are already at the right one
-                - we set the correct renamings (level -> lev, latitude -> lat,
-                etc.)
-                - The dataset is also squeezed.
-        """
-
-        if ((ipage != self.ipage) or (page is not None)):
-
-            if self.page is not None:
-                self.page.close()
-
-            self.ipage = ipage
-            if page is not None:
-                self.page = page
-            else:
-                if self.ipage == -1:
-                   self.page = None
-                else:
-                    #try:
-
-                    self.logger.info("Switching to page "+str(self.ipage)+': '\
-                                     +self.pages[self.ipage])
-                    self.page = xr.open_dataset(self.pages[self.ipage])
-
-
-            # do some final corrections to the dataset to make them uniform
-            if self.page is not None:
-               if 'latitude' in self.page.dims:
-#    sel       f.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
-               if 'level' in self.page.dims:
-                   self.page = self.page.rename({'level':'lev'})
-
-               self.page = self.page.rename(self.renames)
-               self.page = self.page.squeeze(drop=True)
-
-    def browse_page(self,rewind=2,**args):
-
-        # at the moment, this is only tested with files that are stacked according to the time dimension.
-        dims = args.keys()
-
-
-        if self.ipage == -1:
-            self.set_page(0)
-
-        found = False
-        iipage = 0
-        startipage = self.ipage - rewind
-        while (iipage < len(self.pages)) and not found:
-            ipage = (iipage+startipage) % len(self.pages)
-            for dim in args.keys():
-                this_file = True
-
-                # here we store the datetimes in a directly-readable dictionary, so that we don't need to load it every time again
-                if 'dims' not in self.__dict__:
-                    self.dims = {}
-                if dim not in self.dims.keys():
-                    self.dims[dim] = [None]*len(self.pages)
-
-                if self.dims[dim][ipage] is None:
-                    self.logger.info('Loading coordinates of dimension "'+dim+\
-                                     '" of page "' +str(ipage)+'".')
-                    self.set_page(ipage)
-                    # print(ipage)
-                    # print(dim)
-                    # print(dim,self.page[dim].values)
-                    self.dims[dim][ipage] = self.page[dim].values
-
-                # determine current time range of the current page
-                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
-                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.
-
-                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
-                    this_file = False
-
-            if this_file:
-                found = True
-                self.set_page(ipage)
-            else:
-
-                #if ((args[dim] >= self.page[dim].min().values) and (args[dim] < self.page[dim].max().values)):
-                #    iipage = len(self.pages) # we stop searching
-
-                iipage += 1
-
-        if not found:
-            self.logger.info("Page not found. Setting to page -1")
-            #iipage = len(self.pages) # we stop searching further
-            self.set_page(-1)
-
-        if self.ipage != -1:
-            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
-        else:
-            self.logger.debug("I'm now at page "+ str(self.ipage))
-
-
-class data_global(object):
-    def __init__(self,sources= {
-        # # old gleam
-        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
-        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
-        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
-        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
-        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
-        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
-        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
-        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
-        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
-        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
-        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
-        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
-        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
-        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
-        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
-        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
-        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
-        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
-        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
-        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
-        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
-        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
-        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
-        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
-        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
-        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
-        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
-        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
-        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
-        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
-        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
-        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
-        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
-        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
-        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
-        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
-        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
-        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
-        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
-        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
-        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
-        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
-        },debug_level=None):
-        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
-        self.sources = sources
-        self.datarefs = {}
-        self.datasets = {}
-        self.datetime = dt.datetime(1981,1,1)
-
-        self.logger = logging.getLogger('data_global')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-        self.debug_level = debug_level
-
-        warnings.warn('omitting pressure field p and advection')
-
-    def in_library(self,fn):
-        if fn not in self.library.keys():
-            return False
-        else:
-            print("Warning: "+fn+" is already in the library.")
-            return True
-
-    def add_to_library(self,fn):
-        if not self.in_library(fn):
-            print("opening: "+fn)
-            self.library[fn] = \
-                book(fn,concat_dim='time',debug_level=self.debug_level)
-
-            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
-            #if 'latitude' in self.library[fn].variables:
-            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-
-    # default procedure for loading datasets into the globaldata library
-    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
-        if type(varssource) is str:
-            varssource = [varssource]
-        if type(varsdest) is str:
-            varsdest = [varsdest]
-
-        self.add_to_library(input_fn)
-
-        if varssource is None:
-            varssource = []
-            for var in self.sources[input_fn].variables:
-                avoid = \
-                ['lat','lon','latitude','longitude','time','lev','level']
-                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
-                    varssource.append(var)
-
-        if varsdest is None:
-            varsdest = varssource
-
-        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
-        for ivar,vardest in enumerate(varsdest):
-            varsource = varssource[ivar]
-            print('setting '+vardest+' as '+varsource+' from '+input_fn)
-
-            if vardest in self.datarefs.keys():
-                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
-            #self.add_to_library(fn,varsource,vardest)
-            if vardest != varsource:
-                libkey = input_fn+'.'+varsource+'.'+vardest
-                if libkey not in self.library.keys():
-                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
-                    self.library[libkey] = book(input_fn,\
-                                                debug_level=self.debug_level)
-                    self.library[libkey].set_renames({varsource: vardest})
-
-                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
-                self.datasets[vardest] =self.library[self.datarefs[vardest]]
-            else:
-                self.datarefs[vardest] = input_fn
-                self.datasets[vardest] =self.library[self.datarefs[vardest]]
-
-            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
-            #     print('Warning: '+ vardest "not in " + input_fn)
-
-
-
-    def load_datasets(self,sources = None,recalc=0):
-
-        if sources is None:
-            sources = self.sources
-        for key in sources.keys():
-            #datakey,vardest,*args = key.split(':')
-            datakey,vardest = key.split(':')
-            #print(datakey)
-
-            fnvarsource = sources[key].split(':')
-            if len(fnvarsource) > 2:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource,fnargs = fnvarsource
-                fnargs = [fnargs]
-            elif len(fnvarsource) > 1:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource = fnvarsource
-                fnargs = []
-            else:
-                fn = sources[key]
-                varsource = vardest
-            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
-
-    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
-            # the default way of loading a 2d dataset
-            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
-                self.load_dataset_default(fn,varsource,vardest)
-            elif datakey == 'IGBPDIS':
-                if vardest == 'alpha':
-                    ltypes = ['W','B','H','TC']
-                    for ltype in ltypes:
-                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
-                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
-
-
-                    # landfr = {}
-                    # for ltype in ['W','B','H','TC']:
-                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
-
-
-
-                    keytemp = 'alpha'
-                    fnkeytemp = fn+':IGBPDIS:alpha'
-                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
-                        self.library[fnkeytemp]  = book(fnkeytemp,
-                                                        debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-                    else:
-                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
-                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
-                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
-                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
-                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
-                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
-                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
-
-                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-
-                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
-                        for ltype in ltypes:
-                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
-
-                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
-                        print('writing file to: '+fnkeytemp)
-                        os.system('rm '+fnkeytemp)
-                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
-                        self.library[fnkeytemp].close()
-
-
-                        self.library[fnkeytemp]  = \
-                            book(fnkeytemp,debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-
-
-                else:
-                    self.load_dataset_default(fn,varsource,vardest)
-
-
-            elif datakey == 'GLAS':
-                self.load_dataset_default(fn,varsource,vardest)
-                if vardest == 'z0m':
-                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
-                elif vardest == 'z0h':
-                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
-            elif datakey == 'DSMW':
-
-
-                # Procedure of the thermal properties:
-                # 1. determine soil texture from DSMW/10.
-                # 2. soil type with look-up table (according to DWD/EXTPAR)
-                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
-                #    with parameter look-up table from Noilhan and Planton (1989).
-                #    Note: The look-up table is inspired on DWD/COSMO
-
-                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first
-
-
-
-                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
-                self.load_dataset_default(fn,'DSMW')
-                print('calculating texture')
-                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
-                TEMP  = {}
-                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
-                TEMP3 = {}
-                for SPKEY in SPKEYS:
-
-
-                    keytemp = SPKEY+'_values'
-                    fnoutkeytemp = fnout+':DSMW:'+keytemp
-                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                    else:
-                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
-                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
-                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-                        # for faster computation, we need to get it to memory out of Dask.
-                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
-                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
-
-                # yes, I know I only check the last file.
-                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
-                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
-                        print('idx',idx,SPKEY)
-                        SEL = (TEMP2 == idx)
-                    #     print(idx,len(TEMP3))
-                        for SPKEY in SPKEYS:
-                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
-
-                    for SPKEY in SPKEYS:
-                        keytemp = SPKEY+'_values'
-                        fnoutkeytemp = fnout+':DSMW:'+keytemp
-                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
-                        os.system('rm '+fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].close()
-
-
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                keytemp = 'texture'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-                else:
-                    self.library[fn+':DSMW:texture'] = xr.Dataset()
-                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
-                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
-                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
-                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-
-                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
-
-                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
-                    zundef[zundef < 0] = np.nan
-                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
-                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
-
-                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-
-
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-                print('calculating texture type')
-
-
-
-                keytemp = 'itex'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-                else:
-                    self.library[fnoutkeytemp] = xr.Dataset()
-                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-                    X = self.datasets['texture'].page['texture'].values*100
-                    X[pd.isnull(X)] = -9
-
-
-                    self.datasets[keytemp][keytemp].values = X
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
-                    self.datasets['itex'].close()
-
-
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-
-                keytemp = 'isoil'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                isoil_reprocessed = False
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-                else:
-                    isoil_reprocessed = True
-                    print('calculating soil type')
-                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    ITEX = self.datasets['itex'].page['itex'].values
-                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
-                    LOOKUP = [
-                              [-10 ,9],# ocean
-                              [0 ,7],# fine textured, clay (soil type 7)
-                              [20,6],# medium to fine textured, loamy clay (soil type 6)
-                              [40,5],# medium textured, loam (soil type 5)
-                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
-                              [80,3],# coarse textured, sand (soil type 3)
-                              [100,9],# coarse textured, sand (soil type 3)
-                            ]
-                    for iitex,iisoil in LOOKUP:
-                        ISOIL[ITEX > iitex] = iisoil
-                        print('iitex,iisoil',iitex,iisoil)
-
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    LOOKUP = [
-                              [9001, 1 ], # ice, glacier (soil type 1)
-                              [9002, 2 ], # rock, lithosols (soil type 2)
-                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
-                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
-                              [9,    9 ], # undefined (ocean)
-                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
-                              [9000, 9 ], # undefined (inland lake)
-                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
-                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
-                            ]
-                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
-                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
-
-                    CODE_VALUES[ITEX == 901200] = 9012
-                    for icode,iisoil in LOOKUP:
-                        ISOIL[CODE_VALUES == icode] = iisoil
-
-                    self.datasets['isoil']['isoil'].values = ISOIL
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-                    print('saved inbetween file to: '+fnoutkeytemp)
-
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                #adopted from data_soil.f90 (COSMO5.0)
-                SP_LOOKUP = {
-                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
-                  # (by index)                                           loam                    loam                                water      ice
-                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
-                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
-                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
-                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
-                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
-                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
-                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
-                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
-                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
-                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
-                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
-                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
-                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
-                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
-                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
-                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
-                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
-                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
-                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
-                  # Important note: For peat, the unknown values below are set equal to that of loam
-                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
-                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
-                  #error in table 2 of NP89: values need to be multiplied by e-6
-                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
-                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
-
-                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
-                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
-                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
-                }
-
-
-                # isoil_reprocessed = False
-                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-
-                #     self.library[fn+':DSMW:isoil'] = \
-                #             book(fnoutkeytemp,debug_level=self.debug_level)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-                # else:
-                #     isoil_reprocessed = True
-                #     print('calculating soil type')
-                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-
-
-
-                # this should become cleaner in future but let's hard code it for now.
-                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
-                print('calculating soil parameter')
-                DATATEMPSPKEY = {}
-                if (recalc < 1) and (isoil_reprocessed == False): 
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        keytemp = SPKEY
-                        fnoutkeytemp=fnout+':DSMW:'+keytemp
-                        self.library[fn+':DSMW:'+SPKEY] =\
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
-                        self.datarefs[SPKEY] =fnoutkeytemp
-                else:
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-
-                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
-                    ISOIL = self.datasets['isoil'].page['isoil'].values
-                    print(np.where(ISOIL>0.))
-                    for i in range(11):
-                        SELECT = (ISOIL == i)
-                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
-
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
-
-                        os.system('rm '+fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].close()
-                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
-
-                        self.library[fn+':DSMW:'+SPKEY] = \
-                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-
-
-            else:
-                self.load_dataset_default(fn,varsource,vardest)
-
-
-
-
-
-
-#
-#                 # only print the last parameter value in the plot
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'cala'
-#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'crhoc'
-#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#     key = "CERES"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         CERES_start_date = dt.datetime(2000,3,1)
-#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-#
-#         var = 'cc'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#         print(class_settings.lat,class_settings.lon)
-#
-#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
-#
-#         input_nc.close()
-#
-
-
-#     key = "GIMMS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-#         print("Reading Leag Area Index from "+input_fn)
-#         var = 'LAI'
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-#
-#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-#
-#         if np.isnan(tarray[idatetime]):
-#             print("interpolating GIMMS cveg nan value")
-#
-#             mask = np.isnan(tarray)
-#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-#             else:
-#                 print("Warning. Could not interpolate GIMMS cveg nan value")
-#
-#         class_settings.__dict__[var] = tarray[idatetime]
-#
-#         input_nc.close()
-#
-#     key = "IGBPDIS_ALPHA"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         var = 'alpha'
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-#         print("Reading albedo from "+input_fn)
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#
-#         landfr = {}
-#         for ltype in ['W','B','H','TC']:
-#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-#
-#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-#
-#         alpha=0.
-#         for ltype in landfr.keys():
-#             alpha += landfr[ltype]*aweights[ltype]
-#
-#
-#         class_settings.__dict__[var] = alpha
-#         input_nc.close()
-#
-#
-#     key = "ERAINT_ST"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         print("Reading soil temperature from "+input_fn)
-#
-#         var = 'Tsoil'
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         var = 'T2'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-#
-#
-#         input_nc.close()
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #var = 'T2'
-#     #valold = class_settings.__dict__[var]
-#     #
-#     #class_settings.__dict__[var] = 305.
-#     #class_settings.__dict__['Tsoil'] = 302.
-#     #valnew = class_settings.__dict__[var]
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #
-#     #var = 'Lambda'
-#     #valold = class_settings.__dict__[var]
-#
-#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
-#     ## I need to ask Chiel.
-#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-#     #
-#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
-#     #class_settings.__dict__[var] = valnew
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     key = "GLAS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-#         print("Reading canopy height for determining roughness length from "+input_fn)
-#         var = 'z0m'
-#
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-#
-#         lowerlimit = 0.01
-#         if testval < lowerlimit:
-#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-#             class_settings.__dict__[var] = lowerlimit
-#         else:
-#             class_settings.__dict__[var] = testval
-#
-#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-#
-#
-#         input_nc.close()
-
-
-
-
-
diff --git a/build/lib/class4gl/interface_functions.py b/build/lib/class4gl/interface_functions.py
deleted file mode 100644
index 3e483f3..0000000
--- a/build/lib/class4gl/interface_functions.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-#from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-#'_afternoon.yaml'
-def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
-    filename = yaml_file.name
-    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-    #yaml_file = open(filename)
-
-    #print('going to next observation',filename)
-    yaml_file.seek(index_start)
-
-    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-
-    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-    filebuffer.write(buf)
-    filebuffer.close()
-    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-    
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-
-    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-    print(command)
-    os.system(command)
-    jsonstream = open(filename+'.buffer.json.'+str(index_start))
-    record_dict = json.load(jsonstream)
-    jsonstream.close()
-    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-
-
-    if mode =='mod':
-        modelout = class4gl()
-        modelout.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        return modelout
-    elif mode == 'ini':
-
- 
-        # datetimes are incorrectly converted to strings. We need to convert them
-        # again to datetimes
-        for key,value in record_dict['pars'].items():
-            # we don't want the key with columns that have none values
-            if value is not None: 
-                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
-               # elif (type(value) == str):
-                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-
-            if (value == 0.9e19) or (value == '.9e19'):
-                record_dict['pars'][key] = np.nan
-        for key in record_dict.keys():
-            #print(key)
-            if key in ['air_ap','air_balloon',]:
-                #NNprint('check')
-                for datakey,datavalue in record_dict[key].items():
-                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-
-        #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        c4gli = class4gl_input()
-        print(c4gli.logger,'hello')
-        c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-        return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-
-class stations(object):
-    def __init__(self,path,suffix='ini',refetch_stations=False):
-
-        self.path = path
-
-        self.file = self.path+'/stations_list.csv'
-        if (os.path.isfile(self.file)) and (not refetch_stations):
-            self.table = pd.read_csv(self.file)
-        else:
-            self.table = self.get_stations(suffix=suffix)
-            self.table.to_csv(self.file)
-        
-        self.table = self.table.set_index('STNID')
-        #print(self.table)
-
-    def get_stations(self,suffix):
-        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
-        if len(stations_list_files) == 0:
-            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
-        stations_list_files.sort()
-        print(stations_list_files)
-        if len(stations_list_files) == 0:
-            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
-        stations_list = []
-        for stations_list_file in stations_list_files:
-            thisfile = open(stations_list_file,'r')
-            yamlgen = yaml.load_all(thisfile)
-            try:
-                first_record  = yamlgen.__next__()
-            except:
-                first_record = None
-            if first_record is not None:
-                stations_list.append({})
-                for column in ['STNID','latitude','longitude']:
-                    #print(first_record['pars'].keys())
-                    stations_list[-1][column] = first_record['pars'][column]
-                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
-            yamlgen.close()
-            thisfile.close()
-    
-        print(stations_list)
-        return pd.DataFrame(stations_list)
-
-class stations_iterator(object):
-    def __init__(self,stations):
-        self.stations = stations
-        self.ix = -1 
-    def __iter__(self):
-        return self
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.stations.table)) 
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_row(self,row):
-        self.ix = row
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_STNID(self,STNID):
-        self.ix = np.where((self.stations.table.index == STNID))[0][0]
-        print(self.ix)
-        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-    def close():
-        del(self.ix)
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.records))
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-# #'_afternoon.yaml'
-# def get_record_yaml(yaml_file,index_start,index_end):
-#     filename = yaml_file.name
-#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-#     #yaml_file = open(filename)
-# 
-#     #print('going to next observation',filename)
-#     yaml_file.seek(index_start)
-# 
-#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-# 
-#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-#     filebuffer.write(buf)
-#     filebuffer.close()
-#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-#     
-#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-# 
-#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-#     print(command)
-#     os.system(command)
-#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
-#     record_dict = json.load(jsonstream)
-#     jsonstream.close()
-#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-#  
-#     # datetimes are incorrectly converted to strings. We need to convert them
-#     # again to datetimes
-#     for key,value in record_dict['pars'].items():
-#         # we don't want the key with columns that have none values
-#         if value is not None: 
-#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
-#            # elif (type(value) == str):
-#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-#                 
-#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
-# 
-#         if (value == 0.9e19) or (value == '.9e19'):
-#             record_dict['pars'][key] = np.nan
-#     for key in record_dict.keys():
-#         print(key)
-#         if key in ['air_ap','air_balloon',]:
-#             print('check')
-#             for datakey,datavalue in record_dict[key].items():
-#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-# 
-#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-# 
-#     c4gli = class4gl_input()
-#     c4gli.load_yaml_dict(record_dict)
-#     return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
    """Gather the per-station record tables into one DataFrame.

    Records are read from per-station yaml files ("<STNID>_<subset>.yaml", or
    the chunked "<STNID>_<chunk>_<subset>.yaml" variant).  Parsing the yaml is
    slow, so the resulting table is cached as a pickle file next to each yaml
    source; the cache is re-used as long as it is newer than its yaml file.

    Parameters
    ----------
    stations : pandas.DataFrame
        station table indexed by station id (STNID).
    path_yaml : str
        directory holding the yaml record files and their pickle caches.
    getchunk : 'all' or int, optional
        read all chunk files of each station, or only the given chunk number.
    subset : str, optional
        filename suffix selecting the record subset ('morning', 'ini', 'mod',
        'afternoon', ...).
    refetch_records : bool, optional
        when True, always regenerate the pickle caches from the yaml files.

    Returns
    -------
    pandas.DataFrame
        all records, indexed by (STNID, chunk, index).
    """
    records = pd.DataFrame()
    for STNID,station in stations.iterrows():
        dictfnchunks = []
        # bugfix: compare strings with '==' instead of 'is' (identity), which
        # only worked by virtue of CPython string interning
        if getchunk == 'all':

            # we try the old single-chunk filename format first (usually for
            # original profile pairs)
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
            if os.path.isfile(fn):
                chunk = 0
                dictfnchunks.append(dict(fn=fn,chunk=chunk))

            # otherwise, we use the new multi-chunk filename format
            else:
                chunk = 0
                end_of_chunks = False
                while not end_of_chunks:
                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                    if os.path.isfile(fn):
                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
                    else:
                        end_of_chunks = True
                    chunk += 1
        else:
            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
            dictfnchunks.append(dict(fn=fn,chunk=getchunk))

        if len(dictfnchunks) > 0:
            for dictfnchunk in dictfnchunks:
                yamlfilename = dictfnchunk['fn']
                chunk = dictfnchunk['chunk']
                print(chunk)

                # the pickle cache lives next to the yaml source
                pklfilename = yamlfilename.replace('.yaml','.pkl')

                # decide whether the pickle cache has to be (re)generated
                generate_pkl = False
                if not os.path.isfile(pklfilename):
                    print('pkl file does not exist. I generate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                elif not (os.path.getmtime(yamlfilename) <  \
                    os.path.getmtime(pklfilename)):
                    print('pkl file older than yaml file, so I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True

                if refetch_records:
                    print('refetch_records flag is True. I regenerate "'+\
                          pklfilename+'" from "'+yamlfilename+'"...')
                    generate_pkl = True
                if not generate_pkl:
                    records = pd.concat([records,pd.read_pickle(pklfilename)])
                else:
                    with open(yamlfilename) as yaml_file:

                        dictout = {}

                        # skip ahead to the first record separator ('---')
                        next_record_found = False
                        end_of_file = False
                        while (not next_record_found) and (not end_of_file):
                            linebuffer = yaml_file.readline()
                            next_record_found = (linebuffer == '---\n')
                            end_of_file = (linebuffer == '')
                        next_tell = yaml_file.tell()

                        while not end_of_file:

                            print(' next record:',next_tell)
                            current_tell = next_tell
                            next_record_found = False
                            yaml_file.seek(current_tell)
                            # copy the current record into a temporary buffer
                            # file; 'inf'/'nan' are replaced by 0 on the way
                            # (the Ruby yaml parser chokes on them)
                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                            linebuffer = ''
                            while ( (not next_record_found) and (not end_of_file)):
                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
                                linebuffer = yaml_file.readline()
                                next_record_found = (linebuffer == '---\n')
                                end_of_file = (linebuffer == '')
                            filebuffer.close()

                            next_tell = yaml_file.tell()
                            # remember the byte range of the record in the
                            # yaml file, so it can be re-read directly later
                            index_start = current_tell
                            index_end = next_tell

                            # convert the yaml buffer to json with an external
                            # Ruby one-liner.
                            # SECURITY/PORTABILITY NOTE(review): the ruby path
                            # is hard-coded and the shell command is built by
                            # string concatenation, so filenames containing
                            # spaces or shell metacharacters will break (or be
                            # injected into) the command -- consider
                            # subprocess.run() with an argument list instead.
                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' '
                            print(command)

                            os.system(command)
                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
                            record = json.load(jsonstream)
                            dictouttemp = {}
                            for key,value in record['pars'].items():
                                # we don't want the keys with columns that
                                # have None values
                                if value is not None:
                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
                                   if (type(value) in regular_numeric_types):
                                        dictouttemp[key] = value
                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','ldatetime_daylight']:
                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
                            recordindex = record['index']
                            dictouttemp['chunk'] = chunk
                            dictouttemp['index_start'] = index_start
                            dictouttemp['index_end'] = index_end
                            # clean up the temporary buffer files with
                            # os.remove instead of shelling out to 'rm'
                            os.remove(yamlfilename+'.buffer.json.'+str(current_tell))
                            for key,value in dictouttemp.items():
                                if key not in dictout.keys():
                                    dictout[key] = {}
                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                            print(' obs record registered')
                            jsonstream.close()
                            os.remove(yamlfilename+'.buffer.yaml.'+str(current_tell))
                    records_station = pd.DataFrame.from_dict(dictout)
                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                    print('writing table file ('+pklfilename+') for station '\
                          +str(STNID))
                    records_station.to_pickle(pklfilename)
                    records = pd.concat([records,records_station])
    return records
-
def stdrel(mod,obs,columns):
    """Standardise model values relative to the per-station observations.

    For every requested column, the result is the sum of two terms, both
    scaled by the per-station observed standard deviation:
      * the per-station model bias (model mean minus observed mean), and
      * the model anomaly (value minus the per-station model mean).
    Grouping is done on the 'STNID' key of both tables.
    """
    result = pd.DataFrame(columns=columns)
    for col in columns:
        obs_std = obs.groupby('STNID')[col].transform('std')
        mod_mean = mod.groupby('STNID')[col].transform('mean')
        obs_mean = obs.groupby('STNID')[col].transform('mean')
        bias_term = (mod_mean - obs_mean) / obs_std
        anomaly_term = (mod[col] - mod_mean) / obs_std
        result[col] = bias_term + anomaly_term
    return result
-
def pct(obs,columns):
    """Return the percentile rank (in (0, 1]) of each value in its column.

    Parameters
    ----------
    obs : pandas.DataFrame
        table containing at least the requested columns.
    columns : list of str
        columns for which percentile ranks are computed.

    Returns
    -------
    pandas.DataFrame
        one column per requested key, each value replaced by its percentile
        rank within that column (ties share the average rank).
    """
    # cleanup: the local result no longer shadows the function name, and the
    # redundant empty-string pre-assignment per column was dropped
    ranks = pd.DataFrame(columns=columns)
    for column in columns:
        ranks[column] = obs[column].rank(pct=True)
    return ranks
-
def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
    """Compute hourly tendencies between morning and afternoon records.

    Parameters
    ----------
    mod_afternoon : pandas.DataFrame
        afternoon values (model or observed) for the requested keys.
    obs_afternoon : pandas.DataFrame
        observed afternoon records; only its 'ldatetime' column is used.
    obs_morning : pandas.DataFrame
        observed morning records: starting values and 'ldatetime'.
    keys : list of str
        variables for which the tendency 'd<key>dt' is computed.

    Returns
    -------
    pandas.DataFrame
        columns 'd<key>dt' with the change per hour of each key.
    """
    stats = pd.DataFrame()
    # bugfix: use total_seconds() instead of .dt.seconds, which returns only
    # the seconds *component* and silently drops whole days from the
    # interval; also hoist the loop-invariant elapsed time out of the loop
    elapsed_seconds = (obs_afternoon.ldatetime -
                       obs_morning.ldatetime).dt.total_seconds()
    for key in keys:
        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key]) / \
                              elapsed_seconds*3600.
    return stats
-
diff --git a/build/lib/class4gl/interface_multi.py b/build/lib/class4gl/interface_multi.py
deleted file mode 100644
index 83148e5..0000000
--- a/build/lib/class4gl/interface_multi.py
+++ /dev/null
@@ -1,2061 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-# from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
# Segment data for a diverging colormap used in the statistics view:
# red at the low end, neutral grey (0.70, 0.70, 0.70) at the midpoint and
# blue at the high end (matplotlib LinearSegmentedColormap format:
# (position, value-left, value-right) per channel).
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


# Ruby is needed by get_records for the yaml -> json conversion.
# NOTE(review): 'module load' is an environment-modules shell function; it
# runs here in a child shell, so it presumably does not change this Python
# process' environment -- confirm whether this call has any effect.
os.system('module load Ruby')
-
-class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
-        """ creates an interactive interface for analysing class4gl experiments
-
-        INPUT:
-            path_exp : path of the experiment output
-            path_obs : path of the observations 
-            globaldata: global data that is being shown on the map
-            refetch_stations: do we need to build the list of the stations again?
-        OUTPUT:
-            the procedure returns an interface object with interactive plots
-
-        """
-        
-        # set the ground
-        self.globaldata = globaldata
-
- 
-        self.path_exp = path_exp
-        self.path_obs = path_obs
-        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
-
-        # # get the list of stations
-        # stationsfile = self.path_exp+'/stations_list.csv'
-        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
-        #     stations = pd.read_csv(stationsfile)
-        # else:
-        #     stations = get_stations(self.path_exp)
-        #     stations.to_csv(stationsfile)
-
-        # stations = stations.set_index('STNID')
-
-        self.frames = {}
-
-        self.frames['stats'] = {}
-        self.frames['worldmap'] = {}
-                
-        self.frames['profiles'] = {}
-        self.frames['profiles'] = {}
-        self.frames['profiles']['DT'] = None
-        self.frames['profiles']['STNID'] = None
-
-        #self.frames['worldmap']['stationsfile'] = stationsfile
-        self.frames['worldmap']['stations'] = stations(self.path_exp, \
-                                                       suffix='ini',\
-                                                       refetch_stations=refetch_stations)
-
-        # Initially, the stats frame inherets the values/iterators of
-        # worldmap
-        for key in self.frames['worldmap'].keys():
-            self.frames['stats'][key] = self.frames['worldmap'][key]
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_ini'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='ini',\
-                                           refetch_records=refetch_records
-                                           )
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_mod'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='mod',\
-                                           refetch_records=refetch_records
-                                           )
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_obs_afternoon'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_obs,\
-                                           subset='afternoon',\
-                                           refetch_records=refetch_records
-                                           )
-
-        self.frames['stats']['records_all_stations_mod'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['records_all_stations_ini']['dates'] = \
-            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
-
-
-        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
-
-        self.frames['stats']['records_all_stations_obs_afternoon'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['viewkeys'] = ['h','theta','q']
-        print('Calculating table statistics')
-        self.frames['stats']['records_all_stations_mod_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_mod'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-
-        self.frames['stats']['inputkeys'] = inputkeys
-        
-        # self.frames['stats']['inputkeys'] = \
-        #     [ key for key in \
-        #       self.globaldata.datasets.keys() \
-        #       if key in \
-        #       list(self.frames['stats']['records_all_stations_obs'].columns)]
-
-
-        # get units from the class4gl units database
-        self.units = dict(units)
-        # for those that don't have a definition yet, we just ask a question
-        # mark
-        for var in self.frames['stats']['inputkeys']:
-            self.units[var] = '?'
-
-        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
-        self.frames['stats']['records_all_stations_ini_pct'] = \
-                  pct(self.frames['stats']['records_all_stations_ini'], \
-                      columns = self.frames['stats']['inputkeys'])
-
-        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
-        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-        #     mod['
-
-        # 
-        # 
-        # \
-        #        self.frames['stats']['records_all_stations_mod'], \
-
-
-
-        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
-        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
-        #               columns = [ 'd'+key+'dt' for key in \
-        #                           self.frames['stats']['viewkeys']], \
-        #              )
-
-        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
-        #               obs = self.frames['stats']['records_all_stations_ini'], \
-        #               columns = self.frames['stats']['viewkeys'], \
-        #              )
-        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
-        
-        print('filtering pathological data')
-        # some observational sounding still seem problematic, which needs to be
-        # investigated. In the meantime, we filter them
-        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
-
-        # we filter ALL data frames!!!
-        for key in self.frames['stats'].keys():
-            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
-               (self.frames['stats'][key].index.names == indextype):
-                self.frames['stats'][key] = self.frames['stats'][key][valid]
-        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
-
-        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
-
-
-        print("filtering stations from interface that have no records")
-        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
-            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                    == STNID).sum() == 0):
-                print("dropping", STNID)
-                self.frames['worldmap']['stations'].table = \
-                        self.frames['worldmap']['stations'].table.drop(STNID)
-                    
-        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-        
-        # TO TEST: should be removed, since it's is also done just below
-        self.frames['stats']['stations_iterator'] = \
-            self.frames['worldmap']['stations_iterator'] 
-
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
-        self.next_station()
-
-        # self.goto_datetime_worldmap(
-        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-        #     'after')
-    def sel_station(self,STNID=None,rownumber=None):
-
-        if (STNID is not None) and (rownumber is not None):
-            raise ValueError('Please provide either STNID or rownumber, not both.')
-
-        if (STNID is None) and (rownumber is None):
-            raise ValueError('Please provide either STNID or rownumber.')
-            
-        if STNID is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
-            print(
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-            )
-            self.update_station()
-        elif rownumber is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
-            self.update_station()
-
-
-
    def next_station(self,event=None,jump=1):
        """Advance the world-map station iterator by `jump` positions (jump
        may be negative) and refresh all frames for the new station.

        `event` is the (unused) matplotlib widget event of the button
        callback.  When the iterator is exhausted, the StopIteration is
        suppressed so the current selection simply stays in place; the
        interface is refreshed either way.
        """
        with suppress(StopIteration):
            self.frames['worldmap']['STNID'],\
            self.frames['worldmap']['current_station'] \
                = self.frames['worldmap']['stations_iterator'].__next__(jump)
            # self.frames['worldmap']['stations_iterator'].close()
            # del(self.frames['worldmap']['stations_iterator'])
            # self.frames['worldmap']['stations_iterator'] = \
            #                 selfself.frames['worldmap']['stations'].iterrows()
            # self.frames['worldmap']['STNID'],\
            # self.frames['worldmap']['current_station'] \
            #     = self.frames['worldmap']['stations_iterator'].__next__()

        self.update_station()
-
-    def prev_station(self,event=None):
-        self.next_station(jump = -1,event=event)
    def update_station(self):
        """Refresh all frames after the world-map station selection changed.

        Copies the selection from the worldmap frame into the stats frame,
        rebuilds the per-station record tables, (re)opens the station's yaml
        files for the profiles frame, resets both record iterators to the
        first record of the station, and finally calls update_record().
        """
        # the stats frame follows the worldmap selection
        for key in ['STNID','current_station','stations_iterator']:
            self.frames['stats'][key] = self.frames['worldmap'][key]



        # generate index of the current station
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts out in sync with the stats frame
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        # (re)open the yaml files of the selected station, closing the files
        # of the previously selected station first
        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # finally load and draw the first record of the new station
        self.update_record()
-
    def next_record(self,event=None,jump=1):
        """Advance the profiles record iterator by `jump` positions (jump may
        be negative), propagate the new selection to the stats frame and
        refresh the plots.

        `event` is the (unused) matplotlib widget event of the button
        callback.  StopIteration is suppressed, so stepping past the last
        record keeps the current selection.
        """
        with suppress(StopIteration):
            (self.frames['profiles']['STNID'] , \
            self.frames['profiles']['current_record_chunk'] , \
            self.frames['profiles']['current_record_index']) , \
            self.frames['profiles']['current_record_mod'] = \
                      self.frames['profiles']['records_iterator'].__next__(jump)
        # except (StopIteration):
        #     self.frames['profiles']['records_iterator'].close()
        #     del( self.frames['profiles']['records_iterator'])
        #     self.frames['profiles']['records_iterator'] = \
        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
        #     (self.frames['profiles']['STNID'] , \
        #     self.frames['profiles']['current_record_index']) , \
        #     self.frames['profiles']['current_record_mod'] = \
        #                     self.frames['profiles']['records_iterator'].__next__()

        # the stats frame follows the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        self.update_record()
-
-    def prev_record(self,event=None):
-        self.next_record(jump=-1,event=event)
-
    def update_record(self):
        """Load the currently selected record into the profiles frame and
        redraw the interface.

        Looks up the record (keyed by STNID, chunk, index) in the ini /
        afternoon / stats tables, re-reads the corresponding yaml sections
        from the open station files via get_record_yaml (using the stored
        index_start/index_end byte offsets), and triggers the appropriate
        plot refresh.
        """
        # table rows of the selected record
        self.frames['profiles']['current_record_ini'] =  \
            self.frames['profiles']['records_current_station_ini'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'],\
                  self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon'] =  \
            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'] , \
                  self.frames['profiles']['current_record_index'])]

        self.frames['profiles']['current_record_mod_stats'] = \
                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
                    self.frames['profiles']['STNID'], \
                    self.frames['profiles']['current_record_chunk'], \
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_ini_pct'] = \
                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]

        # the stats frame follows the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        # note that the current station/record is the same as in the stats
        # frame at this point

        # re-read the full yaml sections of the record from the open station
        # files, using the byte offsets stored in the record tables
        self.frames['profiles']['record_yaml_mod'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_mod'], \
               self.frames['profiles']['current_record_mod'].index_start,
               self.frames['profiles']['current_record_mod'].index_end,
               mode='mod')

        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_ini'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_ini'], \
               record_ini.index_start,
               record_ini.index_end,
                mode='ini')

        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_obs_afternoon'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_afternoon'], \
               record_afternoon.index_start,
               record_afternoon.index_end,
                mode='ini')


        key = self.frames['worldmap']['inputkey']
        # only redraw the map if the current world map has a time
        # dimension
        if 'time' in self.globaldata.datasets[key].page[key].dims:
            self.goto_datetime_worldmap(
                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                'after')
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap',
                                                  'profiles'])
        else:
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap_stations',
                                                  'profiles'])
-
-    def abline(self,slope, intercept,axis):
-        """Plot a line from slope and intercept"""
-        #axis = plt.gca()
-        x_vals = np.array(axis.get_xlim())
-        y_vals = intercept + slope * x_vals
-        axis.plot(x_vals, y_vals, 'k--')
-
-    def plot(self):
-        import pylab as pl
-        from matplotlib.widgets import Button
-        import matplotlib.pyplot as plt
-        import matplotlib as mpl
-        '''
-        Definition of the axes for the sounding table stats
-        '''
-        
-        fig = pl.figure(figsize=(14,9))
-        axes = {} #axes
-        btns = {} #buttons
-
-        # frames, which sets attributes for a group of axes, buttens, 
-        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
-            label = 'stats_'+str(key)
-            axes[label] = fig.add_subplot(\
-                            len(self.frames['stats']['viewkeys']),\
-                            5,\
-                            5*ikey+1,label=label)
-            # Actually, the axes should be a part of the frame!
-            #self.frames['stats']['axes'] = axes[
-
-            # pointer to the axes' point data
-            axes[label].data = {}
-
-            # pointer to the axes' color fields
-            axes[label].fields = {}
-
-
-        fig.tight_layout()
-        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)
-
-        label ='stats_colorbar'
-        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
-        axes[label].fields = {}
-
-        from matplotlib.colors import LinearSegmentedColormap
-        cdictpres = {'blue': (\
-                           (0.,    0.,  0.),
-                           (0.25,  0.25, 0.25),
-                           (0.5,  .70, 0.70),
-                           (0.75, 1.0, 1.0),
-                           (1,     1.,  1.),
-                           ),
-               'green': (\
-                           (0. ,   0., 0.0),
-                           (0.25,  0.50, 0.50),
-                           (0.5,  .70, 0.70),
-                           (0.75,  0.50, 0.50),
-                           (1  ,    0,  0.),
-                           ),
-               'red':  (\
-                          (0 ,  1.0, 1.0),
-                          (0.25 ,  1.0, 1.0),
-                           (0.5,  .70, 0.70),
-                          (0.75 , 0.25, 0.25),
-                          (1,    0., 0.),
-                          )}
-        
-        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
-
-
-        label = 'times'
-               
-        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-
-
-        label = 'worldmap'
-               
-        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-        axes[label].lat = None
-        axes[label].lon = None
-
-        label = 'worldmap_colorbar'
-        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
-        axes[label].fields = {}
-
-        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
-        label = 'worldmap_stations'
-        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
-        axes[label].data = {}
-
-        fig.canvas.mpl_connect('pick_event', self.on_pick)
-        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)
-
-
-        """ buttons definitions """
-        
-        label = 'bprev_dataset'
-        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous dataset')
-        btns[label].on_clicked(self.prev_dataset)
-
-        label = 'bnext_dataset'
-        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next dataset')
-        btns[label].on_clicked(self.next_dataset)
-
-        label = 'bprev_datetime'
-        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous datetime')
-        btns[label].on_clicked(self.prev_datetime)
-
-        label = 'bnext_datetime'
-        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next datetime')
-        btns[label].on_clicked(self.next_datetime)
-
-
-        label = 'bprev_station'
-        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous station')
-        btns[label].on_clicked(self.prev_station)
-
-        label = 'bnext_station'
-        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next station')
-        btns[label].on_clicked(self.next_station)
-
-        label = 'bprev_record'
-        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous record')
-        btns[label].on_clicked(self.prev_record)
-
-        label = 'bnext_record'
-        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next record')
-        btns[label].on_clicked(self.next_record)
-
-
-        # self.nstatsview = nstatsview
-        # self.statsviewcmap = statsviewcmap
-        self.fig = fig
-        self.axes = axes
-        self.btns = btns
-        self.tbox = {}
-        # self.hover_active = False
-
-        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
-        #                                transform=plt.gcf().transFigure)
-
-        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
-                                          transform=plt.gcf().transFigure)
-
-        label = 'air_ap:theta'
-        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)
-
-        label = 'air_ap:q'
-        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
-
-        label = 'out:h'
-        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
-
-        label = 'out:theta'
-        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
-
-        label = 'out:q'
-        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
-
-        label = 'SEB'
-        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
-
-
-        self.hover_active = False
-        self.fig = fig
-        self.fig.show()
-        self.fig.canvas.draw()
-        self.refresh_plot_interface()
-
-
-    # def scan_stations(self):
-    #     blabla
-        
-
-
-    # def get_records(current_file):
-    #     records = pd.DataFrame()
-
-    #     # initial position
-    #     next_record_found = False
-    #     while(not next_record_found):
-    #         next_record_found = (current_file.readline() == '---\n')
-    #     next_tell = current_file.tell() 
-    #     end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #     while not end_of_file:
-    #         current_tell = next_tell
-    #         next_record_found = False
-    #         current_file.seek(current_tell)
-    #         while ( (not next_record_found) and (not end_of_file)):
-    #             current_line = current_file.readline()
-    #             next_record_found = (currentline == '---\n')
-    #             end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #         # we store the position of the next record
-    #         next_tell = current_file.tell() 
-    #         
-    #         # we get the current record. Unfortunately we need to reset the
-    #         # yaml record generator first.
-    #         current_yamlgen.close()
-    #         current_yamlgen = yaml.load_all(current_file)
-    #         current_file.seek(current_tell)
-    #         current_record_mod = current_yamlgen.__next__()
-    #     current_yamlgen.close()
-
-    #     return records
-
-       #      next_record_found = False
-       #      while(not record):
-       #          next_record_found = (self.current_file.readline() == '---\n')
-       #      self.current_tell0 = self.current_file.tell() 
-
-       #  
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell0 = self.current_file.tell() 
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell1 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell0)
-       #  self.r0 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell1)
-       #  next_record_found = False
-       #  while ( (not next_record_found) and (not end_of_file):
-       #      current_line = self.current_file.readline()
-       #      next_record_found = (currentline == '---\n')
-       #      end_of_file = (currentline == '') # an empty line means we are at the end
-
-       #  self.current_tell2 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell1)
-       #  self.r1 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell2)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell3 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell2)
-       #  self.r2 = self.current_yamlgen.__next__()
-
-       #  # go to position of next record in file
-       #  self.current_file.seek(self.current_tell3)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell4 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell3)
-       #  self.r3 = self.current_yamlgen.__next__()
- 
-       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
-
-    def goto_datetime_worldmap(self,DT,shift=None):
-        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
-            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
-            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
-            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
-                self.frames['worldmap']['iDT'] += 1
-            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
-                self.frames['worldmap']['iDT'] -= 1 
-            # for gleam, we take the values of the previous day
-            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
-                self.frames['worldmap']['iDT'] -= 2 
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-        #else:
-        #    self.frames['worldmap'].pop('DT')
-
-    def next_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def prev_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def next_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-    def prev_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-
-
-    def sel_dataset(self,inputkey):
-        self.frames['worldmap']['inputkey'] = inputkey
-        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
-        self.goto_datetime_worldmap(
-            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-            'after')# get nearest datetime of the current dataset to the profile
-        if "fig" in self.__dict__.keys():
-            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
-       
-    # def prev_station(self,event=None):
-    #     self.istation = (self.istation - 1) % self.stations.shape[0]
-    #     self.update_station()
-
-
-
-
-    #def update_datetime(self):
-    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
-    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
-    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
-    #        print(self.worldmapfocus['DT'])
-    #        self.refresh_plot_interface(only='worldmap')
-
-    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
-
-        #print('r1')
-        for argkey in args.keys():
-            self.__dict__[arg] = args[argkey]
-
-        axes = self.axes
-        tbox = self.tbox
-        frames = self.frames
-        fig = self.fig
- 
-        if (only is None) or ('worldmap' in only):
-            globaldata = self.globaldata
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
-            else:
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
-            keystotranspose = ['lat','lon']
-            for key in dict(datasetxr.dims).keys():
-                if key not in keystotranspose:
-                    keystotranspose.append(key)
-
-            datasetxr = datasetxr.transpose(*keystotranspose)
-            datasetxr = datasetxr.sortby('lat',ascending=False)
-
-            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
-            lonleft = lonleft - 360.
-            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
-            label = 'worldmap'
-            axes[label].clear()
-            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
-            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
-
-        if (only is None) or ('worldmap' in only):
-            #if 'axmap' not in self.__dict__ :
-            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
-            #else:
-
-            #stations = self.stations
-
-
-            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
-            #     resolution = 'l', 
-            # area_thresh = 0.1,
-            #     llcrnrlon=-180., llcrnrlat=-90.0,
-            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
-            # 
-            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
-            # self.gmap.drawcountries(color='white',linewidth=0.3)
-            # #self.gmap.fillcontinents(color = 'gray')
-            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
-            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
-            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
-            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # #self.ax5.shadedrelief()
-
-           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
-
-
-            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
-            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
-
-            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
-            if 'lev' in field.dims:
-                field = field.isel(lev=-1)
-
-            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
-            axes[label].axis('off')
-
-            from matplotlib import cm
-            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
-            
-            
-            title=frames['worldmap']['inputkey']
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
-            axes[label].set_title(title)
-
-            label ='worldmap_colorbar'
-            axes[label].clear()
-            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
-
-
-            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
-            # x,y = self.gmap(lons,lats)
-            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
-            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
-
-        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
-
-            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
-            store_xlim = {}
-            store_ylim = {}
-            for ikey, key in enumerate(statskeys_out):
-                if (only is not None) and ('stats_lightupdate' in only):
-                    store_xlim[key] = axes['stats_'+key].get_xlim()
-                    store_ylim[key] = axes['stats_'+key].get_ylim()
-                self.axes['stats_'+key].clear()    
-
-            label = 'times'
-            self.axes[label].clear()
-
-            key = 'dthetadt'
-            x = self.frames['stats']['records_all_stations_ini']['datetime']
-            #print(x)
-            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-            #print(y)
-            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            #print(z)
-
-            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
-            self.axes[label].data[label] = self.axes[label].scatter(x.values,
-                                                                    y.values,
-                                                                    c=z.values,
-                                                                    cmap=self.statsviewcmap,
-                                                                    s=2,
-                                                                    vmin=0.,
-                                                                    vmax=1.,
-                                                                    alpha=alpha_cloud_pixels)
-
-            
-            x = self.frames['stats']['records_current_station_ini']['datetime']
-            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-
-            x = self.frames['profiles']['records_current_station_ini']['datetime']
-            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
-            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
-
-            for ikey, key in enumerate(statskeys_out):
-
-                # show data of all stations
-                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_all_stations_mod_stats'][key]
-                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                qvalmax = x.quantile(0.999)
-                qvalmin = x.quantile(0.001)
-                print('applying extra filter over extreme values for plotting stats')
-                selx = (x >= qvalmin) & (x < qvalmax)
-                sely = (x >= qvalmin) & (x < qvalmax)
-                x = x[selx & sely]
-                y = y[selx & sely]
-                z = z[selx & sely]
-                self.axes['stats_'+key].data['stats_'+key] = \
-                       self.axes['stats_'+key].scatter(x,y, c=z,\
-                                cmap=self.statsviewcmap,\
-                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
-
-                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_current_station_mod_stats'][key]
-                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['profiles']['records_current_station_mod_stats'][key]
-                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
-
-                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
-                y = self.frames['stats']['current_record_mod_stats'][key]
-                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
-                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
-                    axes['stats_'+key].annotate(text, \
-                                               xy=(x,y),\
-                                               xytext=(0.05,0.05),\
-                                               textcoords='axes fraction',\
-                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
-                                               color='white',\
-                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
-                # self.axes['stats_'+key].data[key+'_current_record'] = \
-                #        self.axes['stats_'+key].scatter(x,y, c=z,\
-                #                 cmap=self.statsviewcmap,\
-                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
-
-                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
-                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
-                # # highlight data for curent station
-                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
-
-                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
-
-                if ikey == len(statskeys_out)-1:
-                    self.axes['stats_'+key].set_xlabel('external')
-                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
-                axes['stats_'+key].set_ylabel('model')
-
-
-                if (only is not None) and ('stats_lightupdate' in only):
-                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
-                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
-                else:
-                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
-                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
-                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
-                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
-                self.abline(1,0,axis=self.axes['stats_'+key])
-
-        if (only is None) or ('stats_colorbar' in only):
-            label ='stats_colorbar'
-            axes[label].clear()
-            import matplotlib as mpl
-            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
-            self.axes[label].fields[label] = \
-             mpl.colorbar.ColorbarBase(self.axes[label],\
-                        orientation='horizontal',\
-                        label="percentile of "+self.frames['worldmap']['inputkey'],
-                        alpha=1.,
-                                cmap=self.statsviewcmap,\
-                                       norm=norm
-                         )
-
-        #print('r1')
-        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
-            #print('r2')
-            label = 'worldmap_stations'
-            axes[label].clear()
-            
-            stations = self.frames['worldmap']['stations'].table
-            globaldata = self.globaldata
-            
-            key = label
-
-            #print('r3')
-            if (stations is not None):
-                xlist = []
-                ylist = []
-                #print('r4')
-                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
-            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    xlist.append(x)
-                    ylist.append(y)
-                #picker is needed to make it clickable (pick_event)
-                axes[label].data[label] = axes[label].scatter(xlist,ylist,
-                                                              c='r', s=15,
-                                                              picker = 15,
-                                                              label=key,
-                                                              edgecolor='k',
-                                                              linewidth=0.8)
-
-            # cb.set_label('Wilting point [kg kg-3]')
-                #print('r5')
-
-                
-            #     xseries = []
-            #     yseries = []
-            #     for iSTN,STN in stations.iterrows():
-            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
-            #         xseries.append(x)                    
-            #         yseries.append(y)
-            #         
-            #         
-            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
-                    
-                if ('current_station' in frames['worldmap']):
-                    #print('r5')
-                    STN = frames['stats']['current_station']
-                    STNID = frames['stats']['STNID']
-                    #print('r5')
-
-                    x,y = len(axes['worldmap'].lon)* \
-                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
-                          len(axes['worldmap'].lat)* \
-                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    #print('r6')
-                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
-                    #                          == \
-                    #                          self.frames['worldmap']['STNID'])\
-                    #                         & \
-                    #                         (self.seltablestats['DT'] \
-                    #                          == self.axes['statsview0].focus['DT']) \
-                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
-                    #print('r7')
-                    text = 'STNID: '+ format(STNID,'10.0f') + \
-                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
-                            ', LON: '+format(STN['longitude'],'3.3f')+ \
-                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
-
-                            #+', VAL: '+format(VAL,'.3e')
-
-                    axes[label].scatter(x,y, c='r', s=30,\
-                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
-                    #print('r8')
-            
-                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
-                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
-                    #colorstation = max((min((1.,colorstation)),0.))
-                    colorstation =0.2
-                    from matplotlib import cm
-                    axes[label].annotate(text,
-                                         xy=(x,y),
-                                         xytext=(0.05,0.05),
-                                         textcoords='axes fraction', 
-                                         bbox=dict(boxstyle="round",
-                                         fc = cm.viridis(colorstation)),
-                                         arrowprops=dict(arrowstyle="->",
-                                                         linewidth=1.1),
-                                         color='white' if colorstation < 0.5 else 'black')
-                    #print('r9')
-
-                    # #pos = sc.get_offsets()[ind["ind"][0]]
-                    # 
-                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
-                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
-                    # axes[label].data[label+'statannotate'].set_text(text)
-                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
-                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
-            #print('r9')
-            axes[label].axis('off')
-            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
-            axes[label].set_ylim((len(axes['worldmap'].lat),0))
-            #print('r10')
-
-        if (only is None) or ('profiles' in only): 
-            #print('r11')
-
-            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
-            # # self.update_station(goto_first_sounding=False)
-            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
-            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
-            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
-            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
-
-            label = 'air_ap:theta'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-                # +\
-                # ' -> '+ \
-                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-            
-            
-            
-            
-            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-            #print('r12')
-
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
-            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
-            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
-            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            #print('r13')
-            # 
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r14')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-
-            #print('r15')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-                          
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r16')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r17')
-            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
-            print(hmax)
-            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
-            if valid_mod:
-
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="mod "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                                 +'LT')
-
-            #print('r18')
-            axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('theta [K]')
-
-            label = 'air_ap:q'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-
-            #print('r19')
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            if valid_mod:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            else:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            # 
-            #print('r20')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r21')
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            #print('r23')
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r24')
-            if valid_mod:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="fit ")#+\
-                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
-                             #+'LT')
-            #print('r25')
-            #axes[label].legend()
-
-            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            #axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('q [kg/kg]')
-
-            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-
-            # #pl.subplots_adjust(right=0.6)
-
-            # label = 'q_pro'
-            # axes[label].clear()
-
-            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
-            # 
-            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
-            # 
-            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
-
-            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
-            # #pl.subplots_adjust(right=0.6)
-            # axes[label].set_xlabel('specific humidity [kg/kg]')
- 
-
-            #print('r26')
-            time = self.frames['profiles']['record_yaml_mod'].out.time
-            for ilabel,label in enumerate(['h','theta','q']):
-                axes["out:"+label].clear()
-                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
-                axes["out:"+label].set_ylabel(label)
-                if ilabel == 2:
-                    axes["out:"+label].set_xlabel('local sun time [h]')
-                
-            #print('r27')
-            label = 'SEB'
-            axes[label].clear()
-            
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
-            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
-            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
-            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
-                
-            #print('r28')
-            
-            axes[label].legend()
-            
-            #         for ax in self.fig_timeseries_axes:
-#             ax.clear()
-#         
-#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
-#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
-#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
-#         #print(self.morning_sounding.c4gl.out.Swin)
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
-#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
-#         self.fig_timeseries_axes[3].legend()
-#         self.fig.canvas.draw()
-            
-
-
-
-
-
-
-        #self.ready()
-        #print('r29')
-        fig.canvas.draw()
-        #fig.show()
-
-        self.axes = axes
-        self.tbox = tbox
-        self.fig = fig
-
-    def on_pick(self,event):
-        #print("HELLO")
-        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
-        #self.axes['theta_pro'].clear()
-        #self.axes['q_pro'].clear()
-        
-
-        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
-        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
-        keys_to_axes = {}
-        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
-
-        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
-        keys_to_axes['worldmap'] = 'worldmap'
-        
-        axes = self.axes
-        #nstatsview = self.nstatsview
-        #statsviewcmap = self.statsviewcmap
-        stations = self.frames['worldmap']['stations'].table
-
-
-        #print("p1")
-        current = event
-        artist = event.artist
-        
-        selkey = artist.get_label()
-        
-        #print(keys_to_axes)
-        
-        label = keys_to_axes[selkey]
-        #print("HELLO",selkey,label)
-
-        # # Get to know in which axes we are
-        # label = None
-        # for axeskey in axes.keys():
-        #     if event.inaxes == axes[axeskey]:
-        #         label = axeskey
-        #         
-
-        # cont, pos = None, None
-        
-        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
-        ind = event.ind
-        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
-        d = axes[label].collections[0]
-        #d.set_offset_position('data')
-        xy = d.get_offsets()
-        x, y =  xy[:,0],xy[:,1]
-        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
-
-        #print("p2")
-        if len(ind) > 0:
-            #print("p3")
-            pos = x[ind[0]], y[ind[0]]
-
-            #if label[:-1] == 'statsview':
-            #    #seltablestatsstdrel = self.seltablestatsstdrel
-            #    #seltablestatspct = self.seltablestatspct
-
-            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-            #    
-            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
-            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-            #    
-            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
-            #el
-            if (label == 'worldmap') or (label == 'worldmap_stations'):
-                self.hover_active = False
-                if (self.frames['worldmap']['STNID'] !=
-                    self.frames['profiles']['STNID']):
-                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
-                # so we just need to perform update_station
-                    self.update_station()
-            elif (label[:5] == 'stats'):
-
-                self.hover_active = False
-                if (self.frames['stats']['STNID'] !=
-                self.frames['profiles']['STNID']) or \
-                   (self.frames['stats']['current_record_chunk'] != 
-                    self.frames['profiles']['current_record_chunk']) or \
-                   (self.frames['stats']['current_record_index'] != 
-                    self.frames['profiles']['current_record_index']):
-
-
-
-                    for key in ['STNID','current_station','stations_iterator']: 
-                        self.frames['worldmap'][key] = self.frames['stats'][key] 
-
-                    for key in self.frames['stats'].keys():
-                        self.frames['profiles'][key] = self.frames['stats'][key]
-
-                    STNID = self.frames['profiles']['STNID']
-                    chunk = self.frames['profiles']['current_record_chunk']
-                    if 'current_station_file_ini' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_ini'].close()
-                    self.frames['profiles']['current_station_file_ini'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-                    if 'current_station_file_mod' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_mod'].close()
-                    self.frames['profiles']['current_station_file_mod'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_afternoon'].close()
-                    self.frames['profiles']['current_station_file_afternoon'] = \
-                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-                    # go to hovered record of current station
-                    self.frames['profiles']['records_iterator'] = \
-                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # ... and go to the record of the profile window (last one that
-                    # was picked by the user)
-                    found = False
-                    EOF = False
-                    while (not found) and (not EOF):
-                        try:
-                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
-                            #print("hello*")
-                            #print(self.frames['profiles']['current_record_index'])
-                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
-                               (index == self.frames['profiles']['current_record_index']) and \
-                               (STNID == self.frames['profiles']['STNID']):
-                                #print('found!')
-                                found = True
-                        except StopIteration:
-                            EOF = True
-                    if found:
-                        self.frames['stats']['current_record_mod'] = record
-                        self.frames['stats']['current_record_chunk'] = chunk
-                        self.frames['stats']['current_record_index'] = index
-                    # # for the profiles we make a distinct record iterator, so that the
-                    # # stats iterator can move independently
-                    # self.frames['profiles']['records_iterator'] = \
-                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # (self.frames['profiles']['STNID'] , \
-                    # self.frames['profiles']['current_record_index']) , \
-                    # self.frames['profiles']['current_record_mod'] = \
-                    #                 self.frames['profiles']['records_iterator'].__next__()
-
-
-                    # for the profiles we make a distinct record iterator, so that the
-                    # stats iterator can move independently
-
-                    self.update_record()
-
-
-
-    def on_plot_hover(self,event):
-        axes = self.axes
-        #print('h1')
-
-        # Get to know in which axes we are
-        label = None
-        for axeskey in axes.keys():
-            if event.inaxes == axes[axeskey]:
-                label = axeskey
-                
-        #print('h2')
-
-        cont, pos = None, None
-        #print (label)
-        
-        if label is not None:
-            if  ('data' in axes[label].__dict__.keys()) and \
-                (label in axes[label].data.keys()) and \
-                (axes[label].data[label] is not None):
-                
-                #print('h3')
-                cont, ind =  axes[label].data[label].contains(event)
-                selkey = axes[label].data[label].get_label()
-                if len(ind["ind"]) > 0:
-                    #print('h4')
-                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
-                    #print('pos',pos,selkey)
-
-
-                    #if label[:-1] == 'statsview':
-                    #    seltablestatsstdrel = self.seltablestatsstdrel
-                    #    seltablestatspct = self.seltablestatspct
-
-                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
-                    #    self.hover_active = True
-                    #    
-                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
-                    #    
-                    #el
-                    #print(label[:5])
-                    if (label[:5] == 'stats') or (label == 'times'):
-                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
-                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
-                        
-
-                        if label[:5] == 'stats':
-                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                            (self.frames['stats']['STNID'] ,
-                             self.frames['stats']['current_record_chunk'], 
-                             self.frames['stats']['current_record_index']) = \
-                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-                        # elif label[:5] == 'stats':
-                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
-                        #     (self.frames['stats']['STNID'] ,
-                        #      self.frames['stats']['current_record_chunk'], 
-                        #      self.frames['stats']['current_record_index']) = \
-                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-
-
-                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-                        
-                        # # TO TEST: should be removed, since it's is also done just below
-                        # self.frames['stats']['stations_iterator'] = \
-                        #     self.frames['worldmap']['stations_iterator'] 
-                
-                
-                        # self.goto_datetime_worldmap(
-                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-                        #     'after')
-
-
-                        # scrolling to the right station
-                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                        EOF = False
-                        found = False
-                        while (not found and not EOF):
-                            if (STNID == self.frames['stats']['STNID']):
-                                   found = True 
-                            if not found:
-                                try:
-                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                                except (StopIteration):
-                                    EOF = True
-                        if found:
-                        #    self.frames['stats']['STNID'] = STNID
-                            self.frames['stats']['current_station'] =  station
-
-                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
-                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
-
-
-                        # generate index of the current station
-                        self.frames['stats']['records_current_station_index'] = \
-                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                             == self.frames['stats']['STNID'])
-
-
-                        tab_suffixes = \
-                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            self.frames['stats']['records_current_station'+tab_suffix] = \
-                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-
-                        # go to hovered record of current station
-                        self.frames['stats']['records_iterator'] = \
-                                        records_iterator(self.frames['stats']['records_current_station_mod'])
-
-
-                        # ... and go to the record of the profile window (last one that
-                        # was picked by the user)
-                        found = False
-                        EOF = False
-                        while (not found) and (not EOF):
-                            try:
-                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                                #print("hello*")
-                                #print(self.frames['profiles']['current_record_index'])
-                                if (index == self.frames['stats']['current_record_index']) and \
-                                   (chunk == self.frames['stats']['current_record_chunk']) and \
-                                   (STNID == self.frames['stats']['STNID']):
-                                    #print('found!')
-                                    found = True
-                            except StopIteration:
-                                EOF = True
-                        if found:
-                            #print('h5')
-                            self.frames['stats']['current_record_mod'] = record
-                            self.frames['stats']['current_record_chunk'] = chunk
-                            self.frames['stats']['current_record_index'] = index
-
-                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
-                        tab_suffixes = \
-                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            #print(tab_suffix)
-                            #print(self.frames['stats']['records_current_station'+tab_suffix])
-                            self.frames['stats']['current_record'+tab_suffix] =  \
-                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                      (self.frames['stats']['STNID'] , \
-                                       self.frames['stats']['current_record_chunk'] , \
-                                       self.frames['stats']['current_record_index'])]
-
-
-                        self.hover_active = True
-                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                        # print('h13')
-                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
-                        #     self.goto_datetime_worldmap(
-                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                        #         'after')
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap',
-                        #                                           'profiles'])
-                        # else:
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap_stations',
-                        #                                           'profiles'])
-
-
-
-                    elif label in ['worldmap_stations','worldmap']:
-                        #print('h5')
-
-                        if (self.axes['worldmap'].lat is not None) and \
-                           (self.axes['worldmap'].lon is not None):
-
-
-                            #self.loading()
-                            self.fig.canvas.draw()
-                            self.fig.show()
-
-
-                            # get position of 
-                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
-                                                                 self.axes['worldmap'].lat[0]) + \
-                                           self.axes['worldmap'].lat[0],4)
-                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
-                                                                 self.axes['worldmap'].lon[0]) + \
-                                           self.axes['worldmap'].lon[0],4)
-                        
-                            stations = self.frames['worldmap']['stations'].table
-                            #print('h7')
-                        
-                            #reset stations iterator:
-                            # if 'stations_iterator' in self.frames['worldmap'].keys():
-                            #     self.frames['worldmap']['stations_iterator'].close()
-                            #     del(self.frames['worldmap']['stations_iterator'])
-                            # if 'stations_iterator' in self.frames['stats'].keys():
-                            #     self.frames['stats']['stations_iterator'].close()
-                            #     del(self.frames['stats']['stations_iterator'])
-                            self.frames['worldmap']['stations_iterator'] =\
-                               stations_iterator(self.frames['worldmap']['stations'])
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                            EOF = False
-                            found = False
-                            while (not found and not EOF):
-                                #print('h8',station.latitude,latmap)
-                                #print('h8',station.longitude,lonmap)
-                                if (round(station.latitude,3) == round(latmap,3)) and \
-                                    (round(station.longitude,3) == round(lonmap,3)):
-                                       found = True 
-                                if not found:
-                                    try:
-                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                                    except (StopIteration):
-                                        EOF = True
-                            if found:
-                                self.frames['worldmap']['STNID'] = STNID
-                                self.frames['worldmap']['current_station'] = \
-                                        station
-                        
-                            self.frames['stats']['stations_iterator'] = \
-                                self.frames['worldmap']['stations_iterator'] 
-                            #print('h8')
-                            # inherit station position for the stats frame...
-                            for key in self.frames['worldmap'].keys():
-                                self.frames['stats'][key] = self.frames['worldmap'][key]
-                                
-                            ## fetch records of current station...
-                            #self.frames['stats']['records_current_station_mod'] =\
-                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                            # ... and their indices
-                            self.frames['stats']['records_current_station_index'] = \
-                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                                     == \
-                                     self.frames['stats']['current_station'].name)
-
-
-                            tab_suffixes = \
-                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['records_current_station'+tab_suffix] = \
-                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-                            # ... create a record iterator ...
-                            #self.frames['stats']['records_iterator'].close()
-                            del(self.frames['stats']['records_iterator'])
-                            self.frames['stats']['records_iterator'] = \
-                                self.frames['stats']['records_current_station_mod'].iterrows()
-
-
-
-                        
-                            #print('h9')
-                            # ... and go to to the first record of the current station
-                            (self.frames['stats']['STNID'] , \
-                             self.frames['stats']['current_record_chunk'] , \
-                             self.frames['stats']['current_record_index']) , \
-                            self.frames['stats']['current_record_mod'] = \
-                                self.frames['stats']['records_iterator'].__next__()
-                        
-
-
-
-                            #print('h10')
-                            # cash the current record
-                            tab_suffixes = \
-                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['current_record'+tab_suffix] =  \
-                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                          (self.frames['stats']['STNID'] , \
-                                           self.frames['stats']['current_record_chunk'] , \
-                                           self.frames['stats']['current_record_index'])]
-
-                            #print('h11')
-                            
-                            self.hover_active = True
-                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                            #print('h13')
-
-                        
-
-            #if (stations is not None):
-            #    for iSTN,STN in stations.iterrows():
-            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
-            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
-
-        # self.fig.show()
- 
-        # we are hovering on nothing, so we are going back to the position of
-        # the profile sounding
-        if pos is None:
-            if self.hover_active == True:
-                #print('h1*')
-                
-                #self.loading()
-                # to do: reset stations iterators
-
-                # get station and record index from the current profile
-                for key in ['STNID', 'current_station']:
-                    self.frames['stats'][key] = self.frames['profiles'][key]
-
-                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
-                self.frames['stats']['current_station'] = \
-                        self.frames['profiles']['current_station']
-                #print('h3a*')
-                self.frames['stats']['records_current_station_mod'] = \
-                        self.frames['profiles']['records_current_station_mod']
-                #print('h3b*')
-
-                # the next lines recreate the records iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-
-                # reset stations iterator...
-                #self.frames['stats']['records_iterator'].close()
-                del(self.frames['stats']['records_iterator'])
-                self.frames['stats']['records_iterator'] = \
-                    self.frames['stats']['records_current_station_mod'].iterrows()
-                #print('h4*')
-
-                # ... and go to the record of the profile window (last one that
-                # was picked by the user)
-                found = False
-                EOF = False
-                while (not found) and (not EOF):
-                    try:
-                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                        #print("hello*")
-                        #print(self.frames['profiles']['current_record_index'])
-                        #print(self.frames['profiles']['STNID'])
-                        #print(STNID,index)
-                        if (index == self.frames['profiles']['current_record_index']) and \
-                            (chunk == self.frames['profiles']['current_record_chunk']) and \
-                            (STNID == self.frames['profiles']['STNID']):
-                            #print('found!')
-                            found = True
-                    except StopIteration:
-                        EOF = True
-                if found:
-                    #print('h5*')
-                    self.frames['stats']['current_record_mod'] = record
-                    self.frames['stats']['current_record_chunk'] = chunk
-                    self.frames['stats']['current_record_index'] = index
-
-                #print('h6*')
-
-
-
-                # # fetch records of current station...
-                # self.frames['stats']['records_current_station_mod'] =\
-                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                # ... and their indices
-                self.frames['stats']['records_current_station_index'] = \
-                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                         == \
-                         self.frames['stats']['current_station'].name)
-
-
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['records_current_station'+tab_suffix] = \
-                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-                
-
-                # cash the records of the current stations
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['current_record'+tab_suffix] =  \
-                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                              (self.frames['stats']['STNID'] , \
-                               self.frames['stats']['current_record_chunk'] , \
-                               self.frames['stats']['current_record_index'])]
-
-
-                # the next lines recreate the stations iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-                #print('h7*')
-
-                # reset the stations iterators
-                for framekey in ['stats','worldmap']:
-                    ##print(framekey)
-                    if 'stations_iterator' in self.frames[framekey]:
-                        #self.frames[framekey]['stations_iterator'].close()
-                        del(self.frames[framekey]['stations_iterator'])
-
-                self.frames['worldmap']['current_station'] = \
-                        self.frames['profiles']['current_station']
-
-                #recreate the stations iterator for the worldmap...
-                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-
-                # ... and go the position of the profile
-                #print('h8*')
-                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                EOF = False
-                found = False
-                while (not found and not EOF):
-                    if STNID == self.frames['profiles']['STNID'] :
-                        found = True 
-                    if not found:
-                        try:
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                        except (StopIteration):
-                            EOF = True
-                if found:
-                    self.frames['worldmap']['current_station'] = station
-                    self.frames['worldmap']['STNID'] = STNID
-                #print('h9*')
-                self.frames['stats']['stations_iterator'] = \
-                    self.frames['worldmap']['stations_iterator'] 
-
-                # the stats window now inherits the current station from the
-                # worldmap
-                for key in ['STNID','current_station','stations_iterator']: 
-                    self.frames['stats'][key] = self.frames['worldmap'][key] 
-                #print('h10*')
-
-                # # we now only need inherit station position and go to first record
-                # for key in self.frames['worldmap'].keys():
-                #     self.frames['stats'][key] = self.frames['worldmap'][key]
-
-                # self.frames['stats']['records_current_station'] =\
-                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
-
-                # #print(self.frames['stats']['records_current_station'])
-                # self.frames['stats']['records_iterator'] = \
-                #                 self.frames['stats']['records_current_station'].iterrows()
-                # (self.frames['stats']['STNID'] , \
-                # self.frames['stats']['current_record_index']) , \
-                # self.frames['stats']['current_record_mod'] = \
-                #                 self.frames['stats']['records_iterator'].__next__()
-                
-
-
-
-
-
-
-                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
-                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
-                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
-                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-                self.hover_active = False
-                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
-    # def loading(self):
-    #     self.tbox['loading'].set_text('Loading...')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-    #     sleep(0.1)
-    # def ready(self):
-    #     self.tbox['loading'].set_text('Ready')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-
-
-
diff --git a/build/lib/class4gl/model.py b/build/lib/class4gl/model.py
deleted file mode 100644
index 8760411..0000000
--- a/build/lib/class4gl/model.py
+++ /dev/null
@@ -1,2214 +0,0 @@
-# 
-# CLASS
-# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
-# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
-# Copyright (c) 2011-2015 Chiel van Heerwaarden
-# Copyright (c) 2011-2015 Bart van Stratum
-# Copyright (c) 2011-2015 Kees van den Dries
-# 
-# This file is part of CLASS
-# 
-# CLASS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published bygamma
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-# 
-# CLASS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with CLASS.  If not, see .
-#
-
-import copy as cp
-import numpy as np
-import sys
-import warnings
-import pandas as pd
-from ribtol_hw import zeta_hs2 , funcsche
-import logging
-#from SkewT.thermodynamics import Density
-#import ribtol
-
-grav = 9.81
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-
-def qsat(T,p):
-    return 0.622 * esat(T) / p
-
-
-def ribtol(Rib, zsl, z0m, z0h): 
-    Rib = np.float64(Rib)
-    zsl = np.float64(zsl)
-    z0m = np.float64(z0m)
-    z0h = np.float64(z0h)
-    #print(Rib,zsl,z0m,z0h)
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    #print(Rib,zsl,z0m,z0h)
-    while (abs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-        #print(L,fx/fxdif)
-        if(abs(L) > 1e12):
-            break
-
-    return L
-  
-def psim(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psim = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-  
-def psih(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * np.log( (1. + x*x) / 2.)
-        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
- 
-class model:
-    def __init__(self, model_input = None,debug_level=None):
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        self.logger = logging.getLogger('model')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        """ initialize the different components of the model """ 
-
-        if model_input is not None:
-            # class4gl style input
-            if 'pars' in model_input.__dict__.keys():
-
-                # we make a reference to the full input first, so we can dump it
-                # afterwards
-                self.input_c4gl = model_input
-
-                # we copy the regular parameters first. We keep the classical input
-                # format as self.input so that we don't have to change the entire
-                # model code.
-                self.input = cp.deepcopy(model_input.pars)
-
-                # we copy other sections we are interested in, such as profile
-                # data, and store it also under input
-
-                # I know we mess up a bit the structure of the class4gl_input, but
-                # we will make it clean again at the time of dumping data
-
-                # So here, we copy the profile data into self.input
-                # 1. Air circulation data 
-                if 'sw_ac' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ac']:
-                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
-                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
-
-                    # correct pressure of levels according to surface pressure
-                    # error (so that interpolation is done in a consistent way)
-
-                    p_e = self.input.Ps - self.input.sp
-                    for irow in self.input.air_ac.index[::-1]:
-                       self.input.air_ac.p.iloc[irow] =\
-                        self.input.air_ac.p.iloc[irow] + p_e
-                       p_e = p_e -\
-                       (self.input.air_ac.p.iloc[irow]+p_e)/\
-                        self.input.air_ac.p.iloc[irow] *\
-                        self.input.air_ac.delpdgrav.iloc[irow]*grav
-
-
-
-                # 2. Air circulation data 
-                if 'sw_ap' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ap']:
-                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
-
-            # standard class input
-            else:
-                self.input = cp.deepcopy(model_input)
-
-    def load_yaml_dict(self,yaml_dict):
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                for keydata,value in data.items():
-                    self.__dict__[keydata] = value
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            #elif key == 'sources':
-            #    self.__dict__[key] = data
-            elif key == 'out':
-                # lets convert it to a list of dictionaries
-                dictouttemp = pd.DataFrame(data).to_dict('list')
-            else: 
-                 warnings.warn("Key '"+key+"' is be implemented.")
-            #     self.__dict__[key] = data
-
-
-        self.tsteps = len(dictouttemp['h'])
-        self.out = model_output(self.tsteps)
-        for keydictouttemp in dictouttemp.keys():
-            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
-
-
-  
-    def run(self):
-        # initialize model variables
-        self.init()
-  
-        # time integrate model 
-        #for self.t in range(self.tsteps):
-        while self.t < self.tsteps:
-          
-            # time integrate components
-            self.timestep()
-  
-        # delete unnecessary variables from memory
-        self.exitmodel()
-    
-    def init(self):
-        # assign variables from input data
-        # initialize constants
-        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
-        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        self.rho        = 1.2                   # density of air [kg m-3]
-        self.k          = 0.4                   # Von Karman constant [-]
-        self.g          = 9.81                  # gravity acceleration [m s-2]
-        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-        self.bolz       = 5.67e-8               # Bolzman constant [-]
-        self.rhow       = 1000.                 # density of water [kg m-3]
-        self.S0         = 1368.                 # solar constant [W m-2]
-
-        # A-Gs constants and settings
-        # Plant type:       -C3-     -C4-
-        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
-        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
-        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
-        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
-        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
-        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
-        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
-        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
-        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
-        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
-        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
-
-        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
-        self.mair       =  28.9;                # molecular weight air [g mol -1]
-        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
-
-        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
-        self.wmax       =  0.55;                # upper reference value soil water [-]
-        self.wmin       =  0.005;               # lower reference value soil water [-]
-        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
-        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
-
-        # Read switches
-        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
-        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
-        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
-        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
-        self.sw_sl      = self.input.sw_sl      # surface layer switch
-        self.sw_rad     = self.input.sw_rad     # radiation switch
-        self.sw_ls      = self.input.sw_ls      # land surface switch
-        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
-        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
-
-        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
-        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
-        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-  
-        # initialize mixed-layer
-        self.h          = self.input.h          # initial ABL height [m]
-        self.Ps         = self.input.Ps         # surface pressure [Pa]
-        self.sp         = self.input.sp         # This is also surface pressure
-                                                #but derived from the global data [Pa]
-        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
-        self.ws         = None                  # large-scale vertical velocity [m s-1]
-        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
-        self.we         = -1.                   # entrainment velocity [m s-1]
-       
-         # Temperature 
-        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
-        
-        
-        self.substep    = False
-        self.substeps   = 0
-
-
-
-        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
-        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
-        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
-        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
-        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
- 
-        self.wstar      = 0.                    # convective velocity scale [m s-1]
- 
-        # 2m diagnostic variables 
-        self.T2m        = None                  # 2m temperature [K]
-        self.q2m        = None                  # 2m specific humidity [kg kg-1]
-        self.e2m        = None                  # 2m vapor pressure [Pa]
-        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
-        self.u2m        = None                  # 2m u-wind [m s-1]
-        self.v2m        = None                  # 2m v-wind [m s-1]
- 
-        # Surface variables 
-        self.thetasurf  = self.input.theta      # surface potential temperature [K]
-        self.thetavsurf = None                  # surface virtual potential temperature [K]
-        self.qsurf      = None                  # surface specific humidity [g kg-1]
-
-        # Mixed-layer top variables
-        self.P_h        = None                  # Mixed-layer top pressure [pa]
-        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
-        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
-        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
-        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
-        self.dz_h       = None                  # Transition layer thickness [-]
-        self.lcl        = None                  # Lifting condensation level [m]
-
-        # Virtual temperatures and fluxes
-        self.thetav     = None                  # initial mixed-layer potential temperature [K]
-        self.dthetav    = None                  # initial virtual temperature jump at h [K]
-        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
-        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
-       
-        
-        
-        
-        
-        
-        # Moisture 
-        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
-
-        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
-        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
-        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
-  
-        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
-        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
-        self.e          = None                  # mixed-layer vapor pressure [Pa]
-        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
-        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
-      
-        
-        
-        # CO2
-        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
-        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
-        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
-        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
-        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
-        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
-        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
-        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
-        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
-        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
-       
-        # Wind 
-        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
-        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
-        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = self.input.advu       # advection of u-wind [m s-2]
-        
-        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
-        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = self.input.advv       # advection of v-wind [m s-2]
-         
-  # BEGIN -- HW 20170606
-        # z-coordinate for vertical profiles of stratification above the mixed-layer height
-
-        if self.sw_ac:
-        # this is the data frame with the grided profile on the L60 grid
-        # (subsidence, and advection) 
-            self.air_ac      = self.input.air_ac  # full level air circulation
-                                                  # forcing
-            # self.air_ach     = self.input.air_ach # half level air circulation
-            #                                       # forcing
-            
-
-        if self.sw_ap:
-        # this is the data frame with the fitted profile (including HAGL,
-        # THTA,WSPD, SNDU,WNDV PRES ...)
-            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
-
-            # just for legacy reasons...
-            if 'z' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
-            if 'p' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
-
-            indexh = np.where(self.air_ap.z.values == self.h)
-            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
-                raise ValueError("Error input profile consistency: mixed- \
-                                 layer height needs to be equal to the second \
-                                 and third \
-                                 level of the vertical profile input!")
-            # initialize q from its profile when available
-            p_old = self.Ps
-            p_new = self.air_ap.p[indexh[0][0]]
-            
-            if ((p_old is not None) & (p_old != p_new)):
-                print("Warning: Ps input was provided ("+str(p_old)+\
-                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
-                    +str(p_new)+"Pa).")
-                                    
-            self.Ps = p_new
-            # these variables/namings are more convenient to work with in the code
-            # we will update the original variables afterwards
-            #self.air_ap['q'] = self.air_ap.QABS/1000.
-
-            self.air_ap = \
-                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
-            # we require the temperature fields, since we need to consider
-            # advection
-            # if self.sw_ac:
-            #     #self.air_ap['theta'] = self.air_ap['t'] *
-
-            #     # we consider self.sp in case of air-circulation input (for
-            #     # consistence)
-            #     self.air_ap['t'] = \
-            #                 self.air_ap.theta *  \
-            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
-            # else:
-            # we consider self.Ps in case of balloon input only 
-            self.air_ap = self.air_ap.assign(t = lambda x: \
-                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
-
-            #self.air_ap['theta'] = self.air_ap.THTA
-            if 'u' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
-            if 'v' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
-
-            for var in ['theta','q','u','v']:
-
-                
-                if self.air_ap[var][1] != self.air_ap[var][0]:
-                    raise ValueError("Error input profile consistency: two \
-                                     lowest profile levels for "+var+" should \
-                                     be equal.")
-                
-                # initialize the value from its profile when available
-                value_old = self.__dict__[var]
-                value_new = self.air_ap[var][indexh[0][0]]
-                
-                if ((value_old is not None) & (value_old != value_new)):
-                    warnings.warn("Warning:  input was provided \
-                                     ("+str(value_old)+ "kg kg-1), \
-                                     but it is now overwritten by the first \
-                                     level (index 0) of air_ap]var\ which is \
-                                     different (" +str(value_new)+"K).")
-                                        
-                self.__dict__[var] = value_new
-
-                # make a profile of the stratification 
-                # please note that the stratification between z_pro[i] and
-                # z_pro[i+1] is given by air_ap.GTHT[i]
-
-                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
-                # np.gradient(self.z_pro)
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
-
-
-                self.__dict__['gamma'+var] = \
-                    self.air_ap['gamma'+var][np.where(self.h >= \
-                                                     self.air_ap.z)[0][-1]]
-
-
-
-        # the variable p_pro is just for diagnosis of lifted index
-            
-            
-
-            # input Ph is wrong, so we correct it according to hydrostatic equation
-            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-
-            #if self.sw_ac:
-                # note that we use sp as surface pressure, which is determined
-                # from era-interim instead of the observations. This is to
-                # avoid possible failure of the interpolation routine
-                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
-                #                          + \
-                #                          list(self.air_ap.p[3:]))
-
-            # else:
-                # in the other case, it is updated at the time of calculting
-                # the statistics 
-
-# END -- HW 20170606      
-        #print(self.air_ap)
-
-        if self.sw_ac and not self.sw_ap:
-            raise ValueError("air circulation switch only possible when air \
-                             profiles are given")
-        
-        if self.sw_ac:
-
-            # # # we comment this out, because subsidence is calculated
-            # according to advection
-            # #interpolate subsidence towards the air_ap height coordinate
-            # self.air_ap['w'] = np.interp(self.air_ap.p,\
-            #                               self.air_ac.p,\
-            #                               self.air_ac.w) 
-            # #subsidence at the mixed-layer top
-            # self.w = self.air_ap.w[1]
-        
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-                # in case we didn't find any points, we just take the lowest one.
-                # actually, this can happen if ERA-INTERIM pressure levels are
-                # inconsistent with 
-                if in_ml.sum() == 0:
-                    warnings.warn(" no circulation points in the mixed layer \
-                                  found. We just take the bottom one.")
-                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-
-                for var in ['t','q','u','v']:
-    
-                   # calculation of the advection variables for the mixed layer
-                   # we weight by the hydrostatic thickness of each layer and
-                   # divide by the total thickness
-                   self.__dict__['adv'+var] = \
-                            ((self.air_ac['adv'+var+'_x'][in_ml] \
-                             + \
-                             self.air_ac['adv'+var+'_y'][in_ml])* \
-                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                            self.air_ac['delpdgrav'][in_ml].sum()
-
-                   # calculation of the advection variables for the profile above
-                   # (lowest 3 values are not used by class)
-                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
-                   self.air_ap['adv'+var] = \
-                           np.interp(self.air_ap.p,\
-                                     self.air_ac.p,\
-                                     self.air_ac['adv'+var+'_x']) \
-                           + \
-                           np.interp(self.air_ap.p, \
-                                       self.air_ac.p, \
-                                       self.air_ac['adv'+var+'_y'])
-
-                # as an approximation, we consider that advection of theta in the
-                # mixed layer is equal to advection of t. This is a sufficient
-                # approximation since theta and t are very similar at the surface
-                # pressure.
-                self.__dict__['advtheta'] = self.__dict__['advt']
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # self.wrho = np.interp(self.P_h,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) 
-            # self.ws   = self.air_ap.w.iloc[1]
-
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                self.air_ap = self.air_ap.assign(wp = 0.)
-                self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                              self.air_ac.p, \
-                                              self.air_ac['wp'])
-                self.air_ap = self.air_ap.assign(R = 0.)
-                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                     self.Rv*self.air_ap.q)
-                self.air_ap = self.air_ap.assign(rho = 0.)
-                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-                
-                self.air_ap = self.air_ap.assign(w = 0.)
-                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-                #print('hello w ini')
-
-                # Note: in case of sw_ac is False, we update it from prescribed
-                # divergence
-                self.ws   = self.air_ap.w[1]
-
-                # self.ws   = self.wrho/self.rho
-                # self.ws   = self.wrho/(self.P_h/ \
-                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
-                #                         self.theta) # this should be T!!!
-
-                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-                #                         + \
-                #                         self.air_ac['divU_y'][in_ml])* \
-                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                #             self.air_ac['delpdgrav'][in_ml].sum() \
-        
-
-        # Tendencies 
-        self.htend      = None                  # tendency of CBL [m s-1]
-        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
-        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
-        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
-        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
-        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
-        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
-        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
-        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
-        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
-        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
-        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
-  
-        # initialize surface layer
-        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
-        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
-        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
-        self.z0m        = self.input.z0m        # roughness length for momentum [m]
-        self.z0h        = self.input.z0h        # roughness length for scalars [m]
-        self.Cm         = 1e12                  # drag coefficient for momentum [-]
-        self.Cs         = 1e12                  # drag coefficient for scalars [-]
-        self.L          = None                  # Obukhov length [m]
-        self.Rib        = None                  # bulk Richardson number [-]
-        self.ra         = None                  # aerodynamic resistance [s m-1]
-  
-        # initialize radiation
-        self.lat        = self.input.lat        # latitude [deg]
-        #self.fc         = self.input.fc         # coriolis parameter [s-1]
-        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
-        self.lon        = self.input.lon        # longitude [deg]
-        self.doy        = self.input.doy        # day of the year [-]
-        self.tstart     = self.input.tstart     # time of the day [-]
-        self.cc         = self.input.cc         # cloud cover fraction [-]
-        self.Swin       = None                  # incoming short wave radiation [W m-2]
-        self.Swout      = None                  # outgoing short wave radiation [W m-2]
-        self.Lwin       = None                  # incoming long wave radiation [W m-2]
-        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
-        self.Q          = self.input.Q          # net radiation [W m-2]
-        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
-  
-        # initialize land surface
-        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
-        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
-        self.T2         = self.input.T2         # temperature deeper soil layer [K]
-                           
-        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
-        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
-        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
-        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
-                           
-        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
-        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
-        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
-                           
-        self.C1sat      = self.input.C1sat      
-        self.C2ref      = self.input.C2ref      
-
-        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
-        
-        self.LAI        = self.input.LAI        # leaf area index [-]
-        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
-        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = self.input.alpha      # surface albedo [-]
-  
-        self.rs         = 1.e6                  # resistance transpiration [s m-1]
-        self.rssoil     = 1.e6                  # resistance soil [s m-1]
-                           
-        self.Ts         = self.input.Ts         # surface temperature [K]
-                           
-        self.cveg       = self.input.cveg       # vegetation fraction [-]
-        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
-        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
-        self.cliq       = None                  # wet fraction [-]
-                          
-        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
-  
-        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
-        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
-        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
-  
-        self.H          = None                  # sensible heat flux [W m-2]
-        self.LE         = None                  # evapotranspiration [W m-2]
-        self.LEliq      = None                  # open water evaporation [W m-2]
-        self.LEveg      = None                  # transpiration [W m-2]
-        self.LEsoil     = None                  # soil evaporation [W m-2]
-        self.LEpot      = None                  # potential evaporation [W m-2]
-        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
-        self.G          = None                  # ground heat flux [W m-2]
-
-        # initialize A-Gs surface scheme
-        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
-
-        # initialize cumulus parameterization
-        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
-        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
-        self.ac         = 0.                    # Cloud core fraction [-]
-        self.M          = 0.                    # Cloud core mass flux [m s-1] 
-        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
-  
-        # initialize time variables
-        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
-        self.dt     = self.input.dt
-        self.dtcur      = self.dt
-        self.firsttime = True
-        self.t      = 0
- 
-        # Some sanity checks for valid input
-        if (self.c_beta is None): 
-            self.c_beta = 0                     # Zero curvature; linear response
-        assert(self.c_beta >= 0 or self.c_beta <= 1)
-
-        # initialize output
-        self.out = model_output(self.tsteps)
- 
-        self.statistics()
-  
-        # calculate initial diagnostic variables
-        if(self.sw_rad):
-            self.run_radiation()
- 
-        if(self.sw_sl):
-            for i in range(10): 
-                self.run_surface_layer()
-  
-        if(self.sw_ls):
-            self.run_land_surface()
-
-        if(self.sw_cu):
-            self.run_mixed_layer()
-            self.run_cumulus()
-        
-        if(self.sw_ml):
-            self.run_mixed_layer()
-
-    def timestep(self):
-
-        self.dtmax = +np.inf
-        self.logger.debug('before stats') 
-        self.statistics()
-
-        # run radiation model
-        self.logger.debug('before rad') 
-        if(self.sw_rad):
-            self.run_radiation()
-  
-        # run surface layer model
-        if(self.sw_sl):
-            self.logger.debug('before surface layer') 
-            self.run_surface_layer()
-        
-        # run land surface model
-        if(self.sw_ls):
-            self.logger.debug('before land surface') 
-            self.run_land_surface()
- 
-        # run cumulus parameterization
-        if(self.sw_cu):
-            self.logger.debug('before cumulus') 
-            self.run_cumulus()
-   
-        self.logger.debug('before mixed layer') 
-        # run mixed-layer model
-        if(self.sw_ml):
-            self.run_mixed_layer()
-        self.logger.debug('after mixed layer') 
- 
-        #get first profile data point above mixed layer
-        if self.sw_ap:
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                # here we correct for the fact that the upper profile also
-                # shifts in the vertical.
-
-                diffhtend = self.htend - self.air_ap.w[zidx_first]
-                if diffhtend > 0:
-                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            else:
-                if self.htend > 0:
-                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            #print(self.h,zidx_first,self.ws,self.air_ap.z)
-
-        
-        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
-        self.logger.debug('before store') 
-        self.substep =  (self.dtcur > self.dtmax)
-        if self.substep:
-            dtnext = self.dtcur - self.dtmax
-            self.dtcur = self.dtmax
-
-        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
-
-        # HW: this will be done multiple times in case of a substep is needed
-        # store output before time integration
-        if self.firsttime:
-            self.store()
-  
-        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
-        # time integrate land surface model
-        if(self.sw_ls):
-            self.integrate_land_surface()
-        self.logger.debug('before integrate mixed layer') 
-        # time integrate mixed-layer model
-        if(self.sw_ml):
-            self.integrate_mixed_layer() 
-        self.logger.debug('after integrate mixed layer') 
-        if self.substep:
-            self.dtcur = dtnext
-            self.firsttime = False
-            self.substeps += 1
-        else:
-            self.dtcur = self.dt
-            self.t += 1 
-            self.firsttime = True
-            self.substeps = 0
-        self.logger.debug('going to next step')
-        
-        
-  
-    def statistics(self):
-        # Calculate virtual temperatures 
-        self.thetav   = self.theta  + 0.61 * self.theta * self.q
-        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
-        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
-        # Mixed-layer top properties
-        self.P_h    = self.Ps - self.rho * self.g * self.h
-        # else:
-            # in the other case, it is updated at the time that the profile is
-            # updated (and at the initialization
-
-        self.T_h    = self.theta - self.g/self.cp * self.h
-
-        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
-        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
-
-        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
-
-        # Find lifting condensation level iteratively
-        if(self.t == 0):
-            self.lcl = self.h
-            RHlcl = 0.5
-        else:
-            RHlcl = 0.9998 
-
-        itmax = 30
-        it = 0
-        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it<itmax):
-            self.lcl    += (1.-RHlcl)*1000.
-            p_lcl        = self.Ps - self.rho * self.g * self.lcl
-            T_lcl        = self.theta - self.g/self.cp * self.lcl
-            RHlcl        = self.q / qsat(T_lcl, p_lcl)
-            it          += 1
-
-        if(it == itmax):
-            print("LCL calculation not converged!!")
-
-    def run_cumulus(self):
-        # Calculate mixed-layer top relative humidity variance (original text lost
-        # in extraction; reconstructed from the upstream CLASS model -- verify
-        # against the repository history)
-        if(self.wstar > 0):
-            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
-            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
-        else:
-            self.q2_h   = 0.
-            self.CO22_h = 0.
-
-        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
-        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
-        self.M      = self.ac * self.wstar
-        self.wqM    = self.M * self.q2_h**0.5
-
-        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
-        if(self.dCO2 < 0):
-            self.wCO2M  = self.M * self.CO22_h**0.5
-        else:
-            self.wCO2M  = 0.
-
def run_mixed_layer(self):
    """Compute all mixed-layer (slab) tendencies for the current (sub)step.

    Sets, among others: uw/vw (when no surface-layer scheme is active),
    the subsidence velocity ws, the cloud-top radiative growth wf, the
    convective velocity scale wstar, the entrainment fluxes (wthetae,
    wqe, wCO2e) and the tendencies (htend, thetatend, dthetatend, qtend,
    dqtend, CO2tend, dCO2tend, (d)utend/(d)vtend, dztend) that
    integrate_mixed_layer() later applies over self.dtcur.

    When the preliminary potential-temperature jump would drop below
    0.1 K, entrainment is disabled for this (sub)timestep and the
    simplified formulation of Wouters et al., 2013 (section 2.2.1) is
    used instead; self.dtmax may be reduced so the jump limit is reached
    exactly at the end of the (sub)step.
    """
    if(not self.sw_sl):
        # decompose ustar along the wind components
        self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
        self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)



    # calculate large-scale vertical velocity (subsidence)
    if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
        self.ws = -self.divU * self.h
    # else:
    #     in case the air circulation switch is turned on, subsidence is
    #     calculated from the circulate profile at the initialization and
    #     in the integrate_mixed_layer routine

    # calculate compensation to fix the free troposphere in case of subsidence 
    if(self.sw_fixft):
        w_th_ft  = self.gammatheta * self.ws
        w_q_ft   = self.gammaq     * self.ws
        w_CO2_ft = self.gammaCO2   * self.ws 
    else:
        w_th_ft  = 0.
        w_q_ft   = 0.
        w_CO2_ft = 0. 

    # calculate mixed-layer growth due to cloud top radiative divergence
    self.wf = self.dFz / (self.rho * self.cp * self.dtheta)

    # calculate convective velocity scale w* 
    if(self.wthetav > 0.):
        self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
    else:
        self.wstar  = 1e-6;

    # Virtual heat entrainment flux 
    self.wthetave    = -self.beta * self.wthetav 

    # compute mixed-layer tendencies
    if(self.sw_shearwe):
        self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
    else:
        self.we    = -self.wthetave / self.dthetav
    # Don't allow boundary layer shrinking if wtheta < 0 
    if(self.we < 0):
        self.we = 0.

    # Calculate entrainment fluxes
    self.wthetae     = -self.we * self.dtheta
    self.wqe         = -self.we * self.dq
    self.wCO2e       = -self.we * self.dCO2

    # preliminary tendencies assuming entrainment stays active
    htend_pre       = self.we + self.ws + self.wf - self.M

    #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
    thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta


    #print('thetatend_pre',thetatend_pre)

    #preliminary boundary-layer top chenage
    #htend_pre = self.we + self.ws + self.wf - self.M
    #preliminary change in temperature jump
    dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
                          thetatend_pre + w_th_ft

    dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
    l_entrainment = True

    # guard against the temperature jump collapsing below its 0.1 K floor
    if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
        l_entrainment = False
        warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          "Warning! temperature jump is at the lower limit \
                          and is not growing: entrainment is disabled for this (sub)timestep.") 
    elif dtheta_pre < 0.1:
        dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
        l_entrainment = True
        warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          " Warning! Potential temperature jump at mixed- \
                          layer height would become too low limiting timestep \
                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
        self.dtmax = min(self.dtmax,dtmax_new)
        warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          "next subtimestep, entrainment will be disabled")
        #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 



    # when entrainment is disabled, we just use the simplified formulation
    # as in Wouters et al., 2013 (section 2.2.1)
    # (l_entrainment acts as a 1/0 blend factor between the two forms)
    self.dthetatend = l_entrainment*dthetatend_pre + \
                        (1.-l_entrainment)*0.
    self.thetatend = l_entrainment*thetatend_pre + \
                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
    self.htend = l_entrainment*htend_pre + \
                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
    #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
    #stop


    self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
    self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2


    # self.qtend = l_entrainment*qtend_pre + \
    #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
    # self.CO2tend = l_entrainment*CO2tend_pre + \
    #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)



    #     # part of the timestep for which the temperature mixed-layer jump
    #     # was changing, and for which entrainment took place. For the other
    #     # part, we don't assume entrainment anymore, and we use the
    #     # simplified formulation  of Wouters et al., 2013

    #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
    #   
    #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
    #                      self.dthetatend + w_th_ft) + \
    #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
    #     self.htend = fac*self.htend + \
    #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
    #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
    #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)

    #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta

    # else:
    #     #self.htend = htend_pre
    #     self.dthetatend = dthetatend_pre
    #     self.thetatend = thetatend_pre

    self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
    self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft

    # assume u + du = ug, so ug - u = du
    if(self.sw_wind):
        self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
        self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv

        self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
        self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend

    # tendency of the transition layer thickness
    if(self.ac > 0 or self.lcl - self.h < 300):
        self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
    else:
        self.dztend = 0.
-   
def integrate_mixed_layer(self):
    """Advance the mixed-layer state over the current (sub)timestep.

    Applies the tendencies computed in run_mixed_layer() with an explicit
    Euler step over self.dtcur. When the air-profile option (sw_ap) and/or
    the air-circulation option (sw_ac) is active it additionally:
      * advects the upper-air profile air_ap and subsides it with the
        profile vertical wind ('w' in sw_ac),
      * rebuilds the lowest three air_ap rows from the new mixed-layer
        state (surface, h, h + jump),
      * coarsens the profile so the theta lapse rate stays above a lower
        limit (numerical-stability workaround, see HW comment below),
      * refreshes the advection terms, the subsidence ws and the gamma
        (lapse-rate) values used by the next timestep.

    NOTE(review): as the warning comment below states, sw_ac is assumed
    to always come together with sw_ap.
    """
    # set values previous time step
    h0      = self.h

    theta0  = self.theta
    dtheta0 = self.dtheta
    q0      = self.q
    dq0     = self.dq
    CO20    = self.CO2
    dCO20   = self.dCO2

    u0      = self.u
    du0     = self.du
    v0      = self.v
    dv0     = self.dv

    dz0     = self.dz_h

    # integrate mixed-layer equations



# END -- HW 20170606        
    self.h        = h0      + self.dtcur * self.htend
    # print(self.h,self.htend)
    # stop
    self.theta    = theta0  + self.dtcur * self.thetatend
    #print(dtheta0,self.dtcur,self.dthetatend)
    self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
    self.q        = q0      + self.dtcur * self.qtend
    self.dq       = dq0     + self.dtcur * self.dqtend
    self.CO2      = CO20    + self.dtcur * self.CO2tend
    self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
    self.dz_h     = dz0     + self.dtcur * self.dztend

    # Limit dz to minimal value (note: dz0 is reused here as the 50 m floor)
    dz0 = 50
    if(self.dz_h < dz0):
        self.dz_h = dz0 

    if(self.sw_wind):
        self.u        = u0      + self.dtcur * self.utend
        self.du       = du0     + self.dtcur * self.dutend
        self.v        = v0      + self.dtcur * self.vtend
        self.dv       = dv0     + self.dtcur * self.dvtend

    if (self.sw_ac is not None) and ('adv' in self.sw_ac):

        for var in ['t','q','u','v']:
            #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):

        # take into account advection for the whole profile

            self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]

        var = 'z'
        #print(self.air_ap[var])
            #     print(self.air_ap['adv'+var])




        #moving the profile vertically according to the vertical wind
            #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):


        # air_apvarold = pd.Series(np.array(self.air_ap.z))
        # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
        # stop


            # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
            # self.ws = np.interp(self.h , self.z_pro,self.w_pro)

        #As t is updated, we also need to recalculate theta (and R)
        self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                             self.Rv*self.air_ap.q)

        # air_aptheta_old = pd.Series(self.air_ap['theta'])
        self.air_ap['theta'] = \
                    self.air_ap.t * \
                    (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
    if (self.sw_ac is not None) and ('w' in self.sw_ac):
        # subside every profile level above the current mixed-layer top
        zidx_first = np.where(self.air_ap.z > self.h)[0][0]
        self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
                                         self.dtcur * self.air_ap.w[zidx_first:]

#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
#            print(self.t, self.dtcur,self.dt,self.htend)

        # # the pressure levels of the profiles are recalculated according to
        # # there new height (after subsidence)
        # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
        #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
        #         * self.dtcur *  self.air_ap.w[zidx_first:]

        # pressure tendency from the (pressure) vertical velocity wp
        self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
                    self.dtcur * self.air_ap.wp[zidx_first:]

        #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
    # note that theta and q itself are updatet by class itself


    if self.sw_ap:
        # Just for model consistency preservation purposes, we set the
        # theta variables of the mixed-layer to nan values, since the
        # mixed-layer values should overwritte by the mixed-layer
        # calculations of class.
        self.air_ap['theta'][0:3] = np.nan 
        self.air_ap['p'][0:3] = np.nan 
        self.air_ap['q'][0:3] = np.nan 
        self.air_ap['u'][0:3] = np.nan 
        self.air_ap['v'][0:3] = np.nan 
        self.air_ap['t'][0:3] = np.nan 
        self.air_ap['z'][0:3] = np.nan 

        # Update the vertical profiles: 
        #   - new mixed layer properties( h, theta, q ...)
        #   - any data points below the new ixed-layer height are removed

        # Three data points at the bottom that describe the mixed-layer
        # properties
        air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
                                       # columns as air_ap
        # air_ap_head['z'].iloc[0] = 2.
        # air_ap_head['z'].iloc[1] = self.__dict__['h']
        # air_ap_head['z'].iloc[2] = self.__dict__['h']
        air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
                        [2.,self.__dict__['h'],self.__dict__['h']]
        for var in ['theta','q','u','v']:

            air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
                        [self.__dict__[var], \
                         self.__dict__[var], \
                         self.__dict__[var] + self.__dict__['d'+var]]

        #print(self.air_ap)

        # This is the remaining profile considering the remaining
        # datapoints above the mixed layer height
        air_ap_tail = self.air_ap.iloc[3:]
        air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]

        # print('h',self.h)
        # # only select samples monotonically increasing with height
        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        # air_ap_tail = pd.DataFrame()
        # theta_low = self.theta
        # z_low =     self.h
        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        # for ibottom in range(1,len(air_ap_tail_orig)):
        #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)




        # make theta increase strong enough to avoid numerical
        # instability
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        theta_low = self.theta
        z_low =     self.h
        ibottom = 0
        itop = 0
        # print(air_ap_tail_orig)
        # stop

        # HW: this is the lower limit that we use for gammatheta, which is
        # there to avoid model crashes. Besides on this limit, the upper
        # air profile is modified in a way that is still conserves total
        # quantities of moisture and temperature. The limit is set by trial
        # and error. The numerics behind the crash should be investigated
        # so that a cleaner solution can be provided.
        gammatheta_lower_limit = 0.002
        # pool consecutive profile points into averages until the theta
        # lapse rate of each pooled segment exceeds the lower limit
        while ((itop in range(0,1)) or (itop != ibottom)):
            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
            if (
                #(z_mean > (z_low+0.2)) and \
                #(theta_mean > (theta_low+0.02) ) and \
                (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
              (itop >= (len(air_ap_tail_orig)-1)) \
               :

                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
                ibottom = itop+1
                theta_low = air_ap_tail.theta.iloc[-1]
                z_low =     air_ap_tail.z.iloc[-1]


            itop +=1
            # elif  (itop > len(air_ap_tail_orig)-10):
            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
            #print(itop,ibottom)

        if itop > 1:
                warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
                          "Warning! Temperature profile was too steep. \
                                  Modifying profile: "+ \
                                  str(itop - 1)+ " measurements were dropped \
                                  and replaced with its average \
                                  Modifying profile. \
                                  mean with next profile point(s).") 


        self.air_ap = pd.concat((air_ap_head,\
                                     air_ap_tail,\
                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
                                                                      axis=1)

        if  self.sw_ac:
            qvalues = \
                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]

            self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
            self.P_h    = self.Ps - self.rho * self.g * self.h
            self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
                        [self.Ps,  self.P_h, self.P_h-0.1]

            self.air_ap.t = \
                            self.air_ap.theta * \
                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)


    # WARNING: self.sw_ac always requires self.sw_ap for now!!!




    # else:
        # in the other case, it is updated at the time the statistics are
        # calculated 

    if (self.sw_ac is not None) and ('adv' in self.sw_ac):


        self.P_h    = self.Ps - self.rho * self.g * self.h
        # circulation levels that lie inside the mixed layer (by pressure)
        in_ml = (self.air_ac.p >= self.P_h)

        if in_ml.sum() == 0:
            warnings.warn(" no circulation points in the mixed layer \
                          found. We just take the bottom one.")
            in_ml = self.air_ac.index == (len(self.air_ac) - 1)
        for var in ['t','q','u','v']:

            # calculation of the advection variables for the mixed-layer
            # these will be used for the next timestep
            # Warning: w is excluded for now.
            # (mass-weighted mean over the in-mixed-layer circulation levels)
            self.__dict__['adv'+var] = \
                        ((self.air_ac['adv'+var+'_x'][in_ml] \
                         + \
                         self.air_ac['adv'+var+'_y'][in_ml])* \
                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
                        self.air_ac['delpdgrav'][in_ml].sum()

            # calculation of the advection variables for the profile above
            # the mixed layer (also for the next timestep)
            self.air_ap['adv'+var] = \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p,\
                                              self.air_ac['adv'+var+'_x']) \
                                    + \
                                    np.interp(self.air_ap.p,\
                                              self.air_ac.p, \
                                              self.air_ac['adv'+var+'_y'])
            # if var == 't':
            #     print(self.air_ap['adv'+var])
            #     stop

        # as an approximation, we consider that advection of theta in the
        # mixed layer is equal to advection of t. This is a sufficient
        # approximation since theta and t are very similar at the surface
        # pressure.

        self.__dict__['advtheta'] = self.__dict__['advt']

    if (self.sw_ac is not None) and ('w' in self.sw_ac):
        # update the vertical wind profile
        self.air_ap['wp'] = np.interp(self.air_ap.p, \
                                          self.air_ac.p, \
                                          self.air_ac['wp'])
        self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
                                             self.Rv*self.air_ap.q)
        self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t

        air_apwold = self.air_ap['w']
        # convert pressure velocity wp to geometric vertical wind w
        self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
        #print('hello w upd')

        # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
        # # interpolate subsidence x density
        # self.air_ap['wrho'] = \
        #            np.interp(self.air_ap.p,\
        #                      self.air_ach.p,\
        #                      self.air_ach['wrho']) \
        #     
        # self.air_ap['w'] = \
        #     self.air_ap['wrho']/(self.air_ap.p/ \
        #                          (self.Rd*(1.-self.air_ap.q) + \
        #                           self.Rv*self.air_ap.q)* \
        #                          self.air_ap.TEMP)
        # # self.wrho = np.interp(self.P_h,\
        # #                      self.air_ach.p,\
        # #                      self.air_ach['wrho']) \



        # Also update the vertical wind at the mixed-layer height
        # (subsidence)
        self.ws   = self.air_ap.w[1]
    #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])

        ## Finally, we update he 
        #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
        #                        + \
        #                        self.air_ac['divU_y'][in_ml])* \
        #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
        #            self.air_ac['delpdgrav'][in_ml].sum() 


    if self.sw_ap:
        for var in ['theta','q','u','v']:

            # update of the slope (gamma) for the different variables, for
            # the next timestep!

            # there is an warning message that tells about dividing through
            # zero, which we ignore

            with np.errstate(divide='ignore'):
                gammavar = list(np.array(self.air_ap[var][1:].values - \
                                             self.air_ap[var][:-1].values) \
                                    / np.array(self.air_ap['z'][1:].values - \
                                               self.air_ap['z'][:-1].values))

                # add last element twice (since we have one element less)
            gammavar.append(gammavar[-1])
            gammavar = np.array(gammavar)
            self.air_ap['gamma'+var] = gammavar

            # Based on the above, update the gamma value at the mixed-layer
            # top
            self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
                                                                 self.air_ap.z)[0][-1]]
-            
def run_radiation(self):
    """Simple bulk radiation scheme for the slab model.

    Computes the sine of the solar elevation from day-of-year, model time
    and location, a bulk atmospheric transmissivity (reduced by cloud
    cover cc), and from those the four radiation components Swin, Swout,
    Lwin, Lwout and the net radiation Q, all stored on self.
    """
    # solar declination angle for this day of year
    declination = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)

    # sine of the solar elevation angle at the current model time
    lat_term = 2. * np.pi * self.lat / 360.
    hour_term = 2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.
    sin_elev = np.sin(lat_term) * np.sin(declination) - np.cos(lat_term) * np.cos(declination) * np.cos(hour_term)
    if sin_elev < 0.0001:
        # keep a tiny positive floor so night-time values stay finite
        sin_elev = 0.0001

    # air temperature a little above the surface (pressure reduced by the
    # hydrostatic increment over 0.1*h)
    air_temp = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)

    # bulk shortwave transmissivity, damped by cloud cover
    transmissivity = (0.6 + 0.2 * sin_elev) * (1. - 0.4 * self.cc)

    self.Swin  = self.S0 * transmissivity * sin_elev
    self.Swout = self.alpha * self.S0 * transmissivity * sin_elev
    self.Lwin  = 0.8 * self.bolz * air_temp ** 4.
    self.Lwout = self.bolz * self.Ts ** 4.

    # net radiation at the surface
    self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
-  
def run_surface_layer(self):
    """Diagnose surface-layer transfer coefficients and 2-m variables.

    Two code paths:
      * sw_lit: classic iterative Monin-Obukhov solution (ribtol) with
        the integrated stability functions psim/psih;
      * otherwise: the non-iterative approximation of Wouters et al.,
        2012 (zeta_hs2/funcsche), which avoids the Obukhov-length
        iteration.
    Updates thetasurf, qsurf, thetavsurf, Rib, L/zeta, Cm, Cs, ustar,
    uw, vw and the 2-m diagnostics (T2m, q2m, u2m, v2m, esat2m, e2m).

    NOTE(review): the sw_lit branch mixes bare psim/psih/ribtol calls
    (presumably module-level helpers) with the self.psim/self.psih
    methods -- probably equivalent implementations, but verify.
    """
    # HW: I had to raise the minimum wind speed to make the simulation with
    # the non-iterative solution stable (this solution was a wild guess, so I don't
    # know the exact problem of the instability in case of very low wind
    # speeds yet)
    #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))

    # version of 20180730 where there are still some runs crashing. Maybe
    # an upper limit should be set on the monin-obukhov length instead of
    # a lower limmit on the wind speed?
    #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))

    ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))


    self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
    qsatsurf       = qsat(self.thetasurf, self.Ps)
    cq             = (1. + self.Cs * ueff * self.rs) ** -1.
    self.qsurf     = (1. - cq) * self.q + cq * qsatsurf

    self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)

    # surface-layer height taken as 10% of the mixed-layer depth
    zsl       = 0.1 * self.h
    self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.



    if self.sw_lit:
        self.Rib  = min(self.Rib, 0.2)
        self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
        self.zeta  = zsl/self.L
        #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration


        self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
        self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))


        self.ustar = np.sqrt(self.Cm) * ueff
        self.uw    = - self.Cm * ueff * self.u
        self.vw    = - self.Cm * ueff * self.v


        # diagnostic meteorological variables
        self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
        self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
        self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
        self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))

        # diagnostic meteorological variables
    else:

        ## circumventing any iteration with Wouters et al., 2012
        self.zslz0m = np.max((zsl/self.z0m,10.))
        #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
        self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
        #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
        self.L = zsl/self.zeta
        funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))

        self.Cm = self.k**2.0/funm/funm
        self.Cs = self.k**2.0/funm/funh

        self.ustar = np.sqrt(self.Cm) * ueff
        self.uw    = - self.Cm * ueff * self.u
        self.vw    = - self.Cm * ueff * self.v

        # extrapolation from mixed layer (instead of from surface) to 2meter
        self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
        self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
        self.u2m    =                - self.uw     / self.ustar / self.k * funm
        self.v2m    =                - self.vw     / self.ustar / self.k * funm


    # 2-m saturation vapour pressure (Clausius-Clapeyron fit) and vapour pressure
    self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
    self.e2m    = self.q2m * self.Ps / 0.622
-     
def ribtol(self, Rib, zsl, z0m, z0h):
    """Invert the bulk Richardson number for the Obukhov length L.

    Newton-Raphson iteration with a numerical (secant) derivative: finds
    L such that Rib = zsl/L * fh(L) / fm(L)**2, where fh/fm are the
    integrated stability corrections built from self.psih/self.psim.

    Arguments:
        Rib -- bulk Richardson number of the surface layer
        zsl -- surface-layer evaluation height [m]
        z0m -- roughness length for momentum [m]
        z0h -- roughness length for heat [m]

    Returns the Obukhov length L [m]; the iteration stops once two
    successive estimates differ by no more than 0.001, or bails out when
    |L| explodes beyond 1e12.
    """
    def minus_rib_model(l):
        # -zsl/l * fh/fm**2: the negated model Richardson number at length l
        fh = np.log(zsl / z0h) - self.psih(zsl / l) + self.psih(z0h / l)
        fm = np.log(zsl / z0m) - self.psim(zsl / l) + self.psim(z0m / l)
        return -zsl / l * fh / (fm)**2.

    # seed with two distinct values so the first convergence test fails
    if Rib > 0.:
        L, L_prev = 1., 2.
    else:
        L, L_prev = -1., -2.

    while abs(L - L_prev) > 0.001:
        L_prev = L
        residual = Rib + minus_rib_model(L)
        # numerical derivative over a +/-0.1% bracket around L
        lo = L - 0.001 * L
        hi = L + 0.001 * L
        slope = (minus_rib_model(lo) - minus_rib_model(hi)) / (lo - hi)
        L = L - residual / slope
        # bail out if the iteration diverges
        if abs(L) > 1e12:
            break

    return L
-      
def psim(self, zeta):
    """Integrated stability correction for momentum.

    zeta = z/L is the dimensionless stability parameter. The unstable
    side (zeta <= 0) uses the classic Businger-Dyer form; the stable
    side uses the Beljaars-Holtslag (1991) exponential expression.
    """
    if zeta > 0:
        # stable surface layer
        return -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
    # unstable (or neutral) surface layer
    x = (1. - 16. * zeta)**(0.25)
    return 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-      
def psih(self, zeta):
    """Integrated stability correction for heat.

    zeta = z/L is the dimensionless stability parameter. The unstable
    side (zeta <= 0) uses the classic Businger-Dyer form; the stable
    side uses the Beljaars-Holtslag (1991) expression.
    """
    if zeta > 0:
        # stable surface layer
        return -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
    # unstable (or neutral) surface layer
    x = (1. - 16. * zeta)**(0.25)
    return 2. * np.log( (1. + x*x) / 2.)
- 
def jarvis_stewart(self):
    """Surface resistance rs from the Jarvis-Stewart multiplicative model.

    rs = rsmin/LAI * f1(Swin) * f2(soil moisture) * f3(VPD) * f4(T).
    Reads Swin, w2, esat/e and theta from the model state and stores the
    result in self.rs.
    """
    # f1: response to incoming shortwave radiation (1 when radiation is off)
    if self.sw_rad:
        sw_term = 0.004 * self.Swin
        f_rad = 1. / min(1., ((sw_term + 0.05) / (0.81 * (sw_term + 1.))))
    else:
        f_rad = 1.

    # f2: soil-moisture stress between wilting point and field capacity;
    # effectively infinite (1e8) at/below wilting point, clipped to >= 1
    # when w2 exceeds field capacity
    if self.w2 > self.wwilt:
        f_soil = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
    else:
        f_soil = 1.e8
    f_soil = max(f_soil, 1.)

    # f3: vapour-pressure-deficit response
    f_vpd = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
    # f4: temperature response with an optimum at 298 K
    f_temp = 1. / (1. - 0.0016 * (298.0 - self.theta)**2.)

    self.rs = self.rsmin / self.LAI * f_rad * f_soil * f_vpd * f_temp
-
def factorial(self,k):
    """Return k! as a float.

    Replaces the hand-rolled accumulation loop with math.factorial,
    which computes the product exactly in integer arithmetic; the cast
    to float keeps the original float return convention (the old loop
    accumulated `factorial * float(n)` for n = 2..k).

    Raises ValueError for negative k (the old loop silently returned 1,
    which was never a valid factorial).
    """
    import math  # local import: the file-level import block is outside this chunk
    return float(math.factorial(k))
-
-    def E1(self,x):
-        E1sum = 0
-        for k in range(1,100):
-            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
-        return -0.57721566490153286060 - np.log(x) - E1sum
- 
-    def ags(self):
-        # Select index for plant type
-        if(self.c3c4 == 'c3'):
-            c = 0
-        elif(self.c3c4 == 'c4'):
-            c = 1
-        else:
-            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)
-
-        # calculate CO2 compensation concentration
-        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  
-
-        # calculate mesophyll conductance
-        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
-                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
-        gm            = gm / 1000. # conversion from mm s-1 to m s-1
-  
-        # calculate CO2 concentration inside the leaf (ci)
-        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
-        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
-  
-        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
-        D0            = (self.f0[c] - fmin) / self.ad[c]
-  
-        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
-        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
-        ci            = cfrac * (co2abs - CO2comp) + CO2comp
-  
-        # calculate maximal gross primary production in high light conditions (Ag)
-        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
-  
-        # calculate effect of soil moisture stress on gross assimilation rate
-        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
-  
-        # calculate stress function
-        if (self.c_beta == 0):
-            fstr = betaw;
-        else:
-            # Following Combe et al (2016)
-            if (self.c_beta < 0.25):
-                P = 6.4 * self.c_beta
-            elif (self.c_beta < 0.50):
-                P = 7.6 * self.c_beta - 0.3
-            else:
-                P = 2**(3.66 * self.c_beta + 0.34) - 1
-            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
-  
-        # calculate gross assimilation rate (Am)
-        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
-        Rdark        = (1. / 9.) * Am
-        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
-  
-        # calculate  light use efficiency
-        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
-  
-        # calculate gross primary productivity
-        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
-  
-        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
-        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
-        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
-  
-        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
-        a1           = 1. / (1. - self.f0[c])
-        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
-  
-        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
-  
-        # calculate surface resistance for moisture and carbon dioxide
-        self.rs      = 1. / (1.6 * gcco2)
-        rsCO2        = 1. / gcco2
-  
-        # calculate net flux of CO2 into the plant (An)
-        An           = -(co2abs - ci) / (self.ra + rsCO2)
-  
-        # CO2 soil surface flux
-        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
-        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
-  
-        # CO2 flux
-        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
-        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
-        self.wCO2    = self.wCO2A + self.wCO2R
- 
-    def run_land_surface(self):
-        # compute ra
-        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
-        #print('ueff',self.u,self.v,self.wstar)
-
-        if(self.sw_sl):
-          self.ra = (self.Cs * ueff)**-1.
-        else:
-          self.ra = ueff / max(1.e-3, self.ustar)**2.
-
-        #print('ra',self.ra,self.ustar,ueff)
-
-        # first calculate essential thermodynamic variables
-        self.esat    = esat(self.theta)
-        self.qsat    = qsat(self.theta, self.Ps)
-        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
-        self.dqsatdT = 0.622 * desatdT / self.Ps
-        self.e       = self.q * self.Ps / 0.622
-
-        if(self.ls_type == 'js'): 
-            self.jarvis_stewart() 
-        elif(self.ls_type == 'ags'):
-            self.ags()
-        else:
-            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)
-
-        # recompute f2 using wg instead of w2
-        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
-          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
-        else:
-          f2        = 1.e8
-        self.rssoil = self.rssoilmin * f2 
- 
-        Wlmx = self.LAI * self.Wmax
-        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
-        self.cliq = min(1., self.Wl / Wlmx) 
-     
-        # calculate skin temperature implictly
-        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
-            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
-            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
-            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)
-
-        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
-        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
-        #print('Ts',self.rs)
-
-        esatsurf      = esat(self.Ts)
-        self.qsatsurf = qsat(self.Ts, self.Ps)
-
-        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-  
-        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
-  
-        self.LE     = self.LEsoil + self.LEveg + self.LEliq
-        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
-        #print('H',self.ra,self.Ts,self.theta)
-        self.G      = self.Lambda * (self.Ts - self.Tsoil)
-        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
-        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
-        
-        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
-  
-        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
-   
-        d1          = 0.1
-        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
-        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
-        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
-        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
-  
-        # calculate kinematic heat fluxes
-        self.wtheta   = self.H  / (self.rho * self.cp)
-        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
-        self.wq       = self.LE / (self.rho * self.Lv)
- 
-    def integrate_land_surface(self):
-        # integrate soil equations
-        Tsoil0        = self.Tsoil
-        wg0           = self.wg
-        Wl0           = self.Wl
-  
-        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
-        self.wg       = wg0     + self.dtcur * self.wgtend
-        self.Wl       = Wl0     + self.dtcur * self.Wltend
-  
-    # store model output
-    def store(self):
-        t                      = self.t
-        
-        self.out.time[t]          = t * self.dt / 3600. + self.tstart
-
-        # in case we are at the end of the simulation, we store the vertical
-        # profiles to the output
-        
-        # if t == (len(self.out.time) - 1):
-        #     self.out.air_ac = self.air_ac
-        #     self.out.air_ap = self.air_ap
-
-        
-        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
-        #  for key in self.out.__dict__.keys():
-        #      if key in self.__dict__:
-        #          self.out.__dict__[key][t]  = self.__dict__[key]
-        
-        self.out.h[t]          = self.h
-        
-        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
-        
-        self.out.gammatheta[t] = self.gammatheta
-        self.out.gammau[t]     = self.gammau
-        self.out.gammav[t]     = self.gammav
-        self.out.gammaq[t]     = self.gammaq
-        self.out.theta[t]      = self.theta
-        self.out.thetav[t]     = self.thetav
-        self.out.dtheta[t]     = self.dtheta
-        self.out.dthetav[t]    = self.dthetav
-        self.out.wtheta[t]     = self.wtheta
-        self.out.wthetav[t]    = self.wthetav
-        self.out.wthetae[t]    = self.wthetae
-        self.out.wthetave[t]   = self.wthetave
-        
-        self.out.q[t]          = self.q
-        self.out.dq[t]         = self.dq
-        self.out.wq[t]         = self.wq
-        self.out.wqe[t]        = self.wqe
-        self.out.wqM[t]        = self.wqM
-      
-        self.out.qsat[t]       = self.qsat
-        self.out.e[t]          = self.e
-        self.out.esat[t]       = self.esat
-      
-        fac = (self.rho*self.mco2)/self.mair
-        self.out.CO2[t]        = self.CO2
-        self.out.dCO2[t]       = self.dCO2
-        self.out.wCO2[t]       = self.wCO2  * fac
-        self.out.wCO2e[t]      = self.wCO2e * fac
-        self.out.wCO2R[t]      = self.wCO2R * fac
-        self.out.wCO2A[t]      = self.wCO2A * fac
-
-        self.out.u[t]          = self.u
-        self.out.du[t]         = self.du
-        self.out.uw[t]         = self.uw
-        
-        self.out.v[t]          = self.v
-        self.out.dv[t]         = self.dv
-        self.out.vw[t]         = self.vw
-        
-        self.out.T2m[t]        = self.T2m
-        self.out.q2m[t]        = self.q2m
-        self.out.u2m[t]        = self.u2m
-        self.out.v2m[t]        = self.v2m
-        self.out.e2m[t]        = self.e2m
-        self.out.esat2m[t]     = self.esat2m
-
-
-        self.out.Tsoil[t]      = self.Tsoil
-        self.out.T2[t]         = self.T2
-        self.out.Ts[t]         = self.Ts
-        self.out.wg[t]         = self.wg
-        
-        self.out.thetasurf[t]  = self.thetasurf
-        self.out.thetavsurf[t] = self.thetavsurf
-        self.out.qsurf[t]      = self.qsurf
-        self.out.ustar[t]      = self.ustar
-        self.out.Cm[t]         = self.Cm
-        self.out.Cs[t]         = self.Cs
-        self.out.L[t]          = self.L
-        self.out.Rib[t]        = self.Rib
-  
-        self.out.Swin[t]       = self.Swin
-        self.out.Swout[t]      = self.Swout
-        self.out.Lwin[t]       = self.Lwin
-        self.out.Lwout[t]      = self.Lwout
-        self.out.Q[t]          = self.Q
-  
-        self.out.ra[t]         = self.ra
-        self.out.rs[t]         = self.rs
-        self.out.H[t]          = self.H
-        self.out.LE[t]         = self.LE
-        self.out.LEliq[t]      = self.LEliq
-        self.out.LEveg[t]      = self.LEveg
-        self.out.LEsoil[t]     = self.LEsoil
-        self.out.LEpot[t]      = self.LEpot
-        self.out.LEref[t]      = self.LEref
-        self.out.G[t]          = self.G
-
-        self.out.zlcl[t]       = self.lcl
-        self.out.RH_h[t]       = self.RH_h
-
-        self.out.ac[t]         = self.ac
-        self.out.M[t]          = self.M
-        self.out.dz[t]         = self.dz_h
-        self.out.substeps[t]   = self.substeps
-  
-    # delete class variables to facilitate analysis in ipython
-    def exitmodel(self):
-        del(self.Lv)
-        del(self.cp)
-        del(self.rho)
-        del(self.k)
-        del(self.g)
-        del(self.Rd)
-        del(self.Rv)
-        del(self.bolz)
-        del(self.S0)
-        del(self.rhow)
-  
-        del(self.t)
-        del(self.dt)
-        del(self.tsteps)
-         
-        del(self.h)          
-        del(self.Ps)        
-        del(self.fc)        
-        del(self.ws)
-        del(self.we)
-        
-        del(self.theta)
-        del(self.dtheta)
-        del(self.gammatheta)
-        del(self.advtheta)
-        del(self.beta)
-        del(self.wtheta)
-    
-        del(self.T2m)
-        del(self.q2m)
-        del(self.e2m)
-        del(self.esat2m)
-        del(self.u2m)
-        del(self.v2m)
-        
-        del(self.thetasurf)
-        del(self.qsatsurf)
-        del(self.thetav)
-        del(self.dthetav)
-        del(self.thetavsurf)
-        del(self.qsurf)
-        del(self.wthetav)
-        
-        del(self.q)
-        del(self.qsat)
-        del(self.dqsatdT)
-        del(self.e)
-        del(self.esat)
-        del(self.dq)
-        del(self.gammaq)
-        del(self.advq)
-        del(self.wq)
-        
-        del(self.u)
-        del(self.du)
-        del(self.gammau)
-        del(self.advu)
-        
-        del(self.v)
-        del(self.dv)
-        del(self.gammav)
-        del(self.advv)
-  
-        del(self.htend)
-        del(self.thetatend)
-        del(self.dthetatend)
-        del(self.qtend)
-        del(self.dqtend)
-        del(self.utend)
-        del(self.dutend)
-        del(self.vtend)
-        del(self.dvtend)
-     
-        del(self.Tsoiltend) 
-        del(self.wgtend)  
-        del(self.Wltend) 
-  
-        del(self.ustar)
-        del(self.uw)
-        del(self.vw)
-        del(self.z0m)
-        del(self.z0h)        
-        del(self.Cm)         
-        del(self.Cs)
-        del(self.L)
-        del(self.Rib)
-        del(self.ra)
-  
-        del(self.lat)
-        del(self.lon)
-        del(self.doy)
-        del(self.tstart)
-   
-        del(self.Swin)
-        del(self.Swout)
-        del(self.Lwin)
-        del(self.Lwout)
-        del(self.cc)
-  
-        del(self.wg)
-        del(self.w2)
-        del(self.cveg)
-        del(self.cliq)
-        del(self.Tsoil)
-        del(self.T2)
-        del(self.a)
-        del(self.b)
-        del(self.p)
-        del(self.CGsat)
-  
-        del(self.wsat)
-        del(self.wfc)
-        del(self.wwilt)
-  
-        del(self.C1sat)
-        del(self.C2ref)
-  
-        del(self.LAI)
-        del(self.rs)
-        del(self.rssoil)
-        del(self.rsmin)
-        del(self.rssoilmin)
-        del(self.alpha)
-        del(self.gD)
-  
-        del(self.Ts)
-  
-        del(self.Wmax)
-        del(self.Wl)
-  
-        del(self.Lambda)
-        
-        del(self.Q)
-        del(self.H)
-        del(self.LE)
-        del(self.LEliq)
-        del(self.LEveg)
-        del(self.LEsoil)
-        del(self.LEpot)
-        del(self.LEref)
-        del(self.G)
-  
-        del(self.sw_ls)
-        del(self.sw_rad)
-        del(self.sw_sl)
-        del(self.sw_wind)
-        del(self.sw_shearwe)
-
-# class for storing mixed-layer model output data
-class model_output:
-    def __init__(self, tsteps):
-        self.time          = np.zeros(tsteps)    # time [s]
-
-        # mixed-layer variables
-        self.h          = np.zeros(tsteps)    # ABL height [m]
-        
-        self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammatheta = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammaq     = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammau     = np.zeros(tsteps)
-        self.gammav     = np.zeros(tsteps)
-        self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
-        self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
-        self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
-        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
-        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
-        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
-        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
-        
-        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
-        self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
-        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
-        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]
-
-        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
-        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
-        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]
-
-        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
-        self.dCO2       = np.zeros(tsteps)    # initial CO2 jump at h [ppm]
-        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
-        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
-        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
-        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
-        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
-        
-        self.u          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
-        self.du         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
-        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
-        
-        self.v          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
-        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]
-
-        # diagnostic meteorological variables
-        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
-        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
-        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
-        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
-        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
-        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
-
-        # ground variables
-        self.Tsoil       = np.zeros(tsteps)
-        self.T2          = np.zeros(tsteps)
-        self.Ts          = np.zeros(tsteps)
-        self.wg          = np.zeros(tsteps)
-
-        # surface-layer variables
-        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
-        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
-        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
-        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
-        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
-        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
-        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
-        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
-        self.L          = np.zeros(tsteps)    # Obukhov length [m]
-        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]
-
-        # radiation variables
-        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
-        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
-        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
-        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
-        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]
-
-        # land surface variables
-        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
-        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
-        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
-        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
-        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
-        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
-        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
-        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
-        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
-        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]
-
-        # Mixed-layer top variables
-        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
-        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]
-
-        # cumulus variables
-        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
-        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
-        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
-        
-        
-        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
-
-# class for storing mixed-layer model input data
-class model_input:
-    def __init__(self):
-
-        # # comment not valid
-        # we comment out the initialization, because there is a problem when
-        # inheriting values from one the another class4gl_iput. We also expect
-        # that the user specifies all the required parmameters (if not, an error
-        # is raised). 
-
-        # general model variables
-        self.runtime    = None  # duration of model run [s]
-        self.dt         = None  # time step [s]
-
-        # mixed-layer variables
-        self.sw_ml      = None  # mixed-layer model switch
-        self.sw_shearwe = None  # Shear growth ABL switch
-        self.sw_fixft   = None  # Fix the free-troposphere switch
-        self.h          = None  # initial ABL height [m]
-        self.Ps         = None  # surface pressure [Pa]
-        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
-        self.fc         = None  # Coriolis parameter [s-1]
-        
-        self.theta      = None  # initial mixed-layer potential temperature [K]
-        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
-
-        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
-
-        self.dtheta     = None  # initial temperature jump at h [K]
-        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = None  # advection of heat [K s-1]
-        self.beta       = None  # entrainment ratio for virtual heat [-]
-        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
-        
-        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
-        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
-        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
-
-        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = None  # advection of moisture [kg kg-1 s-1]
-        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
-
-        self.CO2        = None  # initial mixed-layer potential temperature [K]
-        self.dCO2       = None  # initial temperature jump at h [K]
-        self.gammaCO2   = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advCO2     = None  # advection of heat [K s-1]
-        self.wCO2       = None  # surface kinematic heat flux [K m s-1]
-        
-        self.sw_wind    = None  # prognostic wind switch
-        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.du         = None  # initial u-wind jump at h [m s-1]
-        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = None  # advection of u-wind [m s-2]
-
-        self.v          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = None  # initial u-wind jump at h [m s-1]
-        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = None  # advection of v-wind [m s-2]
-
-        # surface layer variables
-        self.sw_sl      = None  # surface layer switch
-        self.ustar      = None  # surface friction velocity [m s-1]
-        self.z0m        = None  # roughness length for momentum [m]
-        self.z0h        = None  # roughness length for scalars [m]
-        self.Cm         = None  # drag coefficient for momentum [-]
-        self.Cs         = None  # drag coefficient for scalars [-]
-        self.L          = None  # Obukhov length [-]
-        self.Rib        = None  # bulk Richardson number [-]
-
-        # radiation parameters
-        self.sw_rad     = None  # radiation switch
-        self.lat        = None  # latitude [deg]
-        self.lon        = None  # longitude [deg]
-        self.doy        = None  # day of the year [-]
-        self.tstart     = None  # time of the day [h UTC]
-        self.cc         = None  # cloud cover fraction [-]
-        self.Q          = None  # net radiation [W m-2] 
-        self.dFz        = None  # cloud top radiative divergence [W m-2] 
-
-        # land surface parameters
-        self.sw_ls      = None  # land surface switch
-        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
-        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = None  # temperature top soil layer [K]
-        self.T2         = None  # temperature deeper soil layer [K]
-        
-        self.a          = None  # Clapp and Hornberger retention curve parameter a
-        self.b          = None  # Clapp and Hornberger retention curve parameter b
-        self.p          = None  # Clapp and Hornberger retention curve parameter p 
-        self.CGsat      = None  # saturated soil conductivity for heat
-        
-        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
-        self.wfc        = None  # volumetric water content field capacity [-]
-        self.wwilt      = None  # volumetric water content wilting point [-]
-        
-        self.C1sat      = None 
-        self.C2ref      = None
-
-        self.c_beta     = None  # Curvatur plant water-stress factor (0..1) [-]
-        
-        self.LAI        = None  # leaf area index [-]
-        self.gD         = None  # correction factor transpiration for VPD [-]
-        self.rsmin      = None  # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = None  # surface albedo [-]
-        
-        self.Ts         = None  # initial surface temperature [K]
-        
-        self.cveg       = None  # vegetation fraction [-]
-        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
-        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
-        
-        self.Lambda     = None  # thermal diffusivity skin layer [-]
-
-        # A-Gs parameters
-        self.c3c4       = None  # Plant type ('c3' or 'c4')
-
-        # Cumulus parameters
-        self.sw_cu      = None  # Cumulus parameterization switch
-        self.dz_h       = None  # Transition layer thickness [m]
-        
-# BEGIN -- HW 20171027
-        # self.cala       = None      # soil heat conductivity [W/(K*m)]
-        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
-# END -- HW 20171027
diff --git a/class4gl.py b/class4gl.py
deleted file mode 100644
index 7baaa51..0000000
--- a/class4gl.py
+++ /dev/null
@@ -1,1611 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-
-Created on Mon Jan 29 12:33:51 2018
-
-Module file for class4gl, which  extents the class-model to be able to take
-global air profiles as input. It exists of:
-
-CLASSES:
-    - an input object, namely class4gl_input. It includes:
-        - a function to read Wyoming sounding data from a yyoming stream object
-        - a function to read global data from a globaldata library object 
-    - the model object: class4gl
-    - ....    
-
-DEPENDENCIES:
-    - xarray
-    - numpy
-    - data_global
-    - Pysolar
-    - yaml
-
-@author: Hendrik Wouters
-
-"""
-
-
-
-""" Setup of envirnoment """
-
-# Standard modules of the stand class-boundary-layer model
-from model import model
-from model import model_output as class4gl_output
-from model import model_input
-from model import qsat
-#from data_soundings import wyoming 
-import Pysolar
-import yaml
-import logging
-import warnings
-import pytz
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-
-# Generic Python Packages
-import numpy as np
-import datetime as dt
-import pandas as pd
-import xarray as xr
-import io
-#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
-from data_global import data_global
-grav = 9.81
-
-# this is just a generic input object
-class generic_input(object):
-    def __init__(self):
-        self.init = True
-
-
-# all units from all variables in CLASS(4GL) should be defined here!
-units = {
-         'h':'m',
-         'theta':'K', 
-         'q':'kg/kg',
-         'cc': '-',
-         'cveg': '-',
-         'wg': 'm3 m-3',
-         'w2': 'm3 m-3',
-         #'wg': 'kg/kg',
-         'Tsoil': 'K',
-         'T2': 'K',
-         'z0m': 'm',
-         'alpha': '-',
-         'LAI': '-',
-         'dhdt':'m/h',
-         'dthetadt':'K/h',
-         'dqdt':'kg/kg/h',
-         'BR': '-',
-         'EF': '-',
-}
-
-class class4gl_input(object):
-# this was the way it was defined previously.
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
-
-    def __init__(self,set_pars_defaults=True,debug_level=None):
-
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        print('hello')
-        self.logger = logging.getLogger('class4gl_input')
-        print(self.logger)
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # # create logger
-        # self.logger = logging.getLogger('class4gl_input')
-        # self.logger.setLevel(debug_level)
-
-        # # create console handler and set level to debug
-        # ch = logging.StreamHandler()
-        # ch.setLevel(debug_level)
-
-        # # create formatter
-        # formatter = logging.Formatter('%(asctime)s - \
-        #                                %(name)s - \
-        #                                %(levelname)s - \
-        #                                %(message)s')
-        # add formatter to ch
-        # ch.setFormatter(formatter)
-     
-        # # add ch to logger
-        # self.logger.addHandler(ch)
-
-        # """ end set up logger """
-
-
-
-        # these are the standard model input single-value parameters for class
-        self.pars = model_input()
-
-        # diagnostic parameters of the initial profile
-        self.diag = dict()
-
-        # In this variable, we keep track of the different parameters from where it originates from. 
-        self.sources = {}
-
-        if set_pars_defaults:
-            self.set_pars_defaults()
-
-    def set_pars_defaults(self):
-
-        """ 
-        Create empty model_input and set up case
-        """
-        defaults = dict( 
-        dt         = 60.    , # time step [s] 
-        runtime    = 6*3600 ,  # total run time [s]
-        
-        # mixed-layer input
-        sw_ml      = True   ,  # mixed-layer model switch
-        sw_shearwe = False  ,  # shear growth mixed-layer switch
-        sw_fixft   = False  ,  # Fix the free-troposphere switch
-        h          = 200.   ,  # initial ABL height [m]
-        Ps         = 101300.,  # surface pressure [Pa]
-        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
-        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
-        
-        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
-        dtheta     = 1.     ,  # initial temperature jump at h [K]
-        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
-        advtheta   = 0.     ,  # advection of heat [K s-1]
-        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
-        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
-        
-        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
-        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
-        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
-        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
-        
-        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
-        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
-        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
-        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
-        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
-        sw_wind    = True  ,  # prognostic wind switch
-        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
-        du         = 0.     ,  # initial u-wind jump at h [m s-1]
-        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
-        advu       = 0.     ,  # advection of u-wind [m s-2]
-        v          = 0.0    , # initial mixed-layer u-wind speed [m s-1]
-        dv         = 0.0    ,  # initial u-wind jump at h [m s-1]
-        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
-        advv       = 0.     ,  # advection of v-wind [m s-2]
-        sw_sl      = True   , # surface layer switch
-        ustar      = 0.3    ,  # surface friction velocity [m s-1]
-        z0m        = 0.02   ,  # roughness length for momentum [m]
-        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
-        sw_rad     = True   , # radiation switch
-        lat        = 51.97  ,  # latitude [deg]
-        lon        = -4.93  ,  # longitude [deg]
-        doy        = 268.   ,  # day of the year [-]
-        tstart     = 6.8    ,  # time of the day [h UTC]
-        cc         = 0.0    ,  # cloud cover fraction [-]
-        Q          = 400.   ,  # net radiation [W m-2] 
-        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
-        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
-        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
-        cveg       = 0.85   ,  # vegetation fraction [-]
-        Tsoil      = 295.   ,  # temperature top soil layer [K]
-        Ts         = 295.   ,    # initial surface temperature [K]
-        T2         = 296.   ,  # temperature deeper soil layer [K]
-        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
-        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
-        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
-        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
-        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
-        wfc        = 0.323  ,  # volumetric water content field capacity [-]
-        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
-        C1sat      = 0.132  ,  
-        C2ref      = 1.8    ,
-        LAI        = 2.     ,  # leaf area index [-]
-        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
-        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
-        rssoilmin  = 50.    ,  # minimun resistance soil evaporation [s m-1]
-        alpha      = 0.25   ,  # surface albedo [-]
-        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
-        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
-        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
-        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
-        sw_cu      = False  ,  # Cumulus parameterization switch
-        dz_h       = 150.   ,  # Transition layer thickness [m]
-        cala       = None   ,  # soil heat conductivity [W/(K*m)]
-        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
-        sw_ls      = True   ,
-        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields  as input from eg., ERA-INTERIM
-        sw_lit     = False,
-        )
-        pars = model_input()
-        for key in defaults:
-            pars.__dict__[key] = defaults[key]
-        
-        self.update(source='defaults',pars=pars)
-        
-    def clear(self):
-        """ this procudure clears the class4gl_input """
-
-        for key in list(self.__dict__.keys()):
-            del(self.__dict__[key])
-        self.__init__()
-
-    def dump(self,file):
-        """ this procedure dumps the class4gl_input object into a yaml file
-            
-            Input: 
-                - self.__dict__ (internal): the dictionary from which we read 
-            Output:
-                - file: All the parameters in self.__init__() are written to
-                the yaml file, including pars, air_ap, sources etc.
-        """
-        file.write('---\n')
-        index = file.tell()
-        file.write('# CLASS4GL input; format version: 0.1\n')
-
-        # write out the position of the current record
-        yaml.dump({'index':index}, file, default_flow_style=False)
-
-        # we do not include the none values
-        for key,data in self.__dict__.items():
-            #if ((type(data) == model_input) or (type(class4gl_input):
-            if key == 'pars':
-
-                pars = {'pars' : self.__dict__['pars'].__dict__}
-                parsout = {}
-                for key in pars.keys():
-                    if pars[key] is not None:
-                        parsout[key] = pars[key]
-
-                yaml.dump(parsout, file, default_flow_style=False)
-            elif type(data) == dict:
-                if key == 'sources':
-                    # in case of sources, we want to have a
-                    # condensed list format as well, so we leave out
-                    # 'default_flow_style=False'
-                    yaml.dump({key : data}, file)
-                else: 
-                    yaml.dump({key : data}, file,
-                              default_flow_style=False)
-            elif type(data) == pd.DataFrame:
-                # in case of dataframes (for profiles), we want to have a
-                # condensed list format as well, so we leave out
-                # 'default_flow_style=False'
-                yaml.dump({key: data.to_dict(orient='list')},file)
-
-                # # these are trials to get it into a more human-readable
-                # fixed-width format, but it is too complex
-                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
-                #file.write(stream)
-                
-                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
-                #file.write(key+': !!str |\n')
-                #file.write(str(data)+'\n')
-       
-    def load_yaml_dict(self,yaml_dict,reset=True):
-        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
-            
-            Input: 
-                - yaml_dict: the dictionary from which we read 
-                - reset: reset data before reading        
-            Output:
-                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
-        """
-        
-        if reset:
-            for key in list(self.__dict__.keys()):
-                del(self.__dict__[key])
-            self.__init__()
-
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                self.__dict__[key] = model_input()
-                self.__dict__[key].__dict__ = data
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            elif key == 'sources':
-                self.__dict__[key] = data
-            elif key == 'diag':
-                self.__dict__[key] = data
-            else: 
-                warnings.warn("Key '"+key+"' may not be implemented.")
-                self.__dict__[key] = data
-
-    def update(self,source,**kwargs):
-        """ this procedure is to make updates of input parameters and tracking
-        of their source more convenient. It implements the assignment of
-        parameter source/sensitivity experiment IDs ('eg.,
-        'defaults', 'sounding balloon', any satellite information, climate
-        models, sensitivity tests etc.). These are all stored in a convenient
-        way with as class4gl_input.sources.  This way, the user can always consult with
-        from where parameters data originates from.  
-        
-        Input:
-            - source:    name of the underlying dataset
-            - **kwargs: a dictionary of data input, for which the key values
-            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
-            the values is a again a dictionary/dataframe of datakeys/columns
-            ('wg','PRES','datetime', ...) and datavalues (either single values,
-            profiles ...), eg., 
-
-                pars = {'wg': 0.007  , 'w2', 0.005}
-                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
-                                     300.,...]}
-            
-        Output:
-            - self.__dict__[datatype] : object to which the parameters are
-                                        assigned. They can be consulted with
-                                        self.pars, self.profiles, etc.
-                                        
-            - self.sources[source] : It supplements the overview overview of
-                                     data sources can be consulted with
-                                     self.sources. The structure is as follows:
-                                     as:
-                self.sources = { 
-                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
-                'GLEAM' :  ['pars:wg','pars:w2', ...],
-                 ...
-                }
-        
-        """
-
-        #print(source,kwargs)
-
-        for key,data in kwargs.items():
-
-            #print(key)
-            # if the key is not in class4gl_input object, then just add it. In
-            # that case, the update procedures below will just overwrite it 
-            if key not in self.__dict__:
-                self.__dict__[key] = data
-
-
-            
-
-            #... we do an additional check to see whether there is a type
-            # match. I not then raise a key error
-            if (type(data) != type(self.__dict__[key]) \
-                # we allow dict input for model_input pars
-                and not ((key == 'pars') and (type(data) == dict) and \
-                (type(self.__dict__[key]) == model_input))):
-
-                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
-
-
-            # This variable keeps track of the added data that is supplemented
-            # by the current source. We add this to class4gl_input.sources
-            datakeys = []
-
-            #... and we update the class4gl_input data, and this depends on the
-            # data type
-
-            if type(self.__dict__[key]) == pd.DataFrame:
-                # If the data type is a dataframe, then we update the columns
-                for column in list(data.columns):
-                    #print(column)
-                    self.__dict__[key][column] = data[column]
-                    datakeys.append(column)
-                    
-
-            elif type(self.__dict__[key]) == model_input:
-                # if the data type is a model_input, then we update its internal
-                # dictionary of parameters
-                if type(data) == model_input:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data.__dict__}
-                    datakeys = list(data.__dict__.keys())
-                elif type(data) == dict:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data}
-                    datakeys = list(data.keys())
-                else:
-                    raise TypeError('input key '+key+' is not of the same type\
-                                    as the one in the class4gl_object')
-
-            elif type(self.__dict__[key]) == dict:
-                # if the data type is a dictionary, we update the
-                # dictionary 
-                self.__dict__[key] = {self.__dict__[key] , data}
-                datakeys = list(data.keys())
-
-
-            # if source entry is not existing yet, we add it
-            if source not in self.sources.keys():
-                self.sources[source] = []
-
-
-            # self.logger.debug('updating section "'+\
-            #                  key+' ('+' '.join(datakeys)+')'\
-            #                  '" from source \
-            #                  "'+source+'"')
-
-            # Update the source dictionary: add the provided data keys to the
-            # specified source list
-            for datakey in datakeys:
-                # At first, remove the occurences of the keys in the other
-                # source lists
-                for sourcekey,sourcelist in self.sources.items():
-                    if key+':'+datakey in sourcelist:
-                        self.sources[sourcekey].remove(key+':'+datakey)
-                # Afterwards, add it to the current source list
-                self.sources[source].append(key+':'+datakey)
-
-
-        # # in case the datatype is a class4gl_input_pars, we update its keys
-        # # according to **kwargs dictionary
-        # if type(self.__dict__[datatype]) == class4gl_input_pars:
-        #     # add the data parameters to the datatype object dictionary of the
-        #     # datatype
-        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
-        #                                        **kwargs}
-        # # in case, the datatype reflects a dataframe, we update the columns according
-        # # to the *args list
-        # elif type(self.__dict__[datatype]) == pd.DataFrame:
-        #     for dataframe in args:
-        #         for column in list(dataframe.columns):
-        #             self.__dict__[datatype][column] = dataframe[column]
-        
-
-    def get_profile(self,IOBJ, *args, **argv):
-        # if type(IOBJ) == wyoming:
-        self.get_profile_wyoming(IOBJ,*args,**argv)
-        # else:
-        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
-        
-    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
-        """ 
-            Purpose: 
-                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
-
-            Input:
-                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
-                function will take the profile at the stream's current
-                position. 
-                2. air_ap_mode: which air profile do we take? 
-                    - b : best
-                    - l : according to lower limit for the mixed-layer height
-                            estimate
-                    - u : according to upper limit for the mixed-layer height
-                            estimate
-
-
-            Output:
-                1. all single-value parameters are stored in the
-                   class4gl_input.pars object
-                2. the souding profiles are stored in the in the
-                   class4gl_input.air_balloon dataframe
-                3. modified sounding profiles for which the mixed layer height
-                   is fitted
-                4. ...
-
-        """
-
-
-        # Raise an error in case the input stream is not the correct object
-        # if type(wy_strm) is not wyoming:
-        #    raise TypeError('Not a wyoming type input stream')
-
-        # Let's tell the class_input object that it is a Wyoming fit type
-        self.air_ap_type = 'wyoming'
-        # ... and which mode of fitting we apply
-        self.air_ap_mode = air_ap_mode
-
-        """ Temporary variables used for output """
-        # single value parameters derived from the sounding profile
-        dpars = dict()
-        # profile values
-        air_balloon = pd.DataFrame()
-        # fitted profile values
-        air_ap = pd.DataFrame()
-        
-        string = wy_strm.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = wy_strm.current.find_next('pre').find_next('pre').text
-        
-        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
-        dpars = {**dpars,
-                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
-               }
-        
-        # we get weird output when it's a numpy Timestamp, so we convert it to
-        # pd.datetime type
-
-        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
-        dpars['STNID'] = dpars['Station number']
-
-        # altitude above ground level
-        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
-        # absolute humidity in g/kg
-        air_balloon['q']= (air_balloon.MIXR/1000.) \
-                              / \
-                             (air_balloon.MIXR/1000.+1.)
-        # convert wind speed from knots to m/s
-        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
-        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-        
-        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
-        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
-
-        
-
-        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-
-        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
-        air_balloon['p'] = air_balloon.PRES*100.
-
-
-        # Therefore, determine the sounding that are valid for 'any' column 
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        #is_valid = (air_balloon.z >= 0)
-        # # this is an alternative pipe/numpy method
-        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
-        valid_indices = air_balloon.index[is_valid].values
-        print(valid_indices)
-
-        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-
-        air_balloon['t'] = air_balloon['TEMP']+273.15
-        air_balloon['theta'] = (air_balloon.t) * \
-                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
-        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
-
-        if len(valid_indices) > 0:
-            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
-            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            
-            # the final mixed-layer height that will be used by class. We round it
-            # to 1 decimal so that we get a clean yaml output format
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-
-
-        if np.isnan(dpars['h']):
-            dpars['Ps'] = np.nan
-
-
-
-
-        if ~np.isnan(dpars['h']):
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u 
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-            
-
-
-
-        # First 3 data points of the mixed-layer fit. We create a empty head
-        # first
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-        
-        #calculate mixed-layer jump ( this should be larger than 0.1)
-        
-        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        air_ap_head['HGHT'] = air_ap_head['z'] \
-                                + \
-                                np.round(dpars[ 'Station elevation'],1)
-        
-        # make a row object for defining the jump
-        jump = air_ap_head.iloc[0] * np.nan
-            
-        if air_ap_tail.shape[0] > 1:
-
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = dpars['theta']
-        z_low =     dpars['h']
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                (z_mean > (z_low+10.)) and \
-                (theta_mean > (theta_low+0.2) ) and \
-                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-
-
-
-
-
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        #print(air_ap['PRES'].iloc[0])
-
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-
-        
-        dpars['lat'] = dpars['Station latitude']
-        dpars['latitude'] = dpars['lat']
-        
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        dpars['longitude'] = dpars['Station longitude']
-        
-        dpars['ldatetime'] = dpars['datetime'] \
-                            + \
-                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-
-        # # we make a pars object that is similar to the destination object
-        # pars = model_input()
-        # for key,value in dpars.items():
-        #     pars.__dict__[key] = value
-
-
-        # we round the columns to a specified decimal, so that we get a clean
-        # output format for yaml
-        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
-                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
-                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
-# 
-        for column,decimal in decimals.items():
-            air_balloon[column] = air_balloon[column].round(decimal)
-            air_ap[column] = air_ap[column].round(decimal)
-
-        self.update(source='wyoming',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-
-        
-    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
-    
-        """
-        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
-                 according to the position (lat lon) and the class datetime and timespan
-                 globaldata should be a globaldata multifile object
-        
-        Input: 
-            - globaldata: this is the library object
-            - only_keys: only extract specified keys
-            - exclude_keys: do not inherit specified keys
-        """
-        classdatetime      = np.datetime64(self.pars.datetime_daylight)
-        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
-                                           + \
-                                           dt.timedelta(seconds=self.pars.runtime)\
-                                          )
-
-
-        # # list of variables that we get from global ground data
-        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
-        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
-        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
-        #                 'texture', 'itex', 'isoil', 'BR',
-        #                 'b', 'cveg',
-        #                 'C1sat', 
-        #                 'C2ref', 'p', 'a',
-        #                 ] #globaldata.datasets.keys():
-
-        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
-        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
-
-
-        if type(globaldata) is not data_global:
-            raise TypeError("Wrong type of input library") 
-
-        # by default, we get all dataset keys
-        keys = list(globaldata.datasets.keys())
-
-        # We add LAI manually, because it is not listed in the datasets and
-        #they its retreival is hard coded below based on LAIpixel and cveg
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            keys.append('LAI')
-
-        # # In case there is surface pressure, we also calculate the half-level
-        # # and full-level pressure fields
-        # if ('sp' in keys):
-        #     keys.append('pfull')
-        #     keys.append('phalf')
-
-        # If specified, we only take the keys that are in only_keys
-        if only_keys is not None:
-            for key in keys:
-                if key not in only_keys:
-                    keys.remove(key)
-                
-        # If specified, we take out keys that are in exclude keys
-        if exclude_keys is not None:
-            for key in keys:
-                if key in exclude_keys:
-                    keys.remove(key)
-
-        # we set everything to nan first in the pars section (non-profile parameters
-        # without lev argument), so that we can check afterwards whether the
-        # data is well-fetched or not.
-        for key in keys:
-            if not ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None) and \
-                ('lev' in globaldata.datasets[key].page[key].dims)):
-                self.update(source='globaldata',pars={key:np.nan})
-            # # we do not check profile input for now. We assume it is
-            # # available
-            #else:
-            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
-
-        self.logger.debug('getting keys "'+', '.join(keys)+'\
-                          from global data')
-
-        for key in keys:
-            # If we find it, then we obtain the variables
-            if ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None)):
-
-                # check first whether the dataset has a height coordinate (3d space)
-                if 'lev' in globaldata.datasets[key].page[key].dims:
-
-                    # first, we browse to the correct file that has the current time
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-                        globaldata.datasets[key].browse_page(time=classdatetime)
-
-                    
-                    if (globaldata.datasets[key].page is not None):
-                        # find longitude and latitude coordinates
-                        ilats = (np.abs(globaldata.datasets[key].page.lat -
-                                        self.pars.latitude) < 0.5)
-                        ilons = (np.abs(globaldata.datasets[key].page.lon -
-                                        self.pars.longitude) < 0.5)
-                        
-                        # if we have a time dimension, then we look up the required timesteps during the class simulation
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            itimes = ((globaldata.datasets[key].page.time >= \
-                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
-
-                            # In case we didn't find any correct time, we take the
-                            # closest one.
-                            if np.sum(itimes) == 0.:
-
-
-                                classdatetimemean = \
-                                    np.datetime64(self.pars.datetime_daylight + \
-                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
-                                                ))
-
-                                dstimes = globaldata.datasets[key].page.time
-                                time = dstimes.sel(time=classdatetimemean,method='nearest')
-                                itimes = (globaldata.datasets[key].page.time ==
-                                          time)
-                                
-                        else:
-                            # we don't have a time coordinate so it doesn't matter
-                            # what itimes is
-                            itimes = 0
-
-                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
-
-                        # over which dimensions we take a mean:
-                        dims = globaldata.datasets[key].page[key].dims
-                        namesmean = list(dims)
-                        namesmean.remove('lev')
-                        idxmean = [dims.index(namemean) for namemean in namesmean]
-                        
-                        value = \
-                        globaldata.datasets[key].page[key].isel(time=itimes,
-                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
-
-                        # Ideally, source should be equal to the datakey of globaldata.library 
-                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
-                        #  but therefore the globaldata class requires a revision to make this work
-                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
-
-                else:
-                    # this procedure is for reading the ground fields (2d space). 
-                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
-
-    
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-    
-                       # first, we browse to the correct file
-                       #print(key)
-                       globaldata.datasets[key].browse_page(time=classdatetime)
-    
-                    if globaldata.datasets[key].page is not None:
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - self.pars.latitude))
-                        ilat = np.where((DIST) == np.min(DIST))[0][0]
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - self.pars.longitude))
-                        ilon = np.where((DIST) == np.min(DIST))[0][0]
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - (self.pars.latitude + 0.5)))
-                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmax = ilat
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - (self.pars.longitude  + 0.5)))
-                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmax = ilon
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lat.values\
-                                - (self.pars.latitude - 0.5)))
-                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmin = ilat
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lon.values\
-                                - (self.pars.longitude  - 0.5)))
-                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmin = ilon        
-                        
-                        if ilatmin < ilatmax:
-                            ilatrange = range(ilatmin,ilatmax+1)
-                        else:
-                            ilatrange = range(ilatmax,ilatmin+1)
-                            
-                        if ilonmin < ilonmax:
-                            ilonrange = range(ilonmin,ilonmax+1)
-                        else:
-                            ilonrange = range(ilonmax,ilonmin+1)     
-                            
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                            
-                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
-                                idatetime += 1
-                            
-                            classdatetimeend = np.datetime64(\
-                                                             self.pars.datetime +\
-                                                             dt.timedelta(seconds=self.pars.runtime)\
-                                                            ) 
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
-                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
-                                idatetimeend -= 1
-                            idatetime = np.min((idatetime,idatetimeend))
-                            #for gleam, we take the previous day values
-                            if key in ['wg', 'w2']:
-                                idatetime = idatetime - 1
-                                idatetimeend = idatetimeend - 1
-
-                            # in case of soil temperature, we take the exact
-                            # timing (which is the morning)
-                            if key in ['Tsoil','T2']:
-                                idatetimeend = idatetime
-                            
-                            idts = range(idatetime,idatetimeend+1)
-                            
-                            count = 0
-                            self.__dict__[key] = 0.
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    for iidts in idts:
-                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
-                                        count += 1
-                            value = value/count
-                            self.update(source='globaldata',pars={key:value.item()})
-                                
-                        else:
-                                
-                            count = 0
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
-                                    count += 1
-                            value = value/count                        
-
-                            self.update(source='globaldata',pars={key:value.item()})
-
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            self.logger.debug('also update LAI based on LAIpixel and cveg') 
-            # I suppose LAI pixel is already determined in the previous
-            # procedure. Anyway...
-            key = 'LAIpixel'
-
-            if globaldata.datasets[key].page is not None:
-                # first, we browse to the correct file that has the current time
-                if 'time' in list(globaldata.datasets[key].page[key].dims):
-                    globaldata.datasets[key].browse_page(time=classdatetime)
-            
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - self.pars.latitude))
-                ilat = np.where((DIST) == np.min(DIST))[0][0]
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - self.pars.longitude))
-                ilon = np.where((DIST) == np.min(DIST))[0][0]
-                 
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude + 0.5)))
-                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmax = ilat
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values \
-                        - (self.pars.longitude  + 0.5)))
-                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmax = ilon
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude - 0.5)))
-                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmin = ilat
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - (self.pars.longitude  - 0.5)))
-                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmin = ilon        
-                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                
-                
-                if ilatmin < ilatmax:
-                    ilatrange = range(ilatmin,ilatmax+1)
-                else:
-                    ilatrange = range(ilatmax,ilatmin+1)
-                    
-                if ilonmin < ilonmax:
-                    ilonrange = range(ilonmin,ilonmax+1)
-                else:
-                    ilonrange = range(ilonmax,ilonmin+1)           
-                
-                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
-                LAIpixel = 0.
-                count = 0
-                for iilat in [ilat]: #ilatrange
-                    for iilon in [ilon]: #ilonrange
-                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
-                        
-                                        
-                        # if np.isnan(tarray[idatetime]):
-                        #     print("interpolating GIMMS LAIpixel nan value")
-                        #     
-                        #     mask = np.isnan(tarray)
-                        #     
-                        #     #replace each nan value with a interpolated value
-                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-                        #         
-                        #     else:
-                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
-                    
-                        #         tarray *= np.nan 
-                        
-                        count += 1
-                        #tarray_res += tarray
-                LAIpixel = LAIpixel/count
-                
-                count = 0
-                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
-  
-                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
-                #print('LAIpixel:',self.__dict__['LAIpixel'])
-                #print('cveg:',self.__dict__['cveg'])
-                
-                # finally, we rescale the LAI according to the vegetation
-                # fraction
-                value = 0. 
-                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
-                   value =self.pars.LAIpixel/self.pars.cveg
-                else:
-                    # in case of small vegetation fraction, we take just a standard 
-                    # LAI value. It doesn't have a big influence anyway for
-                    # small vegetation
-                    value = 2.
-                #print('LAI:',self.__dict__['LAI'])
-                self.update(source='globaldata',pars={'LAI':value}) 
-
-
-        # in case we have 'sp', we also calculate the 3d pressure fields at
-        # full level and half level
-        if ('sp' in keys) and ('sp' in self.pars.__dict__):
-            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
-
-            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # hydrostatic thickness of each model layer
-            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
-            # # dz = rhodz/(R * T / pfull)
-
-
-            # # subsidence multiplied by density. We calculate the subsidence of
-            # # the in class itself
-            # wrho = np.zeros_like(phalf)
-            # wrho[-1] = 0. 
-
-            # for ihlev in range(0,wrho.shape[0]-1):
-            #     # subsidence multiplied by density is the integral of
-            #     # divergences multiplied by the layer thicknessies
-            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
-            #                     self.air_ac['divU_y'][ihlev:]) * \
-            #                    delpdgrav[ihlev:]).sum()
-
-
-            
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'p':list(pfull)}))
-            self.update(source='globaldata',\
-                        air_ach=pd.DataFrame({'p':list(phalf)}))
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
-            # self.update(source='globaldata',\
-            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
-
-    def check_source(self,source,check_only_sections=None):
-        """ this procedure checks whether data of a specified source is valid.
-
-        INPUT:
-            source: the data source we want to check
-            check_only_sections: a string or list with sections to be checked
-        OUTPUT:
-            returns True or False
-        """
-
-        # we set source ok to false as soon as we find a invalid input
-        source_ok = True
-
-        # convert to a single-item list in case of a string
-        check_only_sections_def = (([check_only_sections]) if \
-                                   type(check_only_sections) is str else \
-                                    check_only_sections)
-                                  
-        if source not in self.sources.keys():
-            self.logger.info('Source '+source+' does not exist')
-            source_ok = False
-
-        for sectiondatakey in self.sources[source]:                             
-            section,datakey = sectiondatakey.split(':')                         
-            if ((check_only_sections_def is None) or \
-                (section in check_only_sections_def)):                          
-                checkdatakeys = []
-                if type(self.__dict__[section]) is pd.DataFrame:
-                    checkdata = self.__dict__[section]
-                elif type(self.__dict__[section]) is model_input:
-                    checkdata = self.__dict__[section].__dict__
-
-                if (datakey not in checkdata):                              
-                    # self.logger.info('Expected key '+datakey+\
-                    #                  ' is not in parameter input')                        
-                    source_ok = False                                           
-                elif (checkdata[datakey] is None) or \
-                     (pd.isnull(checkdata[datakey]) is True):                    
-        
-                    # self.logger.info('Key value of "'+datakey+\
-                    #                  '" is invalid: ('+ \
-                    # str(self.__dict__[section].__dict__[datakey])+')')         
-                    source_ok = False
-
-        return source_ok
-
-    def check_source_globaldata(self):
-        """ this procedure checks whether all global parameter data is
-        available, according to the keys in the self.sources"""
-
-        source_globaldata_ok = True
-
-        #self.get_values_air_input()
-
-        # and now we can get the surface values
-        #class_settings = class4gl_input()
-        #class_settings.set_air_input(input_atm)
-        
-        # we only allow non-polar stations
-        if not (self.pars.lat <= 60.):
-            source_globaldata_ok = False
-            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-        
-        # check lat and lon
-        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
-            source_globaldata_ok = False
-            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
-            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
-        else:
-            # we only check the ground parameter data (pars section). The 
-            # profile data (air_ap section) are supposed to be valid in any 
-            # case.
-            source_ok = self.check_source(source='globaldata',\
-                                          check_only_sections=['air_ac',\
-                                                               'air_ap',\
-                                                               'pars'])
-            if not source_ok:
-                source_globaldata_ok = False
-        
-            # Additional check: we exclude desert-like
-            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
-                source_globaldata_ok = False
-                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
-                source_globaldata_ok = False
-                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
-            elif self.pars.cveg < 0.02:
-                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
-                source_globaldata_ok = False
-
-        return source_globaldata_ok
-
-
-class c4gli_iterator():
-    """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
-    
-        for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
-    """
-    def __init__(self,file):
-        # take file as IO stream
-        self.file = file
-        self.yaml_generator = yaml.load_all(file)
-        self.current_dict = {}
-        self.current_class4gl_input = class4gl_input()
-        separator = self.file.readline() # this is just dummy
-        self.header = file.readline()
-        if self.header != '# CLASS4GL record; format version: 0.1\n':
-            raise NotImplementedError("Wrong format version: '"+self.header+"'")
-    def __iter__(self):
-        return self
-    def __next__(self):
-        self.current_dict = self.yaml_generator.__next__()
-        self.current_class4gl_input.load_yaml_dict(self.current_dict)
-        return self.current_class4gl_input
-
-
-
-#get_cape and lift_parcel are adapted from the SkewT package
-    
-class gl_dia(object):
-    def get_lifted_index(self,timestep=-1):
-        self.LI = get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.)
-    
-#from SkewT
-#def get_lcl(startp,startt,startdp,nsteps=101):
-#    from numpy import interp
-#    #--------------------------------------------------------------------
-#    # Lift a parcel dry adiabatically from startp to LCL.
-#    # Init temp is startt in K, Init dew point is stwrtdp,
-#    # pressure levels are in Pa    
-#    #--------------------------------------------------------------------
-#
-#    assert startdp<=startt
-#
-#    if startdp==startt:
-#        return np.array([startp]),np.array([startt]),np.array([startdp]),
-#
-#    # Pres=linspace(startp,60000.,nsteps)
-#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
-#
-#    # Lift the dry parcel
-#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
-#    # Mixing ratio isopleth
-#    starte=VaporPressure(startdp)
-#    startw=MixRatio(starte,startp)
-#    e=Pres*startw/(.622+startw)
-#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
-#
-#    # Solve for the intersection of these lines (LCL).
-#    # interp requires the x argument (argument 2)
-#    # to be ascending in order!
-#    P_lcl=interp(0.,T_iso-T_dry,Pres)
-#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
-#
-#    # # presdry=linspace(startp,P_lcl)
-#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
-#
-#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
-#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
-#
-#    return P_lcl,T_lcl
-
-
-
def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
    """ Calculate mixed-layer height from temperature and wind speed profile

        The mixed-layer top is located with the bulk-Richardson-number
        method: RiB is computed at every level and the height where it
        first exceeds the critical value RiBc is found by linear
        interpolation between the two bracketing levels.  A second
        estimate based on the alternative critical value RiBce, together
        with the vertical resolution of the profile, brackets the
        uncertainty of the result.

        Input:
            HAGL: height coordinates above ground level [m]
            THTV: virtual potential temperature profile [K]
            WSPD: wind speed profile [m/s]
            RiBc: critical bulk Richardson number (best guess)
            RiBce: alternative critical bulk Richardson number
                   (uncertainty estimate)

        Output:
            BLH: best-guess mixed-layer height [m]
            BLHu: upper limit of mixed-layer height [m]
            BLHl: lower limit of mixed-layer height [m]
            (all three are nan when no estimate can be made)
    """
    eps = 2.  # security limit [m] for the resolution-based uncertainty

    # reference virtual potential temperature: first non-nan level
    ivalid = np.where(~np.isnan(THTV))[0]
    THTV_0 = THTV[ivalid[0]] if len(ivalid) > 0 else np.nan

    # bulk Richardson number; the wind speed is clipped from below to
    # avoid a division by zero in calm conditions
    RiB = 9.81/THTV_0 * (THTV - THTV_0) * HAGL \
          / np.clip(WSPD,a_min=0.1,a_max=None)**2.

    # best guess: first level where RiB exceeds the critical value
    icrit = np.where(RiB > RiBc)[0]
    if (len(icrit) == 0) or (icrit[0] == 0):
        # no crossing found (or crossing at the lowest level, for which
        # no bracketing pair exists): no estimate possible
        return np.nan, np.nan, np.nan
    i = icrit[0]
    # linear interpolation between the bracketing levels i-1 and i
    BLH = (HAGL[i] - HAGL[i-1])/(RiB[i] - RiB[i-1]) \
          * (RiBc - RiB[i-1]) + HAGL[i-1]

    # possible error is calculated as the difference height levels used
    # for the interpolation
    BLHu = np.max([BLH, HAGL[i]-eps])
    BLHl = np.min([BLH, HAGL[i-1]+eps])

    # calculate an alternative BLH based on another critical Richardson
    # number (RiBce):
    icrit = np.where(RiB > RiBce)[0]
    if (len(icrit) == 0) or (icrit[0] == 0):
        return np.nan, np.nan, np.nan
    i = icrit[0]
    # FIX: interpolate towards RiBce here; the original used RiBc, which
    # made the alternative estimate collapse onto the best guess
    BLHa = (HAGL[i] - HAGL[i-1])/(RiB[i] - RiB[i-1]) \
           * (RiBce - RiB[i-1]) + HAGL[i-1]
    BLHu = np.max([BLHu, HAGL[i]-eps])
    BLHl = np.min([BLHl, HAGL[i-1]+eps])

    # widen the bracket with the spread between the two estimates
    BLHu = np.max([BLHu, BLH + abs(BLH-BLHa)])
    BLHl = np.min([BLHl, BLH - abs(BLH-BLHa)])

    return BLH, BLHu, BLHl
-
-
-
-#from class
-def get_lcl(startp,startt,startqv):
-        # Find lifting condensation level iteratively
-    lcl = 20.
-    RHlcl = 0.5
-    
-    itmax = 30
-    it = 0
-    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it
Date: Tue, 21 Aug 2018 22:03:49 +0200
Subject: [PATCH 011/129] test

---
 interface_multi.py | 2061 --------------------------------------------
 ribtol.cpp         |   81 --
 ribtol.pyx         |   48 --
 ribtol_hw.py       |  165 ----
 4 files changed, 2355 deletions(-)
 delete mode 100644 interface_multi.py
 delete mode 100644 ribtol.cpp
 delete mode 100644 ribtol.pyx
 delete mode 100644 ribtol_hw.py

diff --git a/interface_multi.py b/interface_multi.py
deleted file mode 100644
index 83148e5..0000000
--- a/interface_multi.py
+++ /dev/null
@@ -1,2061 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-# from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
from matplotlib.colors import LinearSegmentedColormap
# custom diverging colormap (red -> grey -> blue) for the statistics view;
# each channel lists (position, value_below, value_above) control points
cdictpres = {'blue': (\
                   (0.,    0.,  0.),
                   (0.25,  0.25, 0.25),
                   (0.5,  .70, 0.70),
                   (0.75, 1.0, 1.0),
                   (1,     1.,  1.),
                   ),
       'green': (\
                   (0. ,   0., 0.0),
                   (0.25,  0.50, 0.50),
                   (0.5,  .70, 0.70),
                   (0.75,  0.50, 0.50),
                   (1  ,    0,  0.),
                   ),
       'red':  (\
                  (0 ,  1.0, 1.0),
                  (0.25 ,  1.0, 1.0),
                   (0.5,  .70, 0.70),
                  (0.75 , 0.25, 0.25),
                  (1,    0., 0.),
                  )}

statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


# NOTE(review): 'module' is usually a shell function on HPC systems and is
# not available in the non-interactive shell spawned by os.system, so this
# call most likely has no effect -- confirm whether it can be removed.
os.system('module load Ruby')
-
-class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
-        """ creates an interactive interface for analysing class4gl experiments
-
-        INPUT:
-            path_exp : path of the experiment output
-            path_obs : path of the observations 
-            globaldata: global data that is being shown on the map
-            refetch_stations: do we need to build the list of the stations again?
-        OUTPUT:
-            the procedure returns an interface object with interactive plots
-
-        """
-        
-        # set the ground
-        self.globaldata = globaldata
-
- 
-        self.path_exp = path_exp
-        self.path_obs = path_obs
-        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
-
-        # # get the list of stations
-        # stationsfile = self.path_exp+'/stations_list.csv'
-        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
-        #     stations = pd.read_csv(stationsfile)
-        # else:
-        #     stations = get_stations(self.path_exp)
-        #     stations.to_csv(stationsfile)
-
-        # stations = stations.set_index('STNID')
-
-        self.frames = {}
-
-        self.frames['stats'] = {}
-        self.frames['worldmap'] = {}
-                
-        self.frames['profiles'] = {}
-        self.frames['profiles'] = {}
-        self.frames['profiles']['DT'] = None
-        self.frames['profiles']['STNID'] = None
-
-        #self.frames['worldmap']['stationsfile'] = stationsfile
-        self.frames['worldmap']['stations'] = stations(self.path_exp, \
-                                                       suffix='ini',\
-                                                       refetch_stations=refetch_stations)
-
-        # Initially, the stats frame inherets the values/iterators of
-        # worldmap
-        for key in self.frames['worldmap'].keys():
-            self.frames['stats'][key] = self.frames['worldmap'][key]
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_ini'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='ini',\
-                                           refetch_records=refetch_records
-                                           )
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_mod'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='mod',\
-                                           refetch_records=refetch_records
-                                           )
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_obs_afternoon'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_obs,\
-                                           subset='afternoon',\
-                                           refetch_records=refetch_records
-                                           )
-
-        self.frames['stats']['records_all_stations_mod'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['records_all_stations_ini']['dates'] = \
-            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
-
-
-        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
-
-        self.frames['stats']['records_all_stations_obs_afternoon'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['viewkeys'] = ['h','theta','q']
-        print('Calculating table statistics')
-        self.frames['stats']['records_all_stations_mod_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_mod'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-
-        self.frames['stats']['inputkeys'] = inputkeys
-        
-        # self.frames['stats']['inputkeys'] = \
-        #     [ key for key in \
-        #       self.globaldata.datasets.keys() \
-        #       if key in \
-        #       list(self.frames['stats']['records_all_stations_obs'].columns)]
-
-
-        # get units from the class4gl units database
-        self.units = dict(units)
-        # for those that don't have a definition yet, we just ask a question
-        # mark
-        for var in self.frames['stats']['inputkeys']:
-            self.units[var] = '?'
-
-        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
-        self.frames['stats']['records_all_stations_ini_pct'] = \
-                  pct(self.frames['stats']['records_all_stations_ini'], \
-                      columns = self.frames['stats']['inputkeys'])
-
-        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
-        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-        #     mod['
-
-        # 
-        # 
-        # \
-        #        self.frames['stats']['records_all_stations_mod'], \
-
-
-
-        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
-        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
-        #               columns = [ 'd'+key+'dt' for key in \
-        #                           self.frames['stats']['viewkeys']], \
-        #              )
-
-        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
-        #               obs = self.frames['stats']['records_all_stations_ini'], \
-        #               columns = self.frames['stats']['viewkeys'], \
-        #              )
-        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
-        
-        print('filtering pathological data')
-        # some observational sounding still seem problematic, which needs to be
-        # investigated. In the meantime, we filter them
-        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
-
-        # we filter ALL data frames!!!
-        for key in self.frames['stats'].keys():
-            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
-               (self.frames['stats'][key].index.names == indextype):
-                self.frames['stats'][key] = self.frames['stats'][key][valid]
-        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
-
-        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
-
-
-        print("filtering stations from interface that have no records")
-        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
-            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                    == STNID).sum() == 0):
-                print("dropping", STNID)
-                self.frames['worldmap']['stations'].table = \
-                        self.frames['worldmap']['stations'].table.drop(STNID)
-                    
-        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-        
-        # TO TEST: should be removed, since it's is also done just below
-        self.frames['stats']['stations_iterator'] = \
-            self.frames['worldmap']['stations_iterator'] 
-
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
-        self.next_station()
-
-        # self.goto_datetime_worldmap(
-        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-        #     'after')
-    def sel_station(self,STNID=None,rownumber=None):
-
-        if (STNID is not None) and (rownumber is not None):
-            raise ValueError('Please provide either STNID or rownumber, not both.')
-
-        if (STNID is None) and (rownumber is None):
-            raise ValueError('Please provide either STNID or rownumber.')
-            
-        if STNID is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
-            print(
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-            )
-            self.update_station()
-        elif rownumber is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
-            self.update_station()
-
-
-
-    def next_station(self,event=None,jump=1):
-        with suppress(StopIteration):
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-                = self.frames['worldmap']['stations_iterator'].__next__(jump)
-            # self.frames['worldmap']['stations_iterator'].close()
-            # del(self.frames['worldmap']['stations_iterator'])
-            # self.frames['worldmap']['stations_iterator'] = \
-            #                 selfself.frames['worldmap']['stations'].iterrows()
-            # self.frames['worldmap']['STNID'],\
-            # self.frames['worldmap']['current_station'] \
-            #     = self.frames['worldmap']['stations_iterator'].__next__()
-
-        self.update_station()
-
-    def prev_station(self,event=None):
-        self.next_station(jump = -1,event=event)
    def update_station(self):
        """Synchronize all frames after the worldmap station changed.

        Copies the station selection from the worldmap frame into the
        stats frame, rebuilds the per-station record tables, (re)opens the
        per-station yaml files, creates fresh record iterators and finally
        refreshes the current record.  The statement order matters: the
        record tables must exist before the iterators are created, and the
        yaml files must be open before update_record() reads from them.
        """
        # the stats frame follows the worldmap station selection
        for key in ['STNID','current_station','stations_iterator']: 
            self.frames['stats'][key] = self.frames['worldmap'][key] 



        # generate index of the current station
        self.frames['stats']['records_current_station_index'] = \
            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
             == \
             self.frames['stats']['current_station'].name)

        # create the value table of the records of the current station
        tab_suffixes = \
                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
        for tab_suffix in tab_suffixes:
            self.frames['stats']['records_current_station'+tab_suffix] = \
                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]

        # go to first record of current station
        self.frames['stats']['records_iterator'] = \
                        records_iterator(self.frames['stats']['records_current_station_mod'])
        (self.frames['stats']['STNID'] , \
        self.frames['stats']['current_record_chunk'] , \
        self.frames['stats']['current_record_index']) , \
        self.frames['stats']['current_record_mod'] = \
                        self.frames['stats']['records_iterator'].__next__()

        # the profiles frame starts out identical to the stats frame
        for key in self.frames['stats'].keys():
            self.frames['profiles'][key] = self.frames['stats'][key]

        STNID = self.frames['profiles']['STNID']
        chunk = self.frames['profiles']['current_record_chunk']
        # close any files of the previously selected station before opening
        # the files of the new one
        if 'current_station_file_ini' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_ini'].close()
        self.frames['profiles']['current_station_file_ini'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')

        if 'current_station_file_mod' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_mod'].close()
        self.frames['profiles']['current_station_file_mod'] = \
            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
            self.frames['profiles']['current_station_file_afternoon'].close()
        self.frames['profiles']['current_station_file_afternoon'] = \
            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')

        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently
        self.frames['profiles']['records_iterator'] = \
                        records_iterator(self.frames['profiles']['records_current_station_mod'])
        (self.frames['profiles']['STNID'] , \
        self.frames['profiles']['current_record_chunk'] , \
        self.frames['profiles']['current_record_index']) , \
        self.frames['profiles']['current_record_mod'] = \
                        self.frames['profiles']['records_iterator'].__next__()


        # for the profiles we make a distinct record iterator, so that the
        # stats iterator can move independently

        self.update_record()
-
-    def next_record(self,event=None,jump=1):
-        with suppress(StopIteration):
-            (self.frames['profiles']['STNID'] , \
-            self.frames['profiles']['current_record_chunk'] , \
-            self.frames['profiles']['current_record_index']) , \
-            self.frames['profiles']['current_record_mod'] = \
-                      self.frames['profiles']['records_iterator'].__next__(jump)
-        # except (StopIteration):
-        #     self.frames['profiles']['records_iterator'].close()
-        #     del( self.frames['profiles']['records_iterator'])
-        #     self.frames['profiles']['records_iterator'] = \
-        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
-        #     (self.frames['profiles']['STNID'] , \
-        #     self.frames['profiles']['current_record_index']) , \
-        #     self.frames['profiles']['current_record_mod'] = \
-        #                     self.frames['profiles']['records_iterator'].__next__()
-
-        for key in self.frames['profiles'].keys():
-            self.frames['stats'][key] = self.frames['profiles'][key]
-        self.update_record()
-
-    def prev_record(self,event=None):
-        self.next_record(jump=-1,event=event)
-
    def update_record(self):
        """Load the currently selected record into the profiles frame.

        Looks up the (STNID, chunk, index) record in all per-station and
        all-station tables, reads the corresponding yaml records from the
        already-open station files (using the index_start/index_end byte
        offsets stored in the record tables), and refreshes the plots if
        the figure exists.
        """
        self.frames['profiles']['current_record_ini'] =  \
            self.frames['profiles']['records_current_station_ini'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'],\
                  self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon'] =  \
            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
                  (self.frames['profiles']['STNID'] , \
                  self.frames['profiles']['current_record_chunk'] , \
                  self.frames['profiles']['current_record_index'])]

        self.frames['profiles']['current_record_mod_stats'] = \
                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
                    self.frames['profiles']['STNID'], \
                    self.frames['profiles']['current_record_chunk'], \
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]
        self.frames['profiles']['current_record_ini_pct'] = \
                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                    self.frames['profiles']['STNID'],\
                    self.frames['profiles']['current_record_chunk'],\
                    self.frames['profiles']['current_record_index'])]

        # the stats frame follows the profiles frame
        for key in self.frames['profiles'].keys():
            self.frames['stats'][key] = self.frames['profiles'][key]
        # frame
        # note that the current station, record is the same as the stats frame for initialization

        # parse the model record from the open station file; index_start /
        # index_end delimit the record within the yaml file
        self.frames['profiles']['record_yaml_mod'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_mod'], \
               self.frames['profiles']['current_record_mod'].index_start,
               self.frames['profiles']['current_record_mod'].index_end,
               mode='mod')
                                
        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_ini'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_ini'], \
               record_ini.index_start,
               record_ini.index_end,
                mode='ini')

        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
                       (self.frames['stats']['STNID'] , \
                        self.frames['stats']['current_record_chunk'] , \
                        self.frames['stats']['current_record_index'])]

        self.frames['profiles']['record_yaml_obs_afternoon'] = \
           get_record_yaml(
               self.frames['profiles']['current_station_file_afternoon'], \
               record_afternoon.index_start,
               record_afternoon.index_end,
                mode='ini')


        key = self.frames['worldmap']['inputkey']
        # only redraw the map if the current world map has a time
        # dimension
        if 'time' in self.globaldata.datasets[key].page[key].dims:
            self.goto_datetime_worldmap(
                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                'after')
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap',
                                                  'profiles'])
        else:
            if "fig" in self.__dict__.keys():
                self.refresh_plot_interface(only=['stats_lightupdate',
                                                  'worldmap_stations',
                                                  'profiles'])
-
-    def abline(self,slope, intercept,axis):
-        """Plot a line from slope and intercept"""
-        #axis = plt.gca()
-        x_vals = np.array(axis.get_xlim())
-        y_vals = intercept + slope * x_vals
-        axis.plot(x_vals, y_vals, 'k--')
-
    def plot(self):
        """Build the full interactive matplotlib interface.

        Creates the figure with the statistics scatter axes (left column),
        the world map with its colorbar and station overlay (center), the
        profile and output axes (right), and the navigation buttons; wires
        up the pick/hover callbacks and draws the initial state.
        """
        import pylab as pl
        from matplotlib.widgets import Button
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        '''
        Definition of the axes for the sounding table stats
        '''
        
        fig = pl.figure(figsize=(14,9))
        axes = {} #axes
        btns = {} #buttons

        # frames, which sets attributes for a group of axes, buttens, 
        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
            label = 'stats_'+str(key)
            axes[label] = fig.add_subplot(\
                            len(self.frames['stats']['viewkeys']),\
                            5,\
                            5*ikey+1,label=label)
            # Actually, the axes should be a part of the frame!
            #self.frames['stats']['axes'] = axes[

            # pointer to the axes' point data
            axes[label].data = {}

            # pointer to the axes' color fields
            axes[label].fields = {}


        fig.tight_layout()
        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)

        label ='stats_colorbar'
        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
        axes[label].fields = {}

        # NOTE(review): this colormap duplicates the module-level
        # statsviewcmap definition -- presumably one of the two could be
        # removed; confirm before cleaning up.
        from matplotlib.colors import LinearSegmentedColormap
        cdictpres = {'blue': (\
                           (0.,    0.,  0.),
                           (0.25,  0.25, 0.25),
                           (0.5,  .70, 0.70),
                           (0.75, 1.0, 1.0),
                           (1,     1.,  1.),
                           ),
               'green': (\
                           (0. ,   0., 0.0),
                           (0.25,  0.50, 0.50),
                           (0.5,  .70, 0.70),
                           (0.75,  0.50, 0.50),
                           (1  ,    0,  0.),
                           ),
               'red':  (\
                          (0 ,  1.0, 1.0),
                          (0.25 ,  1.0, 1.0),
                           (0.5,  .70, 0.70),
                          (0.75 , 0.25, 0.25),
                          (1,    0., 0.),
                          )}
        
        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)


        label = 'times'
               
        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}


        label = 'worldmap'
               
        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
        # add pointers to the data of the axes
        axes[label].data = {}
        # add pointers to color fields (for maps and colorbars) in the axes
        axes[label].fields = {}
        axes[label].lat = None
        axes[label].lon = None

        label = 'worldmap_colorbar'
        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
        axes[label].fields = {}

        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
        label = 'worldmap_stations'
        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
        axes[label].data = {}

        fig.canvas.mpl_connect('pick_event', self.on_pick)
        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)


        """ buttons definitions """
        
        label = 'bprev_dataset'
        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous dataset')
        btns[label].on_clicked(self.prev_dataset)

        label = 'bnext_dataset'
        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
        btns[label] = Button(axes[label], 'Next dataset')
        btns[label].on_clicked(self.next_dataset)

        label = 'bprev_datetime'
        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous datetime')
        btns[label].on_clicked(self.prev_datetime)

        label = 'bnext_datetime'
        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
        btns[label] = Button(axes[label], 'Next datetime')
        btns[label].on_clicked(self.next_datetime)


        label = 'bprev_station'
        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous station')
        btns[label].on_clicked(self.prev_station)

        label = 'bnext_station'
        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
        btns[label] = Button(axes[label], 'Next station')
        btns[label].on_clicked(self.next_station)

        label = 'bprev_record'
        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Previous record')
        btns[label].on_clicked(self.prev_record)

        label = 'bnext_record'
        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
        btns[label] = Button(axes[label], 'Next record')
        btns[label].on_clicked(self.next_record)


        # self.nstatsview = nstatsview
        # self.statsviewcmap = statsviewcmap
        self.fig = fig
        self.axes = axes
        self.btns = btns
        self.tbox = {}
        # self.hover_active = False

        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
        #                                transform=plt.gcf().transFigure)

        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
                                          transform=plt.gcf().transFigure)

        # profile axes (right-hand side): morning/afternoon soundings
        label = 'air_ap:theta'
        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)

        label = 'air_ap:q'
        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)

        # time-series axes for the model output
        label = 'out:h'
        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)

        label = 'out:theta'
        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)

        label = 'out:q'
        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)

        label = 'SEB'
        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)


        self.hover_active = False
        self.fig = fig
        self.fig.show()
        self.fig.canvas.draw()
        self.refresh_plot_interface()
-
-
-    # def scan_stations(self):
-    #     blabla
-        
-
-
-    # def get_records(current_file):
-    #     records = pd.DataFrame()
-
-    #     # initial position
-    #     next_record_found = False
-    #     while(not next_record_found):
-    #         next_record_found = (current_file.readline() == '---\n')
-    #     next_tell = current_file.tell() 
-    #     end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #     while not end_of_file:
-    #         current_tell = next_tell
-    #         next_record_found = False
-    #         current_file.seek(current_tell)
-    #         while ( (not next_record_found) and (not end_of_file)):
-    #             current_line = current_file.readline()
-    #             next_record_found = (currentline == '---\n')
-    #             end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #         # we store the position of the next record
-    #         next_tell = current_file.tell() 
-    #         
-    #         # we get the current record. Unfortunately we need to reset the
-    #         # yaml record generator first.
-    #         current_yamlgen.close()
-    #         current_yamlgen = yaml.load_all(current_file)
-    #         current_file.seek(current_tell)
-    #         current_record_mod = current_yamlgen.__next__()
-    #     current_yamlgen.close()
-
-    #     return records
-
-       #      next_record_found = False
-       #      while(not record):
-       #          next_record_found = (self.current_file.readline() == '---\n')
-       #      self.current_tell0 = self.current_file.tell() 
-
-       #  
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell0 = self.current_file.tell() 
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell1 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell0)
-       #  self.r0 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell1)
-       #  next_record_found = False
-       #  while ( (not next_record_found) and (not end_of_file):
-       #      current_line = self.current_file.readline()
-       #      next_record_found = (currentline == '---\n')
-       #      end_of_file = (currentline == '') # an empty line means we are at the end
-
-       #  self.current_tell2 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell1)
-       #  self.r1 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell2)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell3 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell2)
-       #  self.r2 = self.current_yamlgen.__next__()
-
-       #  # go to position of next record in file
-       #  self.current_file.seek(self.current_tell3)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell4 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell3)
-       #  self.r3 = self.current_yamlgen.__next__()
- 
-       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
-
-    def goto_datetime_worldmap(self,DT,shift=None):
-        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
-            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
-            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
-            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
-                self.frames['worldmap']['iDT'] += 1
-            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
-                self.frames['worldmap']['iDT'] -= 1 
-            # for gleam, we take the values of the previous day
-            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
-                self.frames['worldmap']['iDT'] -= 2 
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-        #else:
-        #    self.frames['worldmap'].pop('DT')
-
-    def next_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def prev_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def next_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-    def prev_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-
-
-    def sel_dataset(self,inputkey):
-        self.frames['worldmap']['inputkey'] = inputkey
-        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
-        self.goto_datetime_worldmap(
-            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-            'after')# get nearest datetime of the current dataset to the profile
-        if "fig" in self.__dict__.keys():
-            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
-       
-    # def prev_station(self,event=None):
-    #     self.istation = (self.istation - 1) % self.stations.shape[0]
-    #     self.update_station()
-
-
-
-
-    #def update_datetime(self):
-    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
-    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
-    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
-    #        print(self.worldmapfocus['DT'])
-    #        self.refresh_plot_interface(only='worldmap')
-
-    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
-
-        #print('r1')
-        for argkey in args.keys():
-            self.__dict__[arg] = args[argkey]
-
-        axes = self.axes
-        tbox = self.tbox
-        frames = self.frames
-        fig = self.fig
- 
-        if (only is None) or ('worldmap' in only):
-            globaldata = self.globaldata
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
-            else:
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
-            keystotranspose = ['lat','lon']
-            for key in dict(datasetxr.dims).keys():
-                if key not in keystotranspose:
-                    keystotranspose.append(key)
-
-            datasetxr = datasetxr.transpose(*keystotranspose)
-            datasetxr = datasetxr.sortby('lat',ascending=False)
-
-            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
-            lonleft = lonleft - 360.
-            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
-            label = 'worldmap'
-            axes[label].clear()
-            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
-            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
-
-        if (only is None) or ('worldmap' in only):
-            #if 'axmap' not in self.__dict__ :
-            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
-            #else:
-
-            #stations = self.stations
-
-
-            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
-            #     resolution = 'l', 
-            # area_thresh = 0.1,
-            #     llcrnrlon=-180., llcrnrlat=-90.0,
-            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
-            # 
-            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
-            # self.gmap.drawcountries(color='white',linewidth=0.3)
-            # #self.gmap.fillcontinents(color = 'gray')
-            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
-            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
-            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
-            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # #self.ax5.shadedrelief()
-
-           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
-
-
-            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
-            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
-
-            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
-            if 'lev' in field.dims:
-                field = field.isel(lev=-1)
-
-            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
-            axes[label].axis('off')
-
-            from matplotlib import cm
-            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
-            
-            
-            title=frames['worldmap']['inputkey']
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
-            axes[label].set_title(title)
-
-            label ='worldmap_colorbar'
-            axes[label].clear()
-            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
-
-
-            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
-            # x,y = self.gmap(lons,lats)
-            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
-            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
-
-        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
-
-            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
-            store_xlim = {}
-            store_ylim = {}
-            for ikey, key in enumerate(statskeys_out):
-                if (only is not None) and ('stats_lightupdate' in only):
-                    store_xlim[key] = axes['stats_'+key].get_xlim()
-                    store_ylim[key] = axes['stats_'+key].get_ylim()
-                self.axes['stats_'+key].clear()    
-
-            label = 'times'
-            self.axes[label].clear()
-
-            key = 'dthetadt'
-            x = self.frames['stats']['records_all_stations_ini']['datetime']
-            #print(x)
-            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-            #print(y)
-            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            #print(z)
-
-            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
-            self.axes[label].data[label] = self.axes[label].scatter(x.values,
-                                                                    y.values,
-                                                                    c=z.values,
-                                                                    cmap=self.statsviewcmap,
-                                                                    s=2,
-                                                                    vmin=0.,
-                                                                    vmax=1.,
-                                                                    alpha=alpha_cloud_pixels)
-
-            
-            x = self.frames['stats']['records_current_station_ini']['datetime']
-            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-
-            x = self.frames['profiles']['records_current_station_ini']['datetime']
-            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
-            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
-
-            for ikey, key in enumerate(statskeys_out):
-
-                # show data of all stations
-                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_all_stations_mod_stats'][key]
-                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                qvalmax = x.quantile(0.999)
-                qvalmin = x.quantile(0.001)
-                print('applying extra filter over extreme values for plotting stats')
-                selx = (x >= qvalmin) & (x < qvalmax)
-                sely = (x >= qvalmin) & (x < qvalmax)
-                x = x[selx & sely]
-                y = y[selx & sely]
-                z = z[selx & sely]
-                self.axes['stats_'+key].data['stats_'+key] = \
-                       self.axes['stats_'+key].scatter(x,y, c=z,\
-                                cmap=self.statsviewcmap,\
-                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
-
-                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_current_station_mod_stats'][key]
-                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['profiles']['records_current_station_mod_stats'][key]
-                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
-
-                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
-                y = self.frames['stats']['current_record_mod_stats'][key]
-                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
-                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
-                    axes['stats_'+key].annotate(text, \
-                                               xy=(x,y),\
-                                               xytext=(0.05,0.05),\
-                                               textcoords='axes fraction',\
-                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
-                                               color='white',\
-                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
-                # self.axes['stats_'+key].data[key+'_current_record'] = \
-                #        self.axes['stats_'+key].scatter(x,y, c=z,\
-                #                 cmap=self.statsviewcmap,\
-                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
-
-                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
-                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
-                # # highlight data for curent station
-                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
-
-                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
-
-                if ikey == len(statskeys_out)-1:
-                    self.axes['stats_'+key].set_xlabel('external')
-                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
-                axes['stats_'+key].set_ylabel('model')
-
-
-                if (only is not None) and ('stats_lightupdate' in only):
-                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
-                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
-                else:
-                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
-                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
-                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
-                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
-                self.abline(1,0,axis=self.axes['stats_'+key])
-
-        if (only is None) or ('stats_colorbar' in only):
-            label ='stats_colorbar'
-            axes[label].clear()
-            import matplotlib as mpl
-            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
-            self.axes[label].fields[label] = \
-             mpl.colorbar.ColorbarBase(self.axes[label],\
-                        orientation='horizontal',\
-                        label="percentile of "+self.frames['worldmap']['inputkey'],
-                        alpha=1.,
-                                cmap=self.statsviewcmap,\
-                                       norm=norm
-                         )
-
-        #print('r1')
-        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
-            #print('r2')
-            label = 'worldmap_stations'
-            axes[label].clear()
-            
-            stations = self.frames['worldmap']['stations'].table
-            globaldata = self.globaldata
-            
-            key = label
-
-            #print('r3')
-            if (stations is not None):
-                xlist = []
-                ylist = []
-                #print('r4')
-                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
-            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    xlist.append(x)
-                    ylist.append(y)
-                #picker is needed to make it clickable (pick_event)
-                axes[label].data[label] = axes[label].scatter(xlist,ylist,
-                                                              c='r', s=15,
-                                                              picker = 15,
-                                                              label=key,
-                                                              edgecolor='k',
-                                                              linewidth=0.8)
-
-            # cb.set_label('Wilting point [kg kg-3]')
-                #print('r5')
-
-                
-            #     xseries = []
-            #     yseries = []
-            #     for iSTN,STN in stations.iterrows():
-            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
-            #         xseries.append(x)                    
-            #         yseries.append(y)
-            #         
-            #         
-            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
-                    
-                if ('current_station' in frames['worldmap']):
-                    #print('r5')
-                    STN = frames['stats']['current_station']
-                    STNID = frames['stats']['STNID']
-                    #print('r5')
-
-                    x,y = len(axes['worldmap'].lon)* \
-                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
-                          len(axes['worldmap'].lat)* \
-                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    #print('r6')
-                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
-                    #                          == \
-                    #                          self.frames['worldmap']['STNID'])\
-                    #                         & \
-                    #                         (self.seltablestats['DT'] \
-                    #                          == self.axes['statsview0].focus['DT']) \
-                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
-                    #print('r7')
-                    text = 'STNID: '+ format(STNID,'10.0f') + \
-                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
-                            ', LON: '+format(STN['longitude'],'3.3f')+ \
-                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
-
-                            #+', VAL: '+format(VAL,'.3e')
-
-                    axes[label].scatter(x,y, c='r', s=30,\
-                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
-                    #print('r8')
-            
-                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
-                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
-                    #colorstation = max((min((1.,colorstation)),0.))
-                    colorstation =0.2
-                    from matplotlib import cm
-                    axes[label].annotate(text,
-                                         xy=(x,y),
-                                         xytext=(0.05,0.05),
-                                         textcoords='axes fraction', 
-                                         bbox=dict(boxstyle="round",
-                                         fc = cm.viridis(colorstation)),
-                                         arrowprops=dict(arrowstyle="->",
-                                                         linewidth=1.1),
-                                         color='white' if colorstation < 0.5 else 'black')
-                    #print('r9')
-
-                    # #pos = sc.get_offsets()[ind["ind"][0]]
-                    # 
-                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
-                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
-                    # axes[label].data[label+'statannotate'].set_text(text)
-                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
-                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
-            #print('r9')
-            axes[label].axis('off')
-            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
-            axes[label].set_ylim((len(axes['worldmap'].lat),0))
-            #print('r10')
-
-        if (only is None) or ('profiles' in only): 
-            #print('r11')
-
-            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
-            # # self.update_station(goto_first_sounding=False)
-            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
-            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
-            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
-            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
-
-            label = 'air_ap:theta'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-                # +\
-                # ' -> '+ \
-                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-            
-            
-            
-            
-            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-            #print('r12')
-
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
-            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
-            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
-            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            #print('r13')
-            # 
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r14')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-
-            #print('r15')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-                          
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r16')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r17')
-            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
-            print(hmax)
-            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
-            if valid_mod:
-
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="mod "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                                 +'LT')
-
-            #print('r18')
-            axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('theta [K]')
-
-            label = 'air_ap:q'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-
-            #print('r19')
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            if valid_mod:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            else:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            # 
-            #print('r20')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r21')
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            #print('r23')
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r24')
-            if valid_mod:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="fit ")#+\
-                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
-                             #+'LT')
-            #print('r25')
-            #axes[label].legend()
-
-            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            #axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('q [kg/kg]')
-
-            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-
-            # #pl.subplots_adjust(right=0.6)
-
-            # label = 'q_pro'
-            # axes[label].clear()
-
-            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
-            # 
-            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
-            # 
-            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
-
-            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
-            # #pl.subplots_adjust(right=0.6)
-            # axes[label].set_xlabel('specific humidity [kg/kg]')
- 
-
-            #print('r26')
-            time = self.frames['profiles']['record_yaml_mod'].out.time
-            for ilabel,label in enumerate(['h','theta','q']):
-                axes["out:"+label].clear()
-                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
-                axes["out:"+label].set_ylabel(label)
-                if ilabel == 2:
-                    axes["out:"+label].set_xlabel('local sun time [h]')
-                
-            #print('r27')
-            label = 'SEB'
-            axes[label].clear()
-            
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
-            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
-            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
-            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
-                
-            #print('r28')
-            
-            axes[label].legend()
-            
-            #         for ax in self.fig_timeseries_axes:
-#             ax.clear()
-#         
-#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
-#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
-#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
-#         #print(self.morning_sounding.c4gl.out.Swin)
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
-#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
-#         self.fig_timeseries_axes[3].legend()
-#         self.fig.canvas.draw()
-            
-
-
-
-
-
-
-        #self.ready()
-        #print('r29')
-        fig.canvas.draw()
-        #fig.show()
-
-        self.axes = axes
-        self.tbox = tbox
-        self.fig = fig
-
-    def on_pick(self,event):
-        #print("HELLO")
-        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
-        #self.axes['theta_pro'].clear()
-        #self.axes['q_pro'].clear()
-        
-
-        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
-        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
-        keys_to_axes = {}
-        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
-
-        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
-        keys_to_axes['worldmap'] = 'worldmap'
-        
-        axes = self.axes
-        #nstatsview = self.nstatsview
-        #statsviewcmap = self.statsviewcmap
-        stations = self.frames['worldmap']['stations'].table
-
-
-        #print("p1")
-        current = event
-        artist = event.artist
-        
-        selkey = artist.get_label()
-        
-        #print(keys_to_axes)
-        
-        label = keys_to_axes[selkey]
-        #print("HELLO",selkey,label)
-
-        # # Get to know in which axes we are
-        # label = None
-        # for axeskey in axes.keys():
-        #     if event.inaxes == axes[axeskey]:
-        #         label = axeskey
-        #         
-
-        # cont, pos = None, None
-        
-        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
-        ind = event.ind
-        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
-        d = axes[label].collections[0]
-        #d.set_offset_position('data')
-        xy = d.get_offsets()
-        x, y =  xy[:,0],xy[:,1]
-        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
-
-        #print("p2")
-        if len(ind) > 0:
-            #print("p3")
-            pos = x[ind[0]], y[ind[0]]
-
-            #if label[:-1] == 'statsview':
-            #    #seltablestatsstdrel = self.seltablestatsstdrel
-            #    #seltablestatspct = self.seltablestatspct
-
-            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-            #    
-            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
-            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-            #    
-            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
-            #el
-            if (label == 'worldmap') or (label == 'worldmap_stations'):
-                self.hover_active = False
-                if (self.frames['worldmap']['STNID'] !=
-                    self.frames['profiles']['STNID']):
-                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
-                # so we just need to perform update_station
-                    self.update_station()
-            elif (label[:5] == 'stats'):
-
-                self.hover_active = False
-                if (self.frames['stats']['STNID'] !=
-                self.frames['profiles']['STNID']) or \
-                   (self.frames['stats']['current_record_chunk'] != 
-                    self.frames['profiles']['current_record_chunk']) or \
-                   (self.frames['stats']['current_record_index'] != 
-                    self.frames['profiles']['current_record_index']):
-
-
-
-                    for key in ['STNID','current_station','stations_iterator']: 
-                        self.frames['worldmap'][key] = self.frames['stats'][key] 
-
-                    for key in self.frames['stats'].keys():
-                        self.frames['profiles'][key] = self.frames['stats'][key]
-
-                    STNID = self.frames['profiles']['STNID']
-                    chunk = self.frames['profiles']['current_record_chunk']
-                    if 'current_station_file_ini' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_ini'].close()
-                    self.frames['profiles']['current_station_file_ini'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-                    if 'current_station_file_mod' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_mod'].close()
-                    self.frames['profiles']['current_station_file_mod'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_afternoon'].close()
-                    self.frames['profiles']['current_station_file_afternoon'] = \
-                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-                    # go to hovered record of current station
-                    self.frames['profiles']['records_iterator'] = \
-                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # ... and go to the record of the profile window (last one that
-                    # was picked by the user)
-                    found = False
-                    EOF = False
-                    while (not found) and (not EOF):
-                        try:
-                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
-                            #print("hello*")
-                            #print(self.frames['profiles']['current_record_index'])
-                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
-                               (index == self.frames['profiles']['current_record_index']) and \
-                               (STNID == self.frames['profiles']['STNID']):
-                                #print('found!')
-                                found = True
-                        except StopIteration:
-                            EOF = True
-                    if found:
-                        self.frames['stats']['current_record_mod'] = record
-                        self.frames['stats']['current_record_chunk'] = chunk
-                        self.frames['stats']['current_record_index'] = index
-                    # # for the profiles we make a distinct record iterator, so that the
-                    # # stats iterator can move independently
-                    # self.frames['profiles']['records_iterator'] = \
-                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # (self.frames['profiles']['STNID'] , \
-                    # self.frames['profiles']['current_record_index']) , \
-                    # self.frames['profiles']['current_record_mod'] = \
-                    #                 self.frames['profiles']['records_iterator'].__next__()
-
-
-                    # for the profiles we make a distinct record iterator, so that the
-                    # stats iterator can move independently
-
-                    self.update_record()
-
-
-
-    def on_plot_hover(self,event):
-        axes = self.axes
-        #print('h1')
-
-        # Get to know in which axes we are
-        label = None
-        for axeskey in axes.keys():
-            if event.inaxes == axes[axeskey]:
-                label = axeskey
-                
-        #print('h2')
-
-        cont, pos = None, None
-        #print (label)
-        
-        if label is not None:
-            if  ('data' in axes[label].__dict__.keys()) and \
-                (label in axes[label].data.keys()) and \
-                (axes[label].data[label] is not None):
-                
-                #print('h3')
-                cont, ind =  axes[label].data[label].contains(event)
-                selkey = axes[label].data[label].get_label()
-                if len(ind["ind"]) > 0:
-                    #print('h4')
-                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
-                    #print('pos',pos,selkey)
-
-
-                    #if label[:-1] == 'statsview':
-                    #    seltablestatsstdrel = self.seltablestatsstdrel
-                    #    seltablestatspct = self.seltablestatspct
-
-                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
-                    #    self.hover_active = True
-                    #    
-                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
-                    #    
-                    #el
-                    #print(label[:5])
-                    if (label[:5] == 'stats') or (label == 'times'):
-                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
-                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
-                        
-
-                        if label[:5] == 'stats':
-                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                            (self.frames['stats']['STNID'] ,
-                             self.frames['stats']['current_record_chunk'], 
-                             self.frames['stats']['current_record_index']) = \
-                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-                        # elif label[:5] == 'stats':
-                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
-                        #     (self.frames['stats']['STNID'] ,
-                        #      self.frames['stats']['current_record_chunk'], 
-                        #      self.frames['stats']['current_record_index']) = \
-                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-
-
-                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-                        
-                        # # TO TEST: should be removed, since it's is also done just below
-                        # self.frames['stats']['stations_iterator'] = \
-                        #     self.frames['worldmap']['stations_iterator'] 
-                
-                
-                        # self.goto_datetime_worldmap(
-                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-                        #     'after')
-
-
-                        # scrolling to the right station
-                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                        EOF = False
-                        found = False
-                        while (not found and not EOF):
-                            if (STNID == self.frames['stats']['STNID']):
-                                   found = True 
-                            if not found:
-                                try:
-                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                                except (StopIteration):
-                                    EOF = True
-                        if found:
-                        #    self.frames['stats']['STNID'] = STNID
-                            self.frames['stats']['current_station'] =  station
-
-                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
-                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
-
-
-                        # generate index of the current station
-                        self.frames['stats']['records_current_station_index'] = \
-                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                             == self.frames['stats']['STNID'])
-
-
-                        tab_suffixes = \
-                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            self.frames['stats']['records_current_station'+tab_suffix] = \
-                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-
-                        # go to hovered record of current station
-                        self.frames['stats']['records_iterator'] = \
-                                        records_iterator(self.frames['stats']['records_current_station_mod'])
-
-
-                        # ... and go to the record of the profile window (last one that
-                        # was picked by the user)
-                        found = False
-                        EOF = False
-                        while (not found) and (not EOF):
-                            try:
-                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                                #print("hello*")
-                                #print(self.frames['profiles']['current_record_index'])
-                                if (index == self.frames['stats']['current_record_index']) and \
-                                   (chunk == self.frames['stats']['current_record_chunk']) and \
-                                   (STNID == self.frames['stats']['STNID']):
-                                    #print('found!')
-                                    found = True
-                            except StopIteration:
-                                EOF = True
-                        if found:
-                            #print('h5')
-                            self.frames['stats']['current_record_mod'] = record
-                            self.frames['stats']['current_record_chunk'] = chunk
-                            self.frames['stats']['current_record_index'] = index
-
-                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
-                        tab_suffixes = \
-                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            #print(tab_suffix)
-                            #print(self.frames['stats']['records_current_station'+tab_suffix])
-                            self.frames['stats']['current_record'+tab_suffix] =  \
-                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                      (self.frames['stats']['STNID'] , \
-                                       self.frames['stats']['current_record_chunk'] , \
-                                       self.frames['stats']['current_record_index'])]
-
-
-                        self.hover_active = True
-                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                        # print('h13')
-                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
-                        #     self.goto_datetime_worldmap(
-                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                        #         'after')
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap',
-                        #                                           'profiles'])
-                        # else:
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap_stations',
-                        #                                           'profiles'])
-
-
-
-                    elif label in ['worldmap_stations','worldmap']:
-                        #print('h5')
-
-                        if (self.axes['worldmap'].lat is not None) and \
-                           (self.axes['worldmap'].lon is not None):
-
-
-                            #self.loading()
-                            self.fig.canvas.draw()
-                            self.fig.show()
-
-
-                            # get position of 
-                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
-                                                                 self.axes['worldmap'].lat[0]) + \
-                                           self.axes['worldmap'].lat[0],4)
-                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
-                                                                 self.axes['worldmap'].lon[0]) + \
-                                           self.axes['worldmap'].lon[0],4)
-                        
-                            stations = self.frames['worldmap']['stations'].table
-                            #print('h7')
-                        
-                            #reset stations iterator:
-                            # if 'stations_iterator' in self.frames['worldmap'].keys():
-                            #     self.frames['worldmap']['stations_iterator'].close()
-                            #     del(self.frames['worldmap']['stations_iterator'])
-                            # if 'stations_iterator' in self.frames['stats'].keys():
-                            #     self.frames['stats']['stations_iterator'].close()
-                            #     del(self.frames['stats']['stations_iterator'])
-                            self.frames['worldmap']['stations_iterator'] =\
-                               stations_iterator(self.frames['worldmap']['stations'])
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                            EOF = False
-                            found = False
-                            while (not found and not EOF):
-                                #print('h8',station.latitude,latmap)
-                                #print('h8',station.longitude,lonmap)
-                                if (round(station.latitude,3) == round(latmap,3)) and \
-                                    (round(station.longitude,3) == round(lonmap,3)):
-                                       found = True 
-                                if not found:
-                                    try:
-                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                                    except (StopIteration):
-                                        EOF = True
-                            if found:
-                                self.frames['worldmap']['STNID'] = STNID
-                                self.frames['worldmap']['current_station'] = \
-                                        station
-                        
-                            self.frames['stats']['stations_iterator'] = \
-                                self.frames['worldmap']['stations_iterator'] 
-                            #print('h8')
-                            # inherit station position for the stats frame...
-                            for key in self.frames['worldmap'].keys():
-                                self.frames['stats'][key] = self.frames['worldmap'][key]
-                                
-                            ## fetch records of current station...
-                            #self.frames['stats']['records_current_station_mod'] =\
-                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                            # ... and their indices
-                            self.frames['stats']['records_current_station_index'] = \
-                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                                     == \
-                                     self.frames['stats']['current_station'].name)
-
-
-                            tab_suffixes = \
-                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['records_current_station'+tab_suffix] = \
-                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-                            # ... create a record iterator ...
-                            #self.frames['stats']['records_iterator'].close()
-                            del(self.frames['stats']['records_iterator'])
-                            self.frames['stats']['records_iterator'] = \
-                                self.frames['stats']['records_current_station_mod'].iterrows()
-
-
-
-                        
-                            #print('h9')
-                            # ... and go to to the first record of the current station
-                            (self.frames['stats']['STNID'] , \
-                             self.frames['stats']['current_record_chunk'] , \
-                             self.frames['stats']['current_record_index']) , \
-                            self.frames['stats']['current_record_mod'] = \
-                                self.frames['stats']['records_iterator'].__next__()
-                        
-
-
-
-                            #print('h10')
-                            # cash the current record
-                            tab_suffixes = \
-                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['current_record'+tab_suffix] =  \
-                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                          (self.frames['stats']['STNID'] , \
-                                           self.frames['stats']['current_record_chunk'] , \
-                                           self.frames['stats']['current_record_index'])]
-
-                            #print('h11')
-                            
-                            self.hover_active = True
-                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                            #print('h13')
-
-                        
-
-            #if (stations is not None):
-            #    for iSTN,STN in stations.iterrows():
-            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
-            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
-
-        # self.fig.show()
- 
-        # we are hovering on nothing, so we are going back to the position of
-        # the profile sounding
-        if pos is None:
-            if self.hover_active == True:
-                #print('h1*')
-                
-                #self.loading()
-                # to do: reset stations iterators
-
-                # get station and record index from the current profile
-                for key in ['STNID', 'current_station']:
-                    self.frames['stats'][key] = self.frames['profiles'][key]
-
-                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
-                self.frames['stats']['current_station'] = \
-                        self.frames['profiles']['current_station']
-                #print('h3a*')
-                self.frames['stats']['records_current_station_mod'] = \
-                        self.frames['profiles']['records_current_station_mod']
-                #print('h3b*')
-
-                # the next lines recreate the records iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-
-                # reset stations iterator...
-                #self.frames['stats']['records_iterator'].close()
-                del(self.frames['stats']['records_iterator'])
-                self.frames['stats']['records_iterator'] = \
-                    self.frames['stats']['records_current_station_mod'].iterrows()
-                #print('h4*')
-
-                # ... and go to the record of the profile window (last one that
-                # was picked by the user)
-                found = False
-                EOF = False
-                while (not found) and (not EOF):
-                    try:
-                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                        #print("hello*")
-                        #print(self.frames['profiles']['current_record_index'])
-                        #print(self.frames['profiles']['STNID'])
-                        #print(STNID,index)
-                        if (index == self.frames['profiles']['current_record_index']) and \
-                            (chunk == self.frames['profiles']['current_record_chunk']) and \
-                            (STNID == self.frames['profiles']['STNID']):
-                            #print('found!')
-                            found = True
-                    except StopIteration:
-                        EOF = True
-                if found:
-                    #print('h5*')
-                    self.frames['stats']['current_record_mod'] = record
-                    self.frames['stats']['current_record_chunk'] = chunk
-                    self.frames['stats']['current_record_index'] = index
-
-                #print('h6*')
-
-
-
-                # # fetch records of current station...
-                # self.frames['stats']['records_current_station_mod'] =\
-                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                # ... and their indices
-                self.frames['stats']['records_current_station_index'] = \
-                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                         == \
-                         self.frames['stats']['current_station'].name)
-
-
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['records_current_station'+tab_suffix] = \
-                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-                
-
-                # cash the records of the current stations
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['current_record'+tab_suffix] =  \
-                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                              (self.frames['stats']['STNID'] , \
-                               self.frames['stats']['current_record_chunk'] , \
-                               self.frames['stats']['current_record_index'])]
-
-
-                # the next lines recreate the stations iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-                #print('h7*')
-
-                # reset the stations iterators
-                for framekey in ['stats','worldmap']:
-                    ##print(framekey)
-                    if 'stations_iterator' in self.frames[framekey]:
-                        #self.frames[framekey]['stations_iterator'].close()
-                        del(self.frames[framekey]['stations_iterator'])
-
-                self.frames['worldmap']['current_station'] = \
-                        self.frames['profiles']['current_station']
-
-                #recreate the stations iterator for the worldmap...
-                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-
-                # ... and go the position of the profile
-                #print('h8*')
-                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                EOF = False
-                found = False
-                while (not found and not EOF):
-                    if STNID == self.frames['profiles']['STNID'] :
-                        found = True 
-                    if not found:
-                        try:
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                        except (StopIteration):
-                            EOF = True
-                if found:
-                    self.frames['worldmap']['current_station'] = station
-                    self.frames['worldmap']['STNID'] = STNID
-                #print('h9*')
-                self.frames['stats']['stations_iterator'] = \
-                    self.frames['worldmap']['stations_iterator'] 
-
-                # the stats window now inherits the current station from the
-                # worldmap
-                for key in ['STNID','current_station','stations_iterator']: 
-                    self.frames['stats'][key] = self.frames['worldmap'][key] 
-                #print('h10*')
-
-                # # we now only need inherit station position and go to first record
-                # for key in self.frames['worldmap'].keys():
-                #     self.frames['stats'][key] = self.frames['worldmap'][key]
-
-                # self.frames['stats']['records_current_station'] =\
-                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
-
-                # #print(self.frames['stats']['records_current_station'])
-                # self.frames['stats']['records_iterator'] = \
-                #                 self.frames['stats']['records_current_station'].iterrows()
-                # (self.frames['stats']['STNID'] , \
-                # self.frames['stats']['current_record_index']) , \
-                # self.frames['stats']['current_record_mod'] = \
-                #                 self.frames['stats']['records_iterator'].__next__()
-                
-
-
-
-
-
-
-                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
-                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
-                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
-                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-                self.hover_active = False
-                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
-    # def loading(self):
-    #     self.tbox['loading'].set_text('Loading...')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-    #     sleep(0.1)
-    # def ready(self):
-    #     self.tbox['loading'].set_text('Ready')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-
-
-
diff --git a/ribtol.cpp b/ribtol.cpp
deleted file mode 100644
index 148b0d3..0000000
--- a/ribtol.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// fast conversion of bulk Richardson number to Obukhov length
-
-#include 
-#include 
-#include 
-using namespace std;
-
-inline double psim(double zeta)
-{
-  double psim;
-  double x;
-  if(zeta <= 0.)
-  {
-    //x     = (1. - 16. * zeta) ** (0.25)
-    //psim  = 3.14159265 / 2. - 2. * arctan(x) + log( (1.+x) ** 2. * (1. + x ** 2.) / 8.)
-    x    = pow(1. + pow(3.6 * abs(zeta),2./3.), -0.5);
-    psim = 3. * log( (1. + 1. / x) / 2.);
-  }
-  else
-  {
-    psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35;
-  }
-  return psim;
-}
-    
-inline double psih(double zeta)
-{
-  double psih;
-  double x;
-  if(zeta <= 0.)
-  {
-    // x     = (1. - 16. * zeta) ** (0.25)
-    // psih  = 2. * log( (1. + x ** 2.) / 2. )
-    x     = pow(1. + pow(7.9 * abs(zeta), (2./3.)), -0.5);
-    psih  = 3. * log( (1. + 1. / x) / 2.);
-  }
-  else
-  {
-    psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - pow(1. + (2./3.) * zeta, 1.5) - (10./3.) / 0.35 + 1.;
-  }
-  return psih;
-}
-
-
-double ribtol(double Rib, double zsl, double z0m, double z0h)
-{
-  double L, L0;
-  double Lstart, Lend;
-  double fx, fxdif;
-
-  if(Rib > 0.)
-  {
-    L    = 1.;
-    L0   = 2.;
-  }
-  else
-  {
-    L  = -1.;
-    L0 = -2.;
-  }
-    
-  while (abs(L - L0) > 0.001)
-  {
-    L0      = L;
-    fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / pow(log(zsl / z0m) - psim(zsl / L) + psim(z0m / L), 2.);
-    Lstart  = L - 0.001 * L;
-    Lend    = L + 0.001 * L;
-    fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / pow(log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart), 2.)) - (-zsl / Lend * (log(zsl / z0h) - psih(zsl / Lend) + psih(z0h / Lend)) / pow(log(zsl / z0m) - psim(zsl / Lend) + psim(z0m / Lend), 2.)) ) / (Lstart - Lend);
-    L       = L - fx / fxdif;
-  }
-  
-  return L;
-
-}
-
-BOOST_PYTHON_MODULE(ribtol)
-{
-    using namespace boost::python;
-    def("ribtol", ribtol);
-}
-
diff --git a/ribtol.pyx b/ribtol.pyx
deleted file mode 100644
index e11a147..0000000
--- a/ribtol.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-#cython: boundscheck=False
-#cython: wraparound=False
-
-from libc.math cimport atan, log, exp, fabs
-
-cdef double psim(double zeta):
-    cdef double x, psim
-
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * atan(x) + log((1. + x)**2. * (1. + x**2.) / 8.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-      
-cdef double psih(double zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * log( (1. + x*x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
-
-def ribtol(double Rib, double zsl, double z0m, double z0h): 
-    cdef double L, L0, fx, Lstart, Lend, fxdif
-
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    
-    while (fabs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-
-        if(fabs(L) > 1e15):
-            break
-
-    return L
diff --git a/ribtol_hw.py b/ribtol_hw.py
deleted file mode 100644
index 1946cc8..0000000
--- a/ribtol_hw.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Jan 12 10:46:20 2018
-
-@author: vsc42247
-"""
-
-
-
-# purpose of calc_cm_ch: calculate momentum and thermal turbulent diffusion coefficients of the surface layer with a non-iterative procedure (Wouters et al., 2012)
-
-# input:
-
-# zrib = bulk Richardson number = (g/T)* DT * z/(ua^2)
-#   with:
-#     g = 9.81 m/s2 the gravitational acceleration
-#     z = height (in meters) of the surface layer under consideration 
-#     T = (reference) temperature (in Kelvin) at height z 
-#     DT = (T - T_s) = temperature (in Kelvin) gradient between the surface and height z 
-#     u_a^2 = u^2 +  v^2 is the squared horizontal absolute wind speed 
-# zzz0m = ratio z/z0 between the height z and the momentum roughness length z0m
-# zkbm = ln(z0m/z0h), with z0m, z0h the momentum and thermal roughness length, respectively.
-
-# output: diffusion coefficients (CM and CH) which cna be used to determine surface-layer turbulent transport
-# u'w' = - CM ua^2.
-# w'T' = - CH ua DT 
-
-
-# Reference:
-# Wouters, H., De Ridder, K., and Lipzig, N. P. M.: Comprehensive
-# Parametrization of Surface-Layer Transfer Coefficients for Use
-# in Atmospheric Numerical Models, Bound.-Lay. Meteorol., 145,
-# 539–550, doi:10.1007/s10546-012-9744-3, 2012.
-
-import numpy as np
-
-def calc_cm_ch (zeta,zzz0m,zkbm):
-    krm = 0.4
-
-    #ZETA = zeta_hs2(zrib,zzz0m,zkbm)
-    FUNM,FUNH = funcsche(ZETA,zzz0m,zkbm)
-    CM = krm**2.0/FUNM/FUNM
-    CH = krm**2.0/FUNM/FUNH
-
-    # FUNMn,FUNHn = funcsche(0.,zzz0m,zkbm)
-    # CMn = krm**2.0/FUNMn/FUNMn
-    # CHn = krm**2.0/FUNMn/FUNHn
-
-    # print ZETA,FUNM,FUNH
-    # print 'CMCMN',CM/CMn
-    # print 'CHCHN',CH/CHn
-
-    return CM,CH
-
-
-def zeta_hs2(RiB,zzz0m,kBmin1):
-    #print(RiB,zzz0m,kBmin1)
-    mum=2.59
-    muh=0.95
-    nu=0.5
-    lam=1.5
-
-    betah = 5.0
-
-    zzz0h = zzz0m*np.exp(kBmin1)
-    zzzs = zzz0m*0.06 # to be changed!! r. 101 nog bekijken!!
-
-    L0M = np.log(zzz0m)
-    L0H = np.log(zzz0h)
-    facM = np.log(1.+lam/mum/zzzs)*np.exp(-mum*zzzs)/lam
-    facH = np.log(1.+lam/muh/zzzs)*np.exp(-muh*zzzs)/lam
-    L0Ms = L0M + facM 
-    L0Hs = L0H + facH
-
-    if RiB < 0.:
-        p = np.log(1.-RiB)
-        Q = -0.486 +0.219*p - 0.0331*p**2-4.93*np.exp(-L0H) - 3.65/L0H +\
-            0.38*p/L0H+ 14.8/L0H/L0H-0.946*p/L0H/L0H-10.0/L0H**3+ \
-            0.392*L0M/L0H-0.084*p*L0M/L0H+0.368*L0M/L0H/L0H
-        # print 'p: ',p
-        # print 'Q: ',Q
-        zeta = (1. + p*Q)* L0Ms**2/L0Hs * RiB
-    else:
-        betam = 4.76+7.03/zzz0m +0.24*zzz0m/zzz0h # to be changed
-        # betam = 5.0 + 1.59*10.**(-5.)*(np.exp(13.0-L0M)-1.0) \
-        #         +0.24*(np.exp(-kBmin1)-1.0) # to be changed!!
-        # print('betam',betam)
-        lL0M = np.log(L0M)
-        S0Ms = 1.-1./zzz0m + (1.+nu/mum/zzzs)*facM
-        S0Hs = 1.-1./zzz0h + (1.+nu/muh/zzzs)*facH
-        zetat = -0.316-0.515*np.exp(-L0H) + 25.8 *np.exp(-2.*L0H) + 4.36/L0H \
-                -6.39/L0H/L0H+0.834*lL0M - 0.0267*lL0M**2
-        # print('zetat',zetat)
-        RiBt = zetat *(L0Hs+ S0Hs*betah*zetat)/(L0Ms+S0Ms*betam*zetat)**2 
-        # print('RiBt',RiBt)
-
-        if (RiB > RiBt):
-            D = (L0Ms+S0Ms*betam*zetat)**3/\
-                (L0Ms*L0Hs+zetat*(2.*S0Hs * betah * L0Ms - S0Ms*betam*L0Hs))
-            zeta = zetat + D*(RiB-RiBt)
-        else:
-            r = RiB - S0Hs*betah/(S0Ms*betam)**2
-            B = S0Ms*betam*L0Hs- 2.*S0Hs*betah*L0Ms
-            C = 4.*(S0Ms*betam)**2 * L0Ms *(S0Hs*betah*L0Ms-S0Ms*betam*L0Hs)
-            zeta = - L0Ms / S0Ms/betam - B*C/(4.*(S0Ms*betam)**3 *(B**2+abs(C*r)))
-            if r != 0:
-                zeta = zeta + (B-np.sqrt(B**2+C*r) + B*C*r/(2.*(B**2+abs(C*r))))/(2.*(S0Ms*betam)**3*r)
-    # print('zeta',zeta)
-    return zeta
-
-def funcsche(zeta,zzz0,kBmin1):
-
-
-    mum=2.5
-    muh=0.9
-    nu=0.5
-    lam=1.5
-    
-    p2=3.141592/2.
-    
-    lnzzz0=np.log(zzz0)
-    zzzs=zzz0*0.06
-    zetamcorr=(1.+nu/(mum*zzzs))*zeta
-    zetam0=zeta/zzz0
-    zetahcorr=(1.+nu/(muh*zzzs))*zeta
-    zetah0=zeta/(zzz0*np.exp(kBmin1))
-    
-    if (zeta <= 0.):
-    
-        gamma=15.2
-        alfam=0.25
-        xx=(1.-gamma*zeta)**alfam
-        psim=2.*np.log((1.+xx)/2.)+np.log((1.+xx**2.)/2.)-2.*np.arctan(xx)+p2
-        xx0=(1.-gamma*zetam0)**alfam
-        psim0=2.*np.log((1.+xx0)/2.)+np.log((1.+xx0**2.)/2.)-2.*np.arctan(xx0)+p2
-        phimcorr=(1.-gamma*zetamcorr)**(-alfam)
-        
-        alfah=0.5
-        yy=(1.-gamma*zeta)**alfah
-        psih=2.*np.log((1.+yy)/2.)
-        yy0=(1.-gamma*zetah0)**alfah
-        psih0=2.*np.log((1.+yy0)/2.)
-        phihcorr=(1.-gamma*zetahcorr)**(-alfah)
-    else: 
-    
-        aa=6.1
-        bb=2.5
-        psim=-aa*np.log(zeta+(1.+zeta**bb)**(1./bb))
-        psim0=-aa*np.log(zetam0+(1.+zetam0**bb)**(1./bb))
-        phimcorr=1.+aa*(zetamcorr+zetamcorr**bb*(1.+zetamcorr**bb)**((1.-bb)/bb))/(zetamcorr+(1.+zetamcorr**bb)**(1./bb))
-        
-        cc=5.3
-        dd=1.1
-        psih=-cc*np.log(zeta+(1.+zeta**dd)**(1./dd))
-        psih0=-cc*np.log(zetah0+(1.+zetah0**dd)**(1./dd))
-        phihcorr=1.+cc*(zetahcorr+zetahcorr**dd*(1.+zetahcorr**dd)**((1.-dd)/dd))/(zetahcorr+(1.+zetahcorr**dd)**(1./dd))
-    
-    psistrm=phimcorr*(1./lam)*np.log(1.+lam/(mum*zzzs))*np.exp(-mum*zzzs)
-    psistrh=phihcorr*(1./lam)*np.log(1.+lam/(muh*zzzs))*np.exp(-muh*zzzs)
-    
-    funm=lnzzz0-psim+psim0 +psistrm
-    funh=lnzzz0+kBmin1-psih+psih0 +psistrh
-    return funm,funh
-

From adfa5b63589c2880a80b9d4baf321d48603b3937 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 21 Aug 2018 22:05:44 +0200
Subject: [PATCH 012/129]  almost initial version

---
 Makefile                                      |    8 -
 MakefileMac                                   |    9 -
 data_air.py                                   |  473 ----
 data_global.py                                |  936 -------
 examples/run_soundings/batch_run_soundings.py |   76 -
 examples/run_soundings/run.py                 |  264 --
 examples/run_soundings/run_iter.py            |  364 ---
 examples/run_soundings/run_iter_test.py       |  367 ---
 examples/run_soundings/trash/run_test.py      |  241 --
 examples/setup_soundings/setup_bllast.py      |  719 ------
 examples/setup_soundings/setup_global.py      |  310 ---
 examples/setup_soundings/setup_goamazon.py    |  740 ------
 examples/setup_soundings/setup_humppa.py      |  732 ------
 .../setup_soundings/trash/setup_global_old.py |  284 ---
 interface_functions.py                        |  506 ----
 lib/class4gl.py                               | 1611 ------------
 lib/data_air.py                               |  473 ----
 lib/data_global.py                            |  936 -------
 lib/interface_functions.py                    |  506 ----
 lib/interface_multi.py                        | 2061 ---------------
 lib/model.py                                  | 2214 -----------------
 lib/ribtol/Makefile                           |    8 -
 lib/ribtol/MakefileMac                        |    9 -
 lib/ribtol/ribtol.cpp                         |   81 -
 lib/ribtol/ribtol.pyx                         |   48 -
 lib/ribtol/ribtol_hw.py                       |  165 --
 lib/ribtol/setup.py                           |   12 -
 model.py                                      | 2214 -----------------
 runmodel.py                                   |  130 -
 29 files changed, 16497 deletions(-)
 delete mode 100644 Makefile
 delete mode 100644 MakefileMac
 delete mode 100644 data_air.py
 delete mode 100644 data_global.py
 delete mode 100644 examples/run_soundings/batch_run_soundings.py
 delete mode 100644 examples/run_soundings/run.py
 delete mode 100644 examples/run_soundings/run_iter.py
 delete mode 100644 examples/run_soundings/run_iter_test.py
 delete mode 100644 examples/run_soundings/trash/run_test.py
 delete mode 100644 examples/setup_soundings/setup_bllast.py
 delete mode 100644 examples/setup_soundings/setup_global.py
 delete mode 100644 examples/setup_soundings/setup_goamazon.py
 delete mode 100644 examples/setup_soundings/setup_humppa.py
 delete mode 100644 examples/setup_soundings/trash/setup_global_old.py
 delete mode 100644 interface_functions.py
 delete mode 100644 lib/class4gl.py
 delete mode 100644 lib/data_air.py
 delete mode 100644 lib/data_global.py
 delete mode 100644 lib/interface_functions.py
 delete mode 100644 lib/interface_multi.py
 delete mode 100644 lib/model.py
 delete mode 100644 lib/ribtol/Makefile
 delete mode 100644 lib/ribtol/MakefileMac
 delete mode 100644 lib/ribtol/ribtol.cpp
 delete mode 100644 lib/ribtol/ribtol.pyx
 delete mode 100644 lib/ribtol/ribtol_hw.py
 delete mode 100644 lib/ribtol/setup.py
 delete mode 100644 model.py
 delete mode 100644 runmodel.py

diff --git a/Makefile b/Makefile
deleted file mode 100644
index e23e3e1..0000000
--- a/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-ribtol.so : ribtol.o
-	g++ -O3 -shared -Wl -z -def -o ribtol.so -lpython2.6 -lboost_python ribtol.o
-
-ribtol.o : ribtol.cpp
-	g++ -c -O3 -fPIC ribtol.cpp -I/usr/include/python2.6
-
-clean : 
-	rm -rf ribtol.o ribtol.so
diff --git a/MakefileMac b/MakefileMac
deleted file mode 100644
index bf34ea8..0000000
--- a/MakefileMac
+++ /dev/null
@@ -1,9 +0,0 @@
-# Note: boost-python needs to be installed: brew install boost-python -with-python3 -without-python
-ribtol.so : ribtol.o
-	clang++ -O3 -shared -o ribtol.so -L/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib -lpython3.6m -L/usr/local/lib -lboost_python3-mt -lpython ribtol.o
-
-ribtol.o : ribtol.cpp
-	clang++ -c -O3 -fPIC ribtol.cpp -I/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/include/python3.6m -I/usr/local/include
-
-clean : 
-	rm -rf ribtol.o ribtol.so
diff --git a/data_air.py b/data_air.py
deleted file mode 100644
index 1c51deb..0000000
--- a/data_air.py
+++ /dev/null
@@ -1,473 +0,0 @@
-import numpy as np
-
-from bs4 import BeautifulSoup
-import pandas as pd
-import datetime as dt
-#import pylab as pl
-import io
-import os
-import calendar
-import Pysolar
-import Pysolar.util
-
-
-
-#from urllib import request
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHd
-
-def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
-    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
-    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
-
-
-#from os import listdir
-#from os.path import isfile #,join
-import glob
-
-
-class wyoming(object):
-    def __init__(self):
-       self.status = 'init'
-       self.found = False
-       self.DT = None
-       self.current = None
-       #self.mode = 'b'
-       self.profile_type = 'wyoming'  
-       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
-       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-         
-    def set_STNM(self,STNM):
-        self.__init__()
-        self.STNM = STNM
-        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
-        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
-        self.current = None
-        self.found = False
-        self.FILES.sort()
-        
-    def find_first(self,year=None,get_atm=False):
-        self.found = False    
-                
-        # check first file/year or specified year
-        if year == None:
-            self.iFN = 0
-            self.FN = self.FILES[self.iFN]
-        else:
-            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-        self.current = self.sounding_series.find('h2')
-        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
-        
-        # go through other files and find first sounding when year is not specified
-        self.iFN=self.iFN+1
-        while keepsearching:
-            self.FN = self.FILES[self.iFN]
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            self.iFN=self.iFN+1
-            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
-        self.found = (self.current is not None)
-
-        self.status = 'fetch'
-        if self.found:
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        
-        if self.found and get_atm:
-            self.get_values_air_input()
-        
-    
-    def find(self,DT,get_atm=False):
-        
-        self.found = False
-        keepsearching = True
-        #print(DT)
-        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
-        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
-            self.DT = DT
-            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            
-        keepsearching = (self.current is not None)
-        while keepsearching:
-            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            if DTcurrent == DT:
-                self.found = True
-                keepsearching = False
-                if get_atm:
-                    self.get_values_air_input()
-                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            elif DTcurrent > DT:
-                keepsearching = False
-                self.current = None
-            else:
-                self.current = self.current.find_next('h2')
-                if self.current is None:
-                    keepsearching = False
-        self.found = (self.current is not None)
-        self.status = 'fetch'
-
-    def find_next(self,get_atm=False):
-        self.found = False
-        self.DT = None
-        if self.current is None:
-            self.find_first()
-        else:                
-            self.current = self.current.find_next('h2')
-            self.found = (self.current is not None)
-            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
-            while keepsearching:
-                self.iFN=self.iFN+1
-                self.FN = self.FILES[self.iFN]
-                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-                self.current = self.sounding_series.find('h2')
-                
-                self.found = (self.current is not None)
-                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
-        if self.found:        
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        if self.found and get_atm:
-            self.get_values_air_input()
-       
-
-
-    def get_values_air_input(self,latitude=None,longitude=None):
-
-        # for iDT,DT in enumerate(DTS):
-        
-            #websource = urllib.request.urlopen(webpage)
-        #soup = BeautifulSoup(open(webpage), "html.parser")
-        
-       
-        #workaround for ...last line has 
 which results in stringlike first column
-        string = self.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = self.current.find_next('pre').find_next('pre').text
-
-        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
-        #PARAMS.insert(0,'date',DT)
-
-        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
-        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
-        
-        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
-        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
-        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
-        HAGL = HGHT - np.float(PARAMS['Station elevation'])
-        ONE_COLUMN.insert(0,'HAGL',HAGL)
-
-        
-        
-        
-        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
-        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
-        ONE_COLUMN.insert(0,'QABS',QABS)
-        
-        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
-
-        #mixed layer potential temperature
-        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
-
-        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
-        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
-        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
-
-        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
-        BLHV = np.max((BLHV,10.))
-        BLHVu = np.max((BLHVu,10.))
-        BLHVd = np.max((BLHVd,10.))
-        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
-
-        #security values for mixed-layer jump values dthetav, dtheta and dq
-        
-        # fit new profiles taking the above-estimated mixed-layer height
-        ONE_COLUMNNEW = []
-        for BLH in [BLHV,BLHVu,BLHVd]:
-            ONE_COLUMNNEW.append(pd.DataFrame())
-            
-            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
-            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
-            
-            listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
-                
-                # get index of lowest valid observation. This seems to vary
-                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
-                if len(idxvalid) > 0:
-                    #print('idxvalid',idxvalid)
-                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
-                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
-                    else:
-                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
-                else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
-                    #print(col,meanabl)
-               
-                
-                # if col == 'PRES':
-                #     meanabl =  
-            
-                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
-                #THTVM = np.nanmean(THTV[HAGL <= BLH])
-                #print("new_pro_h",new_pro_h)
-                # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV',]:
-                    #for moisture
-                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
-                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
-                    if len(listHAGLNEW) > 4:
-                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
-                        dtheta = np.max((0.1,dtheta_pre))
-                        #meanabl = meanabl - (dtheta - dtheta_pre)
-                        #print('dtheta_pre',dtheta_pre)
-                        #print('dtheta',dtheta)
-                        #print('meanabl',meanabl)
-                        #stop
-                        
-                    else:
-                        dtheta = np.nan
-                else:
-                    if len(listHAGLNEW) > 4:
-                        #for moisture (it can have both negative and positive slope)
-                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
-                    else:
-                        dtheta = np.nan
-                #print('dtheta',dtheta)
-                
-                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
-            
-                
-                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
-                
-            #QABSM = np.nanmean(QABS[HAGL <= BLH])
-            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
-            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
-            
-        # we just make a copy of the fields, so that it can be read correctly by CLASS 
-        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
-            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
-            
-            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
-        
-            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
-            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
-
-
-        # assign fields adopted by CLASS
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'b':
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'u':
-            PARAMS.insert(0,'h',   BLHVu)
-        elif self.mode == 'd':
-            PARAMS.insert(0,'h',   BLHVd)
-        else:
-            PARAMS.insert(0,'h',   BLHV)
-            
-
-        try:
-            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
-        except:
-            print("could not convert latitude coordinate")
-            PARAMS.insert(0,'latitude', np.nan)
-            PARAMS.insert(0,'lat', np.nan)
-        try:
-            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
-            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwhich) 
-            PARAMS.insert(0,'lon', 0.)
-        except:
-            print("could not convert longitude coordinate")
-            PARAMS.insert(0,'longitude', np.nan)
-            PARAMS.insert(0,'lon', 0.)
-
-        if latitude is not None:
-            print('overwriting latitude with specified value')
-            PARAMS['latitude'] = np.float(latitude)
-            PARAMS['lat'] = np.float(latitude)
-        if longitude is not None:
-            print('overwriting longitude with specified value')
-            PARAMS['longitude'] = np.float(longitude)
-        try:
-            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwhich hence taking lon=0)
-            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
-            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            # This is the nearest datetime when sun is up (for class)
-            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
-            # apply the same time shift for UTC datetime
-            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
-            
-        except:
-            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
-            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
-            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
-            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
-
-        
-
-        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
-        # as we are forcing lon equal to zero this is is expressed in local suntime
-        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
-
-           
-        ONE_COLUMNb = ONE_COLUMNNEW[0]
-        ONE_COLUMNu = ONE_COLUMNNEW[1]
-        ONE_COLUMNd = ONE_COLUMNNEW[2]
-        
-
-        THTVM = np.nanmean(THTV[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
-        
-        QABSM = np.nanmean(QABS[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
-        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
-        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
-
-        BLHVe = abs(BLHV - BLHVu)
-        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-
-        #PARAMS.insert(0,'dq',0.)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
-        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
-        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
-        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
-        
-        if self.mode == 'o': #original 
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
-        elif self.mode == 'b': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNb
-            BLCOLUMN = ONE_COLUMNb
-        elif self.mode == 'u': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNu
-            BLCOLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNd
-            BLCOLUMN = ONE_COLUMNd
-        else:
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb
-
-        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
-        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
-        # print(BLCOLUMN['HAGL'][lt6000])
-        # print(BLCOLUMN['HAGL'][lt2500])
-        # 
-        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
-
-        #print(BLCOLUMN['HAGL'][lt2500])
-        PARAMS.insert(0,'OK',
-                      ((BLHVe < 200.) and 
-                       ( len(np.where(lt6000)[0]) > 5) and
-                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
-                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
-                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
-                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
-                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
-                      )
-                     )
-
-        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
-        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
-        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
-        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
-        
-        
-        PARAMS = PARAMS.T
-
-        
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = USE_ONECOLUMN
-        # if self.mode == 'o': #original 
-        #     self.ONE_COLUMN = ONE_COLUMN
-        # elif self.mode == 'b': # best BLH
-        #     self.ONE_COLUMN = ONE_COLUMNb
-        # elif self.mode == 'u':# upper BLH
-        #     self.ONE_COLUMN = ONE_COLUMNu
-        # elif self.mode == 'd': # lower BLH
-        #     self.ONE_COLUMN=ONE_COLUMNd
-        # else:
-        #     self.ONE_COLUMN = ONE_COLUMN
-
diff --git a/data_global.py b/data_global.py
deleted file mode 100644
index 9c3d9b5..0000000
--- a/data_global.py
+++ /dev/null
@@ -1,936 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: Hendrik Wouters
-
-Purpose: provides class routines for ground and atmosphere conditions used for
-the CLASS miced-layer model
-
-Usage:
-    from data_global import data_global
-    from class4gl import class4gl_input
-    from data_soundings import wyoming
-
-    # create a data_global object and load initial data pages
-    globaldata = data_global()
-    globaldata.load_datasets()
-    # create a class4gl_input object
-    c4gli = class4gl_input()
-    # Initialize it with profile data. We need to do this first. Actually this
-    # will set the coordinate parameters (datetime, latitude, longitude) in
-    # class4gl_input.pars.__dict__, which is required to read point data from
-    # the data_global object.
-
-    # open a Wyoming stream for a specific station
-    wy_strm = wyoming(STNM=91376)
-    # load the first profile
-    wy_strm.find_first()
-    # load the profile data into the class4gl_input object
-    c4gli.get_profile_wyoming(wy_strm)
-    
-    # and finally, read the global input data for this profile
-    c4gli.get_global_input(globaldata)
-
-
-"""
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-#import pynacolada as pcd
-import pandas as pd
-import xarray as xr
-import os
-import glob
-import sys
-import errno
-import warnings
-import logging
-
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-class book(object):
-    """ this is a class for a dataset spread over multiple files. It has a
-    similar purpose  open_mfdataset, but only 1 file (called current 'page')
-    one is loaded at a time. This saves precious memory.  """
-    def __init__(self,fn,concat_dim = None,debug_level=None):
-        self.logger = logging.getLogger('book')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # filenames are expanded as a list and sorted by filename
-        self.pages = glob.glob(fn); self.pages.sort()
-        # In case length of the resulting list is zero, this means no file was found that matches fn. In that case we raise an error.
-        if len(self.pages) == 0:
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
-        self.ipage = -1; self.page = None
-        self.renames = {} # each time when opening a file, a renaming should be done.
-        self.set_page(0)
-
-        # we consider that the outer dimension is the one we concatenate
-        self.concat_dim = concat_dim
-        if self.concat_dim is None:
-            self.concat_dim = self.concat_dim=list(self.page.dims.keys())[0]
-
-    # this wraps the xarray sel-commmand
-    def sel(*args, **kwargs):
-        for dim in kwargs.keys():
-            if dim == self.concat_dim:
-                self.browse_page(**{dim: kwargs[dim]})
-        return page.sel(*args,**kwargs)
-
-
-    ## this wraps the xarray class -> some issues with that, so I just copy the sel command (which I do not use yet)
-    #def __getattr__(self,attr):
-    #    orig_attr = self.page.__getattribute__(attr)
-    #    if callable(orig_attr):
-    #        def hooked(*args, **kwargs):
-    #            for dim in kwargs.keys():
-    #                if dim == self.concat_dim:
-    #                    self.browse_page(**{dim: kwargs[dim]})
-    #
-    #            result = orig_attr(*args, **kwargs)
-    #            # prevent wrapped_class from becoming unwrapped
-    #            if result == self.page:
-    #                return self
-    #            self.post()
-    #            return result
-    #        return hooked
-    #    else:
-    #        return orig_attr
-
-    def set_renames(self,renames):
-        #first, we convert back to original names, and afterwards, we apply the update of the renames.
-        reverse_renames = dict((v,k) for k,v in self.renames.items())
-        self.renames = renames
-        if self.page is not None:
-            self.page = self.page.rename(reverse_renames)
-            self.page = self.page.rename(self.renames)
-
-    def set_page(self,ipage,page=None):
-        """ this sets the right page according to ipage:
-                - We do not switch the page if we are already at the right one
-                - we set the correct renamings (level -> lev, latitude -> lat,
-                etc.)
-                - The dataset is also squeezed.
-        """
-
-        if ((ipage != self.ipage) or (page is not None)):
-
-            if self.page is not None:
-                self.page.close()
-
-            self.ipage = ipage
-            if page is not None:
-                self.page = page
-            else:
-                if self.ipage == -1:
-                   self.page = None
-                else:
-                    #try:
-
-                    self.logger.info("Switching to page "+str(self.ipage)+': '\
-                                     +self.pages[self.ipage])
-                    self.page = xr.open_dataset(self.pages[self.ipage])
-
-
-            # do some final corrections to the dataset to make them uniform
-            if self.page is not None:
-               if 'latitude' in self.page.dims:
-#    sel       f.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
-               if 'level' in self.page.dims:
-                   self.page = self.page.rename({'level':'lev'})
-
-               self.page = self.page.rename(self.renames)
-               self.page = self.page.squeeze(drop=True)
-
-    def browse_page(self,rewind=2,**args):
-
-        # at the moment, this is only tested with files that are stacked according to the time dimension.
-        dims = args.keys()
-
-
-        if self.ipage == -1:
-            self.set_page(0)
-
-        found = False
-        iipage = 0
-        startipage = self.ipage - rewind
-        while (iipage < len(self.pages)) and not found:
-            ipage = (iipage+startipage) % len(self.pages)
-            for dim in args.keys():
-                this_file = True
-
-                # here we store the datetimes in a directly-readable dictionary, so that we don't need to load it every time again
-                if 'dims' not in self.__dict__:
-                    self.dims = {}
-                if dim not in self.dims.keys():
-                    self.dims[dim] = [None]*len(self.pages)
-
-                if self.dims[dim][ipage] is None:
-                    self.logger.info('Loading coordinates of dimension "'+dim+\
-                                     '" of page "' +str(ipage)+'".')
-                    self.set_page(ipage)
-                    # print(ipage)
-                    # print(dim)
-                    # print(dim,self.page[dim].values)
-                    self.dims[dim][ipage] = self.page[dim].values
-
-                # determine current time range of the current page
-                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
-                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.
-
-                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
-                    this_file = False
-
-            if this_file:
-                found = True
-                self.set_page(ipage)
-            else:
-
-                #if ((args[dim] >= self.page[dim].min().values) and (args[dim] < self.page[dim].max().values)):
-                #    iipage = len(self.pages) # we stop searching
-
-                iipage += 1
-
-        if not found:
-            self.logger.info("Page not found. Setting to page -1")
-            #iipage = len(self.pages) # we stop searching further
-            self.set_page(-1)
-
-        if self.ipage != -1:
-            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
-        else:
-            self.logger.debug("I'm now at page "+ str(self.ipage))
-
-
class data_global(object):
    """Registry of global gridded data sources (soil, vegetation, ERA-INTERIM, ...).

    Holds three dictionaries that other methods fill in:
      - ``library``:  unique references to data sources being used (files on
        disk or unambiguous xarray virtual sources), so a source is never
        opened twice;
      - ``datarefs``: maps a destination variable name to its library key;
      - ``datasets``: maps a destination variable name to the opened source.
    """

    def __init__(self, sources=None, debug_level=None):
        """Initialize the registry.

        Parameters
        ----------
        sources : dict, optional
            Mapping of ``'<DATAKEY>:<vardest>'`` to
            ``'<file pattern>[:<varsource>[:<extra>]]'``.  When None, the
            default catalogue below is used.  The catalogue is built inside
            the body (instead of a dict literal in the signature) so that
            instances never share a mutable default argument.
        debug_level : int, optional
            Logging level forwarded to the ``'data_global'`` logger.
        """
        if sources is None:
            sources = {
                # # old gleam
                # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
                # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
                # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
                # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
                'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
                'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
                #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
                'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
                "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
                "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
                "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
                'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
                "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
                "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
                "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
                "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
                'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
                'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
                'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
                #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
                "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
                #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
                #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
                #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
                #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
                #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
                "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
                #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
                #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
                "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
                "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
                "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
                "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
                "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
                "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
                "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
                "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
                #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
                #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
                "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
                "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
                #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
            }
        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
        self.sources = sources
        self.datarefs = {}
        self.datasets = {}
        # default reference time; presumably the start of the GIMMS record
        # -- TODO confirm against callers
        self.datetime = dt.datetime(1981,1,1)

        self.logger = logging.getLogger('data_global')
        if debug_level is not None:
            self.logger.setLevel(debug_level)
        self.debug_level = debug_level

        warnings.warn('omitting pressure field p and advection')
-
-    def in_library(self,fn):
-        if fn not in self.library.keys():
-            return False
-        else:
-            print("Warning: "+fn+" is already in the library.")
-            return True
-
-    def add_to_library(self,fn):
-        if not self.in_library(fn):
-            print("opening: "+fn)
-            self.library[fn] = \
-                book(fn,concat_dim='time',debug_level=self.debug_level)
-
-            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
-            #if 'latitude' in self.library[fn].variables:
-            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-
    # default procedure for loading datasets into the globaldata library
    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
        """Register source variables of *input_fn* under destination names.

        Opens *input_fn* through ``add_to_library`` and records, for each
        (varsource, vardest) pair, a library reference in ``self.datarefs``
        and the opened source in ``self.datasets``.  When the destination
        name differs from the source name, a separate `book` with a rename
        mapping is created under the key ``input_fn.varsource.vardest``.

        Parameters
        ----------
        input_fn : str
            File (pattern) to open.
        varssource : str or list of str, optional
            Variable name(s) in the source file.  When None, an automatic
            discovery over the source's variables is attempted (see NOTE).
        varsdest : str or list of str, optional
            Destination name(s); defaults to ``varssource``.
        """
        # accept a single name as well as a list
        if type(varssource) is str:
            varssource = [varssource]
        if type(varsdest) is str:
            varsdest = [varsdest]

        self.add_to_library(input_fn)

        if varssource is None:
            varssource = []
            # NOTE(review): self.sources values are path strings (see
            # __init__), which have no ``.variables`` attribute -- this
            # auto-discovery branch looks like it should read from
            # self.library[input_fn] instead; confirm before relying on it.
            for var in self.sources[input_fn].variables:
                avoid = \
                ['lat','lon','latitude','longitude','time','lev','level']
                # keep only >=2-D variables that are not coordinate axes
                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
                    varssource.append(var)

        if varsdest is None:
            varsdest = varssource

        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
        for ivar,vardest in enumerate(varsdest):
            varsource = varssource[ivar]
            print('setting '+vardest+' as '+varsource+' from '+input_fn)

            if vardest in self.datarefs.keys():
                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
            #self.add_to_library(fn,varsource,vardest)
            if vardest != varsource:
                # renamed access goes through a dedicated library entry so the
                # original file handle is shared but the rename is explicit
                libkey = input_fn+'.'+varsource+'.'+vardest
                if libkey not in self.library.keys():
                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
                    self.library[libkey] = book(input_fn,\
                                                debug_level=self.debug_level)
                    self.library[libkey].set_renames({varsource: vardest})

                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
            else:
                self.datarefs[vardest] = input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]

            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
            #     print('Warning: '+ vardest "not in " + input_fn)
-
-
-
-    def load_datasets(self,sources = None,recalc=0):
-
-        if sources is None:
-            sources = self.sources
-        for key in sources.keys():
-            #datakey,vardest,*args = key.split(':')
-            datakey,vardest = key.split(':')
-            #print(datakey)
-
-            fnvarsource = sources[key].split(':')
-            if len(fnvarsource) > 2:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource,fnargs = fnvarsource
-                fnargs = [fnargs]
-            elif len(fnvarsource) > 1:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource = fnvarsource
-                fnargs = []
-            else:
-                fn = sources[key]
-                varsource = vardest
-            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
-
    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
            """Load one dataset entry, dispatching on *datakey*.

            Most data keys fall through to ``load_dataset_default``; IGBPDIS
            ('alpha'), GLAS (z0m/z0h) and DSMW get special derivations whose
            results are cached as netCDF side files next to the inputs.

            Parameters
            ----------
            fn : str
                Source file (pattern).
            varsource, vardest : str
                Source / destination variable names.
            datakey : str
                Dataset family name, e.g. 'GLEAM', 'DSMW', ...
            recalc : int
                Cache-invalidation level: the thresholds below (``< 6``,
                ``< 5``, ``< 3``, ``< 2``, ``< 1``) decide which cached
                intermediate products are reused versus recomputed.

            NOTE(review): this method uses ``np.float``/``np.int`` aliases
            (removed in NumPy >= 1.20) and builds cache filenames by string
            concatenation with ':' -- confirm both on the target platform.
            """
            # the default way of loading a 2d dataset
            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
                self.load_dataset_default(fn,varsource,vardest)
            elif datakey == 'IGBPDIS':
                if vardest == 'alpha':
                    # 'alpha' is derived as a weighted sum of the land-cover
                    # fraction fields fW/fB/fH/fTC (weights in `aweights`)
                    ltypes = ['W','B','H','TC']
                    for ltype in ltypes:
                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)


                    # landfr = {}
                    # for ltype in ['W','B','H','TC']:
                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values



                    keytemp = 'alpha'
                    fnkeytemp = fn+':IGBPDIS:alpha'
                    # reuse the cached side file unless recalc >= 6
                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
                        self.library[fnkeytemp]  = book(fnkeytemp,
                                                        debug_level=self.debug_level)
                        self.datasets[keytemp] = self.library[fnkeytemp]
                        self.datarefs[keytemp] = fnkeytemp
                    else:
                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
                        # NOTE(review): dims here are ('lon','lat') whereas the
                        # DSMW branch below builds ('lat','lon') -- confirm
                        # intended orientation
                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'

                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}

                        # accumulate the weighted fractions in memory
                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
                        for ltype in ltypes:
                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]

                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
                        print('writing file to: '+fnkeytemp)
                        # NOTE(review): unquoted shell 'rm'; fails silently if
                        # the file is absent and breaks on special characters
                        os.system('rm '+fnkeytemp)
                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
                        self.library[fnkeytemp].close()


                        # reopen the freshly written cache as the live source
                        self.library[fnkeytemp]  = \
                            book(fnkeytemp,debug_level=self.debug_level)
                        self.datasets[keytemp] = self.library[fnkeytemp]
                        self.datarefs[keytemp] = fnkeytemp


                else:
                    self.load_dataset_default(fn,varsource,vardest)


            elif datakey == 'GLAS':
                self.load_dataset_default(fn,varsource,vardest)
                # roughness lengths derived from canopy height (Band1):
                # z0m = height/10 floored at 0.01, z0h = height/100 floored at
                # 0.001 -- units presumably metres, TODO confirm
                if vardest == 'z0m':
                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
                elif vardest == 'z0h':
                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
            elif datakey == 'DSMW':


                # Procedure of the thermal properties:
                # 1. determine soil texture from DSMW/10.
                # 2. soil type with look-up table (according to DWD/EXTPAR)
                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
                #    with parameter look-up table from Noilhan and Planton (1989).
                #    Note: The look-up table is inspired on DWD/COSMO

                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first



                # NOTE(review): cache files are written under fnout (fn with
                # '*' stripped) but several library lookups below use fn
                # directly -- these keys differ whenever fn contains '*';
                # confirm fn is always a literal path here.
                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
                self.load_dataset_default(fn,'DSMW')
                print('calculating texture')
                # step 1: expand the per-unit legend columns to full grids
                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
                TEMP  = {}
                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
                TEMP3 = {}
                for SPKEY in SPKEYS:


                    keytemp = SPKEY+'_values'
                    fnoutkeytemp = fnout+':DSMW:'+keytemp
                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
                                book(fnoutkeytemp,debug_level=self.debug_level)
                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'


                    else:
                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'

                        # for faster computation, we need to get it to memory out of Dask.
                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values

                # yes, I know I only check the last file.
                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
                    # map every soil-unit index of the DSMW grid to its legend
                    # values (one pass per unit index)
                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
                        print('idx',idx,SPKEY)
                        SEL = (TEMP2 == idx)
                    #     print(idx,len(TEMP3))
                        for SPKEY in SPKEYS:
                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]

                    for SPKEY in SPKEYS:
                        keytemp = SPKEY+'_values'
                        fnoutkeytemp = fnout+':DSMW:'+keytemp
                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
                        os.system('rm '+fnoutkeytemp)
                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
                        self.datasets[SPKEY+'_values'].close()


                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
                                book(fnoutkeytemp,debug_level=self.debug_level)
                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'


                # step 2: texture index from the coarse/medium/fine fractions
                keytemp = 'texture'
                fnoutkeytemp=fnout+':DSMW:'+keytemp
                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
                    self.library[fnoutkeytemp]  = \
                        book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
                    self.datarefs[keytemp] =fn+':DSMW:texture'
                else:
                    self.library[fn+':DSMW:texture'] = xr.Dataset()
                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
                    self.datarefs[keytemp] =fn+':DSMW:texture'


                    # texture = (0.5*medium + 1.0*coarse) / (coarse+medium+fine)

                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)

                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
                    zundef[zundef < 0] = np.nan
                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
                    # cells where the undefined fraction dominates get the
                    # special code 9012 ("dominant part undefined", see below)
                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))

                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.

                    os.system('rm '+fnoutkeytemp)
                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
                    self.datasets[keytemp].close()


                    self.library[fnoutkeytemp]  = \
                        book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
                    self.datarefs[keytemp] =fn+':DSMW:texture'


                print('calculating texture type')



                # integer texture classes: texture*100 with NaN -> -9
                keytemp = 'itex'
                fnoutkeytemp=fnout+':DSMW:'+keytemp
                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
                    self.library[fnoutkeytemp] = \
                            book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
                    self.datarefs[keytemp] =fn+':DSMW:itex'
                else:
                    self.library[fnoutkeytemp] = xr.Dataset()
                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
                    self.datarefs[keytemp] =fn+':DSMW:itex'

                    X = self.datasets['texture'].page['texture'].values*100
                    X[pd.isnull(X)] = -9


                    self.datasets[keytemp][keytemp].values = X

                    os.system('rm '+fnoutkeytemp)
                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
                    self.datasets['itex'].close()


                    self.library[fnoutkeytemp] = \
                            book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
                    self.datarefs[keytemp] =fn+':DSMW:itex'


                # step 3: soil-type index (isoil) from texture class + legend
                # code, following EXTPAR's mo_agg_soil.f90
                keytemp = 'isoil'
                fnoutkeytemp=fnout+':DSMW:'+keytemp
                isoil_reprocessed = False
                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
                    self.library[fn+':DSMW:isoil'] = \
                            book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
                    self.datarefs['isoil'] =fn+':DSMW:isoil'
                else:
                    isoil_reprocessed = True
                    print('calculating soil type')
                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
                    self.datarefs['isoil'] =fn+':DSMW:isoil'

                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
                    ITEX = self.datasets['itex'].page['itex'].values
                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
                    # thresholds applied in increasing order, so the last
                    # matching row wins for each cell
                    LOOKUP = [
                              [-10 ,9],# ocean
                              [0 ,7],# fine textured, clay (soil type 7)
                              [20,6],# medium to fine textured, loamy clay (soil type 6)
                              [40,5],# medium textured, loam (soil type 5)
                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
                              [80,3],# coarse textured, sand (soil type 3)
                              [100,9],# coarse textured, sand (soil type 3)
                            ]
                    for iitex,iisoil in LOOKUP:
                        ISOIL[ITEX > iitex] = iisoil
                        print('iitex,iisoil',iitex,iisoil)


                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
                    LOOKUP = [
                              [9001, 1 ], # ice, glacier (soil type 1)
                              [9002, 2 ], # rock, lithosols (soil type 2)
                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
                              [9,    9 ], # undefined (ocean)
                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
                              [9000, 9 ], # undefined (inland lake)
                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
                            ]
                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values

                    # 9012*100 from the itex step marks "dominant part
                    # undefined" cells; fold them back to legend code 9012
                    CODE_VALUES[ITEX == 901200] = 9012
                    for icode,iisoil in LOOKUP:
                        ISOIL[CODE_VALUES == icode] = iisoil

                    self.datasets['isoil']['isoil'].values = ISOIL
                    os.system('rm '+fnoutkeytemp)
                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
                    self.datasets[keytemp].close()
                    print('saved inbetween file to: '+fnoutkeytemp)

                    self.library[fn+':DSMW:isoil'] = \
                            book(fnoutkeytemp,debug_level=self.debug_level)
                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
                    self.datarefs['isoil'] =fn+':DSMW:isoil'

                #adopted from data_soil.f90 (COSMO5.0)
                SP_LOOKUP = {
                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
                  # (by index)                                           loam                    loam                                water      ice
                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
                  # Important note: For peat, the unknown values below are set equal to that of loam
                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
                  #error in table 2 of NP89: values need to be multiplied by e-6
                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],

                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
                }


                # isoil_reprocessed = False
                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):

                #     self.library[fn+':DSMW:isoil'] = \
                #             book(fnoutkeytemp,debug_level=self.debug_level)
                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
                # else:
                #     isoil_reprocessed = True
                #     print('calculating soil type')
                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
                #     self.datarefs['isoil'] =fn+':DSMW:isoil'




                # step 4: expand the per-soil-type parameters (force-restore
                # subset only) to full grids, cached like the steps above
                # this should become cleaner in future but let's hard code it for now.
                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
                print('calculating soil parameter')
                DATATEMPSPKEY = {}
                if (recalc < 1) and (isoil_reprocessed == False):
                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
                        keytemp = SPKEY
                        fnoutkeytemp=fnout+':DSMW:'+keytemp
                        self.library[fn+':DSMW:'+SPKEY] =\
                                book(fnoutkeytemp,debug_level=self.debug_level)
                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
                        self.datarefs[SPKEY] =fnoutkeytemp
                else:
                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():

                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
                    ISOIL = self.datasets['isoil'].page['isoil'].values
                    print(np.where(ISOIL>0.))
                    # 11 soil classes (index 0..10) per SP_LOOKUP columns
                    for i in range(11):
                        SELECT = (ISOIL == i)
                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]

                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]

                        os.system('rm '+fn+':DSMW:'+SPKEY)
                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
                        self.datasets[SPKEY].close()
                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)

                        self.library[fn+':DSMW:'+SPKEY] = \
                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY


            else:
                self.load_dataset_default(fn,varsource,vardest)
-
-
-
-
-
-
-#
-#                 # only print the last parameter value in the plot
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'cala'
-#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'crhoc'
-#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#     key = "CERES"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         CERES_start_date = dt.datetime(2000,3,1)
-#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-#
-#         var = 'cc'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#         print(class_settings.lat,class_settings.lon)
-#
-#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
-#
-#         input_nc.close()
-#
-
-
-#     key = "GIMMS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-#         print("Reading Leag Area Index from "+input_fn)
-#         var = 'LAI'
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-#
-#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-#
-#         if np.isnan(tarray[idatetime]):
-#             print("interpolating GIMMS cveg nan value")
-#
-#             mask = np.isnan(tarray)
-#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-#             else:
-#                 print("Warning. Could not interpolate GIMMS cveg nan value")
-#
-#         class_settings.__dict__[var] = tarray[idatetime]
-#
-#         input_nc.close()
-#
-#     key = "IGBPDIS_ALPHA"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         var = 'alpha'
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-#         print("Reading albedo from "+input_fn)
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#
-#         landfr = {}
-#         for ltype in ['W','B','H','TC']:
-#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-#
-#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-#
-#         alpha=0.
-#         for ltype in landfr.keys():
-#             alpha += landfr[ltype]*aweights[ltype]
-#
-#
-#         class_settings.__dict__[var] = alpha
-#         input_nc.close()
-#
-#
-#     key = "ERAINT_ST"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         print("Reading soil temperature from "+input_fn)
-#
-#         var = 'Tsoil'
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         var = 'T2'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-#
-#
-#         input_nc.close()
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #var = 'T2'
-#     #valold = class_settings.__dict__[var]
-#     #
-#     #class_settings.__dict__[var] = 305.
-#     #class_settings.__dict__['Tsoil'] = 302.
-#     #valnew = class_settings.__dict__[var]
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #
-#     #var = 'Lambda'
-#     #valold = class_settings.__dict__[var]
-#
-#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
-#     ## I need to ask Chiel.
-#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-#     #
-#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
-#     #class_settings.__dict__[var] = valnew
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     key = "GLAS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-#         print("Reading canopy height for determining roughness length from "+input_fn)
-#         var = 'z0m'
-#
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-#
-#         lowerlimit = 0.01
-#         if testval < lowerlimit:
-#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-#             class_settings.__dict__[var] = lowerlimit
-#         else:
-#             class_settings.__dict__[var] = testval
-#
-#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-#
-#
-#         input_nc.close()
-
-
-
-
-
diff --git a/examples/run_soundings/batch_run_soundings.py b/examples/run_soundings/batch_run_soundings.py
deleted file mode 100644
index c4fc40e..0000000
--- a/examples/run_soundings/batch_run_soundings.py
+++ /dev/null
@@ -1,76 +0,0 @@
-
-import argparse
-
-import pandas as pd
-import os
-import math
-import numpy as np
-import sys
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-df_stations = pd.read_csv(fn_stations)
-
-# if 'path-soundings' in args.__dict__.keys():
-#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-# else:
-
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
-    parser.add_argument('--experiments')#should be ';'-seperated list
-    parser.add_argument('--split-by',default=-1)
-    args = parser.parse_args()
-
-experiments = args.experiments.split(';')
-#SET = 'GLOBAL'
-SET = args.dataset
-print(args.experiments)
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-for expname in experiments:
-    #exp = EXP_DEFS[expname]
-    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-    os.system('rm -R '+path_exp)
-
-totalchunks = 0
-for istation,current_station in all_stations.iterrows():
-    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
-    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
-    totalchunks +=chunks_current_station
-
-#if sys.argv[1] == 'qsub':
-# with qsub
-os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
-                                       ',split_by='+str(args.split_by)+\
-                                       ',exec='+str(args.exec)+\
-                                       ',experiments='+str(args.experiments))
-# elif sys.argv[1] == 'wsub':
-#     
-#     # with wsub
-#     STNlist = list(df_stations.iterrows())
-#     NUMSTNS = len(STNlist)
-#     PROCS = NUMSTNS 
-#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-# 
-#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
-
diff --git a/examples/run_soundings/run.py b/examples/run_soundings/run.py
deleted file mode 100644
index 46eeea1..0000000
--- a/examples/run_soundings/run.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--error-handling',default='dump_on_success')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk')
-    parser.add_argument('--c4gl-path',default='')
-    args = parser.parse_args()
-
-
-
-if args.c4gl_path == '': 
-    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-else:
-    sys.path.insert(0, args.c4gl_path)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    run_stations = pd.DataFrame(all_stations)
-    if args.last_station is not None:
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    if args.first_station is not None:
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    run_station_chunk = 0
-    if args.station_chunk is not None:
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            print(records_morning_station_chunk)
-
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-
-                    if args.error_handling == 'dump_always':
-                        try:
-                            c4gl.run()
-                        except:
-                            print('run not succesfull')
-                        onerun = True
-
-                        c4gli_morning.dump(file_ini)
-                        
-                        
-                        c4gl.dump(file_mod,\
-                                  include_input=False,\
-                                  #timeseries_only=timeseries_only,\
-                                 )
-                        onerun = True
-                    # in this case, only the file will dumped if the runs were
-                    # successful
-                    elif args.error_handling == 'dump_on_succes':
-                        try:
-                            c4gl.run()
-                            print('run not succesfull')
-                            onerun = True
-
-                            c4gli_morning.dump(file_ini)
-                            
-                            
-                            c4gl.dump(file_mod,\
-                                      include_input=False,\
-                                      #timeseries_only=timeseries_only,\
-                                     )
-                            onerun = True
-                        except:
-                            print('run not succesfull')
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/examples/run_soundings/run_iter.py b/examples/run_soundings/run_iter.py
deleted file mode 100644
index 5dfbaff..0000000
--- a/examples/run_soundings/run_iter.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk',default=0)
-    args = parser.parse_args()
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if 'global_chunk' in args.__dict__.keys():
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    if 'last_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    
-    if 'first_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    if 'station_chunk' in args.__dict__.keys():
-        run_station_chunk = args.station_chunk
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-    for istation,current_station in run_stations.iterrows():
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                #if iexp == 11:
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    
-                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
-                    EFobs = c4gli_morning.pars.EF
-                    
-                    b = c4gli_morning.pars.wwilt
-                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
-                    
-                    
-                    try:
-                        #fb = f(b)
-                        c4gli_morning.pars.wg = b
-                        c4gli_morning.pars.w2 = b
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fb = EFmod - EFobs
-                        EFmodb = EFmod
-                        c4glb = c4gl
-                        c4gli_morningb = c4gli_morning
-                        
-                        #fc = f(c)
-                        c4gli_morning.pars.wg = c
-                        c4gli_morning.pars.w2 = c
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fc = EFmod - EFobs
-                        print (EFmodb,EFobs,fb)
-                        print (EFmod,EFobs,fc)
-                        c4glc = c4gl
-                        c4gli_morningc = c4gli_morning
-                        i=0
-                        
-
-                        if fc*fb > 0.:
-                            if abs(fb) < abs(fc):
-                                c4gl = c4glb
-                                c4gli_morning = c4gli_morningb
-                            else:
-                                c4gl = c4glc
-                                c4gli_morning = c4gli_morningc
-                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
-                        
-                        else:
-                            print('starting ITERATION!!!')
-                            cn  = c - fc/(fc-fb)*(c-b)
-                            
-                            
-                            #fcn = f(cn)
-                            c4gli_morning.pars.wg = np.asscalar(cn)
-                            c4gli_morning.pars.w2 = np.asscalar(cn)
-                            c4gl = class4gl(c4gli_morning)
-                            c4gl.run()
-                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                            
-                            tol = 0.02
-                            ftol = 10.
-                            maxiter = 10
-                            
-                            is1=0
-                            is1max=1
-                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
-                                if fc * fcn > 0:
-                                    temp = c
-                                    c = b
-                                    b = temp
-                                
-                                a = b
-                                fa = fb
-                                b = c
-                                fb = fc
-                                c = cn
-                                fc = fcn
-                                              
-                                print(i,a,b,c,fcn)
-                                
-                                s1 = c - fc/(fc-fb)*(c-b) 
-                                s2 = c - fc/(fc-fa)*(c-a)
-                                
-                                
-                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
-                                
-                                
-                                if (abs(s1-b) < abs(s2-b)):
-                                    is1 = 0
-                                else:
-                                    is1 +=1
-                                    
-                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
-                                if is1 < is1max:           
-                                    s = s1
-                                    print('s1')
-                                else:
-                                    is1 = 0
-                                    s = s2
-                                    print('s2')
-                                
-                                if c > b:
-                                    l = b
-                                    r = c
-                                else:
-                                    l = c
-                                    r = b
-                                
-                                m = (b+c)/2.
-                                     
-                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
-                                    cn = s
-                                    print('midpoint')
-                                else:
-                                    cn = m
-                                    print('bissection')
-                                    
-                                
-                                #fcn = f(cn)
-                                c4gli_morning.pars.wg = np.asscalar(cn)
-                                c4gli_morning.pars.w2 = np.asscalar(cn)
-                                c4gl = class4gl(c4gli_morning)
-                                c4gl.run()
-                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                                
-                            
-                                i+=1
-                                
-                            if i == maxiter:
-                                raise StopIteration('did not converge')
-
-
-
-
-                        #c4gl = class4gl(c4gli_morning)
-                        #c4gl.run()
-
-                        c4gli_morning.pars.itersteps = i
-                        c4gli_morning.dump(file_ini)
-                        
-                        
-                        c4gl.dump(file_mod,\
-                                      include_input=False,\
-                                   #   timeseries_only=timeseries_only,\
-                                 )
-                        onerun = True
-                    except:
-                        print('run not succesfull')
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/examples/run_soundings/run_iter_test.py b/examples/run_soundings/run_iter_test.py
deleted file mode 100644
index eefd475..0000000
--- a/examples/run_soundings/run_iter_test.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk')
-    args = parser.parse_args()
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    run_stations = pd.DataFrame(all_stations)
-    if args.last_station is not None:
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    if args.first_station is not None:
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    run_station_chunk = 0
-    if args.station_chunk is not None:
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-                #if iexp == 11:
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    
-                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
-                    EFobs = c4gli_morning.pars.EF
-                    
-                    b = c4gli_morning.pars.wwilt
-                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
-                    
-                    
-                    try:
-                        #fb = f(b)
-                        c4gli_morning.pars.wg = b
-                        c4gli_morning.pars.w2 = b
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fb = EFmod - EFobs
-                        EFmodb = EFmod
-                        c4glb = c4gl
-                        c4gli_morningb = c4gli_morning
-                        
-                        #fc = f(c)
-                        c4gli_morning.pars.wg = c
-                        c4gli_morning.pars.w2 = c
-                        c4gl = class4gl(c4gli_morning)
-                        c4gl.run()
-                        EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum())
-                        fc = EFmod - EFobs
-                        print (EFmodb,EFobs,fb)
-                        print (EFmod,EFobs,fc)
-                        c4glc = c4gl
-                        c4gli_morningc = c4gli_morning
-                        i=0
-                        
-
-                        if fc*fb > 0.:
-                            if abs(fb) < abs(fc):
-                                c4gl = c4glb
-                                c4gli_morning = c4gli_morningb
-                            else:
-                                c4gl = c4glc
-                                c4gli_morning = c4gli_morningc
-                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
-                        
-                        else:
-                            print('starting ITERATION!!!')
-                            cn  = c - fc/(fc-fb)*(c-b)
-                            
-                            
-                            #fcn = f(cn)
-                            c4gli_morning.pars.wg = np.asscalar(cn)
-                            c4gli_morning.pars.w2 = np.asscalar(cn)
-                            c4gl = class4gl(c4gli_morning)
-                            c4gl.run()
-                            fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                            
-                            tol = 0.02
-                            ftol = 10.
-                            maxiter = 10
-                            
-                            is1=0
-                            is1max=1
-                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
-                                if fc * fcn > 0:
-                                    temp = c
-                                    c = b
-                                    b = temp
-                                
-                                a = b
-                                fa = fb
-                                b = c
-                                fb = fc
-                                c = cn
-                                fc = fcn
-                                              
-                                print(i,a,b,c,fcn)
-                                
-                                s1 = c - fc/(fc-fb)*(c-b) 
-                                s2 = c - fc/(fc-fa)*(c-a)
-                                
-                                
-                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
-                                
-                                
-                                if (abs(s1-b) < abs(s2-b)):
-                                    is1 = 0
-                                else:
-                                    is1 +=1
-                                    
-                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
-                                if is1 < is1max:           
-                                    s = s1
-                                    print('s1')
-                                else:
-                                    is1 = 0
-                                    s = s2
-                                    print('s2')
-                                
-                                if c > b:
-                                    l = b
-                                    r = c
-                                else:
-                                    l = c
-                                    r = b
-                                
-                                m = (b+c)/2.
-                                     
-                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
-                                    cn = s
-                                    print('midpoint')
-                                else:
-                                    cn = m
-                                    print('bissection')
-                                    
-                                
-                                #fcn = f(cn)
-                                c4gli_morning.pars.wg = np.asscalar(cn)
-                                c4gli_morning.pars.w2 = np.asscalar(cn)
-                                c4gl = class4gl(c4gli_morning)
-                                c4gl.run()
-                                fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs
-                                
-                            
-                                i+=1
-                                
-                            if i == maxiter:
-                                raise StopIteration('did not converge')
-
-
-
-
-                        #c4gl = class4gl(c4gli_morning)
-                        #c4gl.run()
-                        onerun = True
-
-                        c4gli_morning.pars.itersteps = i
-                    except:
-                        print('run not succesfull')
-                    c4gli_morning.dump(file_ini)
-                    
-                    
-                    c4gl.dump(file_mod,\
-                                  include_input=False,\
-                               #   timeseries_only=timeseries_only,\
-                             )
-                    onerun = True
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/examples/run_soundings/trash/run_test.py b/examples/run_soundings/trash/run_test.py
deleted file mode 100644
index 767d960..0000000
--- a/examples/run_soundings/trash/run_test.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-
-import argparse
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk')
-    parser.add_argument('--c4gl-path',default='')
-    args = parser.parse_args()
-
-if args.c4gl_path == '': 
-    sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-else:
-    sys.path.insert(0, args.c4gl_path)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
-
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-
-EXP_DEFS  =\
-{
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-if args.global_chunk is not None:
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
-
-        totalchunks +=chunks_current_station
-
-else:
-    run_stations = pd.DataFrame(all_stations)
-    if args.last_station is not None:
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    if args.first_station is not None:
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    run_station_chunk = 0
-    if args.station_chunk is not None:
-        run_station_chunk = args.station_chunk
-
-#print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
-records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
-
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-            print(records_morning_station_chunk)
-
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                
-            
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-                    
-                    
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-                    c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gl = class4gl(c4gli_morning)
-                    try:
-                        c4gl.run()
-                    except:
-                        print('run not succesfull')
-                    onerun = True
-
-                    c4gli_morning.dump(file_ini)
-                    
-                    
-                    c4gl.dump(file_mod,\
-                              include_input=False,\
-                              #timeseries_only=timeseries_only,\
-                             )
-                    onerun = True
-
-                #iexp = iexp +1
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
-
diff --git a/examples/setup_soundings/setup_bllast.py b/examples/setup_soundings/setup_bllast.py
deleted file mode 100644
index af8c8bb..0000000
--- a/examples/setup_soundings/setup_bllast.py
+++ /dev/null
@@ -1,719 +0,0 @@
-# -*- coding: utf-8 -*-
-# Read data from BLLAST campaing and convert it to class4gl input
-
-# WARNING!! stupid tab versus space formatting, grrrmmmlmlmlll!  the following command needs to be executed first: 
-#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : 42.971834,
-                  "longitude" : 0.3671169,
-                  "name" : "the BLLAST experiment"
-                })
-current_station.name = 90001
-
-
-
-
-
-# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
-# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
-# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
-# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
-
-HOUR_FILES = \
-{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110619_1750_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[18,'RS_20110620_1750_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110625_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110702_1655_site1_MODEM_CRA.cor.fmt']},
- dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
-}
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-def efrom_rh100_T(rh100,T):
-    return esat(T)*rh100/100.
-def qfrom_e_p(e,p):
-    return epsilon * e/(p - (1.-epsilon)*e)
-
-def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
-                                     #widths=[14]*19,
-                                     #skiprows=9,
-                                     #skipfooter=15,
-                                     #decimal='.',
-                                     #header=None,
-                                     #names = columns,
-                                     #na_values='-----')
-        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
-        print(air_balloon_in.columns)
-        rowmatches = {
-            't':      lambda x: x['TaRad']+273.15,
-            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
-            'p':      lambda x: x['Press']*100.,
-            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
-            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
-            'z':      lambda x: x['Altitude'] -582.,
-            # from virtual temperature to absolute humidity
-            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
-        }
-        
-        air_balloon = pd.DataFrame()
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon_in)
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
-        }
-        
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-        # filter data so that potential temperature always increases with
-        # height 
-        cols = []
-        for column in air_ap_tail.columns:
-            #if column != 'z':
-                cols.append(column)
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-
-
-
-        # # make theta increase strong enough to avoid numerical
-        # # instability
-        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        # air_ap_tail = pd.DataFrame()
-        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # theta_low = air_ap_head['theta'].iloc[2]
-        # z_low = air_ap_head['z'].iloc[2]
-        # ibottom = 0
-        # for itop in range(0,len(air_ap_tail_orig)):
-        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-        #     if ((theta_mean > (theta_low+0.2) ) and \
-        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
-
-        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-        #         ibottom = itop+1
-        #         theta_low = air_ap_tail.theta.iloc[-1]
-        #         z_low =     air_ap_tail.z.iloc[-1]
-        #     # elif  (itop > len(air_ap_tail_orig)-10):
-        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        # air_ap = \
-        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        # 
-        # # we copy the pressure at ground level from balloon sounding. The
-        # # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually
-        # write local solar time, we need to assign the timezone to UTC (which
-        # is WRONG!!!). Otherwise ruby cannot understand it (it always converts
-        # tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise']+dt.timedelta(hours=2))\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        print('ldatetime_daylight',dpars['ldatetime_daylight'])
-        print('ldatetime',dpars['ldatetime'])
-        print('lSunrise',dpars['lSunrise'])
-        dpars['day'] = dpars['ldatetime'].day
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        print('tstart',dpars['tstart'])
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='bllast',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
-    
-    print(humpafn)
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-
-    
-    c4gli_morning.pars.sw_ac = []
-    c4gli_morning.pars.sw_ap = True
-    c4gli_morning.pars.sw_lit = False
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-
-
-# stations_for_iter = stations(path_exp)
-# for STNID,station in stations_iterator(stations_for_iter):
-#     records_current_station_index = \
-#             (records_ini.index.get_level_values('STNID') == STNID)
-#     file_current_station_mod = STNID
-# 
-#     with \
-#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-#         for (STNID,index),record_ini in records_iterator(records_ini):
-#             c4gli_ini = get_record_yaml(file_station_ini, 
-#                                         record_ini.index_start, 
-#                                         record_ini.index_end,
-#                                         mode='ini')
-#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-# 
-#             record_mod = records_mod.loc[(STNID,index)]
-#             c4gl_mod = get_record_yaml(file_station_mod, 
-#                                         record_mod.index_start, 
-#                                         record_mod.index_end,
-#                                         mode='mod')
-#             record_afternoon = records_afternoon.loc[(STNID,index)]
-#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-#                                         record_afternoon.index_start, 
-#                                         record_afternoon.index_end,
-#                                         mode='ini')
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/examples/setup_soundings/setup_global.py b/examples/setup_soundings/setup_global.py
deleted file mode 100644
index 79224d9..0000000
--- a/examples/setup_soundings/setup_global.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thursday, March 29, 11:30 AM
-
-@author: Hendrik Wouters
-
-The dry-2-dry global radio sounding experiment.
-
-usage:
-    python setup_global.py 
-    where  is an integer indicating the row index of the station list
-    under odir+'/'+fn_stations (see below)
-
-this scripts should be called from the pbs script setup_global.pbs
-
-
-
-dependencies:
-    - pandas
-    - class4gl
-    - data_soundings
-
-
-"""
-
-""" import libraries """
-import pandas as pd
-import sys
-#import copy as cp
-import numpy as np
-from sklearn.metrics import mean_squared_error
-import logging
-import datetime as dt
-import os
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-
-
-#calculate the root mean square error
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from data_soundings import wyoming
-#from data_global import data_global
-
-# iniitialize global data
-globaldata = data_global()
-# ...  and load initial data pages
-globaldata.load_datasets(recalc=0)
-
-# read the list of stations with valid ground data (list generated with
-# get_valid_stations.py)
-idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-
-df_stations = pd.read_csv(fn_stations)
-
-
-STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = 100
-BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-
-
-iPROC = int(sys.argv[1])
-
-
-for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
-    one_run = False
-# for iSTN,STN in STNlist[5:]:  
-    
-    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
-    
-
-    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
-    #                   for EXP in experiments.keys()])
-        
-    with open(fnout,'w') as fileout, \
-         open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
-        wy_strm.set_STNM(int(STN['ID']))
-
-        # we consider all soundings after 1981
-        wy_strm.find_first(year=1981)
-        #wy_strm.find(dt.datetime(2004,10,19,6))
-        
-        c4gli = class4gl_input(debug_level=logging.INFO)
-        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
-        # so we continue as long as we can find a new sounding
-                
-        while wy_strm.current is not None:
-            
-            c4gli.clear()
-            try:
-                c4gli.get_profile_wyoming(wy_strm)
-                #print(STN['ID'],c4gli.pars.datetime)
-                #c4gli.get_global_input(globaldata)
-
-                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
-
-                logic = dict()
-                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
-                logic['daylight'] = \
-                    ((c4gli.pars.ldatetime_daylight - 
-                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
-                
-                logic['springsummer'] = (c4gli.pars.theta > 278.)
-                
-                # we take 3000 because previous analysis (ie., HUMPPA) has
-                # focussed towards such altitude
-                le3000 = (c4gli.air_balloon.z <= 3000.)
-                logic['10measurements'] = (np.sum(le3000) >= 10) 
-
-                leh = (c4gli.air_balloon.z <= c4gli.pars.h)
-
-                logic['mlerrlow'] = (\
-                        (len(np.where(leh)[0]) > 0) and \
-                        # in cases where humidity is not defined, the mixed-layer
-                        # values get corr
-                        (not np.isnan(c4gli.pars.theta)) and \
-                        (rmse(c4gli.air_balloon.theta[leh] , \
-                              c4gli.pars.theta,filternan_actual=True) < 1.)\
-                              )
-    
-
-                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
-                
-                print('logic:', logic)
-                # the result
-                morning_ok = np.mean(list(logic.values()))
-                print(morning_ok,c4gli.pars.ldatetime)
-
-            except:
-                morning_ok =False
-                print('obtain morning not good')
-            # the next sounding will be used either for an afternoon sounding
-            # or for the morning sounding of the next day.
-            wy_strm.find_next()
-            # If the morning is ok, then we try to find a decent afternoon
-            # sounding
-            if morning_ok == 1.:
-                print('MORNING OK!')
-                # we get the current date
-                current_date = dt.date(c4gli.pars.ldatetime.year, \
-                                       c4gli.pars.ldatetime.month, \
-                                       c4gli.pars.ldatetime.day)
-                c4gli_afternoon.clear()
-                print('AFTERNOON PROFILE CLEARED')
-                try:
-                    c4gli_afternoon.get_profile_wyoming(wy_strm)
-                    print('AFTERNOON PROFILE OK')
-
-                    if wy_strm.current is not None:
-                        current_date_afternoon = \
-                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                           c4gli_afternoon.pars.ldatetime.month, \
-                                           c4gli_afternoon.pars.ldatetime.day)
-                    else:
-                        # a dummy date: this will be ignored anyway
-                        current_date_afternoon = dt.date(1900,1,1)
-
-                    # we will dump the latest afternoon sounding that fits the
-                    # minimum criteria specified by logic_afternoon
-                    print(current_date,current_date_afternoon)
-                    c4gli_afternoon_for_dump = None
-                    while ((current_date_afternoon == current_date) and \
-                           (wy_strm.current is not None)):
-                        logic_afternoon =dict()
-
-                        logic_afternoon['afternoon'] = \
-                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
-                        logic_afternoon['daylight'] = \
-                          ((c4gli_afternoon.pars.ldatetime - \
-                            c4gli_afternoon.pars.ldatetime_daylight \
-                           ).total_seconds()/3600. <= 0.)
-
-
-                        le3000_afternoon = \
-                            (c4gli_afternoon.air_balloon.z <= 3000.)
-                        logic_afternoon['5measurements'] = \
-                            (np.sum(le3000_afternoon) >= 5) 
-
-                        # we only store the last afternoon sounding that fits these
-                        # minimum criteria
-
-                        afternoon_ok = np.mean(list(logic_afternoon.values()))
-
-                        print('logic_afternoon: ',logic_afternoon)
-                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
-                        if afternoon_ok == 1.:
-                            # # doesn't work :(
-                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
-                            
-                            # so we just create a new one from the same wyoming profile
-                            c4gli_afternoon_for_dump = class4gl_input()
-                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
-
-                        wy_strm.find_next()
-                        c4gli_afternoon.clear()
-                        c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                        if wy_strm.current is not None:
-                            current_date_afternoon = \
-                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                           c4gli_afternoon.pars.ldatetime.month, \
-                                           c4gli_afternoon.pars.ldatetime.day)
-                        else:
-                            # a dummy date: this will be ignored anyway
-                            current_date_afternoon = dt.date(1900,1,1)
-
-                        # Only in the case we have a good pair of soundings, we
-                        # dump them to disk
-                    if c4gli_afternoon_for_dump is not None:
-                        c4gli.update(source='pairs',pars={'runtime' : \
-                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
-                                 c4gli.pars.datetime_daylight).total_seconds())})
-    
-    
-                        print('ALMOST...')
-                        if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
-                                
-        
-                            c4gli.get_global_input(globaldata)
-                            print('VERY CLOSE...')
-                            if c4gli.check_source_globaldata() and \
-                                (c4gli.check_source(source='wyoming',\
-                                                   check_only_sections='pars')):
-                                c4gli.dump(fileout)
-                                
-                                c4gli_afternoon_for_dump.dump(fileout_afternoon)
-                                
-                                
-                                # for keyEXP,dictEXP in experiments.items():
-                                #     
-                                #     c4gli.update(source=keyEXP,pars = dictEXP)
-                                #     c4gl = class4gl(c4gli)
-                                #     # c4gl.run()
-                                #     
-                                #     c4gl.dump(c4glfiles[key])
-                                
-                                print('HIT!!!')
-                                one_run = True
-                except:
-                    print('get profile failed')
-                
-    if one_run:
-        STN.name = STN['ID']
-        all_records_morning = get_records(pd.DataFrame([STN]),\
-                                      odir,\
-                                      subset='morning',
-                                      refetch_records=True,
-                                      )
-        all_records_afternoon = get_records(pd.DataFrame([STN]),\
-                                      odir,\
-                                      subset='afternoon',
-                                      refetch_records=True,
-                                      )
-    else:
-        os.system('rm '+fnout)
-        os.system('rm '+fnout_afternoon)
-
-    # for c4glfile in c4glfiles:
-    #     c4glfile.close()            
-
diff --git a/examples/setup_soundings/setup_goamazon.py b/examples/setup_soundings/setup_goamazon.py
deleted file mode 100644
index f9efe2c..0000000
--- a/examples/setup_soundings/setup_goamazon.py
+++ /dev/null
@@ -1,740 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import xarray as xr
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-import glob
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : -3.21,
-                  "longitude" : -60.6,
-                  "name" : "the GOAMAZON experiment"
-                })
-current_station.name = 90002
-
-# we define the columns ourselves because it is a mess in the file itself.
-columns =\
-['Time[min:sec]',
- 'P[hPa]',
- 'T[C]',
- 'U[%]',
- 'Wsp[m/s]',
- 'Wdir[Grd]',
- 'Lon[°]',
- 'Lat[°]',
- 'Altitude[m]',
- 'GeoPot[m]',
- 'MRI',
- 'RI',    
- 'DewPoint[C]',
- 'Virt. Temp[C]',
- 'Rs[m/min]',
- 'D[kg/m3]',
- 'Azimut[°]',
- 'Elevation[°]',
- 'Range[m]',
-]
-
-DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
-DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)
-
-
-DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))]
-HOUR_FILES = {}
-for iDT, DT in enumerate(DTS):
-    morning_file = None
-    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf')
-    if len(possible_files)>0:
-        morning_file= possible_files[0]
-    afternoon_file = None
-    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*.cdf')
-    if len(possible_files)>0:
-        afternoon_file= possible_files[0]
-
-    if (morning_file is not None) and (afternoon_file is not None):
-        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
-                          'afternoon':[17.5,afternoon_file]}
-
-print(HOUR_FILES)
-
-# HOUR_FILES = \
-# {
-#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
-#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
-#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
-# }
-
-
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-def efrom_rh100_T(rh100,T):
-    return esat(T)*rh100/100.
-def qfrom_e_p(e,p):
-    return epsilon * e/(p - (1.-epsilon)*e)
-
-
-
-def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
-        print(balloon_file)
-        
-        xrin = balloon_file
-        air_balloon = pd.DataFrame()
-
-        air_balloon['t'] = xrin.tdry.values+273.15
-        air_balloon['p'] = xrin.pres.values*100.
-        
-        air_balloon['u'] = xrin.u_wind.values
-        air_balloon['v'] = xrin.v_wind.values
-        air_balloon['WSPD'] = xrin['wspd'].values
-        
-        print(xrin.rh.values.shape)
-        air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)
-        
-
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        # air_balloon_in = pd.read_fwf(balloon_file,
-        #                              widths=[14]*19,
-        #                              skiprows=9,
-        #                              skipfooter=15,
-        #                              decimal=',',
-        #                              header=None,
-        #                              names = columns,
-        #                              na_values='-----')
-    
-
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
-            'rho': lambda x: x.p /x.t / x.R ,
-        }
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        print('alt in xrin?:','alt' in xrin)
-        if 'alt' in xrin:
-            air_balloon['z'] = xrin.alt.values
-        else:
-            air_balloon['z'] = 0.
-            for irow,row in air_balloon.iloc[1:].iterrows():
-                air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
-                        2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \
-                        (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])
-                        
-             
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-
-        # there are issues with the lower measurements in the HUMPPA campaign,
-        # for which a steady decrease of potential temperature is found, which
-        # is unrealistic.  Here I filter them away
-        ifirst = 0
-        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
-            ifirst = ifirst+1
-        print ('ifirst:',ifirst)
-        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            print(air_balloon.z.shape,air_balloon.thetav.shape,)
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
-        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='humppa',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn =pair['morning'][1]
-    print(humpafn)
-    balloon_file = xr.open_dataset(humpafn)
-
-    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn = pair['afternoon'][1]
-    balloon_file = xr.open_dataset(humpafn)
-
-    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-    c4gli_morning.update(source='manual',
-                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-
-"""
-stations_for_iter = stations(path_exp)
-for STNID,station in stations_iterator(stations_for_iter):
-    records_current_station_index = \
-            (records_ini.index.get_level_values('STNID') == STNID)
-    file_current_station_mod = STNID
-
-    with \
-    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-        for (STNID,index),record_ini in records_iterator(records_ini):
-            c4gli_ini = get_record_yaml(file_station_ini, 
-                                        record_ini.index_start, 
-                                        record_ini.index_end,
-                                        mode='ini')
-            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-
-            record_mod = records_mod.loc[(STNID,index)]
-            c4gl_mod = get_record_yaml(file_station_mod, 
-                                        record_mod.index_start, 
-                                        record_mod.index_end,
-                                        mode='mod')
-            record_afternoon = records_afternoon.loc[(STNID,index)]
-            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-                                        record_afternoon.index_start, 
-                                        record_afternoon.index_end,
-                                        mode='ini')
-"""
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/examples/setup_soundings/setup_humppa.py b/examples/setup_soundings/setup_humppa.py
deleted file mode 100644
index ff37628..0000000
--- a/examples/setup_soundings/setup_humppa.py
+++ /dev/null
@@ -1,732 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import Pysolar
-import sys
-import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-
-globaldata = data_global()
-globaldata.load_datasets(recalc=0)
-
-Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-epsilon = Rd/Rv # or mv/md
-
-
-def replace_iter(iterable, search, replace):
-    for value in iterable:
-        value.replace(search, replace)
-        yield value
-
-from class4gl import blh,class4gl_input
-
-# definition of the humpa station
-current_station = pd.Series({ "latitude"  : 61.8448,
-                  "longitude" : 24.2882,
-                  "name" : "the HUMMPA experiment"
-                })
-current_station.name = 90000
-
-# we define the columns ourselves because it is a mess in the file itself.
-columns =\
-['Time[min:sec]',
- 'P[hPa]',
- 'T[C]',
- 'U[%]',
- 'Wsp[m/s]',
- 'Wdir[Grd]',
- 'Lon[°]',
- 'Lat[°]',
- 'Altitude[m]',
- 'GeoPot[m]',
- 'MRI',
- 'RI',    
- 'DewPoint[C]',
- 'Virt. Temp[C]',
- 'Rs[m/min]',
- 'D[kg/m3]',
- 'Azimut[°]',
- 'Elevation[°]',
- 'Range[m]',
-]
-
-
-HOUR_FILES = \
-{ dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
-  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[18,'humppa_071310_1800.txt']},
-  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
-  dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
-  dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
-  dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
-  dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
-  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[21,'humppa_071910_2100.txt']},
-#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
-  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
-  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[18,'humppa_072210_1800.txt']},
- # something is wrong with ths profile
- # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
-  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
-  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
-  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
-  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
-  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
-  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[18,'humppa_072910_1800.txt']},
-  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
-  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[15,'humppa_073110_1500.txt']},
-  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
-  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[18,'humppa_080210_1800.txt']},
-  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
-  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
-  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
-  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
-  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
-  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
-  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[18,'humppa_081010_1800.txt']},
-}
-
-
-
-
-
-
-#only include the following timeseries in the model output
-timeseries_only = \
-['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
- 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
- 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
- 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
- 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
-
-
-def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
-        #balloon_conv = replace_iter(balloon_file,"°","deg")
-        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
-        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
-        air_balloon_in = pd.read_fwf(balloon_file,
-                                     widths=[14]*19,
-                                     skiprows=9,
-                                     skipfooter=15,
-                                     decimal=',',
-                                     header=None,
-                                     names = columns,
-                                     na_values='-----')
-    
-        rowmatches = {
-            't':      lambda x: x['T[C]']+273.15,
-            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
-            'p':      lambda x: x['P[hPa]']*100.,
-            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
-            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
-            'z':      lambda x: x['Altitude[m]'],
-            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
-        }
-        
-        air_balloon = pd.DataFrame()
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon_in)
-        
-        rowmatches = {
-            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
-            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
-            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
-        }
-        
-        for varname,lfunction in rowmatches.items():
-            air_balloon[varname] = lfunction(air_balloon)
-        
-        dpars = {}
-        dpars['longitude']  = current_station['longitude']
-        dpars['latitude']  = current_station['latitude'] 
-        
-        dpars['STNID'] = current_station.name
-        
-
-        # there are issues with the lower measurements in the HUMPPA campaign,
-        # for which a steady decrease of potential temperature is found, which
-        # is unrealistic.  Here I filter them away
-        ifirst = 0
-        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
-            ifirst = ifirst+1
-        print ('ifirst:',ifirst)
-        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
-        
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        valid_indices = air_balloon.index[is_valid].values
-        
-        air_ap_mode='b'
-        
-        if len(valid_indices) > 0:
-            dpars['h'],dpars['h_u'],dpars['h_l'] =\
-                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-        
-        
-        
-        if ~np.isnan(dpars['h']):
-            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-        else:
-            dpars['Ps'] = np.nan
-        
-        if ~np.isnan(dpars['h']):
-        
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-        
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-
-
-
-        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        jump = air_ap_head.iloc[0] * np.nan
-        
-        if air_ap_tail.shape[0] > 1:
-        
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # only select samples monotonically increasing with height
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        for ibottom in range(1,len(air_ap_tail_orig)):
-            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-
-        # # make theta increase strong enough to avoid numerical
-        # # instability
-        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        # air_ap_tail = pd.DataFrame()
-        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        # theta_low = air_ap_head['theta'].iloc[2]
-        # z_low = air_ap_head['z'].iloc[2]
-        # ibottom = 0
-        # for itop in range(0,len(air_ap_tail_orig)):
-        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-        #     if ((theta_mean > (theta_low+0.2) ) and \
-        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
-
-        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-        #         ibottom = itop+1
-        #         theta_low = air_ap_tail.theta.iloc[-1]
-        #         z_low =     air_ap_tail.z.iloc[-1]
-        #     # elif  (itop > len(air_ap_tail_orig)-10):
-        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        # 
-        # air_ap = \
-        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-        
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-        
-        
-        dpars['lat'] = dpars['latitude']
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        
-        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
-        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        
-        
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        
-        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-        dpars['sw_lit'] = False
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-        
-                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
-        # 
-                for column,decimal in decimals.items():
-                    air_balloon[column] = air_balloon[column].round(decimal)
-                    air_ap[column] = air_ap[column].round(decimal)
-        
-        updateglobal = False
-        if c4gli is None:
-            c4gli = class4gl_input()
-            updateglobal = True
-        
-        print('updating...')
-        print(column)
-        c4gli.update(source='humppa',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-        if updateglobal:
-            c4gli.get_global_input(globaldata)
-
-        # if profile_ini:
-        #     c4gli.runtime = 10 * 3600
-
-        c4gli.dump(file_sounding)
-        
-        # if profile_ini:
-        #     c4gl = class4gl(c4gli)
-        #     c4gl.run()
-        #     c4gl.dump(file_model,\
-        #               include_input=True,\
-        #               timeseries_only=timeseries_only)
-        #     
-        #     # This will cash the observations and model tables per station for
-        #     # the interface
-        # 
-        # if profile_ini:
-        #     profile_ini=False
-        # else:
-        #     profile_ini=True
-        return c4gli
-
-
-path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/'
-
-
-file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    print(pair['morning'])
-    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
-    print(humpafn)
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
-    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
-file_morning.close()
-
-file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-for date,pair  in HOUR_FILES.items(): 
-    humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
-    balloon_file = open(humpafn,'r',encoding='latin-1')
-
-    c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
-    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
-file_afternoon.close()
- 
-
-# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
-# for date,pair  in HOUR_FILES.items(): 
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
-#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
-# file_morning.close()
-# 
-# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
-# for hour in [18]:
-#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
-#     balloon_file = open(humpafn,'r',encoding='latin-1')
-# 
-#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
-# file_afternoon.close()
-
-
-
-# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
-# 
-# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
-
-
-records_morning = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='morning',
-                                           refetch_records=True,
-                                           )
-print('records_morning_ldatetime',records_morning.ldatetime)
-
-records_afternoon = get_records(pd.DataFrame([current_station]),\
-                                           path_soundings,\
-                                           subset='afternoon',
-                                           refetch_records=True,
-                                           )
-
-# align afternoon records with noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/'
-
-os.system('mkdir -p '+path_exp)
-file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
-file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
-
-for (STNID,chunk,index),record_morning in records_morning.iterrows():
-    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-
-    c4gli_morning = get_record_yaml(file_morning, 
-                                    record_morning.index_start, 
-                                    record_morning.index_end,
-                                    mode='ini')
-    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
-    
-    
-    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                      record_afternoon.index_start, 
-                                      record_afternoon.index_end,
-                                    mode='ini')
-
-    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon.pars.datetime_daylight - 
-                             c4gli_morning.pars.datetime_daylight).total_seconds())})
-    c4gli_morning.update(source='manual',
-                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
-    c4gli_morning.dump(file_ini)
-    
-    c4gl = class4gl(c4gli_morning)
-    c4gl.run()
-    
-    c4gl.dump(file_mod,\
-              include_input=False,\
-              timeseries_only=timeseries_only)
-file_ini.close()
-file_mod.close()
-file_morning.close()
-file_afternoon.close()
-
-records_ini = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='ini',
-                                           refetch_records=True,
-                                           )
-records_mod = get_records(pd.DataFrame([current_station]),\
-                                           path_exp,\
-                                           subset='mod',
-                                           refetch_records=True,
-                                           )
-
-records_mod.index = records_ini.index
-
-# align afternoon records with initial records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-records_afternoon.index = records_ini.index
-
-# stations_for_iter = stations(path_exp)
-# for STNID,station in stations_iterator(stations_for_iter):
-#     records_current_station_index = \
-#             (records_ini.index.get_level_values('STNID') == STNID)
-#     file_current_station_mod = STNID
-# 
-#     with \
-#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-#         for (STNID,index),record_ini in records_iterator(records_ini):
-#             c4gli_ini = get_record_yaml(file_station_ini, 
-#                                         record_ini.index_start, 
-#                                         record_ini.index_end,
-#                                         mode='ini')
-#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-# 
-#             record_mod = records_mod.loc[(STNID,index)]
-#             c4gl_mod = get_record_yaml(file_station_mod, 
-#                                         record_mod.index_start, 
-#                                         record_mod.index_end,
-#                                         mode='mod')
-#             record_afternoon = records_afternoon.loc[(STNID,index)]
-#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-#                                         record_afternoon.index_start, 
-#                                         record_afternoon.index_end,
-#                                         mode='ini')
-
-
-
-# # select the samples of the afternoon list that correspond to the timing of the
-# # morning list
-# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
-# records_afternoon.index = recods_morning.index
-# 
-# 
-# # create intersectino index
-# index_morning = pd.Index(records_morning.ldatetime.to_date())
-# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
-# 
-# for record_morning in records_morning.iterrows():
-#     
-#     c4gl = class4gl(c4gli)
-#     c4gl.run()
-#     c4gl.dump(c4glfile,\
-#               include_input=True,\
-#               timeseries_only=timeseries_only)
-# 
-# # This will cash the observations and model tables per station for
-# # the interface
-# 
-# records_ini = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=0,\
-#                                    by=2,\
-#                                    subset='ini',
-#                                    refetch_records=True,
-#                                    )
-# records_mod = get_records(pd.DataFrame([current_station]),\
-#                                    path_mod,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='mod',
-#                                    refetch_records=True,
-#                                    )
-# records_eval = get_records(pd.DataFrame([current_station]),\
-#                                    path_obs,\
-#                                    start=1,\
-#                                    by=2,\
-#                                    subset='eval',
-#                                    refetch_records=True,
-#                                    )
-# 
-# 
-# # mod_scores = pd.DataFrame(index=mod_records.index)
-# # for (STNID,index), current_record_mod in mod_records.iterrows():
-# #     print(STNID,index)
-# #     current_station = STN
-# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
-# #     current_record_obs = obs_records.loc[(STNID,index)]
-# # 
-# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
-# #                                           current_station,\
-# #                                           current_record_mod,\
-# #                                          )
-# # 
-# #     record_yaml_obs = \
-# #             get_record_yaml_obs(odirexperiments[keyEXP],\
-# #                                 current_station,\
-# #                                 current_record_obs,\
-# #                                 suffix='.yaml')
-# # 
-# #     record_yaml_obs_afternoon = \
-# #             get_record_yaml_obs(odir,\
-# #                                 current_station,\
-# #                                 current_record_obs_afternoon,\
-# #                                 suffix='_afternoon.yaml')
-# # 
-# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
-# #                    record_yaml_mod.h])
-# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
-# #     
-# # 
-# #     for height,hvalue in HEIGHTS.items():
-# # 
-# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
-# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
-# #         try:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
-# #                 rmse(\
-# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
-# #                     np.interp(\
-# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
-# #                         record_yaml_mod.air_ap.z[lt_mod],\
-# #                         record_yaml_mod.air_ap.theta[lt_mod]\
-# #                     ))
-# #         except ValueError:
-# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
-# #     # # we calculate these things in the interface itself
-# #     # for key in ['q','theta','h']:
-# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_mod.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# #     #     # the actual time of the initial and evaluation sounding can be 
-# #     #     # different, but we consider this as a measurement error for
-# #     #     # the starting and end time of the simulation.
-# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
-# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
-# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
-# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
-# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
-# # 
-# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
-# #         
-# #                 
-# #                 
-# # # for EXP,c4glfile in c4glfiles.items():
-# # #     c4glfile.close()            
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# # 
-# #     
-# #     # {'Time[min:sec]': None 
-# #     #  'P[hPa]': None, 
-# #     #  'T[C]': None, 
-# #     #  'U[%]': None, 
-# #     #  'Wsp[m/s]': None, 
-# #     #  'Wdir[Grd]': None,
-# #     #  'Lon[°]', 
-# #     #  'Lat[°]', 
-# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
-# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
-# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
-# #     # }
-# #     # 
-# #     # #pivotrows =
-# #     # #{
-# # 
-# # 
-# # 
diff --git a/examples/setup_soundings/trash/setup_global_old.py b/examples/setup_soundings/trash/setup_global_old.py
deleted file mode 100644
index d812684..0000000
--- a/examples/setup_soundings/trash/setup_global_old.py
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thursday, March 29, 11:30 AM
-
-@author: Hendrik Wouters
-
-The dry-2-dry global radio sounding experiment.
-
-usage:
-    python setup_global.py 
-    where  is an integer indicating the row index of the station list
-    under odir+'/'+fn_stations (see below)
-
-this scripts should be called from the pbs script setup_global.pbs
-
-
-
-dependencies:
-    - pandas
-    - class4gl
-    - data_soundings
-
-
-"""
-
-""" import libraries """
-import pandas as pd
-import sys
-#import copy as cp
-import numpy as np
-from sklearn.metrics import mean_squared_error
-import logging
-import datetime as dt
-import os
-import math
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-
-
-#calculate the root mean square error
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from data_soundings import wyoming
-#from data_global import data_global
-
-# iniitialize global data
-globaldata = data_global()
-# ...  and load initial data pages
-globaldata.load_datasets(recalc=0)
-
-# read the list of stations with valid ground data (list generated with
-# get_valid_stations.py)
-idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-
-df_stations = pd.read_csv(fn_stations)
-
-
-STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = 100
-BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-
-
-iPROC = int(sys.argv[1])
-
-
-for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
-# for iSTN,STN in STNlist[5:]:  
-    
-    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
-    
-
-    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
-    #                   for EXP in experiments.keys()])
-        
-    with open(fnout,'w') as fileout, \
-         open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
-        wy_strm.set_STNM(int(STN['ID']))
-
-        # we consider all soundings after 1981
-        wy_strm.find_first(year=1981)
-        #wy_strm.find(dt.datetime(2004,10,19,6))
-        
-        c4gli = class4gl_input(debug_level=logging.INFO)
-        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
-        # so we continue as long as we can find a new sounding
-        while wy_strm.current is not None:
-            
-            c4gli.clear()
-            c4gli.get_profile_wyoming(wy_strm)
-            #print(STN['ID'],c4gli.pars.datetime)
-            #c4gli.get_global_input(globaldata)
-
-            print(c4gli.pars.STNID, c4gli.pars.ldatetime)
-
-            logic = dict()
-            logic['morning'] =  (c4gli.pars.ldatetime.hour < 12.)
-            logic['daylight'] = \
-                ((c4gli.pars.ldatetime_daylight - 
-                  c4gli.pars.ldatetime).total_seconds()/3600. <= 5.)
-            
-            logic['springsummer'] = (c4gli.pars.theta > 278.)
-            
-            # we take 3000 because previous analysis (ie., HUMPPA) has
-            # focussed towards such altitude
-            le3000 = (c4gli.air_balloon.z <= 3000.)
-            logic['10measurements'] = (np.sum(le3000) >= 10) 
-
-            leh = (c4gli.air_balloon.z <= c4gli.pars.h)
-
-            try:
-                logic['mlerrlow'] = (\
-                        (len(np.where(leh)[0]) > 0) and \
-                        # in cases where humidity is not defined, the mixed-layer
-                        # values get corr
-                        (not np.isnan(c4gli.pars.theta)) and \
-                        (rmse(c4gli.air_balloon.theta[leh] , \
-                              c4gli.pars.theta,filternan_actual=True) < 1.)\
-                              )
-    
-            except:
-                logic['mlerrlow'] = False
-                print('rmse probably failed')
-
-            logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
-            
-            print('logic:', logic)
-            # the result
-            morning_ok = np.mean(list(logic.values()))
-            print(morning_ok,c4gli.pars.ldatetime)
-            
-            # the next sounding will be used either for an afternoon sounding
-            # or for the morning sounding of the next day.
-            wy_strm.find_next()
-
-            # If the morning is ok, then we try to find a decent afternoon
-            # sounding
-            if morning_ok == 1.:
-                # we get the current date
-                current_date = dt.date(c4gli.pars.ldatetime.year, \
-                                       c4gli.pars.ldatetime.month, \
-                                       c4gli.pars.ldatetime.day)
-                c4gli_afternoon.clear()
-                c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                if wy_strm.current is not None:
-                    current_date_afternoon = \
-                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                       c4gli_afternoon.pars.ldatetime.month, \
-                                       c4gli_afternoon.pars.ldatetime.day)
-                else:
-                    # a dummy date: this will be ignored anyway
-                    current_date_afternoon = dt.date(1900,1,1)
-
-                # we will dump the latest afternoon sounding that fits the
-                # minimum criteria specified by logic_afternoon
-                c4gli_afternoon_for_dump = None
-                while ((current_date_afternoon == current_date) and \
-                       (wy_strm.current is not None)):
-                    logic_afternoon =dict()
-
-                    logic_afternoon['afternoon'] = \
-                        (c4gli_afternoon.pars.ldatetime.hour >= 12.)
-                    logic_afternoon['daylight'] = \
-                      ((c4gli_afternoon.pars.ldatetime - \
-                        c4gli_afternoon.pars.ldatetime_daylight \
-                       ).total_seconds()/3600. <= 2.)
-
-
-                    le3000_afternoon = \
-                        (c4gli_afternoon.air_balloon.z <= 3000.)
-                    logic_afternoon['5measurements'] = \
-                        (np.sum(le3000_afternoon) >= 5) 
-
-                    # we only store the last afternoon sounding that fits these
-                    # minimum criteria
-
-                    afternoon_ok = np.mean(list(logic_afternoon.values()))
-
-                    print('logic_afternoon: ',logic_afternoon)
-                    print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
-                    if afternoon_ok == 1.:
-                        # # doesn't work :(
-                        # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
-                        
-                        # so we just create a new one from the same wyoming profile
-                        c4gli_afternoon_for_dump = class4gl_input()
-                        c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
-
-                    wy_strm.find_next()
-                    c4gli_afternoon.clear()
-                    c4gli_afternoon.get_profile_wyoming(wy_strm)
-
-                    if wy_strm.current is not None:
-                        current_date_afternoon = \
-                               dt.date(c4gli_afternoon.pars.ldatetime.year, \
-                                       c4gli_afternoon.pars.ldatetime.month, \
-                                       c4gli_afternoon.pars.ldatetime.day)
-                    else:
-                        # a dummy date: this will be ignored anyway
-                        current_date_afternoon = dt.date(1900,1,1)
-
-                    # Only in the case we have a good pair of soundings, we
-                    # dump them to disk
-                if c4gli_afternoon_for_dump is not None:
-                    c4gli.update(source='pairs',pars={'runtime' : \
-                        int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
-                             c4gli.pars.datetime_daylight).total_seconds())})
-    
-    
-                    print('ALMOST...')
-                    if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
-                            
-        
-                        c4gli.get_global_input(globaldata)
-                        print('VERY CLOSE...')
-                        if c4gli.check_source_globaldata() and \
-                            (c4gli.check_source(source='wyoming',\
-                                               check_only_sections='pars')):
-                            c4gli.dump(fileout)
-                            
-                            c4gli_afternoon_for_dump.dump(fileout_afternoon)
-                            
-                            
-                            # for keyEXP,dictEXP in experiments.items():
-                            #     
-                            #     c4gli.update(source=keyEXP,pars = dictEXP)
-                            #     c4gl = class4gl(c4gli)
-                            #     # c4gl.run()
-                            #     
-                            #     c4gl.dump(c4glfiles[key])
-                            
-                            print('HIT!!!')
-                
-                
-    # for c4glfile in c4glfiles:
-    #     c4glfile.close()            
-
diff --git a/interface_functions.py b/interface_functions.py
deleted file mode 100644
index 3e483f3..0000000
--- a/interface_functions.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-#from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-#'_afternoon.yaml'
-def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
-    filename = yaml_file.name
-    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-    #yaml_file = open(filename)
-
-    #print('going to next observation',filename)
-    yaml_file.seek(index_start)
-
-    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-
-    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-    filebuffer.write(buf)
-    filebuffer.close()
-    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-    
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-
-    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-    print(command)
-    os.system(command)
-    jsonstream = open(filename+'.buffer.json.'+str(index_start))
-    record_dict = json.load(jsonstream)
-    jsonstream.close()
-    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-
-
-    if mode =='mod':
-        modelout = class4gl()
-        modelout.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        return modelout
-    elif mode == 'ini':
-
- 
-        # datetimes are incorrectly converted to strings. We need to convert them
-        # again to datetimes
-        for key,value in record_dict['pars'].items():
-            # we don't want the key with columns that have none values
-            if value is not None: 
-                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
-               # elif (type(value) == str):
-                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-
-            if (value == 0.9e19) or (value == '.9e19'):
-                record_dict['pars'][key] = np.nan
-        for key in record_dict.keys():
-            #print(key)
-            if key in ['air_ap','air_balloon',]:
-                #NNprint('check')
-                for datakey,datavalue in record_dict[key].items():
-                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-
-        #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        c4gli = class4gl_input()
-        print(c4gli.logger,'hello')
-        c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-        return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-
-class stations(object):
-    def __init__(self,path,suffix='ini',refetch_stations=False):
-
-        self.path = path
-
-        self.file = self.path+'/stations_list.csv'
-        if (os.path.isfile(self.file)) and (not refetch_stations):
-            self.table = pd.read_csv(self.file)
-        else:
-            self.table = self.get_stations(suffix=suffix)
-            self.table.to_csv(self.file)
-        
-        self.table = self.table.set_index('STNID')
-        #print(self.table)
-
-    def get_stations(self,suffix):
-        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
-        if len(stations_list_files) == 0:
-            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
-        stations_list_files.sort()
-        print(stations_list_files)
-        if len(stations_list_files) == 0:
-            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
-        stations_list = []
-        for stations_list_file in stations_list_files:
-            thisfile = open(stations_list_file,'r')
-            yamlgen = yaml.load_all(thisfile)
-            try:
-                first_record  = yamlgen.__next__()
-            except:
-                first_record = None
-            if first_record is not None:
-                stations_list.append({})
-                for column in ['STNID','latitude','longitude']:
-                    #print(first_record['pars'].keys())
-                    stations_list[-1][column] = first_record['pars'][column]
-                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
-            yamlgen.close()
-            thisfile.close()
-    
-        print(stations_list)
-        return pd.DataFrame(stations_list)
-
-class stations_iterator(object):
-    def __init__(self,stations):
-        self.stations = stations
-        self.ix = -1 
-    def __iter__(self):
-        return self
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.stations.table)) 
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_row(self,row):
-        self.ix = row
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_STNID(self,STNID):
-        self.ix = np.where((self.stations.table.index == STNID))[0][0]
-        print(self.ix)
-        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-    def close():
-        del(self.ix)
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.records))
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-# #'_afternoon.yaml'
-# def get_record_yaml(yaml_file,index_start,index_end):
-#     filename = yaml_file.name
-#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-#     #yaml_file = open(filename)
-# 
-#     #print('going to next observation',filename)
-#     yaml_file.seek(index_start)
-# 
-#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-# 
-#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-#     filebuffer.write(buf)
-#     filebuffer.close()
-#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-#     
-#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-# 
-#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-#     print(command)
-#     os.system(command)
-#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
-#     record_dict = json.load(jsonstream)
-#     jsonstream.close()
-#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-#  
-#     # datetimes are incorrectly converted to strings. We need to convert them
-#     # again to datetimes
-#     for key,value in record_dict['pars'].items():
-#         # we don't want the key with columns that have none values
-#         if value is not None: 
-#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
-#            # elif (type(value) == str):
-#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-#                 
-#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
-# 
-#         if (value == 0.9e19) or (value == '.9e19'):
-#             record_dict['pars'][key] = np.nan
-#     for key in record_dict.keys():
-#         print(key)
-#         if key in ['air_ap','air_balloon',]:
-#             print('check')
-#             for datakey,datavalue in record_dict[key].items():
-#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-# 
-#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-# 
-#     c4gli = class4gl_input()
-#     c4gli.load_yaml_dict(record_dict)
-#     return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
-
-    records = pd.DataFrame()
-    for STNID,station in stations.iterrows():
-        dictfnchunks = []
-        if getchunk is 'all':
-
-            # we try the old single-chunk filename format first (usually for
-            # original profile pairs)
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
-            if os.path.isfile(fn):
-                chunk = 0
-                dictfnchunks.append(dict(fn=fn,chunk=chunk))
-
-            # otherwise, we use the new multi-chunk filename format
-            else:
-                chunk = 0
-                end_of_chunks = False
-                while not end_of_chunks:
-                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
-                    if os.path.isfile(fn):
-                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
-                    else:
-                        end_of_chunks = True
-                    chunk += 1
-
-            # globyamlfilenames = path_yaml+'/'+format(STNID,'05d')+'*_'+subset+'.yaml'
-            # yamlfilenames = glob.glob(globyamlfilenames)
-            # yamlfilenames.sort()
-        else:
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
-            dictfnchunks.append(dict(fn=fn,chunk=getchunk))
-            
-        if len(dictfnchunks) > 0:
-            for dictfnchunk in dictfnchunks:
-                yamlfilename = dictfnchunk['fn']
-                chunk = dictfnchunk['chunk']
-                print(chunk)
-
-                #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
-                pklfilename = yamlfilename.replace('.yaml','.pkl')
-
-                #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
-                #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
-                generate_pkl = False
-                if not os.path.isfile(pklfilename): 
-                    print('pkl file does not exist. I generate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                elif not (os.path.getmtime(yamlfilename) <  \
-                    os.path.getmtime(pklfilename)):
-                    print('pkl file older than yaml file, so I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-
-                if refetch_records:
-                    print('refetch_records flag is True. I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                if not generate_pkl:
-                    records = pd.concat([records,pd.read_pickle(pklfilename)])
-                   # irecord = 0
-                else:
-                    with open(yamlfilename) as yaml_file:
-
-                        dictout = {}
-
-                        next_record_found = False
-                        end_of_file = False
-                        while (not next_record_found) and (not end_of_file):
-                            linebuffer = yaml_file.readline()
-                            next_record_found = (linebuffer == '---\n')
-                            end_of_file = (linebuffer == '')
-                        next_tell = yaml_file.tell()
-                        
-                        while not end_of_file:
-
-                            print(' next record:',next_tell)
-                            current_tell = next_tell
-                            next_record_found = False
-                            yaml_file.seek(current_tell)
-                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
-                            linebuffer = ''
-                            while ( (not next_record_found) and (not end_of_file)):
-                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
-                                linebuffer = yaml_file.readline()
-                                next_record_found = (linebuffer == '---\n')
-                                end_of_file = (linebuffer == '')
-                            filebuffer.close()
-                            
-                            next_tell = yaml_file.tell()
-                            index_start = current_tell
-                            index_end = next_tell
-
-                            
-                            #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
-                            print(command)
-                            
-                            os.system(command)
-                            #jsonoutput = subprocess.check_output(command,shell=True) 
-                            #print(jsonoutput)
-                            #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
-                            record = json.load(jsonstream)
-                            dictouttemp = {}
-                            for key,value in record['pars'].items():
-                                # we don't want the key with columns that have none values
-                                if value is not None: 
-                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
-                                   if (type(value) in regular_numeric_types):
-                                        dictouttemp[key] = value
-                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
-                                       #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S")
-                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
-                            recordindex = record['index']
-                            dictouttemp['chunk'] = chunk
-                            dictouttemp['index_start'] = index_start
-                            dictouttemp['index_end'] = index_end
-                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
-                            for key,value in dictouttemp.items():
-                                if key not in dictout.keys():
-                                    dictout[key] = {}
-                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
-                            print(' obs record registered')
-                            jsonstream.close()
-                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
-                    records_station = pd.DataFrame.from_dict(dictout)
-                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+pklfilename+') for station '\
-                          +str(STNID))
-                    records_station.to_pickle(pklfilename)
-                    # else:
-                    #     os.system('rm '+pklfilename)
-                    records = pd.concat([records,records_station])
-    return records
-
-def stdrel(mod,obs,columns):
-    stdrel = pd.DataFrame(columns = columns)
-    for column in columns:
-        stdrel[column] = \
-                (mod.groupby('STNID')[column].transform('mean') -
-                 obs.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') + \
-                (mod[column] -
-                 mod.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') 
-    return stdrel
-
-def pct(obs,columns):
-    pct = pd.DataFrame(columns=columns)
-    for column in columns:
-        #print(column)
-        pct[column] = ""
-        pct[column] = obs[column].rank(pct=True)
-    return pct
-
-def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
-    stats = pd.DataFrame()
-    for key in keys: 
-        stats['d'+key+'dt'] = ""
-        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
-                              (obs_afternoon.ldatetime - \
-                               obs_morning.ldatetime).dt.seconds*3600.
-    return stats
-
diff --git a/lib/class4gl.py b/lib/class4gl.py
deleted file mode 100644
index 7baaa51..0000000
--- a/lib/class4gl.py
+++ /dev/null
@@ -1,1611 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-
-Created on Mon Jan 29 12:33:51 2018
-
-Module file for class4gl, which  extents the class-model to be able to take
-global air profiles as input. It exists of:
-
-CLASSES:
-    - an input object, namely class4gl_input. It includes:
-        - a function to read Wyoming sounding data from a yyoming stream object
-        - a function to read global data from a globaldata library object 
-    - the model object: class4gl
-    - ....    
-
-DEPENDENCIES:
-    - xarray
-    - numpy
-    - data_global
-    - Pysolar
-    - yaml
-
-@author: Hendrik Wouters
-
-"""
-
-
-
-""" Setup of envirnoment """
-
-# Standard modules of the stand class-boundary-layer model
-from model import model
-from model import model_output as class4gl_output
-from model import model_input
-from model import qsat
-#from data_soundings import wyoming 
-import Pysolar
-import yaml
-import logging
-import warnings
-import pytz
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-
-# Generic Python Packages
-import numpy as np
-import datetime as dt
-import pandas as pd
-import xarray as xr
-import io
-#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio
-from data_global import data_global
-grav = 9.81
-
-# this is just a generic input object
-class generic_input(object):
-    def __init__(self):
-        self.init = True
-
-
-# all units from all variables in CLASS(4GL) should be defined here!
-units = {
-         'h':'m',
-         'theta':'K', 
-         'q':'kg/kg',
-         'cc': '-',
-         'cveg': '-',
-         'wg': 'm3 m-3',
-         'w2': 'm3 m-3',
-         #'wg': 'kg/kg',
-         'Tsoil': 'K',
-         'T2': 'K',
-         'z0m': 'm',
-         'alpha': '-',
-         'LAI': '-',
-         'dhdt':'m/h',
-         'dthetadt':'K/h',
-         'dqdt':'kg/kg/h',
-         'BR': '-',
-         'EF': '-',
-}
-
-class class4gl_input(object):
-# this was the way it was defined previously.
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
-
-    def __init__(self,set_pars_defaults=True,debug_level=None):
-
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        print('hello')
-        self.logger = logging.getLogger('class4gl_input')
-        print(self.logger)
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # # create logger
-        # self.logger = logging.getLogger('class4gl_input')
-        # self.logger.setLevel(debug_level)
-
-        # # create console handler and set level to debug
-        # ch = logging.StreamHandler()
-        # ch.setLevel(debug_level)
-
-        # # create formatter
-        # formatter = logging.Formatter('%(asctime)s - \
-        #                                %(name)s - \
-        #                                %(levelname)s - \
-        #                                %(message)s')
-        # add formatter to ch
-        # ch.setFormatter(formatter)
-     
-        # # add ch to logger
-        # self.logger.addHandler(ch)
-
-        # """ end set up logger """
-
-
-
-        # these are the standard model input single-value parameters for class
-        self.pars = model_input()
-
-        # diagnostic parameters of the initial profile
-        self.diag = dict()
-
-        # In this variable, we keep track of the different parameters from where it originates from. 
-        self.sources = {}
-
-        if set_pars_defaults:
-            self.set_pars_defaults()
-
-    def set_pars_defaults(self):
-
-        """ 
-        Create empty model_input and set up case
-        """
-        defaults = dict( 
-        dt         = 60.    , # time step [s] 
-        runtime    = 6*3600 ,  # total run time [s]
-        
-        # mixed-layer input
-        sw_ml      = True   ,  # mixed-layer model switch
-        sw_shearwe = False  ,  # shear growth mixed-layer switch
-        sw_fixft   = False  ,  # Fix the free-troposphere switch
-        h          = 200.   ,  # initial ABL height [m]
-        Ps         = 101300.,  # surface pressure [Pa]
-        divU       = 0.     ,  # horizontal large-scale divergence of wind [s-1]
-        #fc         = 1.e-4  ,  # Coriolis parameter [m s-1]
-        
-        theta      = 288.   ,  # initial mixed-layer potential temperature [K]
-        dtheta     = 1.     ,  # initial temperature jump at h [K]
-        gammatheta = 0.006  ,  # free atmosphere potential temperature lapse rate [K m-1]
-        advtheta   = 0.     ,  # advection of heat [K s-1]
-        beta       = 0.2    ,  # entrainment ratio for virtual heat [-]
-        wtheta     = 0.1    ,  # surface kinematic heat flux [K m s-1]
-        
-        q          = 0.008  ,  # initial mixed-layer specific humidity [kg kg-1]
-        dq         = -0.001 ,  # initial specific humidity jump at h [kg kg-1]
-        gammaq     = 0.     ,  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        advq       = 0.     ,  # advection of moisture [kg kg-1 s-1]
-        wq         = 0.1e-3 ,  # surface kinematic moisture flux [kg kg-1 m s-1]
-        
-        CO2        = 422.   ,  # initial mixed-layer CO2 [ppm]
-        dCO2       = -44.   ,  # initial CO2 jump at h [ppm]
-        gammaCO2   = 0.     ,  # free atmosphere CO2 lapse rate [ppm m-1]
-        advCO2     = 0.     ,  # advection of CO2 [ppm s-1]
-        wCO2       = 0.     ,  # surface kinematic CO2 flux [ppm m s-1]
-        sw_wind    = True  ,  # prognostic wind switch
-        u          = 0.     ,  # initial mixed-layer u-wind speed [m s-1]
-        du         = 0.     ,  # initial u-wind jump at h [m s-1]
-        gammau     = 0.     ,  # free atmosphere u-wind speed lapse rate [s-1]
-        advu       = 0.     ,  # advection of u-wind [m s-2]
-        v          = 0.0    , # initial mixed-layer u-wind speed [m s-1]
-        dv         = 0.0    ,  # initial u-wind jump at h [m s-1]
-        gammav     = 0.     ,  # free atmosphere v-wind speed lapse rate [s-1]
-        advv       = 0.     ,  # advection of v-wind [m s-2]
-        sw_sl      = True   , # surface layer switch
-        ustar      = 0.3    ,  # surface friction velocity [m s-1]
-        z0m        = 0.02   ,  # roughness length for momentum [m]
-        z0h        = 0.02* 0.1 ,  # roughness length for scalars [m]
-        sw_rad     = True   , # radiation switch
-        lat        = 51.97  ,  # latitude [deg]
-        lon        = -4.93  ,  # longitude [deg]
-        doy        = 268.   ,  # day of the year [-]
-        tstart     = 6.8    ,  # time of the day [h UTC]
-        cc         = 0.0    ,  # cloud cover fraction [-]
-        Q          = 400.   ,  # net radiation [W m-2] 
-        dFz        = 0.     ,  # cloud top radiative divergence [W m-2] 
-        ls_type    = 'js'   ,  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        wg         = 0.21   ,  # volumetric water content top soil layer [m3 m-3]
-        w2         = 0.21   ,  # volumetric water content deeper soil layer [m3 m-3]
-        cveg       = 0.85   ,  # vegetation fraction [-]
-        Tsoil      = 295.   ,  # temperature top soil layer [K]
-        Ts         = 295.   ,    # initial surface temperature [K]
-        T2         = 296.   ,  # temperature deeper soil layer [K]
-        a          = 0.219  ,  # Clapp and Hornberger retention curve parameter a
-        b          = 4.90   ,  # Clapp and Hornberger retention curve parameter b
-        p          = 4.     ,  # Clapp and Hornberger retention curve parameter c
-        CGsat      = 3.56e-6,  # saturated soil conductivity for heat
-        wsat       = 0.472  ,  # saturated volumetric water content ECMWF config [-]
-        wfc        = 0.323  ,  # volumetric water content field capacity [-]
-        wwilt      = 0.171  ,  # volumetric water content wilting point [-]
-        C1sat      = 0.132  ,  
-        C2ref      = 1.8    ,
-        LAI        = 2.     ,  # leaf area index [-]
-        gD         = 0.0    ,  # correction factor transpiration for VPD [-]
-        rsmin      = 110.   ,  # minimum resistance transpiration [s m-1]
-        rssoilmin  = 50.    ,  # minimun resistance soil evaporation [s m-1]
-        alpha      = 0.25   ,  # surface albedo [-]
-        Wmax       = 0.0012 ,  # thickness of water layer on wet vegetation [m]
-        Wl         = 0.0000 ,  # equivalent water layer depth for wet vegetation [m]
-        Lambda     = 5.9    ,  # thermal diffusivity skin layer [-]
-        c3c4       = 'c3'   ,  # Plant type ('c3' or 'c4')
-        sw_cu      = False  ,  # Cumulus parameterization switch
-        dz_h       = 150.   ,  # Transition layer thickness [m]
-        cala       = None   ,  # soil heat conductivity [W/(K*m)]
-        crhoc      = None   ,  # soil heat capacity  [J/K*m**3]
-        sw_ls      = True   ,
-        sw_ap      = True  ,   # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-        sw_ac      = None  ,   # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields  as input from eg., ERA-INTERIM
-        sw_lit     = False,
-        )
-        pars = model_input()
-        for key in defaults:
-            pars.__dict__[key] = defaults[key]
-        
-        self.update(source='defaults',pars=pars)
-        
-    def clear(self):
-        """ this procudure clears the class4gl_input """
-
-        for key in list(self.__dict__.keys()):
-            del(self.__dict__[key])
-        self.__init__()
-
-    def dump(self,file):
-        """ this procedure dumps the class4gl_input object into a yaml file
-            
-            Input: 
-                - self.__dict__ (internal): the dictionary from which we read 
-            Output:
-                - file: All the parameters in self.__init__() are written to
-                the yaml file, including pars, air_ap, sources etc.
-        """
-        file.write('---\n')
-        index = file.tell()
-        file.write('# CLASS4GL input; format version: 0.1\n')
-
-        # write out the position of the current record
-        yaml.dump({'index':index}, file, default_flow_style=False)
-
-        # we do not include the none values
-        for key,data in self.__dict__.items():
-            #if ((type(data) == model_input) or (type(class4gl_input):
-            if key == 'pars':
-
-                pars = {'pars' : self.__dict__['pars'].__dict__}
-                parsout = {}
-                for key in pars.keys():
-                    if pars[key] is not None:
-                        parsout[key] = pars[key]
-
-                yaml.dump(parsout, file, default_flow_style=False)
-            elif type(data) == dict:
-                if key == 'sources':
-                    # in case of sources, we want to have a
-                    # condensed list format as well, so we leave out
-                    # 'default_flow_style=False'
-                    yaml.dump({key : data}, file)
-                else: 
-                    yaml.dump({key : data}, file,
-                              default_flow_style=False)
-            elif type(data) == pd.DataFrame:
-                # in case of dataframes (for profiles), we want to have a
-                # condensed list format as well, so we leave out
-                # 'default_flow_style=False'
-                yaml.dump({key: data.to_dict(orient='list')},file)
-
-                # # these are trials to get it into a more human-readable
-                # fixed-width format, but it is too complex
-                #stream = yaml.dump({key : False},width=100, default_flow_style=False)
-                #file.write(stream)
-                
-                # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here
-                #file.write(key+': !!str |\n')
-                #file.write(str(data)+'\n')
-       
-    def load_yaml_dict(self,yaml_dict,reset=True):
-        """ this procedure loads class4gl_input data from a dictionary obtained from yaml
-            
-            Input: 
-                - yaml_dict: the dictionary from which we read 
-                - reset: reset data before reading        
-            Output:
-                - All the parameters in self, eg., (pars, air_ap, sources etc.,).
-        """
-        
-        if reset:
-            for key in list(self.__dict__.keys()):
-                del(self.__dict__[key])
-            self.__init__()
-
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                self.__dict__[key] = model_input()
-                self.__dict__[key].__dict__ = data
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            elif key == 'sources':
-                self.__dict__[key] = data
-            elif key == 'diag':
-                self.__dict__[key] = data
-            else: 
-                warnings.warn("Key '"+key+"' may not be implemented.")
-                self.__dict__[key] = data
-
-    def update(self,source,**kwargs):
-        """ this procedure is to make updates of input parameters and tracking
-        of their source more convenient. It implements the assignment of
-        parameter source/sensitivity experiment IDs ('eg.,
-        'defaults', 'sounding balloon', any satellite information, climate
-        models, sensitivity tests etc.). These are all stored in a convenient
-        way with as class4gl_input.sources.  This way, the user can always consult with
-        from where parameters data originates from.  
-        
-        Input:
-            - source:    name of the underlying dataset
-            - **kwargs: a dictionary of data input, for which the key values
-            refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and
-            the values is a again a dictionary/dataframe of datakeys/columns
-            ('wg','PRES','datetime', ...) and datavalues (either single values,
-            profiles ...), eg., 
-
-                pars = {'wg': 0.007  , 'w2', 0.005}
-                pars = {pd.Dataframe('PRES': [1005.,9523,...]  , 'THTA': [295.,
-                                     300.,...]}
-            
-        Output:
-            - self.__dict__[datatype] : object to which the parameters are
-                                        assigned. They can be consulted with
-                                        self.pars, self.profiles, etc.
-                                        
-            - self.sources[source] : It supplements the overview overview of
-                                     data sources can be consulted with
-                                     self.sources. The structure is as follows:
-                                     as:
-                self.sources = { 
-                'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...],
-                'GLEAM' :  ['pars:wg','pars:w2', ...],
-                 ...
-                }
-        
-        """
-
-        #print(source,kwargs)
-
-        for key,data in kwargs.items():
-
-            #print(key)
-            # if the key is not in class4gl_input object, then just add it. In
-            # that case, the update procedures below will just overwrite it 
-            if key not in self.__dict__:
-                self.__dict__[key] = data
-
-
-            
-
-            #... we do an additional check to see whether there is a type
-            # match. I not then raise a key error
-            if (type(data) != type(self.__dict__[key]) \
-                # we allow dict input for model_input pars
-                and not ((key == 'pars') and (type(data) == dict) and \
-                (type(self.__dict__[key]) == model_input))):
-
-                raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object')
-
-
-            # This variable keeps track of the added data that is supplemented
-            # by the current source. We add this to class4gl_input.sources
-            datakeys = []
-
-            #... and we update the class4gl_input data, and this depends on the
-            # data type
-
-            if type(self.__dict__[key]) == pd.DataFrame:
-                # If the data type is a dataframe, then we update the columns
-                for column in list(data.columns):
-                    #print(column)
-                    self.__dict__[key][column] = data[column]
-                    datakeys.append(column)
-                    
-
-            elif type(self.__dict__[key]) == model_input:
-                # if the data type is a model_input, then we update its internal
-                # dictionary of parameters
-                if type(data) == model_input:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data.__dict__}
-                    datakeys = list(data.__dict__.keys())
-                elif type(data) == dict:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data}
-                    datakeys = list(data.keys())
-                else:
-                    raise TypeError('input key '+key+' is not of the same type\
-                                    as the one in the class4gl_object')
-
-            elif type(self.__dict__[key]) == dict:
-                # if the data type is a dictionary, we update the
-                # dictionary 
-                self.__dict__[key] = {self.__dict__[key] , data}
-                datakeys = list(data.keys())
-
-
-            # if source entry is not existing yet, we add it
-            if source not in self.sources.keys():
-                self.sources[source] = []
-
-
-            # self.logger.debug('updating section "'+\
-            #                  key+' ('+' '.join(datakeys)+')'\
-            #                  '" from source \
-            #                  "'+source+'"')
-
-            # Update the source dictionary: add the provided data keys to the
-            # specified source list
-            for datakey in datakeys:
-                # At first, remove the occurences of the keys in the other
-                # source lists
-                for sourcekey,sourcelist in self.sources.items():
-                    if key+':'+datakey in sourcelist:
-                        self.sources[sourcekey].remove(key+':'+datakey)
-                # Afterwards, add it to the current source list
-                self.sources[source].append(key+':'+datakey)
-
-
-        # # in case the datatype is a class4gl_input_pars, we update its keys
-        # # according to **kwargs dictionary
-        # if type(self.__dict__[datatype]) == class4gl_input_pars:
-        #     # add the data parameters to the datatype object dictionary of the
-        #     # datatype
-        #     self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ ,
-        #                                        **kwargs}
-        # # in case, the datatype reflects a dataframe, we update the columns according
-        # # to the *args list
-        # elif type(self.__dict__[datatype]) == pd.DataFrame:
-        #     for dataframe in args:
-        #         for column in list(dataframe.columns):
-        #             self.__dict__[datatype][column] = dataframe[column]
-        
-
-    def get_profile(self,IOBJ, *args, **argv):
-        # if type(IOBJ) == wyoming:
-        self.get_profile_wyoming(IOBJ,*args,**argv)
-        # else:
-        #     raise TypeError('Type '+str(type(IOBJ))+' is not supported')
-        
-    def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
-        """ 
-            Purpose: 
-                This procedure assigns wyoming air profiles and parameters to the class4gl_input object.
-
-            Input:
-                1. wy_strm   = wyoming html (beautifulsoup) stream object. The
-                function will take the profile at the stream's current
-                position. 
-                2. air_ap_mode: which air profile do we take? 
-                    - b : best
-                    - l : according to lower limit for the mixed-layer height
-                            estimate
-                    - u : according to upper limit for the mixed-layer height
-                            estimate
-
-
-            Output:
-                1. all single-value parameters are stored in the
-                   class4gl_input.pars object
-                2. the souding profiles are stored in the in the
-                   class4gl_input.air_balloon dataframe
-                3. modified sounding profiles for which the mixed layer height
-                   is fitted
-                4. ...
-
-        """
-
-
-        # Raise an error in case the input stream is not the correct object
-        # if type(wy_strm) is not wyoming:
-        #    raise TypeError('Not a wyoming type input stream')
-
-        # Let's tell the class_input object that it is a Wyoming fit type
-        self.air_ap_type = 'wyoming'
-        # ... and which mode of fitting we apply
-        self.air_ap_mode = air_ap_mode
-
-        """ Temporary variables used for output """
-        # single value parameters derived from the sounding profile
-        dpars = dict()
-        # profile values
-        air_balloon = pd.DataFrame()
-        # fitted profile values
-        air_ap = pd.DataFrame()
-        
-        string = wy_strm.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = wy_strm.current.find_next('pre').find_next('pre').text
-        
-        # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)).
-        dpars = {**dpars,
-                **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict()
-               }
-        
-        # we get weird output when it's a numpy Timestamp, so we convert it to
-        # pd.datetime type
-
-        dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M"))
-        dpars['STNID'] = dpars['Station number']
-
-        # altitude above ground level
-        air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation']
-        # absolute humidity in g/kg
-        air_balloon['q']= (air_balloon.MIXR/1000.) \
-                              / \
-                             (air_balloon.MIXR/1000.+1.)
-        # convert wind speed from knots to m/s
-        air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT
-        angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-        
-        air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x)
-        air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x)
-
-        
-
-        cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-
-        air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q)
-        air_balloon['p'] = air_balloon.PRES*100.
-
-
-        # Therefore, determine the sounding that are valid for 'any' column 
-        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
-        #is_valid = (air_balloon.z >= 0)
-        # # this is an alternative pipe/numpy method
-        # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
-        valid_indices = air_balloon.index[is_valid].values
-        print(valid_indices)
-
-        dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
-
-        air_balloon['t'] = air_balloon['TEMP']+273.15
-        air_balloon['theta'] = (air_balloon.t) * \
-                   (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp)
-        air_balloon['thetav']   = air_balloon['theta']*(1. + 0.61 * air_balloon['q'])
-
-        if len(valid_indices) > 0:
-            #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
-            dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
-            
-            dpars['h_b'] = np.max((dpars['h'],10.))
-            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
-            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
-            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
-            
-            # the final mixed-layer height that will be used by class. We round it
-            # to 1 decimal so that we get a clean yaml output format
-            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
-        else:
-            dpars['h_u'] =np.nan
-            dpars['h_l'] =np.nan
-            dpars['h_e'] =np.nan
-            dpars['h'] =np.nan
-
-
-        if np.isnan(dpars['h']):
-            dpars['Ps'] = np.nan
-
-
-
-
-        if ~np.isnan(dpars['h']):
-            # determine mixed-layer properties (moisture, potential temperature...) from profile
-            
-            # ... and those of the mixed layer
-            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
-            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
-            if len(valid_indices) > 1:
-                if len(valid_indices_below_h) >= 3.:
-                    ml_mean = air_balloon[is_valid_below_h].mean()
-                else:
-                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
-            elif len(valid_indices) == 1:
-                ml_mean = (air_balloon.iloc[0:1]).mean()
-            else:
-                temp =  pd.DataFrame(air_balloon)
-                temp.iloc[0] = np.nan
-                ml_mean = temp
-                       
-            dpars['theta']= ml_mean.theta
-            dpars['q']    = ml_mean.q
-            dpars['u']    = ml_mean.u 
-            dpars['v']    = ml_mean.v 
-        else:
-            dpars['theta'] = np.nan
-            dpars['q'] = np.nan
-            dpars['u'] = np.nan
-            dpars['v'] = np.nan
-            
-
-
-
-        # First 3 data points of the mixed-layer fit. We create a empty head
-        # first
-        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
-        # All other  data points above the mixed-layer fit
-        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
-        
-        #calculate mixed-layer jump ( this should be larger than 0.1)
-        
-        air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
-        air_ap_head['HGHT'] = air_ap_head['z'] \
-                                + \
-                                np.round(dpars[ 'Station elevation'],1)
-        
-        # make a row object for defining the jump
-        jump = air_ap_head.iloc[0] * np.nan
-            
-        if air_ap_tail.shape[0] > 1:
-
-            # we originally used THTA, but that has another definition than the
-            # variable theta that we need which should be the temperature that
-            # one would have if brought to surface (NOT reference) pressure.
-            for column in ['theta','q','u','v']:
-               
-               # initialize the profile head with the mixed-layer values
-               air_ap_head[column] = ml_mean[column]
-               # calculate jump values at mixed-layer height, which will be
-               # added to the third datapoint of the profile head
-               jump[column] = (air_ap_tail[column].iloc[1]\
-                               -\
-                               air_ap_tail[column].iloc[0])\
-                              /\
-                              (air_ap_tail.z.iloc[1]\
-                               - air_ap_tail.z.iloc[0])\
-                              *\
-                              (dpars['h']- air_ap_tail.z.iloc[0])\
-                              +\
-                              air_ap_tail[column].iloc[0]\
-                              -\
-                              ml_mean[column] 
-               if column == 'theta':
-                  # for potential temperature, we need to set a lower limit to
-                  # avoid the model to crash
-                  jump.theta = np.max((0.1,jump.theta))
-        
-               air_ap_head[column][2] += jump[column]
-        
-        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
-
-
-        # make theta increase strong enough to avoid numerical
-        # instability
-        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = dpars['theta']
-        z_low =     dpars['h']
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                (z_mean > (z_low+10.)) and \
-                (theta_mean > (theta_low+0.2) ) and \
-                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-
-
-
-
-
-        air_ap = \
-            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
-        
-        # we copy the pressure at ground level from balloon sounding. The
-        # pressure at mixed-layer height will be determined internally by class
-        #print(air_ap['PRES'].iloc[0])
-
-        rho        = 1.2                   # density of air [kg m-3]
-        g          = 9.81                  # gravity acceleration [m s-2]
-
-        air_ap['p'].iloc[0] =dpars['Ps'] 
-        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
-        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
-
-        
-        dpars['lat'] = dpars['Station latitude']
-        dpars['latitude'] = dpars['lat']
-        
-        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
-        dpars['lon'] = 0.
-        # this is the real longitude that will be used to extract ground data
-        dpars['longitude'] = dpars['Station longitude']
-        
-        dpars['ldatetime'] = dpars['datetime'] \
-                            + \
-                            dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-        dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
-                                    dpars['latitude'],\
-                                    dpars['longitude'],\
-                                    dpars['datetime']\
-                                )
-        dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
-                                         0.,
-                                         dpars['ldatetime'],0.)
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
-        # This is the nearest datetime when the sun is up (for class)
-        dpars['ldatetime_daylight'] = \
-                                np.min(\
-                                    (np.max(\
-                                        (dpars['ldatetime'],\
-                                         dpars['lSunrise'])\
-                                     ),\
-                                     dpars['lSunset']\
-                                    )\
-                                )
-        # apply the same time shift for UTC datetime
-        dpars['datetime_daylight'] = dpars['datetime'] \
-                                    +\
-                                    (dpars['ldatetime_daylight']\
-                                     -\
-                                     dpars['ldatetime'])
-        
-        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
-
-        # We set the starting time to the local sun time, since the model 
-        # thinks we are always at the meridian (lon=0). This way the solar
-        # radiation is calculated correctly.
-        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
-                         + \
-                         dpars['ldatetime_daylight'].minute/60.\
-                         + \
-                         dpars['ldatetime_daylight'].second/3600.
-        
-
-        # convert numpy types to native python data types. This provides
-        # cleaner data IO with yaml:
-        for key,value in dpars.items():
-            if type(value).__module__ == 'numpy':
-                dpars[key] = dpars[key].item()
-
-        # # we make a pars object that is similar to the destination object
-        # pars = model_input()
-        # for key,value in dpars.items():
-        #     pars.__dict__[key] = value
-
-
-        # we round the columns to a specified decimal, so that we get a clean
-        # output format for yaml
-        decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\
-                   'DRCT':2 ,'SKNT':2,   'theta':4,   'THTE':2,  'THTV':2,\
-                   'z':2, 'q':5, 'WSPD':2, 'u':4,       'v':4}
-# 
-        for column,decimal in decimals.items():
-            air_balloon[column] = air_balloon[column].round(decimal)
-            air_ap[column] = air_ap[column].round(decimal)
-
-        self.update(source='wyoming',\
-                    # pars=pars,
-                    pars=dpars,\
-                    air_balloon=air_balloon,\
-                    air_ap=air_ap)
-
-        
-    def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
-    
-        """
-        Purpose: This sets copies the parameters from the global datasets into the self (or similar object) 
-                 according to the position (lat lon) and the class datetime and timespan
-                 globaldata should be a globaldata multifile object
-        
-        Input: 
-            - globaldata: this is the library object
-            - only_keys: only extract specified keys
-            - exclude_keys: do not inherit specified keys
-        """
-        classdatetime      = np.datetime64(self.pars.datetime_daylight)
-        classdatetime_stop = np.datetime64(self.pars.datetime_daylight \
-                                           + \
-                                           dt.timedelta(seconds=self.pars.runtime)\
-                                          )
-
-
-        # # list of variables that we get from global ground data
-        # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', 
-        #                 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', 
-        #                 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', 
-        #                 'texture', 'itex', 'isoil', 'BR',
-        #                 'b', 'cveg',
-        #                 'C1sat', 
-        #                 'C2ref', 'p', 'a',
-        #                 ] #globaldata.datasets.keys():
-
-        # # these are the required class4gl 3d atmospheric input which is not provided by the soundings
-        # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p']
-
-
-        if type(globaldata) is not data_global:
-            raise TypeError("Wrong type of input library") 
-
-        # by default, we get all dataset keys
-        keys = list(globaldata.datasets.keys())
-
-        # We add LAI manually, because it is not listed in the datasets and
-        #they its retreival is hard coded below based on LAIpixel and cveg
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            keys.append('LAI')
-
-        # # In case there is surface pressure, we also calculate the half-level
-        # # and full-level pressure fields
-        # if ('sp' in keys):
-        #     keys.append('pfull')
-        #     keys.append('phalf')
-
-        # If specified, we only take the keys that are in only_keys
-        if only_keys is not None:
-            for key in keys:
-                if key not in only_keys:
-                    keys.remove(key)
-                
-        # If specified, we take out keys that are in exclude keys
-        if exclude_keys is not None:
-            for key in keys:
-                if key in exclude_keys:
-                    keys.remove(key)
-
-        # we set everything to nan first in the pars section (non-profile parameters
-        # without lev argument), so that we can check afterwards whether the
-        # data is well-fetched or not.
-        for key in keys:
-            if not ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None) and \
-                ('lev' in globaldata.datasets[key].page[key].dims)):
-                self.update(source='globaldata',pars={key:np.nan})
-            # # we do not check profile input for now. We assume it is
-            # # available
-            #else:
-            #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
-
-        self.logger.debug('getting keys "'+', '.join(keys)+'\
-                          from global data')
-
-        for key in keys:
-            # If we find it, then we obtain the variables
-            if ((key in globaldata.datasets) and \
-                (globaldata.datasets[key].page is not None)):
-
-                # check first whether the dataset has a height coordinate (3d space)
-                if 'lev' in globaldata.datasets[key].page[key].dims:
-
-                    # first, we browse to the correct file that has the current time
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-                        globaldata.datasets[key].browse_page(time=classdatetime)
-
-                    
-                    if (globaldata.datasets[key].page is not None):
-                        # find longitude and latitude coordinates
-                        ilats = (np.abs(globaldata.datasets[key].page.lat -
-                                        self.pars.latitude) < 0.5)
-                        ilons = (np.abs(globaldata.datasets[key].page.lon -
-                                        self.pars.longitude) < 0.5)
-                        
-                        # if we have a time dimension, then we look up the required timesteps during the class simulation
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            itimes = ((globaldata.datasets[key].page.time >= \
-                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
-
-                            # In case we didn't find any correct time, we take the
-                            # closest one.
-                            if np.sum(itimes) == 0.:
-
-
-                                classdatetimemean = \
-                                    np.datetime64(self.pars.datetime_daylight + \
-                                    dt.timedelta(seconds=int(self.pars.runtime/2.)
-                                                ))
-
-                                dstimes = globaldata.datasets[key].page.time
-                                time = dstimes.sel(time=classdatetimemean,method='nearest')
-                                itimes = (globaldata.datasets[key].page.time ==
-                                          time)
-                                
-                        else:
-                            # we don't have a time coordinate so it doesn't matter
-                            # what itimes is
-                            itimes = 0
-
-                        #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
-
-                        # over which dimensions we take a mean:
-                        dims = globaldata.datasets[key].page[key].dims
-                        namesmean = list(dims)
-                        namesmean.remove('lev')
-                        idxmean = [dims.index(namemean) for namemean in namesmean]
-                        
-                        value = \
-                        globaldata.datasets[key].page[key].isel(time=itimes,
-                                                                lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
-
-                        # Ideally, source should be equal to the datakey of globaldata.library 
-                        # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
-                        #  but therefore the globaldata class requires a revision to make this work
-                        self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
-
-                else:
-                    # this procedure is for reading the ground fields (2d space). 
-                    # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
-
-    
-                    if 'time' in list(globaldata.datasets[key].page[key].dims):
-    
-                       # first, we browse to the correct file
-                       #print(key)
-                       globaldata.datasets[key].browse_page(time=classdatetime)
-    
-                    if globaldata.datasets[key].page is not None:
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - self.pars.latitude))
-                        ilat = np.where((DIST) == np.min(DIST))[0][0]
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - self.pars.longitude))
-                        ilon = np.where((DIST) == np.min(DIST))[0][0]
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lat'].values\
-                                - (self.pars.latitude + 0.5)))
-                        ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmax = ilat
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.variables['lon'].values\
-                                - (self.pars.longitude  + 0.5)))
-                        ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmax = ilon
-                        
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lat.values\
-                                - (self.pars.latitude - 0.5)))
-                        ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                            ilatmin = ilat
-                        DIST = \
-                        np.abs((globaldata.datasets[key].page.lon.values\
-                                - (self.pars.longitude  - 0.5)))
-                        ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                        if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                            ilonmin = ilon        
-                        
-                        if ilatmin < ilatmax:
-                            ilatrange = range(ilatmin,ilatmax+1)
-                        else:
-                            ilatrange = range(ilatmax,ilatmin+1)
-                            
-                        if ilonmin < ilonmax:
-                            ilonrange = range(ilonmin,ilonmax+1)
-                        else:
-                            ilonrange = range(ilonmax,ilonmin+1)     
-                            
-                        if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                            
-                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
-                                idatetime += 1
-                            
-                            classdatetimeend = np.datetime64(\
-                                                             self.pars.datetime +\
-                                                             dt.timedelta(seconds=self.pars.runtime)\
-                                                            ) 
-                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
-                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
-                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)):
-                                idatetimeend -= 1
-                            idatetime = np.min((idatetime,idatetimeend))
-                            #for gleam, we take the previous day values
-                            if key in ['wg', 'w2']:
-                                idatetime = idatetime - 1
-                                idatetimeend = idatetimeend - 1
-
-                            # in case of soil temperature, we take the exact
-                            # timing (which is the morning)
-                            if key in ['Tsoil','T2']:
-                                idatetimeend = idatetime
-                            
-                            idts = range(idatetime,idatetimeend+1)
-                            
-                            count = 0
-                            self.__dict__[key] = 0.
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    for iidts in idts:
-                                        value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values)
-                                        count += 1
-                            value = value/count
-                            self.update(source='globaldata',pars={key:value.item()})
-                                
-                        else:
-                                
-                            count = 0
-                            value = 0.
-                            for iilat in ilatrange:
-                                for iilon in ilonrange:
-                                    value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values)
-                                    count += 1
-                            value = value/count                        
-
-                            self.update(source='globaldata',pars={key:value.item()})
-
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            self.logger.debug('also update LAI based on LAIpixel and cveg') 
-            # I suppose LAI pixel is already determined in the previous
-            # procedure. Anyway...
-            key = 'LAIpixel'
-
-            if globaldata.datasets[key].page is not None:
-                # first, we browse to the correct file that has the current time
-                if 'time' in list(globaldata.datasets[key].page[key].dims):
-                    globaldata.datasets[key].browse_page(time=classdatetime)
-            
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - self.pars.latitude))
-                ilat = np.where((DIST) == np.min(DIST))[0][0]
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - self.pars.longitude))
-                ilon = np.where((DIST) == np.min(DIST))[0][0]
-                 
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude + 0.5)))
-                ilatmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmax = ilat
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values \
-                        - (self.pars.longitude  + 0.5)))
-                ilonmax = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmax = ilon
-                
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lat.values\
-                        - (self.pars.latitude - 0.5)))
-                ilatmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]:
-                    ilatmin = ilat
-                DIST = \
-                np.abs((globaldata.datasets[key].page.lon.values\
-                        - (self.pars.longitude  - 0.5)))
-                ilonmin = np.where((DIST) == np.min(DIST))[0][0]
-                if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
-                    ilonmin = ilon        
-                DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
-                idatetime = np.where((DIST) == np.min(DIST))[0][0]
-                
-                
-                if ilatmin < ilatmax:
-                    ilatrange = range(ilatmin,ilatmax+1)
-                else:
-                    ilatrange = range(ilatmax,ilatmin+1)
-                    
-                if ilonmin < ilonmax:
-                    ilonrange = range(ilonmin,ilonmax+1)
-                else:
-                    ilonrange = range(ilonmax,ilonmin+1)           
-                
-                #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape)
-                LAIpixel = 0.
-                count = 0
-                for iilat in [ilat]: #ilatrange
-                    for iilon in [ilon]: #ilonrange
-                        LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values
-                        
-                                        
-                        # if np.isnan(tarray[idatetime]):
-                        #     print("interpolating GIMMS LAIpixel nan value")
-                        #     
-                        #     mask = np.isnan(tarray)
-                        #     
-                        #     #replace each nan value with a interpolated value
-                        #     if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                        #         tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-                        #         
-                        #     else:
-                        #         print("Warning. Could not interpolate GIMMS LAIpixel nan value")
-                    
-                        #         tarray *= np.nan 
-                        
-                        count += 1
-                        #tarray_res += tarray
-                LAIpixel = LAIpixel/count
-                
-                count = 0
-                #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values
-  
-                self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) 
-                #print('LAIpixel:',self.__dict__['LAIpixel'])
-                #print('cveg:',self.__dict__['cveg'])
-                
-                # finally, we rescale the LAI according to the vegetation
-                # fraction
-                value = 0. 
-                if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)):
-                   value =self.pars.LAIpixel/self.pars.cveg
-                else:
-                    # in case of small vegetation fraction, we take just a standard 
-                    # LAI value. It doesn't have a big influence anyway for
-                    # small vegetation
-                    value = 2.
-                #print('LAI:',self.__dict__['LAI'])
-                self.update(source='globaldata',pars={'LAI':value}) 
-
-
-        # in case we have 'sp', we also calculate the 3d pressure fields at
-        # full level and half level
-        if ('sp' in keys) and ('sp' in self.pars.__dict__):
-            pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0)  
-
-            phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values)
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # hydrostatic thickness of each model layer
-            delpdgrav = -(phalf[:-1] - phalf[1:])/grav
-            # # dz = rhodz/(R * T / pfull)
-
-
-            # # subsidence multiplied by density. We calculate the subsidence of
-            # # the in class itself
-            # wrho = np.zeros_like(phalf)
-            # wrho[-1] = 0. 
-
-            # for ihlev in range(0,wrho.shape[0]-1):
-            #     # subsidence multiplied by density is the integral of
-            #     # divergences multiplied by the layer thicknessies
-            #     wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \
-            #                     self.air_ac['divU_y'][ihlev:]) * \
-            #                    delpdgrav[ihlev:]).sum()
-
-
-            
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'p':list(pfull)}))
-            self.update(source='globaldata',\
-                        air_ach=pd.DataFrame({'p':list(phalf)}))
-            self.update(source='globaldata',\
-                        air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)}))
-            # self.update(source='globaldata',\
-            #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
-
-    def check_source(self,source,check_only_sections=None):
-        """ this procedure checks whether data of a specified source is valid.
-
-        INPUT:
-            source: the data source we want to check
-            check_only_sections: a string or list with sections to be checked
-        OUTPUT:
-            returns True or False
-        """
-
-        # we set source ok to false as soon as we find a invalid input
-        source_ok = True
-
-        # convert to a single-item list in case of a string
-        check_only_sections_def = (([check_only_sections]) if \
-                                   type(check_only_sections) is str else \
-                                    check_only_sections)
-                                  
-        if source not in self.sources.keys():
-            self.logger.info('Source '+source+' does not exist')
-            source_ok = False
-
-        for sectiondatakey in self.sources[source]:                             
-            section,datakey = sectiondatakey.split(':')                         
-            if ((check_only_sections_def is None) or \
-                (section in check_only_sections_def)):                          
-                checkdatakeys = []
-                if type(self.__dict__[section]) is pd.DataFrame:
-                    checkdata = self.__dict__[section]
-                elif type(self.__dict__[section]) is model_input:
-                    checkdata = self.__dict__[section].__dict__
-
-                if (datakey not in checkdata):                              
-                    # self.logger.info('Expected key '+datakey+\
-                    #                  ' is not in parameter input')                        
-                    source_ok = False                                           
-                elif (checkdata[datakey] is None) or \
-                     (pd.isnull(checkdata[datakey]) is True):                    
-        
-                    # self.logger.info('Key value of "'+datakey+\
-                    #                  '" is invalid: ('+ \
-                    # str(self.__dict__[section].__dict__[datakey])+')')         
-                    source_ok = False
-
-        return source_ok
-
-    def check_source_globaldata(self):
-        """ this procedure checks whether all global parameter data is
-        available, according to the keys in the self.sources"""
-
-        source_globaldata_ok = True
-
-        #self.get_values_air_input()
-
-        # and now we can get the surface values
-        #class_settings = class4gl_input()
-        #class_settings.set_air_input(input_atm)
-        
-        # we only allow non-polar stations
-        if not (self.pars.lat <= 60.):
-            source_globaldata_ok = False
-            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-        
-        # check lat and lon
-        if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
-            source_globaldata_ok = False
-            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
-            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
-        else:
-            # we only check the ground parameter data (pars section). The 
-            # profile data (air_ap section) are supposed to be valid in any 
-            # case.
-            source_ok = self.check_source(source='globaldata',\
-                                          check_only_sections=['air_ac',\
-                                                               'air_ap',\
-                                                               'pars'])
-            if not source_ok:
-                source_globaldata_ok = False
-        
-            # Additional check: we exclude desert-like
-            if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
-                source_globaldata_ok = False
-                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
-            if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
-                source_globaldata_ok = False
-                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
-            elif self.pars.cveg < 0.02:
-                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
-                source_globaldata_ok = False
-
-        return source_globaldata_ok
-
-
-class c4gli_iterator():
-    """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
-    
-        for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator
-    """
-    def __init__(self,file):
-        # take file as IO stream
-        self.file = file
-        self.yaml_generator = yaml.load_all(file)
-        self.current_dict = {}
-        self.current_class4gl_input = class4gl_input()
-        separator = self.file.readline() # this is just dummy
-        self.header = file.readline()
-        if self.header != '# CLASS4GL record; format version: 0.1\n':
-            raise NotImplementedError("Wrong format version: '"+self.header+"'")
-    def __iter__(self):
-        return self
-    def __next__(self):
-        self.current_dict = self.yaml_generator.__next__()
-        self.current_class4gl_input.load_yaml_dict(self.current_dict)
-        return self.current_class4gl_input
-
-
-
-#get_cape and lift_parcel are adapted from the SkewT package
-    
-class gl_dia(object):
-    def get_lifted_index(self,timestep=-1):
-        self.LI = get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.)
-    
-#from SkewT
-#def get_lcl(startp,startt,startdp,nsteps=101):
-#    from numpy import interp
-#    #--------------------------------------------------------------------
-#    # Lift a parcel dry adiabatically from startp to LCL.
-#    # Init temp is startt in K, Init dew point is stwrtdp,
-#    # pressure levels are in Pa    
-#    #--------------------------------------------------------------------
-#
-#    assert startdp<=startt
-#
-#    if startdp==startt:
-#        return np.array([startp]),np.array([startt]),np.array([startdp]),
-#
-#    # Pres=linspace(startp,60000.,nsteps)
-#    Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps)
-#
-#    # Lift the dry parcel
-#    T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) 
-#    # Mixing ratio isopleth
-#    starte=VaporPressure(startdp)
-#    startw=MixRatio(starte,startp)
-#    e=Pres*startw/(.622+startw)
-#    T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK
-#
-#    # Solve for the intersection of these lines (LCL).
-#    # interp requires the x argument (argument 2)
-#    # to be ascending in order!
-#    P_lcl=interp(0.,T_iso-T_dry,Pres)
-#    T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1])
-#
-#    # # presdry=linspace(startp,P_lcl)
-#    # presdry=logspace(log10(startp),log10(P_lcl),nsteps)
-#
-#    # tempdry=interp(presdry,Pres[::-1],T_dry[::-1])
-#    # tempiso=interp(presdry,Pres[::-1],T_iso[::-1])
-#
-#    return P_lcl,T_lcl
-
-
-
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    """ Calculate mixed-layer height from temperature and wind speed profile
-
-        Input:
-            HAGL: height coordinates [m]
-            THTV: virtual potential temperature profile [K]
-            WSPD: wind speed profile [m/s]
-
-        Output:
-            BLH: best-guess mixed-layer height
-            BLHu: upper limit of mixed-layer height
-            BLHl: lower limit of mixed-layer height
-
-    """
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHl
-
-
-
-#from class
-def get_lcl(startp,startt,startqv):
-        # Find lifting condensation level iteratively
-    lcl = 20.
-    RHlcl = 0.5
-    
-    itmax = 30
-    it = 0
-    while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHd
-
-def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
-    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
-    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
-
-
-#from os import listdir
-#from os.path import isfile #,join
-import glob
-
-
-class wyoming(object):
-    def __init__(self):
-       self.status = 'init'
-       self.found = False
-       self.DT = None
-       self.current = None
-       #self.mode = 'b'
-       self.profile_type = 'wyoming'  
-       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
-       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-         
-    def set_STNM(self,STNM):
-        self.__init__()
-        self.STNM = STNM
-        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
-        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
-        self.current = None
-        self.found = False
-        self.FILES.sort()
-        
-    def find_first(self,year=None,get_atm=False):
-        self.found = False    
-                
-        # check first file/year or specified year
-        if year == None:
-            self.iFN = 0
-            self.FN = self.FILES[self.iFN]
-        else:
-            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-        self.current = self.sounding_series.find('h2')
-        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
-        
-        # go through other files and find first sounding when year is not specified
-        self.iFN=self.iFN+1
-        while keepsearching:
-            self.FN = self.FILES[self.iFN]
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            self.iFN=self.iFN+1
-            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
-        self.found = (self.current is not None)
-
-        self.status = 'fetch'
-        if self.found:
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        
-        if self.found and get_atm:
-            self.get_values_air_input()
-        
-    
-    def find(self,DT,get_atm=False):
-        
-        self.found = False
-        keepsearching = True
-        #print(DT)
-        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
-        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
-            self.DT = DT
-            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            
-        keepsearching = (self.current is not None)
-        while keepsearching:
-            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            if DTcurrent == DT:
-                self.found = True
-                keepsearching = False
-                if get_atm:
-                    self.get_values_air_input()
-                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            elif DTcurrent > DT:
-                keepsearching = False
-                self.current = None
-            else:
-                self.current = self.current.find_next('h2')
-                if self.current is None:
-                    keepsearching = False
-        self.found = (self.current is not None)
-        self.status = 'fetch'
-
-    def find_next(self,get_atm=False):
-        self.found = False
-        self.DT = None
-        if self.current is None:
-            self.find_first()
-        else:                
-            self.current = self.current.find_next('h2')
-            self.found = (self.current is not None)
-            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
-            while keepsearching:
-                self.iFN=self.iFN+1
-                self.FN = self.FILES[self.iFN]
-                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-                self.current = self.sounding_series.find('h2')
-                
-                self.found = (self.current is not None)
-                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
-        if self.found:        
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        if self.found and get_atm:
-            self.get_values_air_input()
-       
-
-
    def get_values_air_input(self,latitude=None,longitude=None):
        """Parse the sounding at ``self.current`` into CLASS model input.

        Reads the two <pre> tables that follow the current <h2> header: the
        first holds the fixed-width profile table, the second the station
        parameter list.  From these it

        * derives height above ground level (HAGL), absolute humidity (QABS)
          and wind speed,
        * estimates the mixed-layer height (best guess BLHV plus upper/lower
          bounds) with the bulk-Richardson routine ``blh``,
        * fits idealized mixed-layer profiles for each of the three BLH
          estimates, and
        * fills ``self.PARAMS`` (single-value parameters, transposed
          DataFrame) and ``self.ONE_COLUMN`` (the profile selected by
          ``self.mode``: 'o' original / 'b' best / 'u' upper / 'd' lower).

        Parameters
        ----------
        latitude, longitude : float, optional
            when given, override the station coordinates parsed from the
            sounding header.
        """
        # (dead experimenting code kept for reference)
        # for iDT,DT in enumerate(DTS):
            #websource = urllib.request.urlopen(webpage)
        #soup = BeautifulSoup(open(webpage), "html.parser")

        # workaround: the last line of the <pre> table carries a stray
        # carriage return which would yield a string-like first column
        string = self.current.find_next('pre').text
        string = string.split('\n')[:-1]
        string =  '\n'.join(string)
        # fixed-width sounding table: 11 columns of 7 characters each;
        # iloc[5:-1] strips the header/units rows and the trailing line
        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']
        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])

        #string =  soup.pre.next_sibling.next_sibling

        # second <pre> block: 'description: value' station parameter list
        string = self.current.find_next('pre').find_next('pre').text

        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
        #PARAMS.insert(0,'date',DT)

        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))

        # virtual potential temperature and height above ground level
        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
        HAGL = HGHT - np.float(PARAMS['Station elevation'])
        ONE_COLUMN.insert(0,'HAGL',HAGL)

        # specific humidity [kg/kg] from the mixing ratio [g/kg]
        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
        ONE_COLUMN.insert(0,'QABS',QABS)

        # wind speed: knots -> m/s
        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')

        #mixed layer potential temperature
        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])

        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2

        # boundary-layer height (best guess, upper and lower bound) from the
        # bulk-Richardson method; clipped at a 10 m minimum
        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
        BLHV = np.max((BLHV,10.))
        BLHVu = np.max((BLHVu,10.))
        BLHVd = np.max((BLHVd,10.))
        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)

        #security values for mixed-layer jump values dthetav, dtheta and dq

        # fit new profiles taking the above-estimated mixed-layer height
        ONE_COLUMNNEW = []
        for BLH in [BLHV,BLHVu,BLHVd]:
            ONE_COLUMNNEW.append(pd.DataFrame())

            # idealized vertical coordinate: a 2 m level, the BLH twice
            # (below/above the capping jump), then the original levels above
            # the mixed layer
            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)

            listHAGLNEW = list(HAGLNEW)
            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):

                # get index of lowest valid observation. This seems to vary
                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
                if len(idxvalid) > 0:
                    #print('idxvalid',idxvalid)
                    # mixed-layer mean: average over the valid levels below
                    # BLH when at least 3 exist, otherwise the lowest valid
                    # level only
                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
                    else:
                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)
                else:
                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
                    #print(col,meanabl)

                # if col == 'PRES':
                #     meanabl =

                # free-troposphere part of the profile (levels above BLH)
                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
                #THTVM = np.nanmean(THTV[HAGL <= BLH])
                #print("new_pro_h",new_pro_h)
                # calculate the jump at the top of the mixed layer by
                # extrapolating the first free-troposphere segment down to BLH
                if col in ['THTA','THTV',]:
                    if len(listHAGLNEW) > 4:
                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl
                        # temperature jumps are forced positive (>= 0.1 K)
                        dtheta = np.max((0.1,dtheta_pre))
                        #meanabl = meanabl - (dtheta - dtheta_pre)
                    else:
                        dtheta = np.nan
                else:
                    if len(listHAGLNEW) > 4:
                        #for moisture (it can have both negative and positive slope)
                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl )
                    else:
                        dtheta = np.nan
                #print('dtheta',dtheta)

                # well-mixed value twice, then the capping jump, then the
                # unchanged free-troposphere values
                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)

                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)

            #QABSM = np.nanmean(QABS[HAGL <= BLH])
            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))

        # we just make a copy of the fields, so that it can be read correctly by CLASS
        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))

            # wind components from direction/speed
            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)

            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))

        # assign fields adopted by CLASS: the mixed-layer height h according
        # to the selected mode
        if self.mode == 'o': #original
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'b':
            PARAMS.insert(0,'h',   np.float(BLHV))
        elif self.mode == 'u':
            PARAMS.insert(0,'h',   BLHVu)
        elif self.mode == 'd':
            PARAMS.insert(0,'h',   BLHVd)
        else:
            PARAMS.insert(0,'h',   BLHV)

        try:
            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
        except:
            print("could not convert latitude coordinate")
            PARAMS.insert(0,'latitude', np.nan)
            PARAMS.insert(0,'lat', np.nan)
        try:
            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
            # we set the actual input parameter value of lon to zero as we are
            # working in local time (as if we were in Greenwich)
            PARAMS.insert(0,'lon', 0.)
        except:
            print("could not convert longitude coordinate")
            PARAMS.insert(0,'longitude', np.nan)
            PARAMS.insert(0,'lon', 0.)

        if latitude is not None:
            print('overwriting latitude with specified value')
            PARAMS['latitude'] = np.float(latitude)
            PARAMS['lat'] = np.float(latitude)
        if longitude is not None:
            print('overwriting longitude with specified value')
            PARAMS['longitude'] = np.float(longitude)
        try:
            # this is the local suntime datetime from which we calculate the
            # hour of the day (assuming we would be in Greenwich, hence lon=0)
            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.)
            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
            # This is the nearest datetime when sun is up (for class)
            # NOTE(review): np.min/np.max are each fed two scalar positional
            # arguments here; the intent reads like min(max(...), ...)
            # clipping between sunrise and sunset -- confirm
            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value)
            # apply the same time shift for UTC datetime
            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)

        except:
            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
            # fall back to obviously-bogus placeholder times
            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
            PARAMS['datetime_daylight'] =PARAMS['datetime'].value

        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
        # as we are forcing lon equal to zero this is expressed in local suntime
        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)

        ONE_COLUMNb = ONE_COLUMNNEW[0]
        ONE_COLUMNu = ONE_COLUMNNEW[1]
        ONE_COLUMNd = ONE_COLUMNNEW[2]

        # mixed-layer means of thetav and q (diagnostics)
        THTVM = np.nanmean(THTV[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)

        QABSM = np.nanmean(QABS[HAGL <= BLHV])
        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)

        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)

        # BLH uncertainty: largest deviation of the bounds from the best guess
        BLHVe = abs(BLHV - BLHVu)
        BLHVe = max(BLHVe,abs(BLHV - BLHVd))

        #PARAMS.insert(0,'dq',0.)

        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)
        # surface pressure [Pa] from the lowest reported level [hPa]
        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)

        if self.mode == 'o': #original
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
        elif self.mode == 'b': # best BLH
            USE_ONECOLUMN = ONE_COLUMNb
            BLCOLUMN = ONE_COLUMNb
        elif self.mode == 'u': # upper BLH
            USE_ONECOLUMN = ONE_COLUMNu
            BLCOLUMN = ONE_COLUMNu
        elif self.mode == 'd': # lower BLH
            USE_ONECOLUMN = ONE_COLUMNd
            BLCOLUMN = ONE_COLUMNd
        else:
            USE_ONECOLUMN = ONE_COLUMN
            BLCOLUMN = ONE_COLUMNb

        # quality control flags: small BLH uncertainty, enough levels below
        # 6000 m and below BLH+2500 m, no missing data in the key columns,
        # and strictly increasing THTA
        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)

        PARAMS.insert(0,'OK',
                      ((BLHVe < 200.) and
                       ( len(np.where(lt6000)[0]) > 5) and
                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 6000.
                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
                      )
                     )

        # mixed-layer values (index 1) and capping-inversion jumps
        # (index 2 minus index 1) of the CLASS state variables
        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))

        PARAMS = PARAMS.T

        self.PARAMS = PARAMS
        self.ONE_COLUMN = USE_ONECOLUMN
-
diff --git a/lib/data_global.py b/lib/data_global.py
deleted file mode 100644
index 9c3d9b5..0000000
--- a/lib/data_global.py
+++ /dev/null
@@ -1,936 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: Hendrik Wouters
-
-Purpose: provides class routines for ground and atmosphere conditions used for
the CLASS mixed-layer model
-
-Usage:
-    from data_global import data_global
-    from class4gl import class4gl_input
-    from data_soundings import wyoming
-
-    # create a data_global object and load initial data pages
-    globaldata = data_global()
-    globaldata.load_datasets()
-    # create a class4gl_input object
-    c4gli = class4gl_input()
-    # Initialize it with profile data. We need to do this first. Actually this
-    # will set the coordinate parameters (datetime, latitude, longitude) in
-    # class4gl_input.pars.__dict__, which is required to read point data from
-    # the data_global object.
-
-    # open a Wyoming stream for a specific station
-    wy_strm = wyoming(STNM=91376)
-    # load the first profile
-    wy_strm.find_first()
-    # load the profile data into the class4gl_input object
-    c4gli.get_profile_wyoming(wy_strm)
-    
-    # and finally, read the global input data for this profile
-    c4gli.get_global_input(globaldata)
-
-
-"""
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-#import pynacolada as pcd
-import pandas as pd
-import xarray as xr
-import os
-import glob
-import sys
-import errno
-import warnings
-import logging
-
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
class book(object):
    """A dataset spread over multiple files.

    Similar in purpose to xarray.open_mfdataset, but only one file (the
    current 'page') is opened at a time, which saves precious memory.
    """
    def __init__(self,fn,concat_dim = None,debug_level=None):
        """Expand fn into a sorted list of pages and open the first one.

        Parameters
        ----------
        fn : str
            glob pattern matching the files that make up the dataset
        concat_dim : str, optional
            dimension along which the files are stacked; defaults to the
            first dimension of the first page
        debug_level : int, optional
            logging level for the 'book' logger

        Raises
        ------
        FileNotFoundError
            when the pattern matches no file at all
        """
        self.logger = logging.getLogger('book')
        if debug_level is not None:
            self.logger.setLevel(debug_level)

        # filenames are expanded as a list and sorted by filename
        self.pages = glob.glob(fn); self.pages.sort()
        # zero matches means no file was found for fn: raise an error
        if len(self.pages) == 0:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
        self.ipage = -1; self.page = None
        self.renames = {} # each time when opening a file, a renaming should be done.
        self.set_page(0)

        # we consider that the outer dimension is the one we concatenate
        self.concat_dim = concat_dim
        if self.concat_dim is None:
            self.concat_dim = list(self.page.dims.keys())[0]

    # this wraps the xarray sel-command
    def sel(self, *args, **kwargs):
        """Select from the dataset; switches page when the concat dimension
        is among the selection keys.

        BUGFIX: 'self' was missing from the signature and 'page' lacked the
        'self.' prefix, so any call used to raise immediately.
        """
        for dim in kwargs.keys():
            if dim == self.concat_dim:
                # make sure the page covering the requested coordinate is open
                self.browse_page(**{dim: kwargs[dim]})
        return self.page.sel(*args, **kwargs)

    def set_renames(self,renames):
        """Replace the variable-renaming mapping applied to every page."""
        # first convert back to the original names, then apply the update
        reverse_renames = dict((v,k) for k,v in self.renames.items())
        self.renames = renames
        if self.page is not None:
            self.page = self.page.rename(reverse_renames)
            self.page = self.page.rename(self.renames)

    def set_page(self,ipage,page=None):
        """ this sets the right page according to ipage:
                - We do not switch the page if we are already at the right one
                - we set the correct renamings (level -> lev, latitude -> lat,
                etc.)
                - The dataset is also squeezed.
        """

        if ((ipage != self.ipage) or (page is not None)):

            # release the previously opened file, if any
            if self.page is not None:
                self.page.close()

            self.ipage = ipage
            if page is not None:
                self.page = page
            else:
                if self.ipage == -1:
                   self.page = None
                else:
                    self.logger.info("Switching to page "+str(self.ipage)+': '\
                                     +self.pages[self.ipage])
                    self.page = xr.open_dataset(self.pages[self.ipage])

            # do some final corrections to the dataset to make them uniform
            if self.page is not None:
               if 'latitude' in self.page.dims:
                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
               if 'level' in self.page.dims:
                   self.page = self.page.rename({'level':'lev'})

               self.page = self.page.rename(self.renames)
               self.page = self.page.squeeze(drop=True)

    def browse_page(self,rewind=2,**args):
        """Find and open the page whose coordinate range contains the
        requested value(s), e.g. browse_page(time=some_datetime).

        The search starts 'rewind' pages before the current one and wraps
        around the whole page list.  When no page matches, page -1 (None)
        is set.  Coordinate values per page are cached in self.dims.

        Note: at the moment this is only tested with files stacked along the
        time dimension.
        """
        dims = args.keys()

        if self.ipage == -1:
            self.set_page(0)

        found = False
        iipage = 0
        startipage = self.ipage - rewind
        while (iipage < len(self.pages)) and not found:
            ipage = (iipage+startipage) % len(self.pages)
            # assume the page matches until one dimension proves otherwise.
            # BUGFIX: this flag used to be (re)set inside the dimension loop,
            # so only the last dimension was effectively checked and the flag
            # was unbound when no dimension was passed at all.
            this_file = True
            for dim in args.keys():
                # cache the coordinate values per page so that we don't need
                # to load them every time again
                if 'dims' not in self.__dict__:
                    self.dims = {}
                if dim not in self.dims.keys():
                    self.dims[dim] = [None]*len(self.pages)

                if self.dims[dim][ipage] is None:
                    self.logger.info('Loading coordinates of dimension "'+dim+\
                                     '" of page "' +str(ipage)+'".')
                    self.set_page(ipage)
                    self.dims[dim][ipage] = self.page[dim].values

                # determine the coordinate range covered by this page,
                # extended by half a grid spacing on both sides
                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.

                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
                    this_file = False

            if this_file:
                found = True
                self.set_page(ipage)
            else:
                iipage += 1

        if not found:
            self.logger.info("Page not found. Setting to page -1")
            self.set_page(-1)

        if self.ipage != -1:
            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
        else:
            self.logger.debug("I'm now at page "+ str(self.ipage))
-
-
-class data_global(object):
    def __init__(self,sources= {
        # # old gleam
        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
        },debug_level=None):
        """Set up the (initially empty) library of global datasets.

        Parameters
        ----------
        sources : dict, optional
            mapping 'DATAKEY:vardest' -> 'fileglob[:varsource[:arg]]'
            describing where each destination variable comes from; the
            default lists the datasets on the VSC storage used by CLASS4GL.
            Nothing is opened here -- call load_datasets() for that.
        debug_level : int, optional
            logging level for the 'data_global' logger (also forwarded to
            each book opened later).
        """
        # unique references to data sources being used. They can be files
        # that are original on the disks or some unambiguous xarray virtual
        # sources. These references are used in other variables. This way, a
        # file or source cannot be loaded twice (a warning is made if one
        # would try it).
        self.library = {}
        self.sources = sources
        self.datarefs = {}   # vardest -> library key it is served from
        self.datasets = {}   # vardest -> book object
        self.datetime = dt.datetime(1981,1,1)

        self.logger = logging.getLogger('data_global')
        if debug_level is not None:
            self.logger.setLevel(debug_level)
        self.debug_level = debug_level

        warnings.warn('omitting pressure field p and advection')
-
-    def in_library(self,fn):
-        if fn not in self.library.keys():
-            return False
-        else:
-            print("Warning: "+fn+" is already in the library.")
-            return True
-
-    def add_to_library(self,fn):
-        if not self.in_library(fn):
-            print("opening: "+fn)
-            self.library[fn] = \
-                book(fn,concat_dim='time',debug_level=self.debug_level)
-
-            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
-            #if 'latitude' in self.library[fn].variables:
-            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-
    # default procedure for loading datasets into the globaldata library
    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
        """Register variable(s) of input_fn in self.datasets/self.datarefs.

        Parameters
        ----------
        input_fn : str
            glob pattern of the source files; used as library key
        varssource : str or list of str, optional
            variable name(s) as stored in the files.
            NOTE(review): when None, the detection branch below reads
            self.sources[input_fn].variables, but self.sources maps keys to
            path *strings*, so that branch looks broken -- confirm before
            relying on it.
        varsdest : str or list of str, optional
            destination variable name(s); defaults to varssource

        Variables that need renaming get a dedicated book in the library
        under the key '<input_fn>.<varsource>.<vardest>'.
        """
        # accept single names as well as lists
        if type(varssource) is str:
            varssource = [varssource]
        if type(varsdest) is str:
            varsdest = [varsdest]

        self.add_to_library(input_fn)

        if varssource is None:
            # auto-detect multi-dimensional data variables
            varssource = []
            for var in self.sources[input_fn].variables:
                avoid = \
                ['lat','lon','latitude','longitude','time','lev','level']
                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
                    varssource.append(var)

        if varsdest is None:
            varsdest = varssource

        for ivar,vardest in enumerate(varsdest):
            varsource = varssource[ivar]
            print('setting '+vardest+' as '+varsource+' from '+input_fn)

            if vardest in self.datarefs.keys():
                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
            if vardest != varsource:
                # renamed variables get their own book with a rename mapping
                libkey = input_fn+'.'+varsource+'.'+vardest
                if libkey not in self.library.keys():
                    self.library[libkey] = book(input_fn,\
                                                debug_level=self.debug_level)
                    self.library[libkey].set_renames({varsource: vardest})

                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
            else:
                self.datarefs[vardest] = input_fn
                self.datasets[vardest] =self.library[self.datarefs[vardest]]
-
-
-
-    def load_datasets(self,sources = None,recalc=0):
-
-        if sources is None:
-            sources = self.sources
-        for key in sources.keys():
-            #datakey,vardest,*args = key.split(':')
-            datakey,vardest = key.split(':')
-            #print(datakey)
-
-            fnvarsource = sources[key].split(':')
-            if len(fnvarsource) > 2:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource,fnargs = fnvarsource
-                fnargs = [fnargs]
-            elif len(fnvarsource) > 1:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource = fnvarsource
-                fnargs = []
-            else:
-                fn = sources[key]
-                varsource = vardest
-            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
-
-    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
-            # the default way of loading a 2d dataset
-            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
-                self.load_dataset_default(fn,varsource,vardest)
-            elif datakey == 'IGBPDIS':
-                if vardest == 'alpha':
-                    ltypes = ['W','B','H','TC']
-                    for ltype in ltypes:
-                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
-                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
-
-
-                    # landfr = {}
-                    # for ltype in ['W','B','H','TC']:
-                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
-
-
-
-                    keytemp = 'alpha'
-                    fnkeytemp = fn+':IGBPDIS:alpha'
-                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
-                        self.library[fnkeytemp]  = book(fnkeytemp,
-                                                        debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-                    else:
-                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
-                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
-                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
-                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
-                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
-                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
-                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
-
-                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-
-                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
-                        for ltype in ltypes:
-                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
-
-                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
-                        print('writing file to: '+fnkeytemp)
-                        os.system('rm '+fnkeytemp)
-                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
-                        self.library[fnkeytemp].close()
-
-
-                        self.library[fnkeytemp]  = \
-                            book(fnkeytemp,debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-
-
-                else:
-                    self.load_dataset_default(fn,varsource,vardest)
-
-
-            elif datakey == 'GLAS':
-                self.load_dataset_default(fn,varsource,vardest)
-                if vardest == 'z0m':
-                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
-                elif vardest == 'z0h':
-                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
-            elif datakey == 'DSMW':
-
-
-                # Procedure of the thermal properties:
-                # 1. determine soil texture from DSMW/10.
-                # 2. soil type with look-up table (according to DWD/EXTPAR)
-                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
-                #    with parameter look-up table from Noilhan and Planton (1989).
-                #    Note: The look-up table is inspired on DWD/COSMO
-
-                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first
-
-
-
-                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
-                self.load_dataset_default(fn,'DSMW')
-                print('calculating texture')
-                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
-                TEMP  = {}
-                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
-                TEMP3 = {}
-                for SPKEY in SPKEYS:
-
-
-                    keytemp = SPKEY+'_values'
-                    fnoutkeytemp = fnout+':DSMW:'+keytemp
-                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                    else:
-                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
-                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
-                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-                        # for faster computation, we need to get it to memory out of Dask.
-                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
-                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
-
-                # yes, I know I only check the last file.
-                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
-                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
-                        print('idx',idx,SPKEY)
-                        SEL = (TEMP2 == idx)
-                    #     print(idx,len(TEMP3))
-                        for SPKEY in SPKEYS:
-                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
-
-                    for SPKEY in SPKEYS:
-                        keytemp = SPKEY+'_values'
-                        fnoutkeytemp = fnout+':DSMW:'+keytemp
-                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
-                        os.system('rm '+fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].close()
-
-
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                keytemp = 'texture'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-                else:
-                    self.library[fn+':DSMW:texture'] = xr.Dataset()
-                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
-                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
-                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
-                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-
-                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
-
-                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
-                    zundef[zundef < 0] = np.nan
-                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
-                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
-
-                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-
-
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-                print('calculating texture type')
-
-
-
-                keytemp = 'itex'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-                else:
-                    self.library[fnoutkeytemp] = xr.Dataset()
-                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-                    X = self.datasets['texture'].page['texture'].values*100
-                    X[pd.isnull(X)] = -9
-
-
-                    self.datasets[keytemp][keytemp].values = X
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
-                    self.datasets['itex'].close()
-
-
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-
-                keytemp = 'isoil'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                isoil_reprocessed = False
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-                else:
-                    isoil_reprocessed = True
-                    print('calculating soil type')
-                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    ITEX = self.datasets['itex'].page['itex'].values
-                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
-                    LOOKUP = [
-                              [-10 ,9],# ocean
-                              [0 ,7],# fine textured, clay (soil type 7)
-                              [20,6],# medium to fine textured, loamy clay (soil type 6)
-                              [40,5],# medium textured, loam (soil type 5)
-                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
-                              [80,3],# coarse textured, sand (soil type 3)
-                              [100,9],# coarse textured, sand (soil type 3)
-                            ]
-                    for iitex,iisoil in LOOKUP:
-                        ISOIL[ITEX > iitex] = iisoil
-                        print('iitex,iisoil',iitex,iisoil)
-
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    LOOKUP = [
-                              [9001, 1 ], # ice, glacier (soil type 1)
-                              [9002, 2 ], # rock, lithosols (soil type 2)
-                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
-                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
-                              [9,    9 ], # undefined (ocean)
-                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
-                              [9000, 9 ], # undefined (inland lake)
-                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
-                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
-                            ]
-                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
-                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
-
-                    CODE_VALUES[ITEX == 901200] = 9012
-                    for icode,iisoil in LOOKUP:
-                        ISOIL[CODE_VALUES == icode] = iisoil
-
-                    self.datasets['isoil']['isoil'].values = ISOIL
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-                    print('saved inbetween file to: '+fnoutkeytemp)
-
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                #adopted from data_soil.f90 (COSMO5.0)
-                SP_LOOKUP = {
-                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
-                  # (by index)                                           loam                    loam                                water      ice
-                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
-                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
-                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
-                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
-                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
-                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
-                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
-                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
-                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
-                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
-                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
-                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
-                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
-                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
-                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
-                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
-                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
-                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
-                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
-                  # Important note: For peat, the unknown values below are set equal to that of loam
-                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
-                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
-                  #error in table 2 of NP89: values need to be multiplied by e-6
-                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
-                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
-
-                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
-                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
-                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
-                }
-
-
-                # isoil_reprocessed = False
-                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-
-                #     self.library[fn+':DSMW:isoil'] = \
-                #             book(fnoutkeytemp,debug_level=self.debug_level)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-                # else:
-                #     isoil_reprocessed = True
-                #     print('calculating soil type')
-                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-
-
-
-                # this should become cleaner in future but let's hard code it for now.
-                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
-                print('calculating soil parameter')
-                DATATEMPSPKEY = {}
-                if (recalc < 1) and (isoil_reprocessed == False): 
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        keytemp = SPKEY
-                        fnoutkeytemp=fnout+':DSMW:'+keytemp
-                        self.library[fn+':DSMW:'+SPKEY] =\
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
-                        self.datarefs[SPKEY] =fnoutkeytemp
-                else:
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-
-                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
-                    ISOIL = self.datasets['isoil'].page['isoil'].values
-                    print(np.where(ISOIL>0.))
-                    for i in range(11):
-                        SELECT = (ISOIL == i)
-                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
-
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
-
-                        os.system('rm '+fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].close()
-                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
-
-                        self.library[fn+':DSMW:'+SPKEY] = \
-                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-
-
-            else:
-                self.load_dataset_default(fn,varsource,vardest)
-
-
-
-
-
-
-#
-#                 # only print the last parameter value in the plot
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'cala'
-#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'crhoc'
-#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#     key = "CERES"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         CERES_start_date = dt.datetime(2000,3,1)
-#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-#
-#         var = 'cc'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#         print(class_settings.lat,class_settings.lon)
-#
-#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
-#
-#         input_nc.close()
-#
-
-
-#     key = "GIMMS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-#         print("Reading Leag Area Index from "+input_fn)
-#         var = 'LAI'
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-#
-#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-#
-#         if np.isnan(tarray[idatetime]):
-#             print("interpolating GIMMS cveg nan value")
-#
-#             mask = np.isnan(tarray)
-#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-#             else:
-#                 print("Warning. Could not interpolate GIMMS cveg nan value")
-#
-#         class_settings.__dict__[var] = tarray[idatetime]
-#
-#         input_nc.close()
-#
-#     key = "IGBPDIS_ALPHA"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         var = 'alpha'
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-#         print("Reading albedo from "+input_fn)
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#
-#         landfr = {}
-#         for ltype in ['W','B','H','TC']:
-#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-#
-#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-#
-#         alpha=0.
-#         for ltype in landfr.keys():
-#             alpha += landfr[ltype]*aweights[ltype]
-#
-#
-#         class_settings.__dict__[var] = alpha
-#         input_nc.close()
-#
-#
-#     key = "ERAINT_ST"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         print("Reading soil temperature from "+input_fn)
-#
-#         var = 'Tsoil'
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         var = 'T2'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-#
-#
-#         input_nc.close()
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #var = 'T2'
-#     #valold = class_settings.__dict__[var]
-#     #
-#     #class_settings.__dict__[var] = 305.
-#     #class_settings.__dict__['Tsoil'] = 302.
-#     #valnew = class_settings.__dict__[var]
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #
-#     #var = 'Lambda'
-#     #valold = class_settings.__dict__[var]
-#
-#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
-#     ## I need to ask Chiel.
-#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-#     #
-#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
-#     #class_settings.__dict__[var] = valnew
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     key = "GLAS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-#         print("Reading canopy height for determining roughness length from "+input_fn)
-#         var = 'z0m'
-#
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-#
-#         lowerlimit = 0.01
-#         if testval < lowerlimit:
-#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-#             class_settings.__dict__[var] = lowerlimit
-#         else:
-#             class_settings.__dict__[var] = testval
-#
-#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-#
-#
-#         input_nc.close()
-
-
-
-
-
diff --git a/lib/interface_functions.py b/lib/interface_functions.py
deleted file mode 100644
index 3e483f3..0000000
--- a/lib/interface_functions.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-#from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-#'_afternoon.yaml'
-def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
-    filename = yaml_file.name
-    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-    #yaml_file = open(filename)
-
-    #print('going to next observation',filename)
-    yaml_file.seek(index_start)
-
-    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-
-    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-    filebuffer.write(buf)
-    filebuffer.close()
-    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-    
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-
-    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-    print(command)
-    os.system(command)
-    jsonstream = open(filename+'.buffer.json.'+str(index_start))
-    record_dict = json.load(jsonstream)
-    jsonstream.close()
-    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-
-
-    if mode =='mod':
-        modelout = class4gl()
-        modelout.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        return modelout
-    elif mode == 'ini':
-
- 
-        # datetimes are incorrectly converted to strings. We need to convert them
-        # again to datetimes
-        for key,value in record_dict['pars'].items():
-            # we don't want the key with columns that have none values
-            if value is not None: 
-                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
-               # elif (type(value) == str):
-                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-
-            if (value == 0.9e19) or (value == '.9e19'):
-                record_dict['pars'][key] = np.nan
-        for key in record_dict.keys():
-            #print(key)
-            if key in ['air_ap','air_balloon',]:
-                #NNprint('check')
-                for datakey,datavalue in record_dict[key].items():
-                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-
-        #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        c4gli = class4gl_input()
-        print(c4gli.logger,'hello')
-        c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-        return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-
-class stations(object):
-    def __init__(self,path,suffix='ini',refetch_stations=False):
-
-        self.path = path
-
-        self.file = self.path+'/stations_list.csv'
-        if (os.path.isfile(self.file)) and (not refetch_stations):
-            self.table = pd.read_csv(self.file)
-        else:
-            self.table = self.get_stations(suffix=suffix)
-            self.table.to_csv(self.file)
-        
-        self.table = self.table.set_index('STNID')
-        #print(self.table)
-
-    def get_stations(self,suffix):
-        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
-        if len(stations_list_files) == 0:
-            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
-        stations_list_files.sort()
-        print(stations_list_files)
-        if len(stations_list_files) == 0:
-            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
-        stations_list = []
-        for stations_list_file in stations_list_files:
-            thisfile = open(stations_list_file,'r')
-            yamlgen = yaml.load_all(thisfile)
-            try:
-                first_record  = yamlgen.__next__()
-            except:
-                first_record = None
-            if first_record is not None:
-                stations_list.append({})
-                for column in ['STNID','latitude','longitude']:
-                    #print(first_record['pars'].keys())
-                    stations_list[-1][column] = first_record['pars'][column]
-                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
-            yamlgen.close()
-            thisfile.close()
-    
-        print(stations_list)
-        return pd.DataFrame(stations_list)
-
-class stations_iterator(object):
-    def __init__(self,stations):
-        self.stations = stations
-        self.ix = -1 
-    def __iter__(self):
-        return self
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.stations.table)) 
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_row(self,row):
-        self.ix = row
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_STNID(self,STNID):
-        self.ix = np.where((self.stations.table.index == STNID))[0][0]
-        print(self.ix)
-        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-    def close():
-        del(self.ix)
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.records))
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-# #'_afternoon.yaml'
-# def get_record_yaml(yaml_file,index_start,index_end):
-#     filename = yaml_file.name
-#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-#     #yaml_file = open(filename)
-# 
-#     #print('going to next observation',filename)
-#     yaml_file.seek(index_start)
-# 
-#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-# 
-#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-#     filebuffer.write(buf)
-#     filebuffer.close()
-#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-#     
-#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-# 
-#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-#     print(command)
-#     os.system(command)
-#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
-#     record_dict = json.load(jsonstream)
-#     jsonstream.close()
-#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-#  
-#     # datetimes are incorrectly converted to strings. We need to convert them
-#     # again to datetimes
-#     for key,value in record_dict['pars'].items():
-#         # we don't want the key with columns that have none values
-#         if value is not None: 
-#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
-#            # elif (type(value) == str):
-#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-#                 
-#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
-# 
-#         if (value == 0.9e19) or (value == '.9e19'):
-#             record_dict['pars'][key] = np.nan
-#     for key in record_dict.keys():
-#         print(key)
-#         if key in ['air_ap','air_balloon',]:
-#             print('check')
-#             for datakey,datavalue in record_dict[key].items():
-#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-# 
-#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-# 
-#     c4gli = class4gl_input()
-#     c4gli.load_yaml_dict(record_dict)
-#     return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
-
-    records = pd.DataFrame()
-    for STNID,station in stations.iterrows():
-        dictfnchunks = []
-        if getchunk is 'all':
-
-            # we try the old single-chunk filename format first (usually for
-            # original profile pairs)
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
-            if os.path.isfile(fn):
-                chunk = 0
-                dictfnchunks.append(dict(fn=fn,chunk=chunk))
-
-            # otherwise, we use the new multi-chunk filename format
-            else:
-                chunk = 0
-                end_of_chunks = False
-                while not end_of_chunks:
-                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
-                    if os.path.isfile(fn):
-                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
-                    else:
-                        end_of_chunks = True
-                    chunk += 1
-
-            # globyamlfilenames = path_yaml+'/'+format(STNID,'05d')+'*_'+subset+'.yaml'
-            # yamlfilenames = glob.glob(globyamlfilenames)
-            # yamlfilenames.sort()
-        else:
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
-            dictfnchunks.append(dict(fn=fn,chunk=getchunk))
-            
-        if len(dictfnchunks) > 0:
-            for dictfnchunk in dictfnchunks:
-                yamlfilename = dictfnchunk['fn']
-                chunk = dictfnchunk['chunk']
-                print(chunk)
-
-                #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
-                pklfilename = yamlfilename.replace('.yaml','.pkl')
-
-                #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
-                #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
-                generate_pkl = False
-                if not os.path.isfile(pklfilename): 
-                    print('pkl file does not exist. I generate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                elif not (os.path.getmtime(yamlfilename) <  \
-                    os.path.getmtime(pklfilename)):
-                    print('pkl file older than yaml file, so I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-
-                if refetch_records:
-                    print('refetch_records flag is True. I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                if not generate_pkl:
-                    records = pd.concat([records,pd.read_pickle(pklfilename)])
-                   # irecord = 0
-                else:
-                    with open(yamlfilename) as yaml_file:
-
-                        dictout = {}
-
-                        next_record_found = False
-                        end_of_file = False
-                        while (not next_record_found) and (not end_of_file):
-                            linebuffer = yaml_file.readline()
-                            next_record_found = (linebuffer == '---\n')
-                            end_of_file = (linebuffer == '')
-                        next_tell = yaml_file.tell()
-                        
-                        while not end_of_file:
-
-                            print(' next record:',next_tell)
-                            current_tell = next_tell
-                            next_record_found = False
-                            yaml_file.seek(current_tell)
-                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
-                            linebuffer = ''
-                            while ( (not next_record_found) and (not end_of_file)):
-                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
-                                linebuffer = yaml_file.readline()
-                                next_record_found = (linebuffer == '---\n')
-                                end_of_file = (linebuffer == '')
-                            filebuffer.close()
-                            
-                            next_tell = yaml_file.tell()
-                            index_start = current_tell
-                            index_end = next_tell
-
-                            
-                            #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
-                            print(command)
-                            
-                            os.system(command)
-                            #jsonoutput = subprocess.check_output(command,shell=True) 
-                            #print(jsonoutput)
-                            #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
-                            record = json.load(jsonstream)
-                            dictouttemp = {}
-                            for key,value in record['pars'].items():
-                                # we don't want the key with columns that have none values
-                                if value is not None: 
-                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
-                                   if (type(value) in regular_numeric_types):
-                                        dictouttemp[key] = value
-                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
-                                       #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S")
-                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
-                            recordindex = record['index']
-                            dictouttemp['chunk'] = chunk
-                            dictouttemp['index_start'] = index_start
-                            dictouttemp['index_end'] = index_end
-                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
-                            for key,value in dictouttemp.items():
-                                if key not in dictout.keys():
-                                    dictout[key] = {}
-                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
-                            print(' obs record registered')
-                            jsonstream.close()
-                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
-                    records_station = pd.DataFrame.from_dict(dictout)
-                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+pklfilename+') for station '\
-                          +str(STNID))
-                    records_station.to_pickle(pklfilename)
-                    # else:
-                    #     os.system('rm '+pklfilename)
-                    records = pd.concat([records,records_station])
-    return records
-
-def stdrel(mod,obs,columns):
-    stdrel = pd.DataFrame(columns = columns)
-    for column in columns:
-        stdrel[column] = \
-                (mod.groupby('STNID')[column].transform('mean') -
-                 obs.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') + \
-                (mod[column] -
-                 mod.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') 
-    return stdrel
-
-def pct(obs,columns):
-    pct = pd.DataFrame(columns=columns)
-    for column in columns:
-        #print(column)
-        pct[column] = ""
-        pct[column] = obs[column].rank(pct=True)
-    return pct
-
-def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
-    stats = pd.DataFrame()
-    for key in keys: 
-        stats['d'+key+'dt'] = ""
-        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
-                              (obs_afternoon.ldatetime - \
-                               obs_morning.ldatetime).dt.seconds*3600.
-    return stats
-
diff --git a/lib/interface_multi.py b/lib/interface_multi.py
deleted file mode 100644
index 83148e5..0000000
--- a/lib/interface_multi.py
+++ /dev/null
@@ -1,2061 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-# from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-cdictpres = {'blue': (\
-                   (0.,    0.,  0.),
-                   (0.25,  0.25, 0.25),
-                   (0.5,  .70, 0.70),
-                   (0.75, 1.0, 1.0),
-                   (1,     1.,  1.),
-                   ),
-       'green': (\
-                   (0. ,   0., 0.0),
-                   (0.25,  0.50, 0.50),
-                   (0.5,  .70, 0.70),
-                   (0.75,  0.50, 0.50),
-                   (1  ,    0,  0.),
-                   ),
-       'red':  (\
-                  (0 ,  1.0, 1.0),
-                  (0.25 ,  1.0, 1.0),
-                   (0.5,  .70, 0.70),
-                  (0.75 , 0.25, 0.25),
-                  (1,    0., 0.),
-                  )}
-
-statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
-
-
-os.system('module load Ruby')
-
-class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
-        """ creates an interactive interface for analysing class4gl experiments
-
-        INPUT:
-            path_exp : path of the experiment output
-            path_obs : path of the observations 
-            globaldata: global data that is being shown on the map
-            refetch_stations: do we need to build the list of the stations again?
-        OUTPUT:
-            the procedure returns an interface object with interactive plots
-
-        """
-        
-        # set the ground
-        self.globaldata = globaldata
-
- 
-        self.path_exp = path_exp
-        self.path_obs = path_obs
-        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
-
-        # # get the list of stations
-        # stationsfile = self.path_exp+'/stations_list.csv'
-        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
-        #     stations = pd.read_csv(stationsfile)
-        # else:
-        #     stations = get_stations(self.path_exp)
-        #     stations.to_csv(stationsfile)
-
-        # stations = stations.set_index('STNID')
-
-        self.frames = {}
-
-        self.frames['stats'] = {}
-        self.frames['worldmap'] = {}
-                
-        self.frames['profiles'] = {}
-        self.frames['profiles'] = {}
-        self.frames['profiles']['DT'] = None
-        self.frames['profiles']['STNID'] = None
-
-        #self.frames['worldmap']['stationsfile'] = stationsfile
-        self.frames['worldmap']['stations'] = stations(self.path_exp, \
-                                                       suffix='ini',\
-                                                       refetch_stations=refetch_stations)
-
-        # Initially, the stats frame inherets the values/iterators of
-        # worldmap
-        for key in self.frames['worldmap'].keys():
-            self.frames['stats'][key] = self.frames['worldmap'][key]
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_ini'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='ini',\
-                                           refetch_records=refetch_records
-                                           )
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_mod'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='mod',\
-                                           refetch_records=refetch_records
-                                           )
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_obs_afternoon'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_obs,\
-                                           subset='afternoon',\
-                                           refetch_records=refetch_records
-                                           )
-
-        self.frames['stats']['records_all_stations_mod'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['records_all_stations_ini']['dates'] = \
-            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
-
-
-        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
-
-        self.frames['stats']['records_all_stations_obs_afternoon'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['viewkeys'] = ['h','theta','q']
-        print('Calculating table statistics')
-        self.frames['stats']['records_all_stations_mod_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_mod'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-
-        self.frames['stats']['inputkeys'] = inputkeys
-        
-        # self.frames['stats']['inputkeys'] = \
-        #     [ key for key in \
-        #       self.globaldata.datasets.keys() \
-        #       if key in \
-        #       list(self.frames['stats']['records_all_stations_obs'].columns)]
-
-
-        # get units from the class4gl units database
-        self.units = dict(units)
-        # for those that don't have a definition yet, we just ask a question
-        # mark
-        for var in self.frames['stats']['inputkeys']:
-            self.units[var] = '?'
-
-        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
-        self.frames['stats']['records_all_stations_ini_pct'] = \
-                  pct(self.frames['stats']['records_all_stations_ini'], \
-                      columns = self.frames['stats']['inputkeys'])
-
-        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
-        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-        #     mod['
-
-        # 
-        # 
-        # \
-        #        self.frames['stats']['records_all_stations_mod'], \
-
-
-
-        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
-        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
-        #               columns = [ 'd'+key+'dt' for key in \
-        #                           self.frames['stats']['viewkeys']], \
-        #              )
-
-        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
-        #               obs = self.frames['stats']['records_all_stations_ini'], \
-        #               columns = self.frames['stats']['viewkeys'], \
-        #              )
-        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
-        
-        print('filtering pathological data')
-        # some observational sounding still seem problematic, which needs to be
-        # investigated. In the meantime, we filter them
-        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
-
-        # we filter ALL data frames!!!
-        for key in self.frames['stats'].keys():
-            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
-               (self.frames['stats'][key].index.names == indextype):
-                self.frames['stats'][key] = self.frames['stats'][key][valid]
-        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
-
-        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
-
-
-        print("filtering stations from interface that have no records")
-        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
-            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                    == STNID).sum() == 0):
-                print("dropping", STNID)
-                self.frames['worldmap']['stations'].table = \
-                        self.frames['worldmap']['stations'].table.drop(STNID)
-                    
-        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-        
-        # TO TEST: should be removed, since it's is also done just below
-        self.frames['stats']['stations_iterator'] = \
-            self.frames['worldmap']['stations_iterator'] 
-
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
-        self.next_station()
-
-        # self.goto_datetime_worldmap(
-        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-        #     'after')
-    def sel_station(self,STNID=None,rownumber=None):
-
-        if (STNID is not None) and (rownumber is not None):
-            raise ValueError('Please provide either STNID or rownumber, not both.')
-
-        if (STNID is None) and (rownumber is None):
-            raise ValueError('Please provide either STNID or rownumber.')
-            
-        if STNID is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
-            print(
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-            )
-            self.update_station()
-        elif rownumber is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
-            self.update_station()
-
-
-
-    def next_station(self,event=None,jump=1):
-        with suppress(StopIteration):
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-                = self.frames['worldmap']['stations_iterator'].__next__(jump)
-            # self.frames['worldmap']['stations_iterator'].close()
-            # del(self.frames['worldmap']['stations_iterator'])
-            # self.frames['worldmap']['stations_iterator'] = \
-            #                 selfself.frames['worldmap']['stations'].iterrows()
-            # self.frames['worldmap']['STNID'],\
-            # self.frames['worldmap']['current_station'] \
-            #     = self.frames['worldmap']['stations_iterator'].__next__()
-
-        self.update_station()
-
-    def prev_station(self,event=None):
-        self.next_station(jump = -1,event=event)
-    def update_station(self):
-        for key in ['STNID','current_station','stations_iterator']: 
-            self.frames['stats'][key] = self.frames['worldmap'][key] 
-
-
-
-        # generate index of the current station
-        self.frames['stats']['records_current_station_index'] = \
-            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-             == \
-             self.frames['stats']['current_station'].name)
-
-        # create the value table of the records of the current station
-        tab_suffixes = \
-                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        for tab_suffix in tab_suffixes:
-            self.frames['stats']['records_current_station'+tab_suffix] = \
-                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-        # go to first record of current station
-        self.frames['stats']['records_iterator'] = \
-                        records_iterator(self.frames['stats']['records_current_station_mod'])
-        (self.frames['stats']['STNID'] , \
-        self.frames['stats']['current_record_chunk'] , \
-        self.frames['stats']['current_record_index']) , \
-        self.frames['stats']['current_record_mod'] = \
-                        self.frames['stats']['records_iterator'].__next__()
-
-        for key in self.frames['stats'].keys():
-            self.frames['profiles'][key] = self.frames['stats'][key]
-
-        STNID = self.frames['profiles']['STNID']
-        chunk = self.frames['profiles']['current_record_chunk']
-        if 'current_station_file_ini' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_ini'].close()
-        self.frames['profiles']['current_station_file_ini'] = \
-            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-        if 'current_station_file_mod' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_mod'].close()
-        self.frames['profiles']['current_station_file_mod'] = \
-            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_afternoon'].close()
-        self.frames['profiles']['current_station_file_afternoon'] = \
-            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-        # for the profiles we make a distinct record iterator, so that the
-        # stats iterator can move independently
-        self.frames['profiles']['records_iterator'] = \
-                        records_iterator(self.frames['profiles']['records_current_station_mod'])
-        (self.frames['profiles']['STNID'] , \
-        self.frames['profiles']['current_record_chunk'] , \
-        self.frames['profiles']['current_record_index']) , \
-        self.frames['profiles']['current_record_mod'] = \
-                        self.frames['profiles']['records_iterator'].__next__()
-
-
-        # for the profiles we make a distinct record iterator, so that the
-        # stats iterator can move independently
-
-        self.update_record()
-
-    def next_record(self,event=None,jump=1):
-        with suppress(StopIteration):
-            (self.frames['profiles']['STNID'] , \
-            self.frames['profiles']['current_record_chunk'] , \
-            self.frames['profiles']['current_record_index']) , \
-            self.frames['profiles']['current_record_mod'] = \
-                      self.frames['profiles']['records_iterator'].__next__(jump)
-        # except (StopIteration):
-        #     self.frames['profiles']['records_iterator'].close()
-        #     del( self.frames['profiles']['records_iterator'])
-        #     self.frames['profiles']['records_iterator'] = \
-        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
-        #     (self.frames['profiles']['STNID'] , \
-        #     self.frames['profiles']['current_record_index']) , \
-        #     self.frames['profiles']['current_record_mod'] = \
-        #                     self.frames['profiles']['records_iterator'].__next__()
-
-        for key in self.frames['profiles'].keys():
-            self.frames['stats'][key] = self.frames['profiles'][key]
-        self.update_record()
-
-    def prev_record(self,event=None):
-        self.next_record(jump=-1,event=event)
-
-    def update_record(self):
-        self.frames['profiles']['current_record_ini'] =  \
-            self.frames['profiles']['records_current_station_ini'].loc[\
-                  (self.frames['profiles']['STNID'] , \
-                  self.frames['profiles']['current_record_chunk'],\
-                  self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon'] =  \
-            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
-                  (self.frames['profiles']['STNID'] , \
-                  self.frames['profiles']['current_record_chunk'] , \
-                  self.frames['profiles']['current_record_index'])]
-
-        self.frames['profiles']['current_record_mod_stats'] = \
-                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
-                    self.frames['profiles']['STNID'], \
-                    self.frames['profiles']['current_record_chunk'], \
-                    self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
-                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
-                    self.frames['profiles']['STNID'],\
-                    self.frames['profiles']['current_record_chunk'],\
-                    self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_ini_pct'] = \
-                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
-                    self.frames['profiles']['STNID'],\
-                    self.frames['profiles']['current_record_chunk'],\
-                    self.frames['profiles']['current_record_index'])]
-
-        for key in self.frames['profiles'].keys():
-            self.frames['stats'][key] = self.frames['profiles'][key]
-        # frame
-        # note that the current station, record is the same as the stats frame for initialization
-
-        # select first 
-        #self.frames['profiles']['current_record_index'], \
-        #self.frames['profiles']['record_yaml_mod'] = \
-        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
-        #                   self.frames['stats']['current_record_index'])
-        self.frames['profiles']['record_yaml_mod'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_mod'], \
-               self.frames['profiles']['current_record_mod'].index_start,
-               self.frames['profiles']['current_record_mod'].index_end,
-               mode='mod')
-                                
-        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
-                       (self.frames['stats']['STNID'] , \
-                        self.frames['stats']['current_record_chunk'] , \
-                        self.frames['stats']['current_record_index'])]
-
-        self.frames['profiles']['record_yaml_ini'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_ini'], \
-               record_ini.index_start,
-               record_ini.index_end,
-                mode='ini')
-
-        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
-                       (self.frames['stats']['STNID'] , \
-                        self.frames['stats']['current_record_chunk'] , \
-                        self.frames['stats']['current_record_index'])]
-
-        self.frames['profiles']['record_yaml_obs_afternoon'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_afternoon'], \
-               record_afternoon.index_start,
-               record_afternoon.index_end,
-                mode='ini')
-
-
-        key = self.frames['worldmap']['inputkey']
-        # only redraw the map if the current world map has a time
-        # dimension
-        if 'time' in self.globaldata.datasets[key].page[key].dims:
-            self.goto_datetime_worldmap(
-                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                'after')
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only=['stats_lightupdate',
-                                                  'worldmap',
-                                                  'profiles'])
-        else:
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only=['stats_lightupdate',
-                                                  'worldmap_stations',
-                                                  'profiles'])
-
-    def abline(self,slope, intercept,axis):
-        """Plot a line from slope and intercept"""
-        #axis = plt.gca()
-        x_vals = np.array(axis.get_xlim())
-        y_vals = intercept + slope * x_vals
-        axis.plot(x_vals, y_vals, 'k--')
-
-    def plot(self):
-        import pylab as pl
-        from matplotlib.widgets import Button
-        import matplotlib.pyplot as plt
-        import matplotlib as mpl
-        '''
-        Definition of the axes for the sounding table stats
-        '''
-        
-        fig = pl.figure(figsize=(14,9))
-        axes = {} #axes
-        btns = {} #buttons
-
-        # frames, which sets attributes for a group of axes, buttens, 
-        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
-            label = 'stats_'+str(key)
-            axes[label] = fig.add_subplot(\
-                            len(self.frames['stats']['viewkeys']),\
-                            5,\
-                            5*ikey+1,label=label)
-            # Actually, the axes should be a part of the frame!
-            #self.frames['stats']['axes'] = axes[
-
-            # pointer to the axes' point data
-            axes[label].data = {}
-
-            # pointer to the axes' color fields
-            axes[label].fields = {}
-
-
-        fig.tight_layout()
-        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)
-
-        label ='stats_colorbar'
-        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
-        axes[label].fields = {}
-
-        from matplotlib.colors import LinearSegmentedColormap
-        cdictpres = {'blue': (\
-                           (0.,    0.,  0.),
-                           (0.25,  0.25, 0.25),
-                           (0.5,  .70, 0.70),
-                           (0.75, 1.0, 1.0),
-                           (1,     1.,  1.),
-                           ),
-               'green': (\
-                           (0. ,   0., 0.0),
-                           (0.25,  0.50, 0.50),
-                           (0.5,  .70, 0.70),
-                           (0.75,  0.50, 0.50),
-                           (1  ,    0,  0.),
-                           ),
-               'red':  (\
-                          (0 ,  1.0, 1.0),
-                          (0.25 ,  1.0, 1.0),
-                           (0.5,  .70, 0.70),
-                          (0.75 , 0.25, 0.25),
-                          (1,    0., 0.),
-                          )}
-        
-        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
-
-
-        label = 'times'
-               
-        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-
-
-        label = 'worldmap'
-               
-        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-        axes[label].lat = None
-        axes[label].lon = None
-
-        label = 'worldmap_colorbar'
-        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
-        axes[label].fields = {}
-
-        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
-        label = 'worldmap_stations'
-        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
-        axes[label].data = {}
-
-        fig.canvas.mpl_connect('pick_event', self.on_pick)
-        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)
-
-
-        """ buttons definitions """
-        
-        label = 'bprev_dataset'
-        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous dataset')
-        btns[label].on_clicked(self.prev_dataset)
-
-        label = 'bnext_dataset'
-        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next dataset')
-        btns[label].on_clicked(self.next_dataset)
-
-        label = 'bprev_datetime'
-        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous datetime')
-        btns[label].on_clicked(self.prev_datetime)
-
-        label = 'bnext_datetime'
-        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next datetime')
-        btns[label].on_clicked(self.next_datetime)
-
-
-        label = 'bprev_station'
-        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous station')
-        btns[label].on_clicked(self.prev_station)
-
-        label = 'bnext_station'
-        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next station')
-        btns[label].on_clicked(self.next_station)
-
-        label = 'bprev_record'
-        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous record')
-        btns[label].on_clicked(self.prev_record)
-
-        label = 'bnext_record'
-        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next record')
-        btns[label].on_clicked(self.next_record)
-
-
-        # self.nstatsview = nstatsview
-        # self.statsviewcmap = statsviewcmap
-        self.fig = fig
-        self.axes = axes
-        self.btns = btns
-        self.tbox = {}
-        # self.hover_active = False
-
-        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
-        #                                transform=plt.gcf().transFigure)
-
-        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
-                                          transform=plt.gcf().transFigure)
-
-        label = 'air_ap:theta'
-        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)
-
-        label = 'air_ap:q'
-        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
-
-        label = 'out:h'
-        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
-
-        label = 'out:theta'
-        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
-
-        label = 'out:q'
-        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
-
-        label = 'SEB'
-        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
-
-
-        self.hover_active = False
-        self.fig = fig
-        self.fig.show()
-        self.fig.canvas.draw()
-        self.refresh_plot_interface()
-
-
-    # def scan_stations(self):
-    #     blabla
-        
-
-
-    # def get_records(current_file):
-    #     records = pd.DataFrame()
-
-    #     # initial position
-    #     next_record_found = False
-    #     while(not next_record_found):
-    #         next_record_found = (current_file.readline() == '---\n')
-    #     next_tell = current_file.tell() 
-    #     end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #     while not end_of_file:
-    #         current_tell = next_tell
-    #         next_record_found = False
-    #         current_file.seek(current_tell)
-    #         while ( (not next_record_found) and (not end_of_file)):
-    #             current_line = current_file.readline()
-    #             next_record_found = (currentline == '---\n')
-    #             end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #         # we store the position of the next record
-    #         next_tell = current_file.tell() 
-    #         
-    #         # we get the current record. Unfortunately we need to reset the
-    #         # yaml record generator first.
-    #         current_yamlgen.close()
-    #         current_yamlgen = yaml.load_all(current_file)
-    #         current_file.seek(current_tell)
-    #         current_record_mod = current_yamlgen.__next__()
-    #     current_yamlgen.close()
-
-    #     return records
-
-       #      next_record_found = False
-       #      while(not record):
-       #          next_record_found = (self.current_file.readline() == '---\n')
-       #      self.current_tell0 = self.current_file.tell() 
-
-       #  
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell0 = self.current_file.tell() 
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell1 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell0)
-       #  self.r0 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell1)
-       #  next_record_found = False
-       #  while ( (not next_record_found) and (not end_of_file):
-       #      current_line = self.current_file.readline()
-       #      next_record_found = (currentline == '---\n')
-       #      end_of_file = (currentline == '') # an empty line means we are at the end
-
-       #  self.current_tell2 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell1)
-       #  self.r1 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell2)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell3 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell2)
-       #  self.r2 = self.current_yamlgen.__next__()
-
-       #  # go to position of next record in file
-       #  self.current_file.seek(self.current_tell3)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell4 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell3)
-       #  self.r3 = self.current_yamlgen.__next__()
- 
-       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
-
-    def goto_datetime_worldmap(self,DT,shift=None):
-        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
-            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
-            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
-            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
-                self.frames['worldmap']['iDT'] += 1
-            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
-                self.frames['worldmap']['iDT'] -= 1 
-            # for gleam, we take the values of the previous day
-            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
-                self.frames['worldmap']['iDT'] -= 2 
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-        #else:
-        #    self.frames['worldmap'].pop('DT')
-
-    def next_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def prev_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def next_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-    def prev_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-
-
-    def sel_dataset(self,inputkey):
-        self.frames['worldmap']['inputkey'] = inputkey
-        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
-        self.goto_datetime_worldmap(
-            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-            'after')# get nearest datetime of the current dataset to the profile
-        if "fig" in self.__dict__.keys():
-            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
-       
-    # def prev_station(self,event=None):
-    #     self.istation = (self.istation - 1) % self.stations.shape[0]
-    #     self.update_station()
-
-
-
-
-    #def update_datetime(self):
-    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
-    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
-    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
-    #        print(self.worldmapfocus['DT'])
-    #        self.refresh_plot_interface(only='worldmap')
-
-    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
-
-        #print('r1')
-        for argkey in args.keys():
-            self.__dict__[arg] = args[argkey]
-
-        axes = self.axes
-        tbox = self.tbox
-        frames = self.frames
-        fig = self.fig
- 
-        if (only is None) or ('worldmap' in only):
-            globaldata = self.globaldata
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
-            else:
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
-            keystotranspose = ['lat','lon']
-            for key in dict(datasetxr.dims).keys():
-                if key not in keystotranspose:
-                    keystotranspose.append(key)
-
-            datasetxr = datasetxr.transpose(*keystotranspose)
-            datasetxr = datasetxr.sortby('lat',ascending=False)
-
-            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
-            lonleft = lonleft - 360.
-            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
-            label = 'worldmap'
-            axes[label].clear()
-            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
-            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
-
-        if (only is None) or ('worldmap' in only):
-            #if 'axmap' not in self.__dict__ :
-            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
-            #else:
-
-            #stations = self.stations
-
-
-            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
-            #     resolution = 'l', 
-            # area_thresh = 0.1,
-            #     llcrnrlon=-180., llcrnrlat=-90.0,
-            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
-            # 
-            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
-            # self.gmap.drawcountries(color='white',linewidth=0.3)
-            # #self.gmap.fillcontinents(color = 'gray')
-            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
-            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
-            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
-            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # #self.ax5.shadedrelief()
-
-           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
-
-
-            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
-            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
-
-            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
-            if 'lev' in field.dims:
-                field = field.isel(lev=-1)
-
-            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
-            axes[label].axis('off')
-
-            from matplotlib import cm
-            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
-            
-            
-            title=frames['worldmap']['inputkey']
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
-            axes[label].set_title(title)
-
-            label ='worldmap_colorbar'
-            axes[label].clear()
-            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
-
-
-            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
-            # x,y = self.gmap(lons,lats)
-            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
-            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
-
-        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
-
-            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
-            store_xlim = {}
-            store_ylim = {}
-            for ikey, key in enumerate(statskeys_out):
-                if (only is not None) and ('stats_lightupdate' in only):
-                    store_xlim[key] = axes['stats_'+key].get_xlim()
-                    store_ylim[key] = axes['stats_'+key].get_ylim()
-                self.axes['stats_'+key].clear()    
-
-            label = 'times'
-            self.axes[label].clear()
-
-            key = 'dthetadt'
-            x = self.frames['stats']['records_all_stations_ini']['datetime']
-            #print(x)
-            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-            #print(y)
-            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            #print(z)
-
-            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
-            self.axes[label].data[label] = self.axes[label].scatter(x.values,
-                                                                    y.values,
-                                                                    c=z.values,
-                                                                    cmap=self.statsviewcmap,
-                                                                    s=2,
-                                                                    vmin=0.,
-                                                                    vmax=1.,
-                                                                    alpha=alpha_cloud_pixels)
-
-            
-            x = self.frames['stats']['records_current_station_ini']['datetime']
-            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-
-            x = self.frames['profiles']['records_current_station_ini']['datetime']
-            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
-            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
-
-            for ikey, key in enumerate(statskeys_out):
-
-                # show data of all stations
-                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_all_stations_mod_stats'][key]
-                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                qvalmax = x.quantile(0.999)
-                qvalmin = x.quantile(0.001)
-                print('applying extra filter over extreme values for plotting stats')
-                selx = (x >= qvalmin) & (x < qvalmax)
-                sely = (x >= qvalmin) & (x < qvalmax)
-                x = x[selx & sely]
-                y = y[selx & sely]
-                z = z[selx & sely]
-                self.axes['stats_'+key].data['stats_'+key] = \
-                       self.axes['stats_'+key].scatter(x,y, c=z,\
-                                cmap=self.statsviewcmap,\
-                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
-
-                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_current_station_mod_stats'][key]
-                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['profiles']['records_current_station_mod_stats'][key]
-                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
-
-                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
-                y = self.frames['stats']['current_record_mod_stats'][key]
-                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
-                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
-                    axes['stats_'+key].annotate(text, \
-                                               xy=(x,y),\
-                                               xytext=(0.05,0.05),\
-                                               textcoords='axes fraction',\
-                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
-                                               color='white',\
-                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
-                # self.axes['stats_'+key].data[key+'_current_record'] = \
-                #        self.axes['stats_'+key].scatter(x,y, c=z,\
-                #                 cmap=self.statsviewcmap,\
-                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
-
-                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
-                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
-                # # highlight data for curent station
-                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
-
-                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
-
-                if ikey == len(statskeys_out)-1:
-                    self.axes['stats_'+key].set_xlabel('external')
-                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
-                axes['stats_'+key].set_ylabel('model')
-
-
-                if (only is not None) and ('stats_lightupdate' in only):
-                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
-                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
-                else:
-                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
-                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
-                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
-                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
-                self.abline(1,0,axis=self.axes['stats_'+key])
-
-        if (only is None) or ('stats_colorbar' in only):
-            label ='stats_colorbar'
-            axes[label].clear()
-            import matplotlib as mpl
-            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
-            self.axes[label].fields[label] = \
-             mpl.colorbar.ColorbarBase(self.axes[label],\
-                        orientation='horizontal',\
-                        label="percentile of "+self.frames['worldmap']['inputkey'],
-                        alpha=1.,
-                                cmap=self.statsviewcmap,\
-                                       norm=norm
-                         )
-
-        #print('r1')
-        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
-            #print('r2')
-            label = 'worldmap_stations'
-            axes[label].clear()
-            
-            stations = self.frames['worldmap']['stations'].table
-            globaldata = self.globaldata
-            
-            key = label
-
-            #print('r3')
-            if (stations is not None):
-                xlist = []
-                ylist = []
-                #print('r4')
-                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
-            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    xlist.append(x)
-                    ylist.append(y)
-                #picker is needed to make it clickable (pick_event)
-                axes[label].data[label] = axes[label].scatter(xlist,ylist,
-                                                              c='r', s=15,
-                                                              picker = 15,
-                                                              label=key,
-                                                              edgecolor='k',
-                                                              linewidth=0.8)
-
-            # cb.set_label('Wilting point [kg kg-3]')
-                #print('r5')
-
-                
-            #     xseries = []
-            #     yseries = []
-            #     for iSTN,STN in stations.iterrows():
-            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
-            #         xseries.append(x)                    
-            #         yseries.append(y)
-            #         
-            #         
-            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
-                    
-                if ('current_station' in frames['worldmap']):
-                    #print('r5')
-                    STN = frames['stats']['current_station']
-                    STNID = frames['stats']['STNID']
-                    #print('r5')
-
-                    x,y = len(axes['worldmap'].lon)* \
-                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
-                          len(axes['worldmap'].lat)* \
-                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    #print('r6')
-                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
-                    #                          == \
-                    #                          self.frames['worldmap']['STNID'])\
-                    #                         & \
-                    #                         (self.seltablestats['DT'] \
-                    #                          == self.axes['statsview0].focus['DT']) \
-                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
-                    #print('r7')
-                    text = 'STNID: '+ format(STNID,'10.0f') + \
-                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
-                            ', LON: '+format(STN['longitude'],'3.3f')+ \
-                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
-
-                            #+', VAL: '+format(VAL,'.3e')
-
-                    axes[label].scatter(x,y, c='r', s=30,\
-                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
-                    #print('r8')
-            
-                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
-                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
-                    #colorstation = max((min((1.,colorstation)),0.))
-                    colorstation =0.2
-                    from matplotlib import cm
-                    axes[label].annotate(text,
-                                         xy=(x,y),
-                                         xytext=(0.05,0.05),
-                                         textcoords='axes fraction', 
-                                         bbox=dict(boxstyle="round",
-                                         fc = cm.viridis(colorstation)),
-                                         arrowprops=dict(arrowstyle="->",
-                                                         linewidth=1.1),
-                                         color='white' if colorstation < 0.5 else 'black')
-                    #print('r9')
-
-                    # #pos = sc.get_offsets()[ind["ind"][0]]
-                    # 
-                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
-                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
-                    # axes[label].data[label+'statannotate'].set_text(text)
-                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
-                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
-            #print('r9')
-            axes[label].axis('off')
-            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
-            axes[label].set_ylim((len(axes['worldmap'].lat),0))
-            #print('r10')
-
-        if (only is None) or ('profiles' in only): 
-            #print('r11')
-
-            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
-            # # self.update_station(goto_first_sounding=False)
-            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
-            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
-            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
-            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
-
-            label = 'air_ap:theta'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-                # +\
-                # ' -> '+ \
-                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-            
-            
-            
-            
-            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-            #print('r12')
-
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
-            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
-            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
-            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            #print('r13')
-            # 
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r14')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-
-            #print('r15')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-                          
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r16')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r17')
-            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
-            print(hmax)
-            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
-            if valid_mod:
-
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="mod "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                                 +'LT')
-
-            #print('r18')
-            axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('theta [K]')
-
-            label = 'air_ap:q'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-
-            #print('r19')
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            if valid_mod:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            else:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            # 
-            #print('r20')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r21')
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            #print('r23')
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r24')
-            if valid_mod:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="fit ")#+\
-                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
-                             #+'LT')
-            #print('r25')
-            #axes[label].legend()
-
-            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            #axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('q [kg/kg]')
-
-            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-
-            # #pl.subplots_adjust(right=0.6)
-
-            # label = 'q_pro'
-            # axes[label].clear()
-
-            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
-            # 
-            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
-            # 
-            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
-
-            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
-            # #pl.subplots_adjust(right=0.6)
-            # axes[label].set_xlabel('specific humidity [kg/kg]')
- 
-
-            #print('r26')
-            time = self.frames['profiles']['record_yaml_mod'].out.time
-            for ilabel,label in enumerate(['h','theta','q']):
-                axes["out:"+label].clear()
-                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
-                axes["out:"+label].set_ylabel(label)
-                if ilabel == 2:
-                    axes["out:"+label].set_xlabel('local sun time [h]')
-                
-            #print('r27')
-            label = 'SEB'
-            axes[label].clear()
-            
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
-            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
-            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
-            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
-                
-            #print('r28')
-            
-            axes[label].legend()
-            
-            #         for ax in self.fig_timeseries_axes:
-#             ax.clear()
-#         
-#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
-#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
-#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
-#         #print(self.morning_sounding.c4gl.out.Swin)
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
-#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
-#         self.fig_timeseries_axes[3].legend()
-#         self.fig.canvas.draw()
-            
-
-
-
-
-
-
-        #self.ready()
-        #print('r29')
-        fig.canvas.draw()
-        #fig.show()
-
-        self.axes = axes
-        self.tbox = tbox
-        self.fig = fig
-
-    def on_pick(self,event):
-        #print("HELLO")
-        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
-        #self.axes['theta_pro'].clear()
-        #self.axes['q_pro'].clear()
-        
-
-        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
-        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
-        keys_to_axes = {}
-        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
-
-        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
-        keys_to_axes['worldmap'] = 'worldmap'
-        
-        axes = self.axes
-        #nstatsview = self.nstatsview
-        #statsviewcmap = self.statsviewcmap
-        stations = self.frames['worldmap']['stations'].table
-
-
-        #print("p1")
-        current = event
-        artist = event.artist
-        
-        selkey = artist.get_label()
-        
-        #print(keys_to_axes)
-        
-        label = keys_to_axes[selkey]
-        #print("HELLO",selkey,label)
-
-        # # Get to know in which axes we are
-        # label = None
-        # for axeskey in axes.keys():
-        #     if event.inaxes == axes[axeskey]:
-        #         label = axeskey
-        #         
-
-        # cont, pos = None, None
-        
-        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
-        ind = event.ind
-        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
-        d = axes[label].collections[0]
-        #d.set_offset_position('data')
-        xy = d.get_offsets()
-        x, y =  xy[:,0],xy[:,1]
-        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
-
-        #print("p2")
-        if len(ind) > 0:
-            #print("p3")
-            pos = x[ind[0]], y[ind[0]]
-
-            #if label[:-1] == 'statsview':
-            #    #seltablestatsstdrel = self.seltablestatsstdrel
-            #    #seltablestatspct = self.seltablestatspct
-
-            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-            #    
-            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
-            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-            #    
-            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
-            #el
-            if (label == 'worldmap') or (label == 'worldmap_stations'):
-                self.hover_active = False
-                if (self.frames['worldmap']['STNID'] !=
-                    self.frames['profiles']['STNID']):
-                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
-                # so we just need to perform update_station
-                    self.update_station()
-            elif (label[:5] == 'stats'):
-
-                self.hover_active = False
-                if (self.frames['stats']['STNID'] !=
-                self.frames['profiles']['STNID']) or \
-                   (self.frames['stats']['current_record_chunk'] != 
-                    self.frames['profiles']['current_record_chunk']) or \
-                   (self.frames['stats']['current_record_index'] != 
-                    self.frames['profiles']['current_record_index']):
-
-
-
-                    for key in ['STNID','current_station','stations_iterator']: 
-                        self.frames['worldmap'][key] = self.frames['stats'][key] 
-
-                    for key in self.frames['stats'].keys():
-                        self.frames['profiles'][key] = self.frames['stats'][key]
-
-                    STNID = self.frames['profiles']['STNID']
-                    chunk = self.frames['profiles']['current_record_chunk']
-                    if 'current_station_file_ini' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_ini'].close()
-                    self.frames['profiles']['current_station_file_ini'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-                    if 'current_station_file_mod' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_mod'].close()
-                    self.frames['profiles']['current_station_file_mod'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_afternoon'].close()
-                    self.frames['profiles']['current_station_file_afternoon'] = \
-                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-                    # go to hovered record of current station
-                    self.frames['profiles']['records_iterator'] = \
-                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # ... and go to the record of the profile window (last one that
-                    # was picked by the user)
-                    found = False
-                    EOF = False
-                    while (not found) and (not EOF):
-                        try:
-                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
-                            #print("hello*")
-                            #print(self.frames['profiles']['current_record_index'])
-                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
-                               (index == self.frames['profiles']['current_record_index']) and \
-                               (STNID == self.frames['profiles']['STNID']):
-                                #print('found!')
-                                found = True
-                        except StopIteration:
-                            EOF = True
-                    if found:
-                        self.frames['stats']['current_record_mod'] = record
-                        self.frames['stats']['current_record_chunk'] = chunk
-                        self.frames['stats']['current_record_index'] = index
-                    # # for the profiles we make a distinct record iterator, so that the
-                    # # stats iterator can move independently
-                    # self.frames['profiles']['records_iterator'] = \
-                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # (self.frames['profiles']['STNID'] , \
-                    # self.frames['profiles']['current_record_index']) , \
-                    # self.frames['profiles']['current_record_mod'] = \
-                    #                 self.frames['profiles']['records_iterator'].__next__()
-
-
-                    # for the profiles we make a distinct record iterator, so that the
-                    # stats iterator can move independently
-
-                    self.update_record()
-
-
-
-    def on_plot_hover(self,event):
-        axes = self.axes
-        #print('h1')
-
-        # Get to know in which axes we are
-        label = None
-        for axeskey in axes.keys():
-            if event.inaxes == axes[axeskey]:
-                label = axeskey
-                
-        #print('h2')
-
-        cont, pos = None, None
-        #print (label)
-        
-        if label is not None:
-            if  ('data' in axes[label].__dict__.keys()) and \
-                (label in axes[label].data.keys()) and \
-                (axes[label].data[label] is not None):
-                
-                #print('h3')
-                cont, ind =  axes[label].data[label].contains(event)
-                selkey = axes[label].data[label].get_label()
-                if len(ind["ind"]) > 0:
-                    #print('h4')
-                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
-                    #print('pos',pos,selkey)
-
-
-                    #if label[:-1] == 'statsview':
-                    #    seltablestatsstdrel = self.seltablestatsstdrel
-                    #    seltablestatspct = self.seltablestatspct
-
-                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
-                    #    self.hover_active = True
-                    #    
-                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
-                    #    
-                    #el
-                    #print(label[:5])
-                    if (label[:5] == 'stats') or (label == 'times'):
-                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
-                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
-                        
-
-                        if label[:5] == 'stats':
-                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                            (self.frames['stats']['STNID'] ,
-                             self.frames['stats']['current_record_chunk'], 
-                             self.frames['stats']['current_record_index']) = \
-                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-                        # elif label[:5] == 'stats':
-                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
-                        #     (self.frames['stats']['STNID'] ,
-                        #      self.frames['stats']['current_record_chunk'], 
-                        #      self.frames['stats']['current_record_index']) = \
-                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-
-
-                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-                        
-                        # # TO TEST: should be removed, since it's is also done just below
-                        # self.frames['stats']['stations_iterator'] = \
-                        #     self.frames['worldmap']['stations_iterator'] 
-                
-                
-                        # self.goto_datetime_worldmap(
-                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-                        #     'after')
-
-
-                        # scrolling to the right station
-                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                        EOF = False
-                        found = False
-                        while (not found and not EOF):
-                            if (STNID == self.frames['stats']['STNID']):
-                                   found = True 
-                            if not found:
-                                try:
-                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                                except (StopIteration):
-                                    EOF = True
-                        if found:
-                        #    self.frames['stats']['STNID'] = STNID
-                            self.frames['stats']['current_station'] =  station
-
-                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
-                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
-
-
-                        # generate index of the current station
-                        self.frames['stats']['records_current_station_index'] = \
-                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                             == self.frames['stats']['STNID'])
-
-
-                        tab_suffixes = \
-                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            self.frames['stats']['records_current_station'+tab_suffix] = \
-                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-
-                        # go to hovered record of current station
-                        self.frames['stats']['records_iterator'] = \
-                                        records_iterator(self.frames['stats']['records_current_station_mod'])
-
-
-                        # ... and go to the record of the profile window (last one that
-                        # was picked by the user)
-                        found = False
-                        EOF = False
-                        while (not found) and (not EOF):
-                            try:
-                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                                #print("hello*")
-                                #print(self.frames['profiles']['current_record_index'])
-                                if (index == self.frames['stats']['current_record_index']) and \
-                                   (chunk == self.frames['stats']['current_record_chunk']) and \
-                                   (STNID == self.frames['stats']['STNID']):
-                                    #print('found!')
-                                    found = True
-                            except StopIteration:
-                                EOF = True
-                        if found:
-                            #print('h5')
-                            self.frames['stats']['current_record_mod'] = record
-                            self.frames['stats']['current_record_chunk'] = chunk
-                            self.frames['stats']['current_record_index'] = index
-
-                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
-                        tab_suffixes = \
-                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            #print(tab_suffix)
-                            #print(self.frames['stats']['records_current_station'+tab_suffix])
-                            self.frames['stats']['current_record'+tab_suffix] =  \
-                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                      (self.frames['stats']['STNID'] , \
-                                       self.frames['stats']['current_record_chunk'] , \
-                                       self.frames['stats']['current_record_index'])]
-
-
-                        self.hover_active = True
-                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                        # print('h13')
-                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
-                        #     self.goto_datetime_worldmap(
-                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                        #         'after')
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap',
-                        #                                           'profiles'])
-                        # else:
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap_stations',
-                        #                                           'profiles'])
-
-
-
-                    elif label in ['worldmap_stations','worldmap']:
-                        #print('h5')
-
-                        if (self.axes['worldmap'].lat is not None) and \
-                           (self.axes['worldmap'].lon is not None):
-
-
-                            #self.loading()
-                            self.fig.canvas.draw()
-                            self.fig.show()
-
-
-                            # get position of 
-                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
-                                                                 self.axes['worldmap'].lat[0]) + \
-                                           self.axes['worldmap'].lat[0],4)
-                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
-                                                                 self.axes['worldmap'].lon[0]) + \
-                                           self.axes['worldmap'].lon[0],4)
-                        
-                            stations = self.frames['worldmap']['stations'].table
-                            #print('h7')
-                        
-                            #reset stations iterator:
-                            # if 'stations_iterator' in self.frames['worldmap'].keys():
-                            #     self.frames['worldmap']['stations_iterator'].close()
-                            #     del(self.frames['worldmap']['stations_iterator'])
-                            # if 'stations_iterator' in self.frames['stats'].keys():
-                            #     self.frames['stats']['stations_iterator'].close()
-                            #     del(self.frames['stats']['stations_iterator'])
-                            self.frames['worldmap']['stations_iterator'] =\
-                               stations_iterator(self.frames['worldmap']['stations'])
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                            EOF = False
-                            found = False
-                            while (not found and not EOF):
-                                #print('h8',station.latitude,latmap)
-                                #print('h8',station.longitude,lonmap)
-                                if (round(station.latitude,3) == round(latmap,3)) and \
-                                    (round(station.longitude,3) == round(lonmap,3)):
-                                       found = True 
-                                if not found:
-                                    try:
-                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                                    except (StopIteration):
-                                        EOF = True
-                            if found:
-                                self.frames['worldmap']['STNID'] = STNID
-                                self.frames['worldmap']['current_station'] = \
-                                        station
-                        
-                            self.frames['stats']['stations_iterator'] = \
-                                self.frames['worldmap']['stations_iterator'] 
-                            #print('h8')
-                            # inherit station position for the stats frame...
-                            for key in self.frames['worldmap'].keys():
-                                self.frames['stats'][key] = self.frames['worldmap'][key]
-                                
-                            ## fetch records of current station...
-                            #self.frames['stats']['records_current_station_mod'] =\
-                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                            # ... and their indices
-                            self.frames['stats']['records_current_station_index'] = \
-                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                                     == \
-                                     self.frames['stats']['current_station'].name)
-
-
-                            tab_suffixes = \
-                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['records_current_station'+tab_suffix] = \
-                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-                            # ... create a record iterator ...
-                            #self.frames['stats']['records_iterator'].close()
-                            del(self.frames['stats']['records_iterator'])
-                            self.frames['stats']['records_iterator'] = \
-                                self.frames['stats']['records_current_station_mod'].iterrows()
-
-
-
-                        
-                            #print('h9')
-                            # ... and go to to the first record of the current station
-                            (self.frames['stats']['STNID'] , \
-                             self.frames['stats']['current_record_chunk'] , \
-                             self.frames['stats']['current_record_index']) , \
-                            self.frames['stats']['current_record_mod'] = \
-                                self.frames['stats']['records_iterator'].__next__()
-                        
-
-
-
-                            #print('h10')
-                            # cash the current record
-                            tab_suffixes = \
-                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['current_record'+tab_suffix] =  \
-                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                          (self.frames['stats']['STNID'] , \
-                                           self.frames['stats']['current_record_chunk'] , \
-                                           self.frames['stats']['current_record_index'])]
-
-                            #print('h11')
-                            
-                            self.hover_active = True
-                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                            #print('h13')
-
-                        
-
-            #if (stations is not None):
-            #    for iSTN,STN in stations.iterrows():
-            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
-            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
-
-        # self.fig.show()
- 
-        # we are hovering on nothing, so we are going back to the position of
-        # the profile sounding
-        if pos is None:
-            if self.hover_active == True:
-                #print('h1*')
-                
-                #self.loading()
-                # to do: reset stations iterators
-
-                # get station and record index from the current profile
-                for key in ['STNID', 'current_station']:
-                    self.frames['stats'][key] = self.frames['profiles'][key]
-
-                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
-                self.frames['stats']['current_station'] = \
-                        self.frames['profiles']['current_station']
-                #print('h3a*')
-                self.frames['stats']['records_current_station_mod'] = \
-                        self.frames['profiles']['records_current_station_mod']
-                #print('h3b*')
-
-                # the next lines recreate the records iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-
-                # reset stations iterator...
-                #self.frames['stats']['records_iterator'].close()
-                del(self.frames['stats']['records_iterator'])
-                self.frames['stats']['records_iterator'] = \
-                    self.frames['stats']['records_current_station_mod'].iterrows()
-                #print('h4*')
-
-                # ... and go to the record of the profile window (last one that
-                # was picked by the user)
-                found = False
-                EOF = False
-                while (not found) and (not EOF):
-                    try:
-                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                        #print("hello*")
-                        #print(self.frames['profiles']['current_record_index'])
-                        #print(self.frames['profiles']['STNID'])
-                        #print(STNID,index)
-                        if (index == self.frames['profiles']['current_record_index']) and \
-                            (chunk == self.frames['profiles']['current_record_chunk']) and \
-                            (STNID == self.frames['profiles']['STNID']):
-                            #print('found!')
-                            found = True
-                    except StopIteration:
-                        EOF = True
-                if found:
-                    #print('h5*')
-                    self.frames['stats']['current_record_mod'] = record
-                    self.frames['stats']['current_record_chunk'] = chunk
-                    self.frames['stats']['current_record_index'] = index
-
-                #print('h6*')
-
-
-
-                # # fetch records of current station...
-                # self.frames['stats']['records_current_station_mod'] =\
-                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                # ... and their indices
-                self.frames['stats']['records_current_station_index'] = \
-                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                         == \
-                         self.frames['stats']['current_station'].name)
-
-
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['records_current_station'+tab_suffix] = \
-                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-                
-
-                # cash the records of the current stations
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['current_record'+tab_suffix] =  \
-                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                              (self.frames['stats']['STNID'] , \
-                               self.frames['stats']['current_record_chunk'] , \
-                               self.frames['stats']['current_record_index'])]
-
-
-                # the next lines recreate the stations iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-                #print('h7*')
-
-                # reset the stations iterators
-                for framekey in ['stats','worldmap']:
-                    ##print(framekey)
-                    if 'stations_iterator' in self.frames[framekey]:
-                        #self.frames[framekey]['stations_iterator'].close()
-                        del(self.frames[framekey]['stations_iterator'])
-
-                self.frames['worldmap']['current_station'] = \
-                        self.frames['profiles']['current_station']
-
-                #recreate the stations iterator for the worldmap...
-                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-
-                # ... and go the position of the profile
-                #print('h8*')
-                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                EOF = False
-                found = False
-                while (not found and not EOF):
-                    if STNID == self.frames['profiles']['STNID'] :
-                        found = True 
-                    if not found:
-                        try:
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                        except (StopIteration):
-                            EOF = True
-                if found:
-                    self.frames['worldmap']['current_station'] = station
-                    self.frames['worldmap']['STNID'] = STNID
-                #print('h9*')
-                self.frames['stats']['stations_iterator'] = \
-                    self.frames['worldmap']['stations_iterator'] 
-
-                # the stats window now inherits the current station from the
-                # worldmap
-                for key in ['STNID','current_station','stations_iterator']: 
-                    self.frames['stats'][key] = self.frames['worldmap'][key] 
-                #print('h10*')
-
-                # # we now only need inherit station position and go to first record
-                # for key in self.frames['worldmap'].keys():
-                #     self.frames['stats'][key] = self.frames['worldmap'][key]
-
-                # self.frames['stats']['records_current_station'] =\
-                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
-
-                # #print(self.frames['stats']['records_current_station'])
-                # self.frames['stats']['records_iterator'] = \
-                #                 self.frames['stats']['records_current_station'].iterrows()
-                # (self.frames['stats']['STNID'] , \
-                # self.frames['stats']['current_record_index']) , \
-                # self.frames['stats']['current_record_mod'] = \
-                #                 self.frames['stats']['records_iterator'].__next__()
-                
-
-
-
-
-
-
-                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
-                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
-                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
-                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-                self.hover_active = False
-                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
-    # def loading(self):
-    #     self.tbox['loading'].set_text('Loading...')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-    #     sleep(0.1)
-    # def ready(self):
-    #     self.tbox['loading'].set_text('Ready')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-
-
-
diff --git a/lib/model.py b/lib/model.py
deleted file mode 100644
index 8760411..0000000
--- a/lib/model.py
+++ /dev/null
@@ -1,2214 +0,0 @@
-# 
-# CLASS
-# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
-# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
-# Copyright (c) 2011-2015 Chiel van Heerwaarden
-# Copyright (c) 2011-2015 Bart van Stratum
-# Copyright (c) 2011-2015 Kees van den Dries
-# 
-# This file is part of CLASS
-# 
-# CLASS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published bygamma
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-# 
-# CLASS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with CLASS.  If not, see .
-#
-
-import copy as cp
-import numpy as np
-import sys
-import warnings
-import pandas as pd
-from ribtol_hw import zeta_hs2 , funcsche
-import logging
-#from SkewT.thermodynamics import Density
-#import ribtol
-
-grav = 9.81
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-
-def qsat(T,p):
-    return 0.622 * esat(T) / p
-
-
-def ribtol(Rib, zsl, z0m, z0h): 
-    Rib = np.float64(Rib)
-    zsl = np.float64(zsl)
-    z0m = np.float64(z0m)
-    z0h = np.float64(z0h)
-    #print(Rib,zsl,z0m,z0h)
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    #print(Rib,zsl,z0m,z0h)
-    while (abs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-        #print(L,fx/fxdif)
-        if(abs(L) > 1e12):
-            break
-
-    return L
-  
-def psim(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psim = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-  
-def psih(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * np.log( (1. + x*x) / 2.)
-        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
- 
-class model:
-    def __init__(self, model_input = None,debug_level=None):
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        self.logger = logging.getLogger('model')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        """ initialize the different components of the model """ 
-
-        if model_input is not None:
-            # class4gl style input
-            if 'pars' in model_input.__dict__.keys():
-
-                # we make a reference to the full input first, so we can dump it
-                # afterwards
-                self.input_c4gl = model_input
-
-                # we copy the regular parameters first. We keep the classical input
-                # format as self.input so that we don't have to change the entire
-                # model code.
-                self.input = cp.deepcopy(model_input.pars)
-
-                # we copy other sections we are interested in, such as profile
-                # data, and store it also under input
-
-                # I know we mess up a bit the structure of the class4gl_input, but
-                # we will make it clean again at the time of dumping data
-
-                # So here, we copy the profile data into self.input
-                # 1. Air circulation data 
-                if 'sw_ac' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ac']:
-                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
-                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
-
-                    # correct pressure of levels according to surface pressure
-                    # error (so that interpolation is done in a consistent way)
-
-                    p_e = self.input.Ps - self.input.sp
-                    for irow in self.input.air_ac.index[::-1]:
-                       self.input.air_ac.p.iloc[irow] =\
-                        self.input.air_ac.p.iloc[irow] + p_e
-                       p_e = p_e -\
-                       (self.input.air_ac.p.iloc[irow]+p_e)/\
-                        self.input.air_ac.p.iloc[irow] *\
-                        self.input.air_ac.delpdgrav.iloc[irow]*grav
-
-
-
-                # 2. Air circulation data 
-                if 'sw_ap' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ap']:
-                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
-
-            # standard class input
-            else:
-                self.input = cp.deepcopy(model_input)
-
-    def load_yaml_dict(self,yaml_dict):
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                for keydata,value in data.items():
-                    self.__dict__[keydata] = value
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            #elif key == 'sources':
-            #    self.__dict__[key] = data
-            elif key == 'out':
-                # lets convert it to a list of dictionaries
-                dictouttemp = pd.DataFrame(data).to_dict('list')
-            else: 
-                 warnings.warn("Key '"+key+"' is be implemented.")
-            #     self.__dict__[key] = data
-
-
-        self.tsteps = len(dictouttemp['h'])
-        self.out = model_output(self.tsteps)
-        for keydictouttemp in dictouttemp.keys():
-            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
-
-
-  
-    def run(self):
-        # initialize model variables
-        self.init()
-  
-        # time integrate model 
-        #for self.t in range(self.tsteps):
-        while self.t < self.tsteps:
-          
-            # time integrate components
-            self.timestep()
-  
-        # delete unnecessary variables from memory
-        self.exitmodel()
-    
-    def init(self):
-        # assign variables from input data
-        # initialize constants
-        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
-        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        self.rho        = 1.2                   # density of air [kg m-3]
-        self.k          = 0.4                   # Von Karman constant [-]
-        self.g          = 9.81                  # gravity acceleration [m s-2]
-        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-        self.bolz       = 5.67e-8               # Bolzman constant [-]
-        self.rhow       = 1000.                 # density of water [kg m-3]
-        self.S0         = 1368.                 # solar constant [W m-2]
-
-        # A-Gs constants and settings
-        # Plant type:       -C3-     -C4-
-        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
-        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
-        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
-        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
-        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
-        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
-        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
-        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
-        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
-        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
-        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
-
-        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
-        self.mair       =  28.9;                # molecular weight air [g mol -1]
-        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
-
-        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
-        self.wmax       =  0.55;                # upper reference value soil water [-]
-        self.wmin       =  0.005;               # lower reference value soil water [-]
-        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
-        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
-
-        # Read switches
-        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
-        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
-        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
-        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
-        self.sw_sl      = self.input.sw_sl      # surface layer switch
-        self.sw_rad     = self.input.sw_rad     # radiation switch
-        self.sw_ls      = self.input.sw_ls      # land surface switch
-        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
-        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
-
-        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
-        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
-        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-  
-        # initialize mixed-layer
-        self.h          = self.input.h          # initial ABL height [m]
-        self.Ps         = self.input.Ps         # surface pressure [Pa]
-        self.sp         = self.input.sp         # This is also surface pressure
-                                                #but derived from the global data [Pa]
-        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
-        self.ws         = None                  # large-scale vertical velocity [m s-1]
-        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
-        self.we         = -1.                   # entrainment velocity [m s-1]
-       
-         # Temperature 
-        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
-        
-        
-        self.substep    = False
-        self.substeps   = 0
-
-
-
-        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
-        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
-        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
-        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
-        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
- 
-        self.wstar      = 0.                    # convective velocity scale [m s-1]
- 
-        # 2m diagnostic variables 
-        self.T2m        = None                  # 2m temperature [K]
-        self.q2m        = None                  # 2m specific humidity [kg kg-1]
-        self.e2m        = None                  # 2m vapor pressure [Pa]
-        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
-        self.u2m        = None                  # 2m u-wind [m s-1]
-        self.v2m        = None                  # 2m v-wind [m s-1]
- 
-        # Surface variables 
-        self.thetasurf  = self.input.theta      # surface potential temperature [K]
-        self.thetavsurf = None                  # surface virtual potential temperature [K]
-        self.qsurf      = None                  # surface specific humidity [g kg-1]
-
-        # Mixed-layer top variables
-        self.P_h        = None                  # Mixed-layer top pressure [pa]
-        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
-        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
-        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
-        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
-        self.dz_h       = None                  # Transition layer thickness [-]
-        self.lcl        = None                  # Lifting condensation level [m]
-
-        # Virtual temperatures and fluxes
-        self.thetav     = None                  # initial mixed-layer potential temperature [K]
-        self.dthetav    = None                  # initial virtual temperature jump at h [K]
-        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
-        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
-       
-        
-        
-        
-        
-        
-        # Moisture 
-        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
-
-        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
-        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
-        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
-  
-        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
-        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
-        self.e          = None                  # mixed-layer vapor pressure [Pa]
-        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
-        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
-      
-        
-        
-        # CO2
-        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
-        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
-        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
-        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
-        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
-        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
-        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
-        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
-        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
-        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
-       
-        # Wind 
-        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
-        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
-        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = self.input.advu       # advection of u-wind [m s-2]
-        
-        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
-        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = self.input.advv       # advection of v-wind [m s-2]
-         
-  # BEGIN -- HW 20170606
-        # z-coordinate for vertical profiles of stratification above the mixed-layer height
-
-        if self.sw_ac:
-        # this is the data frame with the grided profile on the L60 grid
-        # (subsidence, and advection) 
-            self.air_ac      = self.input.air_ac  # full level air circulation
-                                                  # forcing
-            # self.air_ach     = self.input.air_ach # half level air circulation
-            #                                       # forcing
-            
-
-        if self.sw_ap:
-        # this is the data frame with the fitted profile (including HAGL,
-        # THTA,WSPD, SNDU,WNDV PRES ...)
-            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
-
-            # just for legacy reasons...
-            if 'z' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
-            if 'p' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
-
-            indexh = np.where(self.air_ap.z.values == self.h)
-            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
-                raise ValueError("Error input profile consistency: mixed- \
-                                 layer height needs to be equal to the second \
-                                 and third \
-                                 level of the vertical profile input!")
-            # initialize q from its profile when available
-            p_old = self.Ps
-            p_new = self.air_ap.p[indexh[0][0]]
-            
-            if ((p_old is not None) & (p_old != p_new)):
-                print("Warning: Ps input was provided ("+str(p_old)+\
-                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
-                    +str(p_new)+"Pa).")
-                                    
-            self.Ps = p_new
-            # these variables/namings are more convenient to work with in the code
-            # we will update the original variables afterwards
-            #self.air_ap['q'] = self.air_ap.QABS/1000.
-
-            self.air_ap = \
-                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
-            # we require the temperature fields, since we need to consider
-            # advection
-            # if self.sw_ac:
-            #     #self.air_ap['theta'] = self.air_ap['t'] *
-
-            #     # we consider self.sp in case of air-circulation input (for
-            #     # consistence)
-            #     self.air_ap['t'] = \
-            #                 self.air_ap.theta *  \
-            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
-            # else:
-            # we consider self.Ps in case of balloon input only 
-            self.air_ap = self.air_ap.assign(t = lambda x: \
-                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
-
-            #self.air_ap['theta'] = self.air_ap.THTA
-            if 'u' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
-            if 'v' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
-
-            for var in ['theta','q','u','v']:
-
-                
-                if self.air_ap[var][1] != self.air_ap[var][0]:
-                    raise ValueError("Error input profile consistency: two \
-                                     lowest profile levels for "+var+" should \
-                                     be equal.")
-                
-                # initialize the value from its profile when available
-                value_old = self.__dict__[var]
-                value_new = self.air_ap[var][indexh[0][0]]
-                
-                if ((value_old is not None) & (value_old != value_new)):
-                    warnings.warn("Warning:  input was provided \
-                                     ("+str(value_old)+ "kg kg-1), \
-                                     but it is now overwritten by the first \
-                                     level (index 0) of air_ap]var\ which is \
-                                     different (" +str(value_new)+"K).")
-                                        
-                self.__dict__[var] = value_new
-
-                # make a profile of the stratification 
-                # please note that the stratification between z_pro[i] and
-                # z_pro[i+1] is given by air_ap.GTHT[i]
-
-                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
-                # np.gradient(self.z_pro)
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
-
-
-                self.__dict__['gamma'+var] = \
-                    self.air_ap['gamma'+var][np.where(self.h >= \
-                                                     self.air_ap.z)[0][-1]]
-
-
-
-        # the variable p_pro is just for diagnosis of lifted index
-            
-            
-
-            # input Ph is wrong, so we correct it according to hydrostatic equation
-            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-
-            #if self.sw_ac:
-                # note that we use sp as surface pressure, which is determined
-                # from era-interim instead of the observations. This is to
-                # avoid possible failure of the interpolation routine
-                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
-                #                          + \
-                #                          list(self.air_ap.p[3:]))
-
-            # else:
-                # in the other case, it is updated at the time of calculting
-                # the statistics 
-
-# END -- HW 20170606      
-        #print(self.air_ap)
-
-        if self.sw_ac and not self.sw_ap:
-            raise ValueError("air circulation switch only possible when air \
-                             profiles are given")
-        
-        if self.sw_ac:
-
-            # # # we comment this out, because subsidence is calculated
-            # according to advection
-            # #interpolate subsidence towards the air_ap height coordinate
-            # self.air_ap['w'] = np.interp(self.air_ap.p,\
-            #                               self.air_ac.p,\
-            #                               self.air_ac.w) 
-            # #subsidence at the mixed-layer top
-            # self.w = self.air_ap.w[1]
-        
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-                # in case we didn't find any points, we just take the lowest one.
-                # actually, this can happen if ERA-INTERIM pressure levels are
-                # inconsistent with 
-                if in_ml.sum() == 0:
-                    warnings.warn(" no circulation points in the mixed layer \
-                                  found. We just take the bottom one.")
-                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-
-                for var in ['t','q','u','v']:
-    
-                   # calculation of the advection variables for the mixed layer
-                   # we weight by the hydrostatic thickness of each layer and
-                   # divide by the total thickness
-                   self.__dict__['adv'+var] = \
-                            ((self.air_ac['adv'+var+'_x'][in_ml] \
-                             + \
-                             self.air_ac['adv'+var+'_y'][in_ml])* \
-                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                            self.air_ac['delpdgrav'][in_ml].sum()
-
-                   # calculation of the advection variables for the profile above
-                   # (lowest 3 values are not used by class)
-                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
-                   self.air_ap['adv'+var] = \
-                           np.interp(self.air_ap.p,\
-                                     self.air_ac.p,\
-                                     self.air_ac['adv'+var+'_x']) \
-                           + \
-                           np.interp(self.air_ap.p, \
-                                       self.air_ac.p, \
-                                       self.air_ac['adv'+var+'_y'])
-
-                # as an approximation, we consider that advection of theta in the
-                # mixed layer is equal to advection of t. This is a sufficient
-                # approximation since theta and t are very similar at the surface
-                # pressure.
-                self.__dict__['advtheta'] = self.__dict__['advt']
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # self.wrho = np.interp(self.P_h,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) 
-            # self.ws   = self.air_ap.w.iloc[1]
-
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                self.air_ap = self.air_ap.assign(wp = 0.)
-                self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                              self.air_ac.p, \
-                                              self.air_ac['wp'])
-                self.air_ap = self.air_ap.assign(R = 0.)
-                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                     self.Rv*self.air_ap.q)
-                self.air_ap = self.air_ap.assign(rho = 0.)
-                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-                
-                self.air_ap = self.air_ap.assign(w = 0.)
-                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-                #print('hello w ini')
-
-                # Note: in case of sw_ac is False, we update it from prescribed
-                # divergence
-                self.ws   = self.air_ap.w[1]
-
-                # self.ws   = self.wrho/self.rho
-                # self.ws   = self.wrho/(self.P_h/ \
-                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
-                #                         self.theta) # this should be T!!!
-
-                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-                #                         + \
-                #                         self.air_ac['divU_y'][in_ml])* \
-                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                #             self.air_ac['delpdgrav'][in_ml].sum() \
-        
-
-        # Tendencies 
-        self.htend      = None                  # tendency of CBL [m s-1]
-        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
-        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
-        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
-        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
-        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
-        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
-        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
-        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
-        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
-        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
-        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
-  
-        # initialize surface layer
-        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
-        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
-        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
-        self.z0m        = self.input.z0m        # roughness length for momentum [m]
-        self.z0h        = self.input.z0h        # roughness length for scalars [m]
-        self.Cm         = 1e12                  # drag coefficient for momentum [-]
-        self.Cs         = 1e12                  # drag coefficient for scalars [-]
-        self.L          = None                  # Obukhov length [m]
-        self.Rib        = None                  # bulk Richardson number [-]
-        self.ra         = None                  # aerodynamic resistance [s m-1]
-  
-        # initialize radiation
-        self.lat        = self.input.lat        # latitude [deg]
-        #self.fc         = self.input.fc         # coriolis parameter [s-1]
-        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
-        self.lon        = self.input.lon        # longitude [deg]
-        self.doy        = self.input.doy        # day of the year [-]
-        self.tstart     = self.input.tstart     # time of the day [-]
-        self.cc         = self.input.cc         # cloud cover fraction [-]
-        self.Swin       = None                  # incoming short wave radiation [W m-2]
-        self.Swout      = None                  # outgoing short wave radiation [W m-2]
-        self.Lwin       = None                  # incoming long wave radiation [W m-2]
-        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
-        self.Q          = self.input.Q          # net radiation [W m-2]
-        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
-  
-        # initialize land surface
-        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
-        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
-        self.T2         = self.input.T2         # temperature deeper soil layer [K]
-                           
-        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
-        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
-        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
-        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
-                           
-        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
-        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
-        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
-                           
-        self.C1sat      = self.input.C1sat      
-        self.C2ref      = self.input.C2ref      
-
-        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
-        
-        self.LAI        = self.input.LAI        # leaf area index [-]
-        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
-        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = self.input.alpha      # surface albedo [-]
-  
-        self.rs         = 1.e6                  # resistance transpiration [s m-1]
-        self.rssoil     = 1.e6                  # resistance soil [s m-1]
-                           
-        self.Ts         = self.input.Ts         # surface temperature [K]
-                           
-        self.cveg       = self.input.cveg       # vegetation fraction [-]
-        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
-        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
-        self.cliq       = None                  # wet fraction [-]
-                          
-        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
-  
-        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
-        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
-        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
-  
-        self.H          = None                  # sensible heat flux [W m-2]
-        self.LE         = None                  # evapotranspiration [W m-2]
-        self.LEliq      = None                  # open water evaporation [W m-2]
-        self.LEveg      = None                  # transpiration [W m-2]
-        self.LEsoil     = None                  # soil evaporation [W m-2]
-        self.LEpot      = None                  # potential evaporation [W m-2]
-        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
-        self.G          = None                  # ground heat flux [W m-2]
-
-        # initialize A-Gs surface scheme
-        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
-
-        # initialize cumulus parameterization
-        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
-        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
-        self.ac         = 0.                    # Cloud core fraction [-]
-        self.M          = 0.                    # Cloud core mass flux [m s-1] 
-        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
-  
-        # initialize time variables
-        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
-        self.dt     = self.input.dt
-        self.dtcur      = self.dt
-        self.firsttime = True
-        self.t      = 0
- 
-        # Some sanity checks for valid input
-        if (self.c_beta is None): 
-            self.c_beta = 0                     # Zero curvature; linear response
-        assert(self.c_beta >= 0 or self.c_beta <= 1)
-
-        # initialize output
-        self.out = model_output(self.tsteps)
- 
-        self.statistics()
-  
-        # calculate initial diagnostic variables
-        if(self.sw_rad):
-            self.run_radiation()
- 
-        if(self.sw_sl):
-            for i in range(10): 
-                self.run_surface_layer()
-  
-        if(self.sw_ls):
-            self.run_land_surface()
-
-        if(self.sw_cu):
-            self.run_mixed_layer()
-            self.run_cumulus()
-        
-        if(self.sw_ml):
-            self.run_mixed_layer()
-
-    def timestep(self):
-
-        self.dtmax = +np.inf
-        self.logger.debug('before stats') 
-        self.statistics()
-
-        # run radiation model
-        self.logger.debug('before rad') 
-        if(self.sw_rad):
-            self.run_radiation()
-  
-        # run surface layer model
-        if(self.sw_sl):
-            self.logger.debug('before surface layer') 
-            self.run_surface_layer()
-        
-        # run land surface model
-        if(self.sw_ls):
-            self.logger.debug('before land surface') 
-            self.run_land_surface()
- 
-        # run cumulus parameterization
-        if(self.sw_cu):
-            self.logger.debug('before cumulus') 
-            self.run_cumulus()
-   
-        self.logger.debug('before mixed layer') 
-        # run mixed-layer model
-        if(self.sw_ml):
-            self.run_mixed_layer()
-        self.logger.debug('after mixed layer') 
- 
-        #get first profile data point above mixed layer
-        if self.sw_ap:
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                # here we correct for the fact that the upper profile also
-                # shifts in the vertical.
-
-                diffhtend = self.htend - self.air_ap.w[zidx_first]
-                if diffhtend > 0:
-                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            else:
-                if self.htend > 0:
-                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            #print(self.h,zidx_first,self.ws,self.air_ap.z)
-
-        
-        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
-        self.logger.debug('before store') 
-        self.substep =  (self.dtcur > self.dtmax)
-        if self.substep:
-            dtnext = self.dtcur - self.dtmax
-            self.dtcur = self.dtmax
-
-        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
-
-        # HW: this will be done multiple times in case of a substep is needed
-        # store output before time integration
-        if self.firsttime:
-            self.store()
-  
-        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
-        # time integrate land surface model
-        if(self.sw_ls):
-            self.integrate_land_surface()
-        self.logger.debug('before integrate mixed layer') 
-        # time integrate mixed-layer model
-        if(self.sw_ml):
-            self.integrate_mixed_layer() 
-        self.logger.debug('after integrate mixed layer') 
-        if self.substep:
-            self.dtcur = dtnext
-            self.firsttime = False
-            self.substeps += 1
-        else:
-            self.dtcur = self.dt
-            self.t += 1 
-            self.firsttime = True
-            self.substeps = 0
-        self.logger.debug('going to next step')
-        
-        
-  
-    def statistics(self):
-        # Calculate virtual temperatures 
-        self.thetav   = self.theta  + 0.61 * self.theta * self.q
-        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
-        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
-        # Mixed-layer top properties
-        self.P_h    = self.Ps - self.rho * self.g * self.h
-        # else:
-            # in the other case, it is updated at the time that the profile is
-            # updated (and at the initialization
-
-        self.T_h    = self.theta - self.g/self.cp * self.h
-
-        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
-        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
-
-        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
-
-        # Find lifting condensation level iteratively
-        if(self.t == 0):
-            self.lcl = self.h
-            RHlcl = 0.5
-        else:
-            RHlcl = 0.9998 
-
-        itmax = 30
-        it = 0
-        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0):
-            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
-            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
-        else:
-            self.q2_h   = 0.
-            self.CO22_h = 0.
-
-        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
-        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
-        self.M      = self.ac * self.wstar
-        self.wqM    = self.M * self.q2_h**0.5
-
-        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
-        if(self.dCO2 < 0):
-            self.wCO2M  = self.M * self.CO22_h**0.5
-        else:
-            self.wCO2M  = 0.
-
-    def run_mixed_layer(self):
-        if(not self.sw_sl):
-            # decompose ustar along the wind components
-            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
-            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
-
-
-
-        # calculate large-scale vertical velocity (subsidence)
-        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
-            self.ws = -self.divU * self.h
-        # else:
-        #     in case the air circulation switch is turned on, subsidence is
-        #     calculated from the circulate profile at the initialization and
-        #     in the integrate_mixed_layer routine
-              
-        # calculate compensation to fix the free troposphere in case of subsidence 
-        if(self.sw_fixft):
-            w_th_ft  = self.gammatheta * self.ws
-            w_q_ft   = self.gammaq     * self.ws
-            w_CO2_ft = self.gammaCO2   * self.ws 
-        else:
-            w_th_ft  = 0.
-            w_q_ft   = 0.
-            w_CO2_ft = 0. 
-      
-        # calculate mixed-layer growth due to cloud top radiative divergence
-        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
-       
-        # calculate convective velocity scale w* 
-        if(self.wthetav > 0.):
-            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
-        else:
-            self.wstar  = 1e-6;
-      
-        # Virtual heat entrainment flux 
-        self.wthetave    = -self.beta * self.wthetav 
-        
-        # compute mixed-layer tendencies
-        if(self.sw_shearwe):
-            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
-        else:
-            self.we    = -self.wthetave / self.dthetav
-        # Don't allow boundary layer shrinking if wtheta < 0 
-        if(self.we < 0):
-            self.we = 0.
-
-        # Calculate entrainment fluxes
-        self.wthetae     = -self.we * self.dtheta
-        self.wqe         = -self.we * self.dq
-        self.wCO2e       = -self.we * self.dCO2
-        
-        htend_pre       = self.we + self.ws + self.wf - self.M
-        
-        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-        
- 
-        #print('thetatend_pre',thetatend_pre)
-        
-        #preliminary boundary-layer top chenage
-        #htend_pre = self.we + self.ws + self.wf - self.M
-        #preliminary change in temperature jump
-        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
-                          thetatend_pre + w_th_ft
-        
-        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
-        l_entrainment = True
-
-        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
-            l_entrainment = False
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! temperature jump is at the lower limit \
-                          and is not growing: entrainment is disabled for this (sub)timestep.") 
-        elif dtheta_pre < 0.1:
-            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
-            l_entrainment = True
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          " Warning! Potential temperature jump at mixed- \
-                          layer height would become too low limiting timestep \
-                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
-            self.dtmax = min(self.dtmax,dtmax_new)
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "next subtimestep, entrainment will be disabled")
-            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
-
-
-
-        # when entrainment is disabled, we just use the simplified formulation
-        # as in Wouters et al., 2013 (section 2.2.1)
-
-        self.dthetatend = l_entrainment*dthetatend_pre + \
-                        (1.-l_entrainment)*0.
-        self.thetatend = l_entrainment*thetatend_pre + \
-                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
-        self.htend = l_entrainment*htend_pre + \
-                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
-        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
-        #stop
-
-
-        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
-        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
-
-
-        # self.qtend = l_entrainment*qtend_pre + \
-        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
-        # self.CO2tend = l_entrainment*CO2tend_pre + \
-        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-
-
-        #     # part of the timestep for which the temperature mixed-layer jump
-        #     # was changing, and for which entrainment took place. For the other
-        #     # part, we don't assume entrainment anymore, and we use the
-        #     # simplified formulation  of Wouters et al., 2013
-
-        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
-        #   
-        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
-        #                      self.dthetatend + w_th_ft) + \
-        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
-        #     self.htend = fac*self.htend + \
-        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
-        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
-        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-
-        # else:
-        #     #self.htend = htend_pre
-        #     self.dthetatend = dthetatend_pre
-        #     self.thetatend = thetatend_pre
-        
-        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
-        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
-     
-        # assume u + du = ug, so ug - u = du
-        if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
-  
-            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
-            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
-        
-        # tendency of the transition layer thickness
-        if(self.ac > 0 or self.lcl - self.h < 300):
-            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
-        else:
-            self.dztend = 0.
-
-   
-    def integrate_mixed_layer(self):
-        # set values previous time step
-        h0      = self.h
-        
-        theta0  = self.theta
-        dtheta0 = self.dtheta
-        q0      = self.q
-        dq0     = self.dq
-        CO20    = self.CO2
-        dCO20   = self.dCO2
-        
-        u0      = self.u
-        du0     = self.du
-        v0      = self.v
-        dv0     = self.dv
-
-        dz0     = self.dz_h
-  
-        # integrate mixed-layer equations
-        
-            
-
-# END -- HW 20170606        
-        self.h        = h0      + self.dtcur * self.htend
-        # print(self.h,self.htend)
-        # stop
-        self.theta    = theta0  + self.dtcur * self.thetatend
-        #print(dtheta0,self.dtcur,self.dthetatend)
-        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
-        self.q        = q0      + self.dtcur * self.qtend
-        self.dq       = dq0     + self.dtcur * self.dqtend
-        self.CO2      = CO20    + self.dtcur * self.CO2tend
-        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
-        self.dz_h     = dz0     + self.dtcur * self.dztend
-            
-        # Limit dz to minimal value
-        dz0 = 50
-        if(self.dz_h < dz0):
-            self.dz_h = dz0 
-  
-        if(self.sw_wind):
-            self.u        = u0      + self.dtcur * self.utend
-            self.du       = du0     + self.dtcur * self.dutend
-            self.v        = v0      + self.dtcur * self.vtend
-            self.dv       = dv0     + self.dtcur * self.dvtend
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-            for var in ['t','q','u','v']:
-                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
-
-            # take into account advection for the whole profile
-                
-                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
-
-            var = 'z'
-            #print(self.air_ap[var])
-                #     print(self.air_ap['adv'+var])
-
-
-
-
-            #moving the profile vertically according to the vertical wind
-                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
-
-
-            # air_apvarold = pd.Series(np.array(self.air_ap.z))
-            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
-            # stop
-
-
-                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
-                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
-
-            #As t is updated, we also need to recalculate theta (and R)
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-
-            # air_aptheta_old = pd.Series(self.air_ap['theta'])
-            self.air_ap['theta'] = \
-                        self.air_ap.t * \
-                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
-                                         self.dtcur * self.air_ap.w[zidx_first:]
-
-#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
-#            print(self.t, self.dtcur,self.dt,self.htend)
-
-            # # the pressure levels of the profiles are recalculated according to
-            # # there new height (after subsidence)
-            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
-            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
-            #         * self.dtcur *  self.air_ap.w[zidx_first:]
-
-            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
-                    self.dtcur * self.air_ap.wp[zidx_first:]
-
-            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
-        # note that theta and q itself are updatet by class itself
-
-    
-        if self.sw_ap:
-            # Just for model consistency preservation purposes, we set the
-            # theta variables of the mixed-layer to nan values, since the
-            # mixed-layer values should overwritte by the mixed-layer
-            # calculations of class.
-            self.air_ap['theta'][0:3] = np.nan 
-            self.air_ap['p'][0:3] = np.nan 
-            self.air_ap['q'][0:3] = np.nan 
-            self.air_ap['u'][0:3] = np.nan 
-            self.air_ap['v'][0:3] = np.nan 
-            self.air_ap['t'][0:3] = np.nan 
-            self.air_ap['z'][0:3] = np.nan 
-
-            # Update the vertical profiles: 
-            #   - new mixed layer properties( h, theta, q ...)
-            #   - any data points below the new ixed-layer height are removed
-
-            # Three data points at the bottom that describe the mixed-layer
-            # properties
-            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
-                                           # columns as air_ap
-            # air_ap_head['z'].iloc[0] = 2.
-            # air_ap_head['z'].iloc[1] = self.__dict__['h']
-            # air_ap_head['z'].iloc[2] = self.__dict__['h']
-            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
-                        [2.,self.__dict__['h'],self.__dict__['h']]
-            for var in ['theta','q','u','v']:
-
-                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
-                        [self.__dict__[var], \
-                         self.__dict__[var], \
-                         self.__dict__[var] + self.__dict__['d'+var]]
-                
-            #print(self.air_ap)
-
-            # This is the remaining profile considering the remaining
-            # datapoints above the mixed layer height
-            air_ap_tail = self.air_ap.iloc[3:]
-            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
-
-            # print('h',self.h)
-            # # only select samples monotonically increasing with height
-            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            # air_ap_tail = pd.DataFrame()
-            # theta_low = self.theta
-            # z_low =     self.h
-            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            # for ibottom in range(1,len(air_ap_tail_orig)):
-            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
-            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-
-
-            # make theta increase strong enough to avoid numerical
-            # instability
-            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            air_ap_tail = pd.DataFrame()
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            theta_low = self.theta
-            z_low =     self.h
-            ibottom = 0
-            itop = 0
-            # print(air_ap_tail_orig)
-            # stop
-
-            # HW: this is the lower limit that we use for gammatheta, which is
-            # there to avoid model crashes. Besides on this limit, the upper
-            # air profile is modified in a way that is still conserves total
-            # quantities of moisture and temperature. The limit is set by trial
-            # and error. The numerics behind the crash should be investigated
-            # so that a cleaner solution can be provided.
-            gammatheta_lower_limit = 0.002
-            while ((itop in range(0,1)) or (itop != ibottom)):
-                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-                if (
-                    #(z_mean > (z_low+0.2)) and \
-                    #(theta_mean > (theta_low+0.02) ) and \
-                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
-                  (itop >= (len(air_ap_tail_orig)-1)) \
-                   :
-
-                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                    ibottom = itop+1
-                    theta_low = air_ap_tail.theta.iloc[-1]
-                    z_low =     air_ap_tail.z.iloc[-1]
-    
-
-                itop +=1
-                # elif  (itop > len(air_ap_tail_orig)-10):
-                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-                #print(itop,ibottom)
-
-            if itop > 1:
-                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! Temperature profile was too steep. \
-                                  Modifying profile: "+ \
-                                  str(itop - 1)+ " measurements were dropped \
-                                  and replaced with its average \
-                                  Modifying profile. \
-                                  mean with next profile point(s).") 
-
-
-            self.air_ap = pd.concat((air_ap_head,\
-                                     air_ap_tail,\
-                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
-                                                                      axis=1)
-
-            if  self.sw_ac:
-                qvalues = \
-                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
-
-                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
-                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
-                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-                self.P_h    = self.Ps - self.rho * self.g * self.h
-                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
-                        [self.Ps,  self.P_h, self.P_h-0.1]
-
-                self.air_ap.t = \
-                            self.air_ap.theta * \
-                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
-
-
-        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
-
-
-
-
-        # else:
-            # in the other case, it is updated at the time the statistics are
-            # calculated 
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if in_ml.sum() == 0:
-                warnings.warn(" no circulation points in the mixed layer \
-                              found. We just take the bottom one.")
-                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-            for var in ['t','q','u','v']:
-
-                # calculation of the advection variables for the mixed-layer
-                # these will be used for the next timestep
-                # Warning: w is excluded for now.
-
-                self.__dict__['adv'+var] = \
-                        ((self.air_ac['adv'+var+'_x'][in_ml] \
-                         + \
-                         self.air_ac['adv'+var+'_y'][in_ml])* \
-                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                        self.air_ac['delpdgrav'][in_ml].sum()
-
-                # calculation of the advection variables for the profile above
-                # the mixed layer (also for the next timestep)
-                self.air_ap['adv'+var] = \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p,\
-                                              self.air_ac['adv'+var+'_x']) \
-                                    + \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p, \
-                                              self.air_ac['adv'+var+'_y'])
-                # if var == 't':
-                #     print(self.air_ap['adv'+var])
-                #     stop
-
-            # as an approximation, we consider that advection of theta in the
-            # mixed layer is equal to advection of t. This is a sufficient
-            # approximation since theta and t are very similar at the surface
-            # pressure.
-
-            self.__dict__['advtheta'] = self.__dict__['advt']
-
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            # update the vertical wind profile
-            self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                          self.air_ac.p, \
-                                          self.air_ac['wp'])
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-            
-            air_apwold = self.air_ap['w']
-            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-            #print('hello w upd')
-
-            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # # self.wrho = np.interp(self.P_h,\
-            # #                      self.air_ach.p,\
-            # #                      self.air_ach['wrho']) \
-
-
-
-            # Also update the vertical wind at the mixed-layer height
-            # (subsidence)
-            self.ws   = self.air_ap.w[1]
-        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
-
-            ## Finally, we update he 
-            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-            #                        + \
-            #                        self.air_ac['divU_y'][in_ml])* \
-            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-            #            self.air_ac['delpdgrav'][in_ml].sum() 
-            
-
-        if self.sw_ap:
-            for var in ['theta','q','u','v']:
-
-                # update of the slope (gamma) for the different variables, for
-                # the next timestep!
-
-                # there is an warning message that tells about dividing through
-                # zero, which we ignore
-
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                    # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap['gamma'+var] = gammavar
-
-                # Based on the above, update the gamma value at the mixed-layer
-                # top
-                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
-                                                                     self.air_ap.z)[0][-1]]
-
-            
-    def run_radiation(self):
-        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
-        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
-        sinlea = max(sinlea, 0.0001)
-        
-        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
-  
-        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
-  
-        self.Swin  = self.S0 * Tr * sinlea
-        self.Swout = self.alpha * self.S0 * Tr * sinlea
-        
-        
-        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
-        self.Lwout = self.bolz * self.Ts ** 4.
-          
-        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
-        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
-  
-    def run_surface_layer(self):
-        # HW: I had to raise the minimum wind speed to make the simulation with
-        # the non-iterative solution stable (this solution was a wild guess, so I don't
-        # know the exact problem of the instability in case of very low wind
-        # speeds yet)
-        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        # version of 20180730 where there are still some runs crashing. Maybe
-        # an upper limit should be set on the monin-obukhov length instead of
-        # a lower limmit on the wind speed?
-        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        
-        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
-        qsatsurf       = qsat(self.thetasurf, self.Ps)
-        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
-        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
-
-        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
-  
-        zsl       = 0.1 * self.h
-        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
-        
-
-
-        if self.sw_lit:
-            self.Rib  = min(self.Rib, 0.2)
-            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
-            self.zeta  = zsl/self.L
-            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
-            
-        
-            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
-            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
-            
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-        
-     
-            # diagnostic meteorological variables
-            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
-            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
-            
-            # diagnostic meteorological variables
-        else:
-            
-            ## circumventing any iteration with Wouters et al., 2012
-            self.zslz0m = np.max((zsl/self.z0m,10.))
-            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
-            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
-            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
-            self.L = zsl/self.zeta
-            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
-        
-            self.Cm = self.k**2.0/funm/funm
-            self.Cs = self.k**2.0/funm/funh
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-            
-            # extrapolation from mixed layer (instead of from surface) to 2meter
-            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
-            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
-            self.u2m    =                - self.uw     / self.ustar / self.k * funm
-            self.v2m    =                - self.vw     / self.ustar / self.k * funm
-        
-        
-        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
-        self.e2m    = self.q2m * self.Ps / 0.622
-     
-    def ribtol(self, Rib, zsl, z0m, z0h): 
-        if(Rib > 0.):
-            L    = 1.
-            L0   = 2.
-        else:
-            L  = -1.
-            L0 = -2.
-        #print(Rib,zsl,z0m,z0h)
-        
-        while (abs(L - L0) > 0.001):
-            L0      = L
-            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
-            Lstart  = L - 0.001*L
-            Lend    = L + 0.001*L
-            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
-                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
-                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
-                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-            L       = L - fx / fxdif
-            #print(L)
-            if(abs(L) > 1e12):
-                break
-
-        return L
-      
-    def psim(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psim = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-        return psim
-      
-    def psih(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psih  = 2. * np.log( (1. + x*x) / 2.)
-            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-        return psih
- 
-    def jarvis_stewart(self):
-        # calculate surface resistances using Jarvis-Stewart model
-        if(self.sw_rad):
-            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
-        else:
-            f1 = 1.
-  
-        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
-            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
-        else:
-            f2 = 1.e8
- 
-        # Limit f2 in case w2 > wfc, where f2 < 1
-        f2 = max(f2, 1.);
- 
-        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
-        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
-  
-        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
-
-    def factorial(self,k):
-        factorial = 1
-        for n in range(2,k+1):
-            factorial = factorial * float(n)
-        return factorial;
-
-    def E1(self,x):
-        E1sum = 0
-        for k in range(1,100):
-            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
-        return -0.57721566490153286060 - np.log(x) - E1sum
- 
-    def ags(self):
-        # Select index for plant type
-        if(self.c3c4 == 'c3'):
-            c = 0
-        elif(self.c3c4 == 'c4'):
-            c = 1
-        else:
-            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)
-
-        # calculate CO2 compensation concentration
-        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  
-
-        # calculate mesophyll conductance
-        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
-                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
-        gm            = gm / 1000. # conversion from mm s-1 to m s-1
-  
-        # calculate CO2 concentration inside the leaf (ci)
-        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
-        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
-  
-        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
-        D0            = (self.f0[c] - fmin) / self.ad[c]
-  
-        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
-        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
-        ci            = cfrac * (co2abs - CO2comp) + CO2comp
-  
-        # calculate maximal gross primary production in high light conditions (Ag)
-        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
-  
-        # calculate effect of soil moisture stress on gross assimilation rate
-        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
-  
-        # calculate stress function
-        if (self.c_beta == 0):
-            fstr = betaw;
-        else:
-            # Following Combe et al (2016)
-            if (self.c_beta < 0.25):
-                P = 6.4 * self.c_beta
-            elif (self.c_beta < 0.50):
-                P = 7.6 * self.c_beta - 0.3
-            else:
-                P = 2**(3.66 * self.c_beta + 0.34) - 1
-            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
-  
-        # calculate gross assimilation rate (Am)
-        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
-        Rdark        = (1. / 9.) * Am
-        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
-  
-        # calculate  light use efficiency
-        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
-  
-        # calculate gross primary productivity
-        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
-  
-        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
-        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
-        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
-  
-        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
-        a1           = 1. / (1. - self.f0[c])
-        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
-  
-        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
-  
-        # calculate surface resistance for moisture and carbon dioxide
-        self.rs      = 1. / (1.6 * gcco2)
-        rsCO2        = 1. / gcco2
-  
-        # calculate net flux of CO2 into the plant (An)
-        An           = -(co2abs - ci) / (self.ra + rsCO2)
-  
-        # CO2 soil surface flux
-        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
-        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
-  
-        # CO2 flux
-        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
-        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
-        self.wCO2    = self.wCO2A + self.wCO2R
- 
-    def run_land_surface(self):
-        # compute ra
-        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
-        #print('ueff',self.u,self.v,self.wstar)
-
-        if(self.sw_sl):
-          self.ra = (self.Cs * ueff)**-1.
-        else:
-          self.ra = ueff / max(1.e-3, self.ustar)**2.
-
-        #print('ra',self.ra,self.ustar,ueff)
-
-        # first calculate essential thermodynamic variables
-        self.esat    = esat(self.theta)
-        self.qsat    = qsat(self.theta, self.Ps)
-        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
-        self.dqsatdT = 0.622 * desatdT / self.Ps
-        self.e       = self.q * self.Ps / 0.622
-
-        if(self.ls_type == 'js'): 
-            self.jarvis_stewart() 
-        elif(self.ls_type == 'ags'):
-            self.ags()
-        else:
-            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)
-
-        # recompute f2 using wg instead of w2
-        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
-          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
-        else:
-          f2        = 1.e8
-        self.rssoil = self.rssoilmin * f2 
- 
-        Wlmx = self.LAI * self.Wmax
-        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
-        self.cliq = min(1., self.Wl / Wlmx) 
-     
-        # calculate skin temperature implictly
-        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
-            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
-            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
-            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)
-
-        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
-        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
-        #print('Ts',self.rs)
-
-        esatsurf      = esat(self.Ts)
-        self.qsatsurf = qsat(self.Ts, self.Ps)
-
-        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-  
-        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
-  
-        self.LE     = self.LEsoil + self.LEveg + self.LEliq
-        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
-        #print('H',self.ra,self.Ts,self.theta)
-        self.G      = self.Lambda * (self.Ts - self.Tsoil)
-        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
-        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
-        
-        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
-  
-        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
-   
-        d1          = 0.1
-        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
-        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
-        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
-        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
-  
-        # calculate kinematic heat fluxes
-        self.wtheta   = self.H  / (self.rho * self.cp)
-        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
-        self.wq       = self.LE / (self.rho * self.Lv)
- 
-    def integrate_land_surface(self):
-        # integrate soil equations
-        Tsoil0        = self.Tsoil
-        wg0           = self.wg
-        Wl0           = self.Wl
-  
-        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
-        self.wg       = wg0     + self.dtcur * self.wgtend
-        self.Wl       = Wl0     + self.dtcur * self.Wltend
-  
-    # store model output
-    def store(self):
-        t                      = self.t
-        
-        self.out.time[t]          = t * self.dt / 3600. + self.tstart
-
-        # in case we are at the end of the simulation, we store the vertical
-        # profiles to the output
-        
-        # if t == (len(self.out.time) - 1):
-        #     self.out.air_ac = self.air_ac
-        #     self.out.air_ap = self.air_ap
-
-        
-        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
-        #  for key in self.out.__dict__.keys():
-        #      if key in self.__dict__:
-        #          self.out.__dict__[key][t]  = self.__dict__[key]
-        
-        self.out.h[t]          = self.h
-        
-        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
-        
-        self.out.gammatheta[t] = self.gammatheta
-        self.out.gammau[t]     = self.gammau
-        self.out.gammav[t]     = self.gammav
-        self.out.gammaq[t]     = self.gammaq
-        self.out.theta[t]      = self.theta
-        self.out.thetav[t]     = self.thetav
-        self.out.dtheta[t]     = self.dtheta
-        self.out.dthetav[t]    = self.dthetav
-        self.out.wtheta[t]     = self.wtheta
-        self.out.wthetav[t]    = self.wthetav
-        self.out.wthetae[t]    = self.wthetae
-        self.out.wthetave[t]   = self.wthetave
-        
-        self.out.q[t]          = self.q
-        self.out.dq[t]         = self.dq
-        self.out.wq[t]         = self.wq
-        self.out.wqe[t]        = self.wqe
-        self.out.wqM[t]        = self.wqM
-      
-        self.out.qsat[t]       = self.qsat
-        self.out.e[t]          = self.e
-        self.out.esat[t]       = self.esat
-      
-        fac = (self.rho*self.mco2)/self.mair
-        self.out.CO2[t]        = self.CO2
-        self.out.dCO2[t]       = self.dCO2
-        self.out.wCO2[t]       = self.wCO2  * fac
-        self.out.wCO2e[t]      = self.wCO2e * fac
-        self.out.wCO2R[t]      = self.wCO2R * fac
-        self.out.wCO2A[t]      = self.wCO2A * fac
-
-        self.out.u[t]          = self.u
-        self.out.du[t]         = self.du
-        self.out.uw[t]         = self.uw
-        
-        self.out.v[t]          = self.v
-        self.out.dv[t]         = self.dv
-        self.out.vw[t]         = self.vw
-        
-        self.out.T2m[t]        = self.T2m
-        self.out.q2m[t]        = self.q2m
-        self.out.u2m[t]        = self.u2m
-        self.out.v2m[t]        = self.v2m
-        self.out.e2m[t]        = self.e2m
-        self.out.esat2m[t]     = self.esat2m
-
-
-        self.out.Tsoil[t]      = self.Tsoil
-        self.out.T2[t]         = self.T2
-        self.out.Ts[t]         = self.Ts
-        self.out.wg[t]         = self.wg
-        
-        self.out.thetasurf[t]  = self.thetasurf
-        self.out.thetavsurf[t] = self.thetavsurf
-        self.out.qsurf[t]      = self.qsurf
-        self.out.ustar[t]      = self.ustar
-        self.out.Cm[t]         = self.Cm
-        self.out.Cs[t]         = self.Cs
-        self.out.L[t]          = self.L
-        self.out.Rib[t]        = self.Rib
-  
-        self.out.Swin[t]       = self.Swin
-        self.out.Swout[t]      = self.Swout
-        self.out.Lwin[t]       = self.Lwin
-        self.out.Lwout[t]      = self.Lwout
-        self.out.Q[t]          = self.Q
-  
-        self.out.ra[t]         = self.ra
-        self.out.rs[t]         = self.rs
-        self.out.H[t]          = self.H
-        self.out.LE[t]         = self.LE
-        self.out.LEliq[t]      = self.LEliq
-        self.out.LEveg[t]      = self.LEveg
-        self.out.LEsoil[t]     = self.LEsoil
-        self.out.LEpot[t]      = self.LEpot
-        self.out.LEref[t]      = self.LEref
-        self.out.G[t]          = self.G
-
-        self.out.zlcl[t]       = self.lcl
-        self.out.RH_h[t]       = self.RH_h
-
-        self.out.ac[t]         = self.ac
-        self.out.M[t]          = self.M
-        self.out.dz[t]         = self.dz_h
-        self.out.substeps[t]   = self.substeps
-  
-    # delete class variables to facilitate analysis in ipython
-    def exitmodel(self):
-        del(self.Lv)
-        del(self.cp)
-        del(self.rho)
-        del(self.k)
-        del(self.g)
-        del(self.Rd)
-        del(self.Rv)
-        del(self.bolz)
-        del(self.S0)
-        del(self.rhow)
-  
-        del(self.t)
-        del(self.dt)
-        del(self.tsteps)
-         
-        del(self.h)          
-        del(self.Ps)        
-        del(self.fc)        
-        del(self.ws)
-        del(self.we)
-        
-        del(self.theta)
-        del(self.dtheta)
-        del(self.gammatheta)
-        del(self.advtheta)
-        del(self.beta)
-        del(self.wtheta)
-    
-        del(self.T2m)
-        del(self.q2m)
-        del(self.e2m)
-        del(self.esat2m)
-        del(self.u2m)
-        del(self.v2m)
-        
-        del(self.thetasurf)
-        del(self.qsatsurf)
-        del(self.thetav)
-        del(self.dthetav)
-        del(self.thetavsurf)
-        del(self.qsurf)
-        del(self.wthetav)
-        
-        del(self.q)
-        del(self.qsat)
-        del(self.dqsatdT)
-        del(self.e)
-        del(self.esat)
-        del(self.dq)
-        del(self.gammaq)
-        del(self.advq)
-        del(self.wq)
-        
-        del(self.u)
-        del(self.du)
-        del(self.gammau)
-        del(self.advu)
-        
-        del(self.v)
-        del(self.dv)
-        del(self.gammav)
-        del(self.advv)
-  
-        del(self.htend)
-        del(self.thetatend)
-        del(self.dthetatend)
-        del(self.qtend)
-        del(self.dqtend)
-        del(self.utend)
-        del(self.dutend)
-        del(self.vtend)
-        del(self.dvtend)
-     
-        del(self.Tsoiltend) 
-        del(self.wgtend)  
-        del(self.Wltend) 
-  
-        del(self.ustar)
-        del(self.uw)
-        del(self.vw)
-        del(self.z0m)
-        del(self.z0h)        
-        del(self.Cm)         
-        del(self.Cs)
-        del(self.L)
-        del(self.Rib)
-        del(self.ra)
-  
-        del(self.lat)
-        del(self.lon)
-        del(self.doy)
-        del(self.tstart)
-   
-        del(self.Swin)
-        del(self.Swout)
-        del(self.Lwin)
-        del(self.Lwout)
-        del(self.cc)
-  
-        del(self.wg)
-        del(self.w2)
-        del(self.cveg)
-        del(self.cliq)
-        del(self.Tsoil)
-        del(self.T2)
-        del(self.a)
-        del(self.b)
-        del(self.p)
-        del(self.CGsat)
-  
-        del(self.wsat)
-        del(self.wfc)
-        del(self.wwilt)
-  
-        del(self.C1sat)
-        del(self.C2ref)
-  
-        del(self.LAI)
-        del(self.rs)
-        del(self.rssoil)
-        del(self.rsmin)
-        del(self.rssoilmin)
-        del(self.alpha)
-        del(self.gD)
-  
-        del(self.Ts)
-  
-        del(self.Wmax)
-        del(self.Wl)
-  
-        del(self.Lambda)
-        
-        del(self.Q)
-        del(self.H)
-        del(self.LE)
-        del(self.LEliq)
-        del(self.LEveg)
-        del(self.LEsoil)
-        del(self.LEpot)
-        del(self.LEref)
-        del(self.G)
-  
-        del(self.sw_ls)
-        del(self.sw_rad)
-        del(self.sw_sl)
-        del(self.sw_wind)
-        del(self.sw_shearwe)
-
-# class for storing mixed-layer model output data
-class model_output:
-    def __init__(self, tsteps):
-        self.time          = np.zeros(tsteps)    # time [s]
-
-        # mixed-layer variables
-        self.h          = np.zeros(tsteps)    # ABL height [m]
-        
-        self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammatheta = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammaq     = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammau     = np.zeros(tsteps)
-        self.gammav     = np.zeros(tsteps)
-        self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
-        self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
-        self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
-        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
-        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
-        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
-        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
-        
-        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
-        self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
-        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
-        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]
-
-        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
-        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
-        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]
-
-        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
-        self.dCO2       = np.zeros(tsteps)    # initial CO2 jump at h [ppm]
-        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
-        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
-        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
-        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
-        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
-        
-        self.u          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
-        self.du         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
-        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
-        
-        self.v          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
-        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]
-
-        # diagnostic meteorological variables
-        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
-        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
-        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
-        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
-        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
-        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
-
-        # ground variables
-        self.Tsoil       = np.zeros(tsteps)
-        self.T2          = np.zeros(tsteps)
-        self.Ts          = np.zeros(tsteps)
-        self.wg          = np.zeros(tsteps)
-
-        # surface-layer variables
-        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
-        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
-        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
-        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
-        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
-        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
-        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
-        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
-        self.L          = np.zeros(tsteps)    # Obukhov length [m]
-        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]
-
-        # radiation variables
-        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
-        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
-        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
-        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
-        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]
-
-        # land surface variables
-        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
-        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
-        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
-        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
-        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
-        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
-        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
-        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
-        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
-        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]
-
-        # Mixed-layer top variables
-        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
-        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]
-
-        # cumulus variables
-        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
-        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
-        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
-        
-        
-        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
-
-# class for storing mixed-layer model input data
-class model_input:
-    def __init__(self):
-
-        # # comment not valid
-        # we comment out the initialization, because there is a problem when
-        # inheriting values from one the another class4gl_iput. We also expect
-        # that the user specifies all the required parmameters (if not, an error
-        # is raised). 
-
-        # general model variables
-        self.runtime    = None  # duration of model run [s]
-        self.dt         = None  # time step [s]
-
-        # mixed-layer variables
-        self.sw_ml      = None  # mixed-layer model switch
-        self.sw_shearwe = None  # Shear growth ABL switch
-        self.sw_fixft   = None  # Fix the free-troposphere switch
-        self.h          = None  # initial ABL height [m]
-        self.Ps         = None  # surface pressure [Pa]
-        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
-        self.fc         = None  # Coriolis parameter [s-1]
-        
-        self.theta      = None  # initial mixed-layer potential temperature [K]
-        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
-
-        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
-
-        self.dtheta     = None  # initial temperature jump at h [K]
-        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = None  # advection of heat [K s-1]
-        self.beta       = None  # entrainment ratio for virtual heat [-]
-        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
-        
-        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
-        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
-        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
-
-        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = None  # advection of moisture [kg kg-1 s-1]
-        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
-
-        self.CO2        = None  # initial mixed-layer potential temperature [K]
-        self.dCO2       = None  # initial temperature jump at h [K]
-        self.gammaCO2   = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advCO2     = None  # advection of heat [K s-1]
-        self.wCO2       = None  # surface kinematic heat flux [K m s-1]
-        
-        self.sw_wind    = None  # prognostic wind switch
-        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.du         = None  # initial u-wind jump at h [m s-1]
-        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = None  # advection of u-wind [m s-2]
-
-        self.v          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = None  # initial u-wind jump at h [m s-1]
-        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = None  # advection of v-wind [m s-2]
-
-        # surface layer variables
-        self.sw_sl      = None  # surface layer switch
-        self.ustar      = None  # surface friction velocity [m s-1]
-        self.z0m        = None  # roughness length for momentum [m]
-        self.z0h        = None  # roughness length for scalars [m]
-        self.Cm         = None  # drag coefficient for momentum [-]
-        self.Cs         = None  # drag coefficient for scalars [-]
-        self.L          = None  # Obukhov length [-]
-        self.Rib        = None  # bulk Richardson number [-]
-
-        # radiation parameters
-        self.sw_rad     = None  # radiation switch
-        self.lat        = None  # latitude [deg]
-        self.lon        = None  # longitude [deg]
-        self.doy        = None  # day of the year [-]
-        self.tstart     = None  # time of the day [h UTC]
-        self.cc         = None  # cloud cover fraction [-]
-        self.Q          = None  # net radiation [W m-2] 
-        self.dFz        = None  # cloud top radiative divergence [W m-2] 
-
-        # land surface parameters
-        self.sw_ls      = None  # land surface switch
-        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
-        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = None  # temperature top soil layer [K]
-        self.T2         = None  # temperature deeper soil layer [K]
-        
-        self.a          = None  # Clapp and Hornberger retention curve parameter a
-        self.b          = None  # Clapp and Hornberger retention curve parameter b
-        self.p          = None  # Clapp and Hornberger retention curve parameter p 
-        self.CGsat      = None  # saturated soil conductivity for heat
-        
-        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
-        self.wfc        = None  # volumetric water content field capacity [-]
-        self.wwilt      = None  # volumetric water content wilting point [-]
-        
-        self.C1sat      = None 
-        self.C2ref      = None
-
-        self.c_beta     = None  # Curvatur plant water-stress factor (0..1) [-]
-        
-        self.LAI        = None  # leaf area index [-]
-        self.gD         = None  # correction factor transpiration for VPD [-]
-        self.rsmin      = None  # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = None  # surface albedo [-]
-        
-        self.Ts         = None  # initial surface temperature [K]
-        
-        self.cveg       = None  # vegetation fraction [-]
-        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
-        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
-        
-        self.Lambda     = None  # thermal diffusivity skin layer [-]
-
-        # A-Gs parameters
-        self.c3c4       = None  # Plant type ('c3' or 'c4')
-
-        # Cumulus parameters
-        self.sw_cu      = None  # Cumulus parameterization switch
-        self.dz_h       = None  # Transition layer thickness [m]
-        
-# BEGIN -- HW 20171027
-        # self.cala       = None      # soil heat conductivity [W/(K*m)]
-        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
-# END -- HW 20171027
diff --git a/lib/ribtol/Makefile b/lib/ribtol/Makefile
deleted file mode 100644
index e23e3e1..0000000
--- a/lib/ribtol/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-ribtol.so : ribtol.o
-	g++ -O3 -shared -Wl -z -def -o ribtol.so -lpython2.6 -lboost_python ribtol.o
-
-ribtol.o : ribtol.cpp
-	g++ -c -O3 -fPIC ribtol.cpp -I/usr/include/python2.6
-
-clean : 
-	rm -rf ribtol.o ribtol.so
diff --git a/lib/ribtol/MakefileMac b/lib/ribtol/MakefileMac
deleted file mode 100644
index bf34ea8..0000000
--- a/lib/ribtol/MakefileMac
+++ /dev/null
@@ -1,9 +0,0 @@
-# Note: boost-python needs to be installed: brew install boost-python -with-python3 -without-python
-ribtol.so : ribtol.o
-	clang++ -O3 -shared -o ribtol.so -L/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib -lpython3.6m -L/usr/local/lib -lboost_python3-mt -lpython ribtol.o
-
-ribtol.o : ribtol.cpp
-	clang++ -c -O3 -fPIC ribtol.cpp -I/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/include/python3.6m -I/usr/local/include
-
-clean : 
-	rm -rf ribtol.o ribtol.so
diff --git a/lib/ribtol/ribtol.cpp b/lib/ribtol/ribtol.cpp
deleted file mode 100644
index 148b0d3..0000000
--- a/lib/ribtol/ribtol.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// fast conversion of bulk Richardson number to Obukhov length
-
-#include 
-#include 
-#include 
-using namespace std;
-
-inline double psim(double zeta)
-{
-  double psim;
-  double x;
-  if(zeta <= 0.)
-  {
-    //x     = (1. - 16. * zeta) ** (0.25)
-    //psim  = 3.14159265 / 2. - 2. * arctan(x) + log( (1.+x) ** 2. * (1. + x ** 2.) / 8.)
-    x    = pow(1. + pow(3.6 * abs(zeta),2./3.), -0.5);
-    psim = 3. * log( (1. + 1. / x) / 2.);
-  }
-  else
-  {
-    psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35;
-  }
-  return psim;
-}
-    
-inline double psih(double zeta)
-{
-  double psih;
-  double x;
-  if(zeta <= 0.)
-  {
-    // x     = (1. - 16. * zeta) ** (0.25)
-    // psih  = 2. * log( (1. + x ** 2.) / 2. )
-    x     = pow(1. + pow(7.9 * abs(zeta), (2./3.)), -0.5);
-    psih  = 3. * log( (1. + 1. / x) / 2.);
-  }
-  else
-  {
-    psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - pow(1. + (2./3.) * zeta, 1.5) - (10./3.) / 0.35 + 1.;
-  }
-  return psih;
-}
-
-
-double ribtol(double Rib, double zsl, double z0m, double z0h)
-{
-  double L, L0;
-  double Lstart, Lend;
-  double fx, fxdif;
-
-  if(Rib > 0.)
-  {
-    L    = 1.;
-    L0   = 2.;
-  }
-  else
-  {
-    L  = -1.;
-    L0 = -2.;
-  }
-    
-  while (abs(L - L0) > 0.001)
-  {
-    L0      = L;
-    fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / pow(log(zsl / z0m) - psim(zsl / L) + psim(z0m / L), 2.);
-    Lstart  = L - 0.001 * L;
-    Lend    = L + 0.001 * L;
-    fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / pow(log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart), 2.)) - (-zsl / Lend * (log(zsl / z0h) - psih(zsl / Lend) + psih(z0h / Lend)) / pow(log(zsl / z0m) - psim(zsl / Lend) + psim(z0m / Lend), 2.)) ) / (Lstart - Lend);
-    L       = L - fx / fxdif;
-  }
-  
-  return L;
-
-}
-
-BOOST_PYTHON_MODULE(ribtol)
-{
-    using namespace boost::python;
-    def("ribtol", ribtol);
-}
-
diff --git a/lib/ribtol/ribtol.pyx b/lib/ribtol/ribtol.pyx
deleted file mode 100644
index e11a147..0000000
--- a/lib/ribtol/ribtol.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-#cython: boundscheck=False
-#cython: wraparound=False
-
-from libc.math cimport atan, log, exp, fabs
-
-cdef double psim(double zeta):
-    cdef double x, psim
-
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * atan(x) + log((1. + x)**2. * (1. + x**2.) / 8.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-      
-cdef double psih(double zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * log( (1. + x*x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
-
-def ribtol(double Rib, double zsl, double z0m, double z0h): 
-    cdef double L, L0, fx, Lstart, Lend, fxdif
-
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    
-    while (fabs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-
-        if(fabs(L) > 1e15):
-            break
-
-    return L
diff --git a/lib/ribtol/ribtol_hw.py b/lib/ribtol/ribtol_hw.py
deleted file mode 100644
index 1946cc8..0000000
--- a/lib/ribtol/ribtol_hw.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Jan 12 10:46:20 2018
-
-@author: vsc42247
-"""
-
-
-
-# purpose of calc_cm_ch: calculate momentum and thermal turbulent diffusion coefficients of the surface layer with a non-iterative procedure (Wouters et al., 2012)
-
-# input:
-
-# zrib = bulk Richardson number = (g/T)* DT * z/(ua^2)
-#   with:
-#     g = 9.81 m/s2 the gravitational acceleration
-#     z = height (in meters) of the surface layer under consideration 
-#     T = (reference) temperature (in Kelvin) at height z 
-#     DT = (T - T_s) = temperature (in Kelvin) gradient between the surface and height z 
-#     u_a^2 = u^2 +  v^2 is the squared horizontal absolute wind speed 
-# zzz0m = ratio z/z0 between the height z and the momentum roughness length z0m
-# zkbm = ln(z0m/z0h), with z0m, z0h the momentum and thermal roughness length, respectively.
-
-# output: diffusion coefficients (CM and CH) which cna be used to determine surface-layer turbulent transport
-# u'w' = - CM ua^2.
-# w'T' = - CH ua DT 
-
-
-# Reference:
-# Wouters, H., De Ridder, K., and Lipzig, N. P. M.: Comprehensive
-# Parametrization of Surface-Layer Transfer Coefficients for Use
-# in Atmospheric Numerical Models, Bound.-Lay. Meteorol., 145,
-# 539–550, doi:10.1007/s10546-012-9744-3, 2012.
-
-import numpy as np
-
-def calc_cm_ch (zeta,zzz0m,zkbm):
-    krm = 0.4
-
-    #ZETA = zeta_hs2(zrib,zzz0m,zkbm)
-    FUNM,FUNH = funcsche(ZETA,zzz0m,zkbm)
-    CM = krm**2.0/FUNM/FUNM
-    CH = krm**2.0/FUNM/FUNH
-
-    # FUNMn,FUNHn = funcsche(0.,zzz0m,zkbm)
-    # CMn = krm**2.0/FUNMn/FUNMn
-    # CHn = krm**2.0/FUNMn/FUNHn
-
-    # print ZETA,FUNM,FUNH
-    # print 'CMCMN',CM/CMn
-    # print 'CHCHN',CH/CHn
-
-    return CM,CH
-
-
-def zeta_hs2(RiB,zzz0m,kBmin1):
-    #print(RiB,zzz0m,kBmin1)
-    mum=2.59
-    muh=0.95
-    nu=0.5
-    lam=1.5
-
-    betah = 5.0
-
-    zzz0h = zzz0m*np.exp(kBmin1)
-    zzzs = zzz0m*0.06 # to be changed!! r. 101 nog bekijken!!
-
-    L0M = np.log(zzz0m)
-    L0H = np.log(zzz0h)
-    facM = np.log(1.+lam/mum/zzzs)*np.exp(-mum*zzzs)/lam
-    facH = np.log(1.+lam/muh/zzzs)*np.exp(-muh*zzzs)/lam
-    L0Ms = L0M + facM 
-    L0Hs = L0H + facH
-
-    if RiB < 0.:
-        p = np.log(1.-RiB)
-        Q = -0.486 +0.219*p - 0.0331*p**2-4.93*np.exp(-L0H) - 3.65/L0H +\
-            0.38*p/L0H+ 14.8/L0H/L0H-0.946*p/L0H/L0H-10.0/L0H**3+ \
-            0.392*L0M/L0H-0.084*p*L0M/L0H+0.368*L0M/L0H/L0H
-        # print 'p: ',p
-        # print 'Q: ',Q
-        zeta = (1. + p*Q)* L0Ms**2/L0Hs * RiB
-    else:
-        betam = 4.76+7.03/zzz0m +0.24*zzz0m/zzz0h # to be changed
-        # betam = 5.0 + 1.59*10.**(-5.)*(np.exp(13.0-L0M)-1.0) \
-        #         +0.24*(np.exp(-kBmin1)-1.0) # to be changed!!
-        # print('betam',betam)
-        lL0M = np.log(L0M)
-        S0Ms = 1.-1./zzz0m + (1.+nu/mum/zzzs)*facM
-        S0Hs = 1.-1./zzz0h + (1.+nu/muh/zzzs)*facH
-        zetat = -0.316-0.515*np.exp(-L0H) + 25.8 *np.exp(-2.*L0H) + 4.36/L0H \
-                -6.39/L0H/L0H+0.834*lL0M - 0.0267*lL0M**2
-        # print('zetat',zetat)
-        RiBt = zetat *(L0Hs+ S0Hs*betah*zetat)/(L0Ms+S0Ms*betam*zetat)**2 
-        # print('RiBt',RiBt)
-
-        if (RiB > RiBt):
-            D = (L0Ms+S0Ms*betam*zetat)**3/\
-                (L0Ms*L0Hs+zetat*(2.*S0Hs * betah * L0Ms - S0Ms*betam*L0Hs))
-            zeta = zetat + D*(RiB-RiBt)
-        else:
-            r = RiB - S0Hs*betah/(S0Ms*betam)**2
-            B = S0Ms*betam*L0Hs- 2.*S0Hs*betah*L0Ms
-            C = 4.*(S0Ms*betam)**2 * L0Ms *(S0Hs*betah*L0Ms-S0Ms*betam*L0Hs)
-            zeta = - L0Ms / S0Ms/betam - B*C/(4.*(S0Ms*betam)**3 *(B**2+abs(C*r)))
-            if r != 0:
-                zeta = zeta + (B-np.sqrt(B**2+C*r) + B*C*r/(2.*(B**2+abs(C*r))))/(2.*(S0Ms*betam)**3*r)
-    # print('zeta',zeta)
-    return zeta
-
-def funcsche(zeta,zzz0,kBmin1):
-
-
-    mum=2.5
-    muh=0.9
-    nu=0.5
-    lam=1.5
-    
-    p2=3.141592/2.
-    
-    lnzzz0=np.log(zzz0)
-    zzzs=zzz0*0.06
-    zetamcorr=(1.+nu/(mum*zzzs))*zeta
-    zetam0=zeta/zzz0
-    zetahcorr=(1.+nu/(muh*zzzs))*zeta
-    zetah0=zeta/(zzz0*np.exp(kBmin1))
-    
-    if (zeta <= 0.):
-    
-        gamma=15.2
-        alfam=0.25
-        xx=(1.-gamma*zeta)**alfam
-        psim=2.*np.log((1.+xx)/2.)+np.log((1.+xx**2.)/2.)-2.*np.arctan(xx)+p2
-        xx0=(1.-gamma*zetam0)**alfam
-        psim0=2.*np.log((1.+xx0)/2.)+np.log((1.+xx0**2.)/2.)-2.*np.arctan(xx0)+p2
-        phimcorr=(1.-gamma*zetamcorr)**(-alfam)
-        
-        alfah=0.5
-        yy=(1.-gamma*zeta)**alfah
-        psih=2.*np.log((1.+yy)/2.)
-        yy0=(1.-gamma*zetah0)**alfah
-        psih0=2.*np.log((1.+yy0)/2.)
-        phihcorr=(1.-gamma*zetahcorr)**(-alfah)
-    else: 
-    
-        aa=6.1
-        bb=2.5
-        psim=-aa*np.log(zeta+(1.+zeta**bb)**(1./bb))
-        psim0=-aa*np.log(zetam0+(1.+zetam0**bb)**(1./bb))
-        phimcorr=1.+aa*(zetamcorr+zetamcorr**bb*(1.+zetamcorr**bb)**((1.-bb)/bb))/(zetamcorr+(1.+zetamcorr**bb)**(1./bb))
-        
-        cc=5.3
-        dd=1.1
-        psih=-cc*np.log(zeta+(1.+zeta**dd)**(1./dd))
-        psih0=-cc*np.log(zetah0+(1.+zetah0**dd)**(1./dd))
-        phihcorr=1.+cc*(zetahcorr+zetahcorr**dd*(1.+zetahcorr**dd)**((1.-dd)/dd))/(zetahcorr+(1.+zetahcorr**dd)**(1./dd))
-    
-    psistrm=phimcorr*(1./lam)*np.log(1.+lam/(mum*zzzs))*np.exp(-mum*zzzs)
-    psistrh=phihcorr*(1./lam)*np.log(1.+lam/(muh*zzzs))*np.exp(-muh*zzzs)
-    
-    funm=lnzzz0-psim+psim0 +psistrm
-    funh=lnzzz0+kBmin1-psih+psih0 +psistrh
-    return funm,funh
-
diff --git a/lib/ribtol/setup.py b/lib/ribtol/setup.py
deleted file mode 100644
index bfb44db..0000000
--- a/lib/ribtol/setup.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# build with "python setup.py build_ext --inplace"
-from distutils.core import setup
-from distutils.extension import Extension
-from Cython.Build import cythonize
-import numpy as np
-import os
-
-os.environ["CC"] = "g++-7"
-
-setup(
-    ext_modules = cythonize((Extension("ribtol", sources=["ribtol.pyx"], include_dirs=[np.get_include()], ), ))
-)
diff --git a/model.py b/model.py
deleted file mode 100644
index 8760411..0000000
--- a/model.py
+++ /dev/null
@@ -1,2214 +0,0 @@
-# 
-# CLASS
-# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
-# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
-# Copyright (c) 2011-2015 Chiel van Heerwaarden
-# Copyright (c) 2011-2015 Bart van Stratum
-# Copyright (c) 2011-2015 Kees van den Dries
-# 
-# This file is part of CLASS
-# 
-# CLASS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published bygamma
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-# 
-# CLASS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with CLASS.  If not, see .
-#
-
-import copy as cp
-import numpy as np
-import sys
-import warnings
-import pandas as pd
-from ribtol_hw import zeta_hs2 , funcsche
-import logging
-#from SkewT.thermodynamics import Density
-#import ribtol
-
-grav = 9.81
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-
-def qsat(T,p):
-    return 0.622 * esat(T) / p
-
-
-def ribtol(Rib, zsl, z0m, z0h): 
-    Rib = np.float64(Rib)
-    zsl = np.float64(zsl)
-    z0m = np.float64(z0m)
-    z0h = np.float64(z0h)
-    #print(Rib,zsl,z0m,z0h)
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    #print(Rib,zsl,z0m,z0h)
-    while (abs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-        #print(L,fx/fxdif)
-        if(abs(L) > 1e12):
-            break
-
-    return L
-  
-def psim(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psim = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-  
-def psih(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * np.log( (1. + x*x) / 2.)
-        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
- 
-class model:
-    def __init__(self, model_input = None,debug_level=None):
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        self.logger = logging.getLogger('model')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        """ initialize the different components of the model """ 
-
-        if model_input is not None:
-            # class4gl style input
-            if 'pars' in model_input.__dict__.keys():
-
-                # we make a reference to the full input first, so we can dump it
-                # afterwards
-                self.input_c4gl = model_input
-
-                # we copy the regular parameters first. We keep the classical input
-                # format as self.input so that we don't have to change the entire
-                # model code.
-                self.input = cp.deepcopy(model_input.pars)
-
-                # we copy other sections we are interested in, such as profile
-                # data, and store it also under input
-
-                # I know we mess up a bit the structure of the class4gl_input, but
-                # we will make it clean again at the time of dumping data
-
-                # So here, we copy the profile data into self.input
-                # 1. Air circulation data 
-                if 'sw_ac' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ac']:
-                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
-                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
-
-                    # correct pressure of levels according to surface pressure
-                    # error (so that interpolation is done in a consistent way)
-
-                    p_e = self.input.Ps - self.input.sp
-                    for irow in self.input.air_ac.index[::-1]:
-                       self.input.air_ac.p.iloc[irow] =\
-                        self.input.air_ac.p.iloc[irow] + p_e
-                       p_e = p_e -\
-                       (self.input.air_ac.p.iloc[irow]+p_e)/\
-                        self.input.air_ac.p.iloc[irow] *\
-                        self.input.air_ac.delpdgrav.iloc[irow]*grav
-
-
-
-                # 2. Air circulation data 
-                if 'sw_ap' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ap']:
-                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
-
-            # standard class input
-            else:
-                self.input = cp.deepcopy(model_input)
-
-    def load_yaml_dict(self,yaml_dict):
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                for keydata,value in data.items():
-                    self.__dict__[keydata] = value
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            #elif key == 'sources':
-            #    self.__dict__[key] = data
-            elif key == 'out':
-                # lets convert it to a list of dictionaries
-                dictouttemp = pd.DataFrame(data).to_dict('list')
-            else: 
-                 warnings.warn("Key '"+key+"' is be implemented.")
-            #     self.__dict__[key] = data
-
-
-        self.tsteps = len(dictouttemp['h'])
-        self.out = model_output(self.tsteps)
-        for keydictouttemp in dictouttemp.keys():
-            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
-
-
-  
-    def run(self):
-        # initialize model variables
-        self.init()
-  
-        # time integrate model 
-        #for self.t in range(self.tsteps):
-        while self.t < self.tsteps:
-          
-            # time integrate components
-            self.timestep()
-  
-        # delete unnecessary variables from memory
-        self.exitmodel()
-    
-    def init(self):
-        # assign variables from input data
-        # initialize constants
-        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
-        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        self.rho        = 1.2                   # density of air [kg m-3]
-        self.k          = 0.4                   # Von Karman constant [-]
-        self.g          = 9.81                  # gravity acceleration [m s-2]
-        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-        self.bolz       = 5.67e-8               # Bolzman constant [-]
-        self.rhow       = 1000.                 # density of water [kg m-3]
-        self.S0         = 1368.                 # solar constant [W m-2]
-
-        # A-Gs constants and settings
-        # Plant type:       -C3-     -C4-
-        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
-        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
-        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
-        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
-        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
-        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
-        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
-        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
-        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
-        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
-        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
-
-        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
-        self.mair       =  28.9;                # molecular weight air [g mol -1]
-        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
-
-        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
-        self.wmax       =  0.55;                # upper reference value soil water [-]
-        self.wmin       =  0.005;               # lower reference value soil water [-]
-        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
-        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
-
-        # Read switches
-        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
-        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
-        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
-        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
-        self.sw_sl      = self.input.sw_sl      # surface layer switch
-        self.sw_rad     = self.input.sw_rad     # radiation switch
-        self.sw_ls      = self.input.sw_ls      # land surface switch
-        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
-        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
-
-        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
-        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
-        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-  
-        # initialize mixed-layer
-        self.h          = self.input.h          # initial ABL height [m]
-        self.Ps         = self.input.Ps         # surface pressure [Pa]
-        self.sp         = self.input.sp         # This is also surface pressure
-                                                #but derived from the global data [Pa]
-        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
-        self.ws         = None                  # large-scale vertical velocity [m s-1]
-        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
-        self.we         = -1.                   # entrainment velocity [m s-1]
-       
-         # Temperature 
-        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
-        
-        
-        self.substep    = False
-        self.substeps   = 0
-
-
-
-        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
-        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
-        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
-        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
-        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
- 
-        self.wstar      = 0.                    # convective velocity scale [m s-1]
- 
-        # 2m diagnostic variables 
-        self.T2m        = None                  # 2m temperature [K]
-        self.q2m        = None                  # 2m specific humidity [kg kg-1]
-        self.e2m        = None                  # 2m vapor pressure [Pa]
-        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
-        self.u2m        = None                  # 2m u-wind [m s-1]
-        self.v2m        = None                  # 2m v-wind [m s-1]
- 
-        # Surface variables 
-        self.thetasurf  = self.input.theta      # surface potential temperature [K]
-        self.thetavsurf = None                  # surface virtual potential temperature [K]
-        self.qsurf      = None                  # surface specific humidity [g kg-1]
-
-        # Mixed-layer top variables
-        self.P_h        = None                  # Mixed-layer top pressure [pa]
-        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
-        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
-        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
-        self.RH_h       = None                  # Mixed-layer top relative humidity [-]
-        self.dz_h       = None                  # Transition layer thickness [-]
-        self.lcl        = None                  # Lifting condensation level [m]
-
-        # Virtual temperatures and fluxes
-        self.thetav     = None                  # initial mixed-layer potential temperature [K]
-        self.dthetav    = None                  # initial virtual temperature jump at h [K]
-        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
-        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
-       
-        
-        
-        
-        
-        
-        # Moisture 
-        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
-
-        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
-        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
-        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
-  
-        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
-        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
-        self.e          = None                  # mixed-layer vapor pressure [Pa]
-        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
-        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
-      
-        
-        
-        # CO2
-        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
-        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
-        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
-        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
-        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
-        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
-        self.wCO2A      = 0                     # surface assimilation CO2 flux [ppm m s-1]
-        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
-        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
-        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
-       
-        # Wind 
-        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
-        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
-        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = self.input.advu       # advection of u-wind [m s-2]
-        
-        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
-        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = self.input.advv       # advection of v-wind [m s-2]
-         
-  # BEGIN -- HW 20170606
-        # z-coordinate for vertical profiles of stratification above the mixed-layer height
-
-        if self.sw_ac:
-        # this is the data frame with the grided profile on the L60 grid
-        # (subsidence, and advection) 
-            self.air_ac      = self.input.air_ac  # full level air circulation
-                                                  # forcing
-            # self.air_ach     = self.input.air_ach # half level air circulation
-            #                                       # forcing
-            
-
-        if self.sw_ap:
-        # this is the data frame with the fitted profile (including HAGL,
-        # THTA,WSPD, SNDU,WNDV PRES ...)
-            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
-
-            # just for legacy reasons...
-            if 'z' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
-            if 'p' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
-
-            indexh = np.where(self.air_ap.z.values == self.h)
-            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
-                raise ValueError("Error input profile consistency: mixed- \
-                                 layer height needs to be equal to the second \
-                                 and third \
-                                 level of the vertical profile input!")
-            # initialize q from its profile when available
-            p_old = self.Ps
-            p_new = self.air_ap.p[indexh[0][0]]
-            
-            if ((p_old is not None) & (p_old != p_new)):
-                print("Warning: Ps input was provided ("+str(p_old)+\
-                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
-                    +str(p_new)+"Pa).")
-                                    
-            self.Ps = p_new
-            # these variables/namings are more convenient to work with in the code
-            # we will update the original variables afterwards
-            #self.air_ap['q'] = self.air_ap.QABS/1000.
-
-            self.air_ap = \
-                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
-            # we require the temperature fields, since we need to consider
-            # advection
-            # if self.sw_ac:
-            #     #self.air_ap['theta'] = self.air_ap['t'] *
-
-            #     # we consider self.sp in case of air-circulation input (for
-            #     # consistence)
-            #     self.air_ap['t'] = \
-            #                 self.air_ap.theta *  \
-            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
-            # else:
-            # we consider self.Ps in case of balloon input only 
-            self.air_ap = self.air_ap.assign(t = lambda x: \
-                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
-
-            #self.air_ap['theta'] = self.air_ap.THTA
-            if 'u' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
-            if 'v' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
-
-            for var in ['theta','q','u','v']:
-
-                
-                if self.air_ap[var][1] != self.air_ap[var][0]:
-                    raise ValueError("Error input profile consistency: two \
-                                     lowest profile levels for "+var+" should \
-                                     be equal.")
-                
-                # initialize the value from its profile when available
-                value_old = self.__dict__[var]
-                value_new = self.air_ap[var][indexh[0][0]]
-                
-                if ((value_old is not None) & (value_old != value_new)):
-                    warnings.warn("Warning:  input was provided \
-                                     ("+str(value_old)+ "kg kg-1), \
-                                     but it is now overwritten by the first \
-                                     level (index 0) of air_ap[var] which is \
-                                     different (" +str(value_new)+"K).")
-                                        
-                self.__dict__[var] = value_new
-
-                # make a profile of the stratification 
-                # please note that the stratification between z_pro[i] and
-                # z_pro[i+1] is given by air_ap.GTHT[i]
-
-                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
-                # np.gradient(self.z_pro)
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
-
-
-                self.__dict__['gamma'+var] = \
-                    self.air_ap['gamma'+var][np.where(self.h >= \
-                                                     self.air_ap.z)[0][-1]]
-
-
-
-        # the variable p_pro is just for diagnosis of lifted index
-            
-            
-
-            # input Ph is wrong, so we correct it according to hydrostatic equation
-            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-
-            #if self.sw_ac:
-                # note that we use sp as surface pressure, which is determined
-                # from era-interim instead of the observations. This is to
-                # avoid possible failure of the interpolation routine
-                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
-                #                          + \
-                #                          list(self.air_ap.p[3:]))
-
-            # else:
-                # in the other case, it is updated at the time of calculating
-                # the statistics 
-
-# END -- HW 20170606      
-        #print(self.air_ap)
-
-        if self.sw_ac and not self.sw_ap:
-            raise ValueError("air circulation switch only possible when air \
-                             profiles are given")
-        
-        if self.sw_ac:
-
-            # # # we comment this out, because subsidence is calculated
-            # according to advection
-            # #interpolate subsidence towards the air_ap height coordinate
-            # self.air_ap['w'] = np.interp(self.air_ap.p,\
-            #                               self.air_ac.p,\
-            #                               self.air_ac.w) 
-            # #subsidence at the mixed-layer top
-            # self.w = self.air_ap.w[1]
-        
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-                # in case we didn't find any points, we just take the lowest one.
-                # actually, this can happen if ERA-INTERIM pressure levels are
-                # inconsistent with 
-                if in_ml.sum() == 0:
-                    warnings.warn(" no circulation points in the mixed layer \
-                                  found. We just take the bottom one.")
-                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-
-                for var in ['t','q','u','v']:
-    
-                   # calculation of the advection variables for the mixed layer
-                   # we weight by the hydrostatic thickness of each layer and
-                   # divide by the total thickness
-                   self.__dict__['adv'+var] = \
-                            ((self.air_ac['adv'+var+'_x'][in_ml] \
-                             + \
-                             self.air_ac['adv'+var+'_y'][in_ml])* \
-                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                            self.air_ac['delpdgrav'][in_ml].sum()
-
-                   # calculation of the advection variables for the profile above
-                   # (lowest 3 values are not used by class)
-                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
-                   self.air_ap['adv'+var] = \
-                           np.interp(self.air_ap.p,\
-                                     self.air_ac.p,\
-                                     self.air_ac['adv'+var+'_x']) \
-                           + \
-                           np.interp(self.air_ap.p, \
-                                       self.air_ac.p, \
-                                       self.air_ac['adv'+var+'_y'])
-
-                # as an approximation, we consider that advection of theta in the
-                # mixed layer is equal to advection of t. This is a sufficient
-                # approximation since theta and t are very similar at the surface
-                # pressure.
-                self.__dict__['advtheta'] = self.__dict__['advt']
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # self.wrho = np.interp(self.P_h,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) 
-            # self.ws   = self.air_ap.w.iloc[1]
-
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                self.air_ap = self.air_ap.assign(wp = 0.)
-                self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                              self.air_ac.p, \
-                                              self.air_ac['wp'])
-                self.air_ap = self.air_ap.assign(R = 0.)
-                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                     self.Rv*self.air_ap.q)
-                self.air_ap = self.air_ap.assign(rho = 0.)
-                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-                
-                self.air_ap = self.air_ap.assign(w = 0.)
-                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-                #print('hello w ini')
-
-                # Note: in case of sw_ac is False, we update it from prescribed
-                # divergence
-                self.ws   = self.air_ap.w[1]
-
-                # self.ws   = self.wrho/self.rho
-                # self.ws   = self.wrho/(self.P_h/ \
-                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
-                #                         self.theta) # this should be T!!!
-
-                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-                #                         + \
-                #                         self.air_ac['divU_y'][in_ml])* \
-                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                #             self.air_ac['delpdgrav'][in_ml].sum() \
-        
-
-        # Tendencies 
-        self.htend      = None                  # tendency of CBL [m s-1]
-        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
-        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
-        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
-        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
-        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
-        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
-        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
-        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
-        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
-        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
-        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
-  
-        # initialize surface layer
-        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
-        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
-        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
-        self.z0m        = self.input.z0m        # roughness length for momentum [m]
-        self.z0h        = self.input.z0h        # roughness length for scalars [m]
-        self.Cm         = 1e12                  # drag coefficient for momentum [-]
-        self.Cs         = 1e12                  # drag coefficient for scalars [-]
-        self.L          = None                  # Obukhov length [m]
-        self.Rib        = None                  # bulk Richardson number [-]
-        self.ra         = None                  # aerodynamic resistance [s m-1]
-  
-        # initialize radiation
-        self.lat        = self.input.lat        # latitude [deg]
-        #self.fc         = self.input.fc         # coriolis parameter [s-1]
-        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
-        self.lon        = self.input.lon        # longitude [deg]
-        self.doy        = self.input.doy        # day of the year [-]
-        self.tstart     = self.input.tstart     # time of the day [-]
-        self.cc         = self.input.cc         # cloud cover fraction [-]
-        self.Swin       = None                  # incoming short wave radiation [W m-2]
-        self.Swout      = None                  # outgoing short wave radiation [W m-2]
-        self.Lwin       = None                  # incoming long wave radiation [W m-2]
-        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
-        self.Q          = self.input.Q          # net radiation [W m-2]
-        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
-  
-        # initialize land surface
-        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
-        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
-        self.T2         = self.input.T2         # temperature deeper soil layer [K]
-                           
-        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
-        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
-        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
-        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
-                           
-        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
-        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
-        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
-                           
-        self.C1sat      = self.input.C1sat      
-        self.C2ref      = self.input.C2ref      
-
-        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
-        
-        self.LAI        = self.input.LAI        # leaf area index [-]
-        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
-        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = self.input.alpha      # surface albedo [-]
-  
-        self.rs         = 1.e6                  # resistance transpiration [s m-1]
-        self.rssoil     = 1.e6                  # resistance soil [s m-1]
-                           
-        self.Ts         = self.input.Ts         # surface temperature [K]
-                           
-        self.cveg       = self.input.cveg       # vegetation fraction [-]
-        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
-        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
-        self.cliq       = None                  # wet fraction [-]
-                          
-        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
-  
-        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
-        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
-        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
-  
-        self.H          = None                  # sensible heat flux [W m-2]
-        self.LE         = None                  # evapotranspiration [W m-2]
-        self.LEliq      = None                  # open water evaporation [W m-2]
-        self.LEveg      = None                  # transpiration [W m-2]
-        self.LEsoil     = None                  # soil evaporation [W m-2]
-        self.LEpot      = None                  # potential evaporation [W m-2]
-        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
-        self.G          = None                  # ground heat flux [W m-2]
-
-        # initialize A-Gs surface scheme
-        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
-
-        # initialize cumulus parameterization
-        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
-        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
-        self.ac         = 0.                    # Cloud core fraction [-]
-        self.M          = 0.                    # Cloud core mass flux [m s-1] 
-        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
-  
-        # initialize time variables
-        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
-        self.dt     = self.input.dt
-        self.dtcur      = self.dt
-        self.firsttime = True
-        self.t      = 0
- 
-        # Some sanity checks for valid input
-        if (self.c_beta is None): 
-            self.c_beta = 0                     # Zero curvature; linear response
-        assert(self.c_beta >= 0 or self.c_beta <= 1)
-
-        # initialize output
-        self.out = model_output(self.tsteps)
- 
-        self.statistics()
-  
-        # calculate initial diagnostic variables
-        if(self.sw_rad):
-            self.run_radiation()
- 
-        if(self.sw_sl):
-            for i in range(10): 
-                self.run_surface_layer()
-  
-        if(self.sw_ls):
-            self.run_land_surface()
-
-        if(self.sw_cu):
-            self.run_mixed_layer()
-            self.run_cumulus()
-        
-        if(self.sw_ml):
-            self.run_mixed_layer()
-
-    def timestep(self):
-
-        self.dtmax = +np.inf
-        self.logger.debug('before stats') 
-        self.statistics()
-
-        # run radiation model
-        self.logger.debug('before rad') 
-        if(self.sw_rad):
-            self.run_radiation()
-  
-        # run surface layer model
-        if(self.sw_sl):
-            self.logger.debug('before surface layer') 
-            self.run_surface_layer()
-        
-        # run land surface model
-        if(self.sw_ls):
-            self.logger.debug('before land surface') 
-            self.run_land_surface()
- 
-        # run cumulus parameterization
-        if(self.sw_cu):
-            self.logger.debug('before cumulus') 
-            self.run_cumulus()
-   
-        self.logger.debug('before mixed layer') 
-        # run mixed-layer model
-        if(self.sw_ml):
-            self.run_mixed_layer()
-        self.logger.debug('after mixed layer') 
- 
-        #get first profile data point above mixed layer
-        if self.sw_ap:
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                # here we correct for the fact that the upper profile also
-                # shifts in the vertical.
-
-                diffhtend = self.htend - self.air_ap.w[zidx_first]
-                if diffhtend > 0:
-                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            else:
-                if self.htend > 0:
-                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            #print(self.h,zidx_first,self.ws,self.air_ap.z)
-
-        
-        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
-        self.logger.debug('before store') 
-        self.substep =  (self.dtcur > self.dtmax)
-        if self.substep:
-            dtnext = self.dtcur - self.dtmax
-            self.dtcur = self.dtmax
-
-        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
-
-        # HW: this will be done multiple times in case of a substep is needed
-        # store output before time integration
-        if self.firsttime:
-            self.store()
-  
-        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
-        # time integrate land surface model
-        if(self.sw_ls):
-            self.integrate_land_surface()
-        self.logger.debug('before integrate mixed layer') 
-        # time integrate mixed-layer model
-        if(self.sw_ml):
-            self.integrate_mixed_layer() 
-        self.logger.debug('after integrate mixed layer') 
-        if self.substep:
-            self.dtcur = dtnext
-            self.firsttime = False
-            self.substeps += 1
-        else:
-            self.dtcur = self.dt
-            self.t += 1 
-            self.firsttime = True
-            self.substeps = 0
-        self.logger.debug('going to next step')
-        
-        
-  
-    def statistics(self):
-        # Calculate virtual temperatures 
-        self.thetav   = self.theta  + 0.61 * self.theta * self.q
-        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
-        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
-        # Mixed-layer top properties
-        self.P_h    = self.Ps - self.rho * self.g * self.h
-        # else:
-            # in the other case, it is updated at the time that the profile is
-            # updated (and at the initialization
-
-        self.T_h    = self.theta - self.g/self.cp * self.h
-
-        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
-        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
-
-        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
-
-        # Find lifting condensation level iteratively
-        if(self.t == 0):
-            self.lcl = self.h
-            RHlcl = 0.5
-        else:
-            RHlcl = 0.9998 
-
-        itmax = 30
-        it = 0
-        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it < itmax):
-            self.lcl    += (1.-RHlcl)*1000.
-            p_lcl        = self.Ps - self.rho * self.g * self.lcl
-            T_lcl        = self.theta - self.g/self.cp * self.lcl
-            RHlcl        = self.q / qsat(T_lcl, p_lcl)
-            it          += 1
-
-        if(it == itmax):
-            print("LCL calculation not converged!!")
-            print("RHlcl = %f, zlcl=%f"%(RHlcl, self.lcl))
-
-    def run_cumulus(self):
-        # Calculate the variance of q and CO2 at the mixed-layer top
-        if(self.wstar > 0):
-            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
-            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
-        else:
-            self.q2_h   = 0.
-            self.CO22_h = 0.
-
-        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
-        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
-        self.M      = self.ac * self.wstar
-        self.wqM    = self.M * self.q2_h**0.5
-
-        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
-        if(self.dCO2 < 0):
-            self.wCO2M  = self.M * self.CO22_h**0.5
-        else:
-            self.wCO2M  = 0.
-
-    def run_mixed_layer(self):
-        if(not self.sw_sl):
-            # decompose ustar along the wind components
-            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
-            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
-
-
-
-        # calculate large-scale vertical velocity (subsidence)
-        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
-            self.ws = -self.divU * self.h
-        # else:
-        #     in case the air circulation switch is turned on, subsidence is
-        #     calculated from the circulate profile at the initialization and
-        #     in the integrate_mixed_layer routine
-              
-        # calculate compensation to fix the free troposphere in case of subsidence 
-        if(self.sw_fixft):
-            w_th_ft  = self.gammatheta * self.ws
-            w_q_ft   = self.gammaq     * self.ws
-            w_CO2_ft = self.gammaCO2   * self.ws 
-        else:
-            w_th_ft  = 0.
-            w_q_ft   = 0.
-            w_CO2_ft = 0. 
-      
-        # calculate mixed-layer growth due to cloud top radiative divergence
-        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
-       
-        # calculate convective velocity scale w* 
-        if(self.wthetav > 0.):
-            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
-        else:
-            self.wstar  = 1e-6;
-      
-        # Virtual heat entrainment flux 
-        self.wthetave    = -self.beta * self.wthetav 
-        
-        # compute mixed-layer tendencies
-        if(self.sw_shearwe):
-            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
-        else:
-            self.we    = -self.wthetave / self.dthetav
-        # Don't allow boundary layer shrinking if wtheta < 0 
-        if(self.we < 0):
-            self.we = 0.
-
-        # Calculate entrainment fluxes
-        self.wthetae     = -self.we * self.dtheta
-        self.wqe         = -self.we * self.dq
-        self.wCO2e       = -self.we * self.dCO2
-        
-        htend_pre       = self.we + self.ws + self.wf - self.M
-        
-        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-        
- 
-        #print('thetatend_pre',thetatend_pre)
-        
-        #preliminary boundary-layer top chenage
-        #htend_pre = self.we + self.ws + self.wf - self.M
-        #preliminary change in temperature jump
-        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
-                          thetatend_pre + w_th_ft
-        
-        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
-        l_entrainment = True
-
-        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
-            l_entrainment = False
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! temperature jump is at the lower limit \
-                          and is not growing: entrainment is disabled for this (sub)timestep.") 
-        elif dtheta_pre < 0.1:
-            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
-            l_entrainment = True
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          " Warning! Potential temperature jump at mixed- \
-                          layer height would become too low limiting timestep \
-                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
-            self.dtmax = min(self.dtmax,dtmax_new)
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "next subtimestep, entrainment will be disabled")
-            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
-
-
-
-        # when entrainment is disabled, we just use the simplified formulation
-        # as in Wouters et al., 2013 (section 2.2.1)
-
-        self.dthetatend = l_entrainment*dthetatend_pre + \
-                        (1.-l_entrainment)*0.
-        self.thetatend = l_entrainment*thetatend_pre + \
-                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
-        self.htend = l_entrainment*htend_pre + \
-                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
-        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
-        #stop
-
-
-        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
-        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
-
-
-        # self.qtend = l_entrainment*qtend_pre + \
-        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
-        # self.CO2tend = l_entrainment*CO2tend_pre + \
-        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-
-
-        #     # part of the timestep for which the temperature mixed-layer jump
-        #     # was changing, and for which entrainment took place. For the other
-        #     # part, we don't assume entrainment anymore, and we use the
-        #     # simplified formulation  of Wouters et al., 2013
-
-        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
-        #   
-        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
-        #                      self.dthetatend + w_th_ft) + \
-        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
-        #     self.htend = fac*self.htend + \
-        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
-        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
-        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-
-        # else:
-        #     #self.htend = htend_pre
-        #     self.dthetatend = dthetatend_pre
-        #     self.thetatend = thetatend_pre
-        
-        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
-        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
-     
-        # assume u + du = ug, so ug - u = du
-        if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
-  
-            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
-            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
-        
-        # tendency of the transition layer thickness
-        if(self.ac > 0 or self.lcl - self.h < 300):
-            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
-        else:
-            self.dztend = 0.
-
-   
-    def integrate_mixed_layer(self):
-        # set values previous time step
-        h0      = self.h
-        
-        theta0  = self.theta
-        dtheta0 = self.dtheta
-        q0      = self.q
-        dq0     = self.dq
-        CO20    = self.CO2
-        dCO20   = self.dCO2
-        
-        u0      = self.u
-        du0     = self.du
-        v0      = self.v
-        dv0     = self.dv
-
-        dz0     = self.dz_h
-  
-        # integrate mixed-layer equations
-        
-            
-
-# END -- HW 20170606        
-        self.h        = h0      + self.dtcur * self.htend
-        # print(self.h,self.htend)
-        # stop
-        self.theta    = theta0  + self.dtcur * self.thetatend
-        #print(dtheta0,self.dtcur,self.dthetatend)
-        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
-        self.q        = q0      + self.dtcur * self.qtend
-        self.dq       = dq0     + self.dtcur * self.dqtend
-        self.CO2      = CO20    + self.dtcur * self.CO2tend
-        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
-        self.dz_h     = dz0     + self.dtcur * self.dztend
-            
-        # Limit dz to minimal value
-        dz0 = 50
-        if(self.dz_h < dz0):
-            self.dz_h = dz0 
-  
-        if(self.sw_wind):
-            self.u        = u0      + self.dtcur * self.utend
-            self.du       = du0     + self.dtcur * self.dutend
-            self.v        = v0      + self.dtcur * self.vtend
-            self.dv       = dv0     + self.dtcur * self.dvtend
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-            for var in ['t','q','u','v']:
-                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
-
-            # take into account advection for the whole profile
-                
-                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
-
-            var = 'z'
-            #print(self.air_ap[var])
-                #     print(self.air_ap['adv'+var])
-
-
-
-
-            #moving the profile vertically according to the vertical wind
-                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
-
-
-            # air_apvarold = pd.Series(np.array(self.air_ap.z))
-            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
-            # stop
-
-
-                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
-                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
-
-            #As t is updated, we also need to recalculate theta (and R)
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-
-            # air_aptheta_old = pd.Series(self.air_ap['theta'])
-            self.air_ap['theta'] = \
-                        self.air_ap.t * \
-                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
-                                         self.dtcur * self.air_ap.w[zidx_first:]
-
-#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
-#            print(self.t, self.dtcur,self.dt,self.htend)
-
-            # # the pressure levels of the profiles are recalculated according to
-            # # there new height (after subsidence)
-            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
-            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
-            #         * self.dtcur *  self.air_ap.w[zidx_first:]
-
-            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
-                    self.dtcur * self.air_ap.wp[zidx_first:]
-
-            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
-        # note that theta and q itself are updatet by class itself
-
-    
-        if self.sw_ap:
-            # Just for model consistency preservation purposes, we set the
-            # theta variables of the mixed-layer to nan values, since the
-            # mixed-layer values should overwritte by the mixed-layer
-            # calculations of class.
-            self.air_ap['theta'][0:3] = np.nan 
-            self.air_ap['p'][0:3] = np.nan 
-            self.air_ap['q'][0:3] = np.nan 
-            self.air_ap['u'][0:3] = np.nan 
-            self.air_ap['v'][0:3] = np.nan 
-            self.air_ap['t'][0:3] = np.nan 
-            self.air_ap['z'][0:3] = np.nan 
-
-            # Update the vertical profiles: 
-            #   - new mixed layer properties( h, theta, q ...)
-            #   - any data points below the new ixed-layer height are removed
-
-            # Three data points at the bottom that describe the mixed-layer
-            # properties
-            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
-                                           # columns as air_ap
-            # air_ap_head['z'].iloc[0] = 2.
-            # air_ap_head['z'].iloc[1] = self.__dict__['h']
-            # air_ap_head['z'].iloc[2] = self.__dict__['h']
-            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
-                        [2.,self.__dict__['h'],self.__dict__['h']]
-            for var in ['theta','q','u','v']:
-
-                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
-                        [self.__dict__[var], \
-                         self.__dict__[var], \
-                         self.__dict__[var] + self.__dict__['d'+var]]
-                
-            #print(self.air_ap)
-
-            # This is the remaining profile considering the remaining
-            # datapoints above the mixed layer height
-            air_ap_tail = self.air_ap.iloc[3:]
-            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
-
-            # print('h',self.h)
-            # # only select samples monotonically increasing with height
-            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            # air_ap_tail = pd.DataFrame()
-            # theta_low = self.theta
-            # z_low =     self.h
-            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            # for ibottom in range(1,len(air_ap_tail_orig)):
-            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
-            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-
-
-            # make theta increase strong enough to avoid numerical
-            # instability
-            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            air_ap_tail = pd.DataFrame()
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            theta_low = self.theta
-            z_low =     self.h
-            ibottom = 0
-            itop = 0
-            # print(air_ap_tail_orig)
-            # stop
-
-            # HW: this is the lower limit that we use for gammatheta, which is
-            # there to avoid model crashes. Besides on this limit, the upper
-            # air profile is modified in a way that is still conserves total
-            # quantities of moisture and temperature. The limit is set by trial
-            # and error. The numerics behind the crash should be investigated
-            # so that a cleaner solution can be provided.
-            gammatheta_lower_limit = 0.002
-            while ((itop in range(0,1)) or (itop != ibottom)):
-                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-                if (
-                    #(z_mean > (z_low+0.2)) and \
-                    #(theta_mean > (theta_low+0.02) ) and \
-                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
-                  (itop >= (len(air_ap_tail_orig)-1)) \
-                   :
-
-                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                    ibottom = itop+1
-                    theta_low = air_ap_tail.theta.iloc[-1]
-                    z_low =     air_ap_tail.z.iloc[-1]
-    
-
-                itop +=1
-                # elif  (itop > len(air_ap_tail_orig)-10):
-                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-                #print(itop,ibottom)
-
-            if itop > 1:
-                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! Temperature profile was too steep. \
-                                  Modifying profile: "+ \
-                                  str(itop - 1)+ " measurements were dropped \
-                                  and replaced with its average \
-                                  Modifying profile. \
-                                  mean with next profile point(s).") 
-
-
-            self.air_ap = pd.concat((air_ap_head,\
-                                     air_ap_tail,\
-                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
-                                                                      axis=1)
-
-            if  self.sw_ac:
-                qvalues = \
-                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
-
-                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
-                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
-                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-                self.P_h    = self.Ps - self.rho * self.g * self.h
-                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
-                        [self.Ps,  self.P_h, self.P_h-0.1]
-
-                self.air_ap.t = \
-                            self.air_ap.theta * \
-                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
-
-
-        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
-
-
-
-
-        # else:
-            # in the other case, it is updated at the time the statistics are
-            # calculated 
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if in_ml.sum() == 0:
-                warnings.warn(" no circulation points in the mixed layer \
-                              found. We just take the bottom one.")
-                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-            for var in ['t','q','u','v']:
-
-                # calculation of the advection variables for the mixed-layer
-                # these will be used for the next timestep
-                # Warning: w is excluded for now.
-
-                self.__dict__['adv'+var] = \
-                        ((self.air_ac['adv'+var+'_x'][in_ml] \
-                         + \
-                         self.air_ac['adv'+var+'_y'][in_ml])* \
-                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                        self.air_ac['delpdgrav'][in_ml].sum()
-
-                # calculation of the advection variables for the profile above
-                # the mixed layer (also for the next timestep)
-                self.air_ap['adv'+var] = \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p,\
-                                              self.air_ac['adv'+var+'_x']) \
-                                    + \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p, \
-                                              self.air_ac['adv'+var+'_y'])
-                # if var == 't':
-                #     print(self.air_ap['adv'+var])
-                #     stop
-
-            # as an approximation, we consider that advection of theta in the
-            # mixed layer is equal to advection of t. This is a sufficient
-            # approximation since theta and t are very similar at the surface
-            # pressure.
-
-            self.__dict__['advtheta'] = self.__dict__['advt']
-
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            # update the vertical wind profile
-            self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                          self.air_ac.p, \
-                                          self.air_ac['wp'])
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-            
-            air_apwold = self.air_ap['w']
-            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-            #print('hello w upd')
-
-            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # # self.wrho = np.interp(self.P_h,\
-            # #                      self.air_ach.p,\
-            # #                      self.air_ach['wrho']) \
-
-
-
-            # Also update the vertical wind at the mixed-layer height
-            # (subsidence)
-            self.ws   = self.air_ap.w[1]
-        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
-
-            ## Finally, we update he 
-            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-            #                        + \
-            #                        self.air_ac['divU_y'][in_ml])* \
-            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-            #            self.air_ac['delpdgrav'][in_ml].sum() 
-            
-
-        if self.sw_ap:
-            for var in ['theta','q','u','v']:
-
-                # update of the slope (gamma) for the different variables, for
-                # the next timestep!
-
-                # there is an warning message that tells about dividing through
-                # zero, which we ignore
-
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                    # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap['gamma'+var] = gammavar
-
-                # Based on the above, update the gamma value at the mixed-layer
-                # top
-                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
-                                                                     self.air_ap.z)[0][-1]]
-
-            
-    def run_radiation(self):
-        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
-        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
-        sinlea = max(sinlea, 0.0001)
-        
-        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
-  
-        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
-  
-        self.Swin  = self.S0 * Tr * sinlea
-        self.Swout = self.alpha * self.S0 * Tr * sinlea
-        
-        
-        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
-        self.Lwout = self.bolz * self.Ts ** 4.
-          
-        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
-        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
-  
-    def run_surface_layer(self):
-        # HW: I had to raise the minimum wind speed to make the simulation with
-        # the non-iterative solution stable (this solution was a wild guess, so I don't
-        # know the exact problem of the instability in case of very low wind
-        # speeds yet)
-        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        # version of 20180730 where there are still some runs crashing. Maybe
-        # an upper limit should be set on the monin-obukhov length instead of
-        # a lower limmit on the wind speed?
-        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        
-        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
-        qsatsurf       = qsat(self.thetasurf, self.Ps)
-        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
-        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
-
-        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
-  
-        zsl       = 0.1 * self.h
-        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
-        
-
-
-        if self.sw_lit:
-            self.Rib  = min(self.Rib, 0.2)
-            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
-            self.zeta  = zsl/self.L
-            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
-            
-        
-            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
-            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
-            
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-        
-     
-            # diagnostic meteorological variables
-            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
-            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
-            
-            # diagnostic meteorological variables
-        else:
-            
-            ## circumventing any iteration with Wouters et al., 2012
-            self.zslz0m = np.max((zsl/self.z0m,10.))
-            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
-            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
-            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
-            self.L = zsl/self.zeta
-            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
-        
-            self.Cm = self.k**2.0/funm/funm
-            self.Cs = self.k**2.0/funm/funh
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-            
-            # extrapolation from mixed layer (instead of from surface) to 2meter
-            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
-            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
-            self.u2m    =                - self.uw     / self.ustar / self.k * funm
-            self.v2m    =                - self.vw     / self.ustar / self.k * funm
-        
-        
-        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
-        self.e2m    = self.q2m * self.Ps / 0.622
-     
-    def ribtol(self, Rib, zsl, z0m, z0h): 
-        if(Rib > 0.):
-            L    = 1.
-            L0   = 2.
-        else:
-            L  = -1.
-            L0 = -2.
-        #print(Rib,zsl,z0m,z0h)
-        
-        while (abs(L - L0) > 0.001):
-            L0      = L
-            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
-            Lstart  = L - 0.001*L
-            Lend    = L + 0.001*L
-            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
-                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
-                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
-                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-            L       = L - fx / fxdif
-            #print(L)
-            if(abs(L) > 1e12):
-                break
-
-        return L
-      
-    def psim(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psim = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-        return psim
-      
-    def psih(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psih  = 2. * np.log( (1. + x*x) / 2.)
-            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-        return psih
- 
-    def jarvis_stewart(self):
-        # calculate surface resistances using Jarvis-Stewart model
-        if(self.sw_rad):
-            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
-        else:
-            f1 = 1.
-  
-        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
-            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
-        else:
-            f2 = 1.e8
- 
-        # Limit f2 in case w2 > wfc, where f2 < 1
-        f2 = max(f2, 1.);
- 
-        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
-        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
-  
-        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
-
-    def factorial(self,k):
-        factorial = 1
-        for n in range(2,k+1):
-            factorial = factorial * float(n)
-        return factorial;
-
-    def E1(self,x):
-        E1sum = 0
-        for k in range(1,100):
-            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
-        return -0.57721566490153286060 - np.log(x) - E1sum
- 
-    def ags(self):
-        # Select index for plant type
-        if(self.c3c4 == 'c3'):
-            c = 0
-        elif(self.c3c4 == 'c4'):
-            c = 1
-        else:
-            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)
-
-        # calculate CO2 compensation concentration
-        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  
-
-        # calculate mesophyll conductance
-        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
-                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
-        gm            = gm / 1000. # conversion from mm s-1 to m s-1
-  
-        # calculate CO2 concentration inside the leaf (ci)
-        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
-        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
-  
-        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
-        D0            = (self.f0[c] - fmin) / self.ad[c]
-  
-        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
-        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
-        ci            = cfrac * (co2abs - CO2comp) + CO2comp
-  
-        # calculate maximal gross primary production in high light conditions (Ag)
-        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
-  
-        # calculate effect of soil moisture stress on gross assimilation rate
-        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
-  
-        # calculate stress function
-        if (self.c_beta == 0):
-            fstr = betaw;
-        else:
-            # Following Combe et al (2016)
-            if (self.c_beta < 0.25):
-                P = 6.4 * self.c_beta
-            elif (self.c_beta < 0.50):
-                P = 7.6 * self.c_beta - 0.3
-            else:
-                P = 2**(3.66 * self.c_beta + 0.34) - 1
-            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
-  
-        # calculate gross assimilation rate (Am)
-        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
-        Rdark        = (1. / 9.) * Am
-        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
-  
-        # calculate  light use efficiency
-        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
-  
-        # calculate gross primary productivity
-        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
-  
-        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
-        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
-        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
-  
-        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
-        a1           = 1. / (1. - self.f0[c])
-        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
-  
-        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
-  
-        # calculate surface resistance for moisture and carbon dioxide
-        self.rs      = 1. / (1.6 * gcco2)
-        rsCO2        = 1. / gcco2
-  
-        # calculate net flux of CO2 into the plant (An)
-        An           = -(co2abs - ci) / (self.ra + rsCO2)
-  
-        # CO2 soil surface flux
-        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
-        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
-  
-        # CO2 flux
-        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
-        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
-        self.wCO2    = self.wCO2A + self.wCO2R
- 
-    def run_land_surface(self):
-        # compute ra
-        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
-        #print('ueff',self.u,self.v,self.wstar)
-
-        if(self.sw_sl):
-          self.ra = (self.Cs * ueff)**-1.
-        else:
-          self.ra = ueff / max(1.e-3, self.ustar)**2.
-
-        #print('ra',self.ra,self.ustar,ueff)
-
-        # first calculate essential thermodynamic variables
-        self.esat    = esat(self.theta)
-        self.qsat    = qsat(self.theta, self.Ps)
-        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
-        self.dqsatdT = 0.622 * desatdT / self.Ps
-        self.e       = self.q * self.Ps / 0.622
-
-        if(self.ls_type == 'js'): 
-            self.jarvis_stewart() 
-        elif(self.ls_type == 'ags'):
-            self.ags()
-        else:
-            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)
-
-        # recompute f2 using wg instead of w2
-        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
-          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
-        else:
-          f2        = 1.e8
-        self.rssoil = self.rssoilmin * f2 
- 
-        Wlmx = self.LAI * self.Wmax
-        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
-        self.cliq = min(1., self.Wl / Wlmx) 
-     
-        # calculate skin temperature implictly
-        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
-            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
-            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
-            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
-            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)
-
-        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
-        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
-        #print('Ts',self.rs)
-
-        esatsurf      = esat(self.Ts)
-        self.qsatsurf = qsat(self.Ts, self.Ps)
-
-        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
-  
-        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
-  
-        self.LE     = self.LEsoil + self.LEveg + self.LEliq
-        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
-        #print('H',self.ra,self.Ts,self.theta)
-        self.G      = self.Lambda * (self.Ts - self.Tsoil)
-        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
-        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
-        
-        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
-  
-        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
-   
-        d1          = 0.1
-        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
-        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
-        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
-        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
-  
-        # calculate kinematic heat fluxes
-        self.wtheta   = self.H  / (self.rho * self.cp)
-        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
-        self.wq       = self.LE / (self.rho * self.Lv)
- 
-    def integrate_land_surface(self):
-        # integrate soil equations
-        Tsoil0        = self.Tsoil
-        wg0           = self.wg
-        Wl0           = self.Wl
-  
-        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
-        self.wg       = wg0     + self.dtcur * self.wgtend
-        self.Wl       = Wl0     + self.dtcur * self.Wltend
-  
-    # store model output
-    def store(self):
-        t                      = self.t
-        
-        self.out.time[t]          = t * self.dt / 3600. + self.tstart
-
-        # in case we are at the end of the simulation, we store the vertical
-        # profiles to the output
-        
-        # if t == (len(self.out.time) - 1):
-        #     self.out.air_ac = self.air_ac
-        #     self.out.air_ap = self.air_ap
-
-        
-        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
-        #  for key in self.out.__dict__.keys():
-        #      if key in self.__dict__:
-        #          self.out.__dict__[key][t]  = self.__dict__[key]
-        
-        self.out.h[t]          = self.h
-        
-        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
-        
-        self.out.gammatheta[t] = self.gammatheta
-        self.out.gammau[t]     = self.gammau
-        self.out.gammav[t]     = self.gammav
-        self.out.gammaq[t]     = self.gammaq
-        self.out.theta[t]      = self.theta
-        self.out.thetav[t]     = self.thetav
-        self.out.dtheta[t]     = self.dtheta
-        self.out.dthetav[t]    = self.dthetav
-        self.out.wtheta[t]     = self.wtheta
-        self.out.wthetav[t]    = self.wthetav
-        self.out.wthetae[t]    = self.wthetae
-        self.out.wthetave[t]   = self.wthetave
-        
-        self.out.q[t]          = self.q
-        self.out.dq[t]         = self.dq
-        self.out.wq[t]         = self.wq
-        self.out.wqe[t]        = self.wqe
-        self.out.wqM[t]        = self.wqM
-      
-        self.out.qsat[t]       = self.qsat
-        self.out.e[t]          = self.e
-        self.out.esat[t]       = self.esat
-      
-        fac = (self.rho*self.mco2)/self.mair
-        self.out.CO2[t]        = self.CO2
-        self.out.dCO2[t]       = self.dCO2
-        self.out.wCO2[t]       = self.wCO2  * fac
-        self.out.wCO2e[t]      = self.wCO2e * fac
-        self.out.wCO2R[t]      = self.wCO2R * fac
-        self.out.wCO2A[t]      = self.wCO2A * fac
-
-        self.out.u[t]          = self.u
-        self.out.du[t]         = self.du
-        self.out.uw[t]         = self.uw
-        
-        self.out.v[t]          = self.v
-        self.out.dv[t]         = self.dv
-        self.out.vw[t]         = self.vw
-        
-        self.out.T2m[t]        = self.T2m
-        self.out.q2m[t]        = self.q2m
-        self.out.u2m[t]        = self.u2m
-        self.out.v2m[t]        = self.v2m
-        self.out.e2m[t]        = self.e2m
-        self.out.esat2m[t]     = self.esat2m
-
-
-        self.out.Tsoil[t]      = self.Tsoil
-        self.out.T2[t]         = self.T2
-        self.out.Ts[t]         = self.Ts
-        self.out.wg[t]         = self.wg
-        
-        self.out.thetasurf[t]  = self.thetasurf
-        self.out.thetavsurf[t] = self.thetavsurf
-        self.out.qsurf[t]      = self.qsurf
-        self.out.ustar[t]      = self.ustar
-        self.out.Cm[t]         = self.Cm
-        self.out.Cs[t]         = self.Cs
-        self.out.L[t]          = self.L
-        self.out.Rib[t]        = self.Rib
-  
-        self.out.Swin[t]       = self.Swin
-        self.out.Swout[t]      = self.Swout
-        self.out.Lwin[t]       = self.Lwin
-        self.out.Lwout[t]      = self.Lwout
-        self.out.Q[t]          = self.Q
-  
-        self.out.ra[t]         = self.ra
-        self.out.rs[t]         = self.rs
-        self.out.H[t]          = self.H
-        self.out.LE[t]         = self.LE
-        self.out.LEliq[t]      = self.LEliq
-        self.out.LEveg[t]      = self.LEveg
-        self.out.LEsoil[t]     = self.LEsoil
-        self.out.LEpot[t]      = self.LEpot
-        self.out.LEref[t]      = self.LEref
-        self.out.G[t]          = self.G
-
-        self.out.zlcl[t]       = self.lcl
-        self.out.RH_h[t]       = self.RH_h
-
-        self.out.ac[t]         = self.ac
-        self.out.M[t]          = self.M
-        self.out.dz[t]         = self.dz_h
-        self.out.substeps[t]   = self.substeps
-  
-    # delete class variables to facilitate analysis in ipython
-    def exitmodel(self):
-        del(self.Lv)
-        del(self.cp)
-        del(self.rho)
-        del(self.k)
-        del(self.g)
-        del(self.Rd)
-        del(self.Rv)
-        del(self.bolz)
-        del(self.S0)
-        del(self.rhow)
-  
-        del(self.t)
-        del(self.dt)
-        del(self.tsteps)
-         
-        del(self.h)          
-        del(self.Ps)        
-        del(self.fc)        
-        del(self.ws)
-        del(self.we)
-        
-        del(self.theta)
-        del(self.dtheta)
-        del(self.gammatheta)
-        del(self.advtheta)
-        del(self.beta)
-        del(self.wtheta)
-    
-        del(self.T2m)
-        del(self.q2m)
-        del(self.e2m)
-        del(self.esat2m)
-        del(self.u2m)
-        del(self.v2m)
-        
-        del(self.thetasurf)
-        del(self.qsatsurf)
-        del(self.thetav)
-        del(self.dthetav)
-        del(self.thetavsurf)
-        del(self.qsurf)
-        del(self.wthetav)
-        
-        del(self.q)
-        del(self.qsat)
-        del(self.dqsatdT)
-        del(self.e)
-        del(self.esat)
-        del(self.dq)
-        del(self.gammaq)
-        del(self.advq)
-        del(self.wq)
-        
-        del(self.u)
-        del(self.du)
-        del(self.gammau)
-        del(self.advu)
-        
-        del(self.v)
-        del(self.dv)
-        del(self.gammav)
-        del(self.advv)
-  
-        del(self.htend)
-        del(self.thetatend)
-        del(self.dthetatend)
-        del(self.qtend)
-        del(self.dqtend)
-        del(self.utend)
-        del(self.dutend)
-        del(self.vtend)
-        del(self.dvtend)
-     
-        del(self.Tsoiltend) 
-        del(self.wgtend)  
-        del(self.Wltend) 
-  
-        del(self.ustar)
-        del(self.uw)
-        del(self.vw)
-        del(self.z0m)
-        del(self.z0h)        
-        del(self.Cm)         
-        del(self.Cs)
-        del(self.L)
-        del(self.Rib)
-        del(self.ra)
-  
-        del(self.lat)
-        del(self.lon)
-        del(self.doy)
-        del(self.tstart)
-   
-        del(self.Swin)
-        del(self.Swout)
-        del(self.Lwin)
-        del(self.Lwout)
-        del(self.cc)
-  
-        del(self.wg)
-        del(self.w2)
-        del(self.cveg)
-        del(self.cliq)
-        del(self.Tsoil)
-        del(self.T2)
-        del(self.a)
-        del(self.b)
-        del(self.p)
-        del(self.CGsat)
-  
-        del(self.wsat)
-        del(self.wfc)
-        del(self.wwilt)
-  
-        del(self.C1sat)
-        del(self.C2ref)
-  
-        del(self.LAI)
-        del(self.rs)
-        del(self.rssoil)
-        del(self.rsmin)
-        del(self.rssoilmin)
-        del(self.alpha)
-        del(self.gD)
-  
-        del(self.Ts)
-  
-        del(self.Wmax)
-        del(self.Wl)
-  
-        del(self.Lambda)
-        
-        del(self.Q)
-        del(self.H)
-        del(self.LE)
-        del(self.LEliq)
-        del(self.LEveg)
-        del(self.LEsoil)
-        del(self.LEpot)
-        del(self.LEref)
-        del(self.G)
-  
-        del(self.sw_ls)
-        del(self.sw_rad)
-        del(self.sw_sl)
-        del(self.sw_wind)
-        del(self.sw_shearwe)
-
-# class for storing mixed-layer model output data
-class model_output:
-    def __init__(self, tsteps):
-        self.time          = np.zeros(tsteps)    # time [s]
-
-        # mixed-layer variables
-        self.h          = np.zeros(tsteps)    # ABL height [m]
-        
-        self.theta      = np.zeros(tsteps)    # initial mixed-layer potential temperature [K]
-        self.gammatheta = np.zeros(tsteps)    # free atmosphere potential temperature lapse rate [K m-1]
-        self.gammaq     = np.zeros(tsteps)    # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.gammau     = np.zeros(tsteps)
-        self.gammav     = np.zeros(tsteps)
-        self.thetav     = np.zeros(tsteps)    # initial mixed-layer virtual potential temperature [K]
-        self.dtheta     = np.zeros(tsteps)    # initial potential temperature jump at h [K]
-        self.dthetav    = np.zeros(tsteps)    # initial virtual potential temperature jump at h [K]
-        self.wtheta     = np.zeros(tsteps)    # surface kinematic heat flux [K m s-1]
-        self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
-        self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
-        self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
-        
-        self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
-        self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
-        self.wq         = np.zeros(tsteps)    # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = np.zeros(tsteps)    # entrainment kinematic moisture flux [kg kg-1 m s-1]
-        self.wqM        = np.zeros(tsteps)    # cumulus mass-flux kinematic moisture flux [kg kg-1 m s-1]
-
-        self.qsat       = np.zeros(tsteps)    # mixed-layer saturated specific humidity [kg kg-1]
-        self.e          = np.zeros(tsteps)    # mixed-layer vapor pressure [Pa]
-        self.esat       = np.zeros(tsteps)    # mixed-layer saturated vapor pressure [Pa]
-
-        self.CO2        = np.zeros(tsteps)    # mixed-layer CO2 [ppm]
-        self.dCO2       = np.zeros(tsteps)    # initial CO2 jump at h [ppm]
-        self.wCO2       = np.zeros(tsteps)    # surface total CO2 flux [mgC m-2 s-1]
-        self.wCO2A      = np.zeros(tsteps)    # surface assimilation CO2 flux [mgC m-2 s-1]
-        self.wCO2R      = np.zeros(tsteps)    # surface respiration CO2 flux [mgC m-2 s-1]
-        self.wCO2e      = np.zeros(tsteps)    # entrainment CO2 flux [mgC m-2 s-1]
-        self.wCO2M      = np.zeros(tsteps)    # CO2 mass flux [mgC m-2 s-1]
-        
-        self.u          = np.zeros(tsteps)    # initial mixed-layer u-wind speed [m s-1]
-        self.du         = np.zeros(tsteps)    # initial u-wind jump at h [m s-1]
-        self.uw         = np.zeros(tsteps)    # surface momentum flux u [m2 s-2]
-        
-        self.v          = np.zeros(tsteps)    # initial mixed-layer v-wind speed [m s-1]
-        self.dv         = np.zeros(tsteps)    # initial v-wind jump at h [m s-1]
-        self.vw         = np.zeros(tsteps)    # surface momentum flux v [m2 s-2]
-
-        # diagnostic meteorological variables
-        self.T2m        = np.zeros(tsteps)    # 2m temperature [K]   
-        self.q2m        = np.zeros(tsteps)    # 2m specific humidity [kg kg-1]
-        self.u2m        = np.zeros(tsteps)    # 2m u-wind [m s-1]    
-        self.v2m        = np.zeros(tsteps)    # 2m v-wind [m s-1]    
-        self.e2m        = np.zeros(tsteps)    # 2m vapor pressure [Pa]
-        self.esat2m     = np.zeros(tsteps)    # 2m saturated vapor pressure [Pa]
-
-        # ground variables
-        self.Tsoil       = np.zeros(tsteps)
-        self.T2          = np.zeros(tsteps)
-        self.Ts          = np.zeros(tsteps)
-        self.wg          = np.zeros(tsteps)
-
-        # surface-layer variables
-        self.thetasurf  = np.zeros(tsteps)    # surface potential temperature [K]
-        self.thetavsurf = np.zeros(tsteps)    # surface virtual potential temperature [K]
-        self.qsurf      = np.zeros(tsteps)    # surface specific humidity [kg kg-1]
-        self.ustar      = np.zeros(tsteps)    # surface friction velocity [m s-1]
-        self.z0m        = np.zeros(tsteps)    # roughness length for momentum [m]
-        self.z0h        = np.zeros(tsteps)    # roughness length for scalars [m]
-        self.Cm         = np.zeros(tsteps)    # drag coefficient for momentum []
-        self.Cs         = np.zeros(tsteps)    # drag coefficient for scalars []
-        self.L          = np.zeros(tsteps)    # Obukhov length [m]
-        self.Rib        = np.zeros(tsteps)    # bulk Richardson number [-]
-
-        # radiation variables
-        self.Swin       = np.zeros(tsteps)    # incoming short wave radiation [W m-2]
-        self.Swout      = np.zeros(tsteps)    # outgoing short wave radiation [W m-2]
-        self.Lwin       = np.zeros(tsteps)    # incoming long wave radiation [W m-2]
-        self.Lwout      = np.zeros(tsteps)    # outgoing long wave radiation [W m-2]
-        self.Q          = np.zeros(tsteps)    # net radiation [W m-2]
-
-        # land surface variables
-        self.ra         = np.zeros(tsteps)    # aerodynamic resistance [s m-1]
-        self.rs         = np.zeros(tsteps)    # surface resistance [s m-1]
-        self.H          = np.zeros(tsteps)    # sensible heat flux [W m-2]
-        self.LE         = np.zeros(tsteps)    # evapotranspiration [W m-2]
-        self.LEliq      = np.zeros(tsteps)    # open water evaporation [W m-2]
-        self.LEveg      = np.zeros(tsteps)    # transpiration [W m-2]
-        self.LEsoil     = np.zeros(tsteps)    # soil evaporation [W m-2]
-        self.LEpot      = np.zeros(tsteps)    # potential evaporation [W m-2]
-        self.LEref      = np.zeros(tsteps)    # reference evaporation at rs = rsmin / LAI [W m-2]
-        self.G          = np.zeros(tsteps)    # ground heat flux [W m-2]
-
-        # Mixed-layer top variables
-        self.zlcl       = np.zeros(tsteps)    # lifting condensation level [m]
-        self.RH_h       = np.zeros(tsteps)    # mixed-layer top relative humidity [-]
-
-        # cumulus variables
-        self.ac         = np.zeros(tsteps)    # cloud core fraction [-]
-        self.M          = np.zeros(tsteps)    # cloud core mass flux [m s-1]
-        self.dz         = np.zeros(tsteps)    # transition layer thickness [m]
-        
-        
-        self.substeps   = np.zeros(tsteps)    # number of additional substep time integrations needed [-]
-
-# class for storing mixed-layer model input data
-class model_input:
-    def __init__(self):
-
-        # # comment not valid
-        # we comment out the initialization, because there is a problem when
-        # inheriting values from another class4gl_input. We also expect
-        # that the user specifies all the required parameters (if not, an error
-        # is raised). 
-
-        # general model variables
-        self.runtime    = None  # duration of model run [s]
-        self.dt         = None  # time step [s]
-
-        # mixed-layer variables
-        self.sw_ml      = None  # mixed-layer model switch
-        self.sw_shearwe = None  # Shear growth ABL switch
-        self.sw_fixft   = None  # Fix the free-troposphere switch
-        self.h          = None  # initial ABL height [m]
-        self.Ps         = None  # surface pressure [Pa]
-        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
-        self.fc         = None  # Coriolis parameter [s-1]
-        
-        self.theta      = None  # initial mixed-layer potential temperature [K]
-        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
-
-        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
-
-        self.dtheta     = None  # initial temperature jump at h [K]
-        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = None  # advection of heat [K s-1]
-        self.beta       = None  # entrainment ratio for virtual heat [-]
-        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
-        
-        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
-        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
-        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
-
-        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = None  # advection of moisture [kg kg-1 s-1]
-        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
-
-        self.CO2        = None  # initial mixed-layer CO2 [ppm]
-        self.dCO2       = None  # initial CO2 jump at h [ppm]
-        self.gammaCO2   = None  # free atmosphere CO2 lapse rate [ppm m-1]
-        self.advCO2     = None  # advection of CO2 [ppm s-1]
-        self.wCO2       = None  # surface kinematic CO2 flux [ppm m s-1]
-        
-        self.sw_wind    = None  # prognostic wind switch
-        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.du         = None  # initial u-wind jump at h [m s-1]
-        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = None  # advection of u-wind [m s-2]
-
-        self.v          = None  # initial mixed-layer v-wind speed [m s-1]
-        self.dv         = None  # initial v-wind jump at h [m s-1]
-        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = None  # advection of v-wind [m s-2]
-
-        # surface layer variables
-        self.sw_sl      = None  # surface layer switch
-        self.ustar      = None  # surface friction velocity [m s-1]
-        self.z0m        = None  # roughness length for momentum [m]
-        self.z0h        = None  # roughness length for scalars [m]
-        self.Cm         = None  # drag coefficient for momentum [-]
-        self.Cs         = None  # drag coefficient for scalars [-]
-        self.L          = None  # Obukhov length [-]
-        self.Rib        = None  # bulk Richardson number [-]
-
-        # radiation parameters
-        self.sw_rad     = None  # radiation switch
-        self.lat        = None  # latitude [deg]
-        self.lon        = None  # longitude [deg]
-        self.doy        = None  # day of the year [-]
-        self.tstart     = None  # time of the day [h UTC]
-        self.cc         = None  # cloud cover fraction [-]
-        self.Q          = None  # net radiation [W m-2] 
-        self.dFz        = None  # cloud top radiative divergence [W m-2] 
-
-        # land surface parameters
-        self.sw_ls      = None  # land surface switch
-        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
-        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = None  # temperature top soil layer [K]
-        self.T2         = None  # temperature deeper soil layer [K]
-        
-        self.a          = None  # Clapp and Hornberger retention curve parameter a
-        self.b          = None  # Clapp and Hornberger retention curve parameter b
-        self.p          = None  # Clapp and Hornberger retention curve parameter p 
-        self.CGsat      = None  # saturated soil conductivity for heat
-        
-        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
-        self.wfc        = None  # volumetric water content field capacity [-]
-        self.wwilt      = None  # volumetric water content wilting point [-]
-        
-        self.C1sat      = None 
-        self.C2ref      = None
-
-        self.c_beta     = None  # Curvature plant water-stress factor (0..1) [-]
-        
-        self.LAI        = None  # leaf area index [-]
-        self.gD         = None  # correction factor transpiration for VPD [-]
-        self.rsmin      = None  # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = None  # surface albedo [-]
-        
-        self.Ts         = None  # initial surface temperature [K]
-        
-        self.cveg       = None  # vegetation fraction [-]
-        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
-        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
-        
-        self.Lambda     = None  # thermal diffusivity skin layer [-]
-
-        # A-Gs parameters
-        self.c3c4       = None  # Plant type ('c3' or 'c4')
-
-        # Cumulus parameters
-        self.sw_cu      = None  # Cumulus parameterization switch
-        self.dz_h       = None  # Transition layer thickness [m]
-        
-# BEGIN -- HW 20171027
-        # self.cala       = None      # soil heat conductivity [W/(K*m)]
-        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
-# END -- HW 20171027
diff --git a/runmodel.py b/runmodel.py
deleted file mode 100644
index fc4fd19..0000000
--- a/runmodel.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Example of how to run the Python code, and access the output
-# This case is identical to the default setup of CLASS (the version with interface) 
-#
-
-from pylab import *
-from model import *
-
-""" 
-Create empty model_input and set up case
-"""
-run1input = model_input()
-
-run1input.dt         = 60.       # time step [s]
-run1input.runtime    = 12*3600    # total run time [s]
-
-# mixed-layer input
-run1input.sw_ml      = True      # mixed-layer model switch
-run1input.sw_shearwe = False     # shear growth mixed-layer switch
-run1input.sw_fixft   = False     # Fix the free-troposphere switch
-run1input.h          = 200.      # initial ABL height [m]
-run1input.Ps         = 101300.   # surface pressure [Pa]
-run1input.divU       = 0.        # horizontal large-scale divergence of wind [s-1]
-run1input.fc         = 1.e-4     # Coriolis parameter [m s-1]
-
-run1input.theta      = 288.      # initial mixed-layer potential temperature [K]
-run1input.dtheta     = 1.        # initial temperature jump at h [K]
-run1input.gammatheta = 0.006     # free atmosphere potential temperature lapse rate [K m-1]
-run1input.advtheta   = 0.        # advection of heat [K s-1]
-run1input.beta       = 0.2       # entrainment ratio for virtual heat [-]
-run1input.wtheta     = 0.1       # surface kinematic heat flux [K m s-1]
-
-run1input.q          = 0.008     # initial mixed-layer specific humidity [kg kg-1]
-run1input.dq         = -0.001    # initial specific humidity jump at h [kg kg-1]
-run1input.gammaq     = 0.        # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-run1input.advq       = 0.        # advection of moisture [kg kg-1 s-1]
-run1input.wq         = 0.1e-3    # surface kinematic moisture flux [kg kg-1 m s-1]
-
-run1input.CO2        = 422.      # initial mixed-layer CO2 [ppm]
-run1input.dCO2       = -44.      # initial CO2 jump at h [ppm]
-run1input.gammaCO2   = 0.        # free atmosphere CO2 lapse rate [ppm m-1]
-run1input.advCO2     = 0.        # advection of CO2 [ppm s-1]
-run1input.wCO2       = 0.        # surface kinematic CO2 flux [ppm m s-1]
-
-run1input.sw_wind    = False     # prognostic wind switch
-run1input.u          = 6.        # initial mixed-layer u-wind speed [m s-1]
-run1input.du         = 4.        # initial u-wind jump at h [m s-1]
-run1input.gammau     = 0.        # free atmosphere u-wind speed lapse rate [s-1]
-run1input.advu       = 0.        # advection of u-wind [m s-2]
-
-run1input.v          = -4.0      # initial mixed-layer u-wind speed [m s-1]
-run1input.dv         = 4.0       # initial u-wind jump at h [m s-1]
-run1input.gammav     = 0.        # free atmosphere v-wind speed lapse rate [s-1]
-run1input.advv       = 0.        # advection of v-wind [m s-2]
-
-run1input.sw_sl      = False     # surface layer switch
-run1input.ustar      = 0.3       # surface friction velocity [m s-1]
-run1input.z0m        = 0.02      # roughness length for momentum [m]
-run1input.z0h        = 0.002     # roughness length for scalars [m]
-
-run1input.sw_rad     = False     # radiation switch
-run1input.lat        = 51.97     # latitude [deg]
-run1input.lon        = -4.93     # longitude [deg]
-run1input.doy        = 268.      # day of the year [-]
-run1input.tstart     = 6.8       # time of the day [h UTC]
-run1input.cc         = 0.0       # cloud cover fraction [-]
-run1input.Q          = 400.      # net radiation [W m-2] 
-run1input.dFz        = 0.        # cloud top radiative divergence [W m-2] 
-
-run1input.sw_ls      = False     # land surface switch
-run1input.ls_type    = 'js'      # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-run1input.wg         = 0.21      # volumetric water content top soil layer [m3 m-3]
-run1input.w2         = 0.21      # volumetric water content deeper soil layer [m3 m-3]
-run1input.cveg       = 0.85      # vegetation fraction [-]
-run1input.Tsoil      = 285.      # temperature top soil layer [K]
-run1input.T2         = 286.      # temperature deeper soil layer [K]
-run1input.a          = 0.219     # Clapp and Hornberger retention curve parameter a
-run1input.b          = 4.90      # Clapp and Hornberger retention curve parameter b
-run1input.p          = 4.        # Clapp and Hornberger retention curve parameter c
-run1input.CGsat      = 3.56e-6   # saturated soil conductivity for heat
-
-run1input.wsat       = 0.472     # saturated volumetric water content ECMWF config [-]
-run1input.wfc        = 0.323     # volumetric water content field capacity [-]
-run1input.wwilt      = 0.171     # volumetric water content wilting point [-]
-
-run1input.C1sat      = 0.132     
-run1input.C2ref      = 1.8
-
-run1input.LAI        = 2.        # leaf area index [-]
-run1input.gD         = 0.0       # correction factor transpiration for VPD [-]
-run1input.rsmin      = 110.      # minimum resistance transpiration [s m-1]
-run1input.rssoilmin  = 50.       # minimun resistance soil evaporation [s m-1]
-run1input.alpha      = 0.25      # surface albedo [-]
-
-run1input.Ts         = 290.      # initial surface temperature [K]
-
-run1input.Wmax       = 0.0002    # thickness of water layer on wet vegetation [m]
-run1input.Wl         = 0.0000    # equivalent water layer depth for wet vegetation [m]
-
-run1input.Lambda     = 5.9       # thermal diffusivity skin layer [-]
-
-run1input.c3c4       = 'c3'      # Plant type ('c3' or 'c4')
-
-run1input.sw_cu      = False     # Cumulus parameterization switch
-run1input.dz_h       = 150.      # Transition layer thickness [m]
-
-"""
-Init and run the model
-"""
-r1 = model(run1input)
-r1.run()
-
-"""
-Plot output
-"""
-figure()
-subplot(131)
-plot(r1.out.t, r1.out.h)
-xlabel('time [h]')
-ylabel('h [m]')
-
-subplot(132)
-plot(r1.out.t, r1.out.theta)
-xlabel('time [h]')
-ylabel('theta [K]')
-
-subplot(133)
-plot(r1.out.t, r1.out.q*1000.)
-xlabel('time [h]')
-ylabel('q [g kg-1]')

From e530f499ae4d84c850de3e3a7f078171faa89dbf Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 21 Aug 2018 22:06:27 +0200
Subject: [PATCH 013/129]  almost initial version

---
 data_ground.py | 393 -------------------------------------------------
 1 file changed, 393 deletions(-)
 delete mode 100644 data_ground.py

diff --git a/data_ground.py b/data_ground.py
deleted file mode 100644
index d4e0b5a..0000000
--- a/data_ground.py
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: vsc42247
-
-Purpose: Set surface conditions for the CLASS boundary-layer model
-"""
-
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-import pynacolada as pcd
-import pandas as pd
-
-def get_class4gl_ground(class_settings,**kwargs):   
-    
-    key = "IGBPDIS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-    
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
-        print('reading soil water saturation from '+input_fn)
-
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wsat'] = input_nc.variables['wsat'][ilon,ilat]
-        input_nc.close()
-
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc"
-        print('reading soil water field capacity from '+input_fn)
-    
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wfc'] = input_nc.variables['wfc'][ilon,ilat]
-        input_nc.close()
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc"
-        print('reading soil wilting point from '+input_fn)
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wwilt'] = input_nc.variables['wwp'][ilon,ilat]
-        input_nc.close()
-        
-    key = "GLEAM"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-        
-        #INPUT_gleam = gleam() 
-        #INPUT_gleam.path = "/kyukon/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/" 
-        
-        gleam_path = "/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/"
-        print('reading soil-water content for "+str(class_settings,datetime.year)+" from '+gleam_path)
-        
-        gleam_files = {}
-        
-        gleam_vars = ['SMroot','SMsurf']
-        
-        for VAR in gleam_vars:
-            gleam_files[VAR] = nc4.Dataset(gleam_path+'/'+str(class_settings.datetime.year)+'/'+VAR+'_'+str(class_settings.datetime.year)+'_GLEAM_v3.1a.nc','r')
-        
-
-        year = class_settings.datetime.year
-        day = class_settings.datetime.day
-        hour = class_settings.datetime.hour
-  
-        ilat = np.where(gleam_files['SMsurf'].variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(gleam_files['SMsurf'].variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        VAR = 'SMsurf'; class_settings.wg = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
-        VAR = 'SMroot'; class_settings.w2 = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
-        
-        for VAR in gleam_vars:
-            gleam_files[VAR].close()
-    
-    key = "MOD44B"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-    
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc"
-        print('initializing vegetation fraction from '+input_fn)
-        var = 'cveg'
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__[var] = input_nc.variables['fv'][ilon,ilat]
-        input_nc.close()
-        
-    key = "DSMW"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-         # Procedure of the thermal properties:
-         # 1. determine soil texture from DSMW
-         # 2. soil type with look-up table (according to DWD/EXTPAR)
-         # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987) 
-         #    with parameter look-up table from Noilhan and Planton (1989). 
-         #    Note: The look-up table is inspired on DWD/COSMO
-                 
-       
-        #preparing for soil thermal properties
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc"
-        
-        print("deriving soil thermal properties for the force-restore methodes from the soil texture file "+ input_fn)
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        DSMW = input_nc.variables['DSMW'][ilat,ilon]
-        
-        
-        #EXTPAR: zfine   = soil_texslo(soil_unit)%tex_fine
-        SP = {}; SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code']
-        for SPKEY in SPKEYS: 
-            SP[SPKEY] = np.array(input_nc.variables[SPKEY][DSMW])
-        input_nc.close()
-        
-        SP['texture'] = (0.5*SP['tex_medium']+1.0*SP['tex_coarse']) /(SP['tex_coarse']+SP['tex_medium']+SP['tex_fine'])
-        
-        if pd.isnull(SP['texture']):
-            print('Warning, texture is invalid> Setting to Ocean')
-            SP['itex'] = 9
-        
-        else:
-            SP['itex'] = int(SP['texture']*100)
-        
-        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-        SP['isoil'] = np.zeros_like(SP['itex'],dtype=np.int)
-        LOOKUP = [
-                  [0 ,7],# fine textured, clay (soil type 7)
-                  [20,6],# medium to fine textured, loamy clay (soil type 6)
-                  [40,5],# medium textured, loam (soil type 5)
-                  [60,4],# coarse to medium textured, sandy loam (soil type 4)
-                  [80,3],# coarse textured, sand (soil type 3)
-                ]
-        for iitex,iisoil in LOOKUP: 
-            SP['isoil'][SP['itex'] >= iitex ] = iisoil 
-        
-        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-        LOOKUP = [
-                  [9001, 1 ], # ice, glacier (soil type 1) 
-                  [9002, 2 ], # rock, lithosols (soil type 2)
-                  [9003, 3 ], # salt, set soiltype to sand (soil type 3)
-                  [9004, 8 ], # histosol, e.g. peat (soil type 8)
-                  [9,    9 ], # undefined (ocean)
-                  [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
-                  [9000, 9 ], # undefined (inland lake)
-                  [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
-                  [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
-                ]
-        # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
-        for icode,iisoil in LOOKUP: 
-            SP['isoil'][SP['code'] == icode] = iisoil 
-        
-        #adopted from data_soil.f90 (COSMO5.0)
-        SP_LOOKUP = { 
-          # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea  
-          # (by index)                                           loam                    loam                                water      ice
-          'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
-          'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
-          'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
-          'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
-          'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
-          'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
-          'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
-          'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
-          'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
-          'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
-          'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
-          'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
-          'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
-          'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
-          'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
-          'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
-          'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
-          'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
-          'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
-          #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
-          'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , np.nan    , np.nan   ,  np.nan  ],
-          #error in table 2 of NP89: values need to be multiplied by e-6
-          'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
-          'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , np.nan    , np.nan   ,  np.nan  ],
-          'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , np.nan    , np.nan   ,  np.nan  ],
-          'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , np.nan    , np.nan   ,  np.nan  ],
-          'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , np.nan    , np.nan   ,  np.nan  ],
-        }
-        
-        for SPKEY in SP_LOOKUP.keys(): 
-            SP[SPKEY] = np.zeros_like(SP['isoil'],dtype=np.float)
-        
-        for i in range(11):
-            SELECT = (SP['isoil'] == i)
-            for SPKEY in SP_LOOKUP.keys(): 
-                SP[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
-        
-        for SPKEY in list(SP_LOOKUP.keys())[-6:]: 
-            var = SPKEY
-            class_settings.__dict__[var] = np.float(SP[SPKEY])
-            
-        # only print the last parameter value in the plot
-        
-        #inputs.append(cp.deepcopy(class_settings))
-        #var = 'cala'
-        #class_settings.__dict__[var] = np.float(SP['cala0'])
-        #valnew = class_settings.__dict__[var]
-        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-        
-        #inputs.append(cp.deepcopy(class_settings))
-        #var = 'crhoc'
-        #class_settings.__dict__[var] = np.float(SP['crhoc'])
-        #valnew = class_settings.__dict__[var]
-        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-        
-    key = "CERES"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
-        
-        CERES_start_date = dt.datetime(2000,3,1)
-        DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-        DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-        print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-            
-        var = 'cc'
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        print(class_settings.lat,class_settings.lon)
-        
-        class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:(idatetime+class_settings.runtime),ilat,ilon])/100.
-   
-        input_nc.close()
-    
-    key = "GIMMS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
-       
-    
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-        print("Reading Leag Area Index from "+input_fn)
-        var = 'LAI'
-        
-        #plt.plot
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-        
-        print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-        tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-        
-        if np.isnan(tarray[idatetime]):
-            print("interpolating GIMMS cveg nan value")
-            
-            mask = np.isnan(tarray)
-            if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-            else:
-                print("Warning. Could not interpolate GIMMS cveg nan value")
-                
-        class_settings.__dict__[var] = tarray[idatetime]
-        
-        input_nc.close()
- 
-    key = "IGBPDIS_ALPHA"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        var = 'alpha'
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-        print("Reading albedo from "+input_fn)
-    
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        
-        landfr = {}
-        for ltype in ['W','B','H','TC']:   
-            landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-        
-        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-        
-        alpha=0.
-        for ltype in landfr.keys():
-            alpha += landfr[ltype]*aweights[ltype]
-        
-        
-        class_settings.__dict__[var] = alpha
-        input_nc.close()        
-        
-        
-    key = "ERAINT_ST"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-        print("Reading soil temperature from "+input_fn)
-        
-        var = 'Tsoil'
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-        
-        
-        class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-        
-        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-        var = 'T2'
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-        
-        
-        class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-        
-        
-        input_nc.close()
-        
-        
-    
-    #inputs.append(cp.deepcopy(class_settings))
-    #var = 'T2'
-    #valold = class_settings.__dict__[var]
-    #
-    #class_settings.__dict__[var] = 305.
-    #class_settings.__dict__['Tsoil'] = 302.
-    #valnew = class_settings.__dict__[var]
-    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-    
-    
-    
-    #inputs.append(cp.deepcopy(class_settings))
-    #
-    #var = 'Lambda'
-    #valold = class_settings.__dict__[var]
-    
-    ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book. 
-    ## I need to ask Chiel.
-    ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-    #
-    #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg'] 
-    #class_settings.__dict__[var] = valnew
-    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-    
-    
-    
-    key = "GLAS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-        print("Reading canopy height for determining roughness length from "+input_fn)
-        var = 'z0m'
-    
-        
-        #plt.plot
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-        
-        lowerlimit = 0.01
-        if testval < lowerlimit:
-            print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-            class_settings.__dict__[var] = lowerlimit
-        else:
-            class_settings.__dict__[var] = testval
-        
-        class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-        
-        
-        input_nc.close()
-        

From 945a6513c28ee51f9884eb7f37abcba5a6556261 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 10:00:52 +0200
Subject: [PATCH 014/129] get the simultions.py working for first test release

---
 class4gl/__init__.py                          |   3 +-
 class4gl/__pycache__/class4gl.cpython-36.pyc  | Bin 28681 -> 28782 bytes
 .../__pycache__/data_global.cpython-36.pyc    | Bin 0 -> 17867 bytes
 .../interface_functions.cpython-36.pyc        | Bin 0 -> 10575 bytes
 .../interface_multi.cpython-36.pyc            | Bin 0 -> 31044 bytes
 class4gl/__pycache__/model.cpython-36.pyc     | Bin 36319 -> 36319 bytes
 class4gl/class4gl.py                          |   9 ++-
 class4gl/data_air.py                          |  17 ++++--
 class4gl/interface_functions.py               |   5 +-
 class4gl/model.py                             |   2 +-
 class4gl/ribtol/__init__.py                   |   2 +-
 .../__pycache__/__init__.cpython-36.pyc       | Bin 0 -> 294 bytes
 .../__pycache__/ribtol_hw.cpython-36.pyc      | Bin 0 -> 3455 bytes
 class4gl/simulations/simulations.py           |  57 +++++++++---------
 14 files changed, 56 insertions(+), 39 deletions(-)
 create mode 100644 class4gl/__pycache__/data_global.cpython-36.pyc
 create mode 100644 class4gl/__pycache__/interface_functions.cpython-36.pyc
 create mode 100644 class4gl/__pycache__/interface_multi.cpython-36.pyc
 create mode 100644 class4gl/ribtol/__pycache__/__init__.cpython-36.pyc
 create mode 100644 class4gl/ribtol/__pycache__/ribtol_hw.cpython-36.pyc

diff --git a/class4gl/__init__.py b/class4gl/__init__.py
index a21583b..f9c2212 100644
--- a/class4gl/__init__.py
+++ b/class4gl/__init__.py
@@ -1,4 +1,5 @@
-from . import model,class4gl,interface_multi,data_air,data_global
+# from . import model,class4gl,interface_multi,data_air,data_global
+from ribtol import *
 
 __version__ = '0.1.0'
 
diff --git a/class4gl/__pycache__/class4gl.cpython-36.pyc b/class4gl/__pycache__/class4gl.cpython-36.pyc
index d84b87586ff096df14c9205c6ba38d2385d9224e..6cf8a9fc010af32a30fb2783f7c29d2b54e753c4 100644
GIT binary patch
delta 5056
zcmaJ_4Qw366~4XOyW2b8*^co~>^RBU#&IrxcAVIO5)vom$N9m;iIbQOa9-c6FPrS$
z9RI>Ao0&H=@4b0HbAI5Ga`}?du_>91m&83M!c9ux?ZARh9nb^tb*CQb*j_dx
zr=l!+PSG_}W0BJ_J!Z!E9ZX>nJFdqI8c|hxoZ5t*pf;%|sjb$lsjbm#%;c$Brm@&V
ziV~o)C3+pT*Xi}tHt3DiHt9>LZPuHiEywMae2d-!i>mZxdjl-Nk~0C8G+WPw^yRFY
z)v#K&1peyauLl0AsdpW#XAML(KDI<(0qdIBQdqZ?)~$qoGi!l<%ekPQg5EOL3cXgE
zZDZYRB}>gHdOK?yVC^#jy@U0z>scoZUC&mrE*R-#t63V_Rcs?$%hth67i_bhZGdey
zmX&V;j8k%TTv+JOSptvZuS`n!Nj@lQ+9_X2I*8bhDd#
zduN-?Te@9m!aHMdvpZ`WuDdyB|2MIrI4!kI#wu7|CbLJ}8Q#C98usRRzye5OHwLg9
zzHSgeQ+TZMtMFw-X%jC+t+iFiu@*qDV&-@$m$A)B(_Xv88P-E=&sjZcyBTk~Xz~^q<;xJ75n2KC
z;E2nYioQE
zs|!GePMgzAeMcRN!$rYLpIgT93wAv`U7vwrTJWD;sM*3MO#xA-)FL
z_J~b&=`w8;wmdWM^1U!DRZ?#?lEEUAv0a8|?BPb5CoJ3K`*1Hj
zSE>wHNqRk6G`Q=OyhXC`+ko#A_`0hA0ZC8j$O(ZTp2Nn
zMYF(saZx=P;bz{MG(j1%9&oK$Q`_|SH)C&mvU3($@aIBby9OrTQG6Pm0^bWF2%z=
z>yw{{UHLr#BWZBdpY`kmtl!uTv9W7oXQUTrHwkCs+Pw+oisgTk~fG~HjdWHgq15e+=RGi<`my2w)C!6`ovgolhQAy
zdiN$}%myQvo|QNG7V-05R=xqYli@-sUz~miFFak#@qF$eJcw{N0!MHWJcJSgO2Q`*
z&LDglp$Xvu1YaSb(kjObAs6<&5B>0+&)~z)r+fz5iq_68t$wuRw!>z~}w#SI?r{
zH^;^EFu9=aV9DXj0Y<=YuicP81;gpk{0TI0b`6-pNtKFBJi70}G6uMekvnD}VbR@R
zJsA_Oj4{(LGPr_C7@n(lK+l~_o}cdL7mz)EHD3ES`w{dgjQXzo5>nj=3+I{*zhY5N4MfxoKTBK&g5@mA9Egm4xtkO(iN6?{ut1D)i`D_GegYJA`98Mz*0ER*OmA_gBa<#gf-kN#FXL
z(4h=)52X{n<)6f*RA)&JGNo!2CLI2CoS`!3E00~wvN8f$15TZ*SYmkim@*-r9$vAW
z9DL5pWg>gh%%09b-RD{Jh05E*Gr`u^aUb&I<4AP@T=lVkh;t)1G92;m3x(l-M|cu}
zOiQwSfL*%CkB~|MfNxXjtM?C^)f?k9K&
zEvju6tBy4*ZIyvzOM;Q3FvEw$@zHhgfSnm#rCbu<9$l8EcLB~}xqzf-c<{iOKLYQX
z1Wk$eMvrvj1SA|vE|^pB(B_}wKL0{M4gdERJg5W$
z>qJOjd-z&Q$!I3SoGffZCWGHY0peXC=e#LChW(d?!4w$wnyM;VTnqA##l8AAB`;po
z%e6f)Aw%$7)I%e^dEO+3PPBH={c2e1T+b{n*qJO4fg}6HLnk^1&->z2qb|NtWLvOw
zUMLkN`cSAK3Z-yWsa!dss)uFR$iOR|8e@u5ZaPN!jlTn&{5uHWLwF8As#zP}Pv6J>
z%Ltf#(lxp|xMS}@|E=+WI5gG>@2A|@4U3;~6<{70m9d+YO69$=dIhYZj%Ss2adLdu
z(nXgU#pt%P_S53W6je!p-Igl#
z$_k}6$d`$`m}fNL=F0%!C&K(`b#q6C51|gc9tLC-9>(w{SbVdxiVg(F&zknDcp^S2
z05q@nd=9iJ_Jpjo0P}&zb1^LroVdhVG
zS{#Ugng}(5W+{c^_@8koaOy7uSrSD&GocTn472Y(pk{@DTv>pKE9&79Q0xF-4l-S1
zceDhgbg;MoBn>O#P_A`dE6RSY-8p*h2PhCmO+aZ^;!3p&qvA{Y(#m&prn(JZZ+jqc
z_d`p+kQU@w4R?$pbi%BRQ$fgN;RiVXDhv$qyf|X_uER572drn1l_fO%a|nxM`5lZ6
zh>P}yGG*)S$by9%mL5QQ8`9DLF9UBwX(&n)K27QMAg;d^VF-Z=&|aj55hyiNwIy9f
zuz$4>QHm6We5w`=0dENSWlC?z-QtOS$KqJS!#)$g&KJgO<~F9BA40K6lzF9zBDYAv
z?5B7OSPC^o4RRa-Tg7l$9BG9&`ppV#(mbSyZx*`B@UqGm9q!rIxU8ChS5^jpW13vA
za*IZOwj;w6P6>V=qF+HA`u|_?EzyBuoKY={xMf_c2^nY&9|59YQ1pjt%sakQ&(~t)NFfsADSSB~7
vKp>|i3oP)EIb%s}Yu;fc+uR0k#b)^58|G;m-rpb{5PIp%)=+jXEd

delta 5086
zcmaJ_4Qw366}~<1{rdci?buF8&T;-OfBqqHNeYRRnEVjONgO966VhY7yS}Wm_rvVk
zu~U^x!O)^Ys&=Rf6|Gv-0!>vada9^E0#$(8LO`Nw3tbA0N~7{4LTCXs&_drg>)4J}
z^(=q$X5O2b_ujnsX70|PRqi~iv|d$F5m!TBJj@%Ez#D-XpIV~3>EGG&jn<7tLovd3
zghgZDq)L2Gsi_vZ@H&#q2jcg$;v(i{a%d9dL(yrAA?+c_lO5axw21|D{z%Uz=`gjz7y1
zT#f~1Kmy$;!3O$wf&@{8M@yfC&MQh%ydAMt#38YqfR!8O;7BHIo1>;ZdkL(4q;yNP
zsjRw=NH4g@VX9$`?&tO5q4IX6LA+9aL}8*UkyNe}HzbxRbHvF+eNz)wZw531<`S^r
zfWzmBM-pr1Es)(z4-0W8YvovsJ5JuRiRAOer-|!n!5tOzlqF)QqHXI^ET9s2(RA9d
zbT^&$!XpJkbIp{>r5P}iFM&weF>`#j4KVVq(yJAHN@q7@RFZKxEK3Fb`J0KJHOmX>
zwyE)VONXmI3MR02A5TGSi#Sr7YV0P`3t6t2b$AbG)g3RGe5v?e?fh$@lcx?@w#l~=
z$+5hrlERu)l0^y{ck(0dtSxyj$$e0|SUV9U({HI?9+n#}6OYuV)(>Ez{eT028wgTy
zz6EqQ07^)k_%2ZJ7vBxoBWfBNx}g$_jcZw3y3h-8(=lDC8Q%x7>G(nDc$IcuGtyJ?
z%SxGs^D)xuK=RR|+@Kqi`T*1C6B{5OT*rByPi_r1RP-Wx-X6*3951XD3TDpm)rEOp
zn48)Bs7c1iqueORj#1QSoN4u_W{~%AZeCd2(XzDea&`G8nsuG{Rm&qvToe{IEW4cT
zm1j&hUEp~f$r)ubAp*e@&EloLD`JxjzOMA#!q+QmS=2dZ(b#dv3mG_7CLV8JRWU|e
z@Nt5H6ouyp?K=oJbhJ?h{TwafX
z+uzyy%lWszvC#{=M@?57<#pm>$H8irurj$kH`7+mFvs{-p>-~6x{4-vQCo8@cf>F=
z)RP~+Mm*5Dt%AjDB!X*YO}GHHSvr2HXzF0rG$X;AQ{^
zKsW9HT)>wB4S-t#zKKLp*5Q<7Ec+pe;C$;d`tVapUqCJ;=T1%Ir!eW1xOi2vT*k~~
zcrIGgUu3Ff=XJ^vo-eQ%MW*ipPZ?lUdOkO5ayOkH95!{=;d^NrPc4kQC%m!|S7%Y0
zsoB;Eldr-CC&7+7E-y%@NVBD#(nJ{R1q+rm$t!cLoL8pjdmP`+W8j7?C#TV{mK*n7
zfE@P1qnbTpI=qECzC+**@7mWP
z%V80#MGaH8vYPF9kx6P+q2LCI{jS76SEybjEBylM_c3zIs29)J`9TfwsZpWzLq>$R
z_pW@FWF`ao?8d2xzC?7hNRBG6>9{oOS#ituP3w`)DMc33RL~2qZfDV)mNm1cmP4i)
z7FA)H@~B-O$FH$1$JO)M!iLl0;`Yr&?8lKue2u@*1T~Y}UD$X@w*!|+@nLkQuQA+U
zd>+97h5zLX^6$`as%&}(IdEb*xhr*9B7TiV{S0^%)M|hXstBm!5g=-@UrH&({ELet
zI{8fmlv7&9PcRU2M+$U3{t&W$9l&U8RO6O5Xq!}ej~_L;$sdEj&j2R@-GCv
zoW&4b4#JB?EMp7=FN&LczpFegs&+LldlB*oI0{PXFT|Z!ja$Z#c}lRth?^?y-t|h^
zo*RkTmrD4j@oh|%_qyCvx>vPwLwWv9aJm6g4)LEoVJb)?wmo%2isO3^D7tuY@BAyF
z!*t|(3H_+4A4}8Qg6rmErC9$&aNaWzk+F>`Eo0?U-2FxTcwpUnylwn5gfjd!fYefY
zSL*UQ#^mAt6%=xu;yo_r?QdRn79+Ftm@`w4Ntd7uoD=%~;o>?>5BBw?l)h9Yuf+T+
zKs5l*GWjg!*MaH<90tgcMAFKV!~YCQYV$Wx(+Wrm=fRT%hv?r~NDx)yZBZ>6iLMd7
z2bz>*Y3M*rFx*cZzEgbZ;7WQGpFX%mc|p8%aBeD&VemYTUaE4OG<-}t0r6S2-?+%I}n9HKLV;$El%-B_W=O4sv6Q#q8x1S=4
zOp}c|`?Mi8W9qcR7l^SM3I~axuPzq?rR0sOYO#=(#y18u+L$cTshF&Y{0~^`0l<#{
zrwKe&x9KVO5c;x`JWN!oimBT-ZR_8JtdozR7}&b~1M&9Y;^3zNQKLWEKTHcCgz@S2
zT@sPX(*rM`aSEDldLg1Z+Px6HXYvmJDM`xu$HLpRY*xF0ti6b_j{)xoM2k_k`zfMX
zgkBAW_85-<=LezRMbs)*kYF{#7k+mv{dN_b%_M~Pctp#d^%ClA9DGhGi)Fcp`K|7?~{
zDTgEQqBnD+^s|QV6{2|vCS4vO9I^jSC{W`kWk6Y|#Fa{whD%RozNBv0O6;waINPX|
zU-xDd@eeTePk_8X;_|jh>-5tAzl%ou_^=qaJ6B>uTHy&aLH=vVUk#Y0$Zu(Eop{+^
zU0ee0wE(;nXzF&**MN@ve;s%n7Rn?~_;fF(?f_g5*a_$c^Z@z*vg$)S-`?`H8#wsm%9VzYmVPqMkeN}^_5C-0#vRhc$gQVB3(r1Xaf
z6_t8XMGeXk^3{jnendb%_{?u2h=4`>y?}T=x3tKA0SRdoZWn$Ka6f@pKBzgCz9pX<
zvNG~C{GdJwfmIL)7qpzAIr5fg;W3SKZJY(iIR6$zB7m2rod1PK{)*#;MsgOt*plF$
z!?i{gk;n%DQ>iMWRK~dsE@{k+!=AF+i(Zq>8zZ*4p_%48IRZ73P~@M870mwv{eZ8W

diff --git a/class4gl/__pycache__/data_global.cpython-36.pyc b/class4gl/__pycache__/data_global.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99b6987d3bcfe285420dc335e739d39ab2fecdce
GIT binary patch
literal 17867
zcmcJ14SXC|b??mV_iFWN*_P#xOk~HljIA%*aUvY86j^qh*s`&dI9WnBtG%yLTex|g^=V0C`~g!353%6CD8Cq^HSdX
zpL=IkE7?g5q^sZDxu55rbM86k-h1vH?X9nme)@L%&71#5lAe=letyIU@tFS^0h1CE
zlbMp26AFL(5`O*;B!c`MN`(14l8EwmU80V^V~H4l*C*=vyCKoQ-;Iez{%%S%X-&Ci
z=41XlWm!5Ru>cF+Cb6K_vgAvwK{~|3NQZg4l__IVXC!qUkc$o*TFTOxS}dsJvzj_u
zoKw{UYHv^f{@(tcy=q@i?}2D^Fg0sU7L9)O8m+*L?38-CIBRK!8I7KtHA+QO>sL!g
zaW2a=Q%&bnrl}gJkS!pYDH^JdRDr1}daUW9S(?-gO-&aIENf+p1yh|hfdOhptw~KC
zJ~lKqrcP(m8rzdkEoz23U1VB58ol03=~{nO#V=zNr&X4+Qb|2uoJi%>>~yJUShZQp
z^*BcR^lBw9oy-83ynQn1S-zzC_yXP>{pfjspB}M^p
zL1PudQGr;ttf^T`ozGg6G;o=0UgP+5si!rypkXkqqRNV@HJQZ-W)0KoQisykY$~5$
z{3KWq?<5@JoIY<*oHd50XNr<`=RA^RqTHCGWQ&!fRWfUtGbRO*lt6W$s
zY3c$9vn!co*|e2R?pEg~v*}3{*cjT(Y}ODMVknqeDrO5};JH+|&AmiBq~jVv#vbs92*}!e$|z|dkj*#2DE0Zm-BrmQGNwrn&K!7b60nSxYr|LkSH`HCaW+mO0Ne?x*
zH-VSS5K=z$P9wM*Oms2zw1*TyNGs_pA9fcgZ_cHPcO@FQQnf74ozZCY94wqPT7K_k
zJ5y$wM!eHh_mHx_+V8TR1xU)p)y!*iTK?jzPirO_j83ytc7h;lI%TB^MepZBWDt)z
zfIyQHGR!l~a>BmiO&XHA{&0E_EJ>{5?f>SPKHImgZHbT*$d)RM5Ts!9@@
zoX)s5VYfPg0V@`uj=j9Sx-*TTg<7O%4MT%M>>#tVLx@1J0ChLHe4<7QhDxRJ=~Bl@
zzs=Mf>@*NdXN$9@I;~9?jm0ii9Zl2ZML&AeGmOW)2Z1H!WF{}kwv<$CACsmchH6VW
zCFiq!OR_9k{+!oDq01nP4Pam{dol!td1<8+K9&GD0bI{|LL9lweB#qs5}g428$12$T;
z4%1XIVi-lk3225nwH~7)O7q)+M?O%N7q_D<0zSeu^xmtbxfO>)stKs?v;!!$b0E%H>Oadd$KsK
zb?X@Z?m08PudmO2?K{wI7Bkj-%Fwz!GrJtAF_T@TMJJquc4uL92LagJgg}yGa#)V}
zVp3FTkk`pUDJTc!s1J`^t|z&Np%!h@RGogr_aL4Vc+7(c7P@RXCoM^bq_=cb6kD<6
z9Em{`Z(oLnR<<#KS^3=<=$Ytg5+R6=jWh;Q9_kBFdW7>{`^
z0*f!-ZiAHNfIbnk1StnOqzQO(LKhBG3>pBbMZV8Xp^Zp$+4PO3Nh0azbz>rK5|y^}
zmX@293IPV_jB*2%XGY=8sM99~fXWk%2DHS$2)F=O;o;Pg
zfV*V{!X%17Qrbv%!1Hp`YK+pv2>Q{xYHxmq1o1Z~P5I0g8x|VY=0&7ofe_nHF&Ycl
z2jVtFiRT~W)gXJF9
zP_(EZv!iA+>b$1HCa9@A%}R?jGf=7M1Cs`yiE5x>I!6oO!mFWo%o!FI6EJ9e#k`Z-
zBz4b#ipcVMk7uN;-RFVRtaO){p;mS>?k?V_Cz?w0W>Ga~W;Ly>v92nVQG;Zp+%m@L
z0s_PbPFY#$SDmo8+=S6ik>xHHIu)Y}O`J$|{pkca%4FB)#<5QDGox5C0;tqkZ>+&f
zh<_lR$0o-hbK&^2g-j8)#xjgLN-GP76XUb9J2x8}sNHS~xE_UE)IH^?f?cIB#yT{a
z!((nkAcb3$s2o*-Fuh^K!v3JF%B}t=3~?(Aa*NXB8DG*fqWc`FdewNvkRU>?!(+Cg
z$X3YyR>&O5tN{wK2{A=Xt;|>PGo`|X+Lqg-O7M)lVA}zUYWtVuDXDnK_Q#|%vhfK!
zUzSF(&D<2cuOeU-+Q=
z-XPhb4cIMXlK%zzhaxhxhMI=ZZ^RC9l|`B_3P1zpK>a1CRCoiF3Nh3tD61f1hFLB{
z5Av!s%G)Fs*@&=0>ghs3&Bj$Vn^vWpy_V~xN*zoO^&XuHoRN#~a8q?tl9>UYBQu%P
zk{tmjKMF2kx#QtK(`N^md_bxMX8KUWe?~SRLn%=YGr{RoSc@Bz*_tUIVEmfbh%qdp
z5QBLEm+HAOBgmjnaH+8F{CWh5&nc}!llU^}@HiLWa>p@jW=N-&cXg&(%&3@6wStLV
zk=jwN>!2L2aUEr)qf3XR(QnXrBf@gWNwP9zc`~$6>Qcuv3qFBmHQm!&ZaTVS8dc`W
z^i)-s>)?$=nfEhnLvd?mouh*ZX?r-17s)-GbhD0GnY)wrC30~GK@LU1mXpT+Vk
zz*kY7F+QO=nBV5HH8ZwSJ!V_v)r;KqSy=?SgwO4VHYk#r9^~wO4!fkwL}mm
z5X`|lwQzwY$EB!#Hy-n91d=c8Bi;7PeoTe!atr?a$O&RvT&J|)-7ZJ{>wNukztRrv
z529Wy7{fcJM3tDlQ*kHHnk7)DGCE4@B$+e6GeYY)T4*MdPBfW>2W2)-@mMlBgKfN9
z5(2-9>15JSffQdB8G9(k*-9wam?&Cj8hrUg(=#pUOfoTHA82R|w}->g9elN&MrQR#
z%e6r~=Gzb~@3ELdJ1p$3u)o5riFuRuRW$#xdhEU$vAb$!Eo=>IWoy|www|@I4R^J(
z_Kd-c`d*rH;xO&96fUN$vuaTj&&bC
zH8eba^u*|xV8-0uUSdW0s?5v3dPc5a#EwkPO_(~Z$Gj1YPc|Z%U51B
z#42v{X38RDCNH|_$f=<{M@Pp;P8~hoJ+YXaOBu;Ry4$q!y~(|k#aSc2m|RF1Mrx7g
z?ea1SORDsG^wj48t?8+6bx(a|?RTWfN~c$Mgsc_)AYH!VqPtnh8=OHOXu(kMq2Uv8pcz#EV)lRV1F|BySX&DHT$x-^3dyf6g_zbRAyX@pi&YxTQZ+hX8ojWi
z=w>W2`A$5e
z8Fs(!J}EGo{agr(U~)SchY^%_;Al|nY2ZFLu^J?2zLv+KlsN2Ub8wJgv(#A+mveT)
zNEwSx2q%n2njFBiv^2Dg8^bQ%bQEUse12kPkOS2~j}rGh7!itXM&N|u+bodF#qm?j
z^DJEBRj66E^A)>FjqsSX1(NoV`iR}L!W)|#d9vNx9;3iP8lt@?-Yn7a@N
zhaC2sw3lxoca;U++O+%hi5(~NkBQx7VDxY~a$4Zqua-9okB)FSWeePs>dy6(Z)9C9Mavbd{t=>y{3D#E9z-zy
zpleAscUy4s5iKYYzsb*!ycF)7=A+aZFisNE
z!l9@sxihfLC5EEgF;|2+QwTLAGskvg7CHz?=
zoDc$DM2NZdgHU~(kgYZft+jV2l1$5_aL&4ewCD<;KxbKSqrl`vl`z~YEpWHA%MBFw
zwZIr|!U@+VxLaFc9Q|^W+=_S`o^tJNf)&>@ye_!=tU5ETP{BV!zYs$b{sxGORFM~M
z!)X|-C&YgNs^^0r76JxE@WTfy+dhaL.ABMm~WlqtV)gB`rtR|!?ZB*0MFNMXBM
z>O*M>r3#foZ6k%4fC9A&RLJk@Cxw9L7Kc)}BNV`SQkyG?fzg-belBQyVxz-yxb;){
zH0&3jP-sXo_N9E*;=YB}tVsi^vURMm$;aYAIKXmk?DO4MZv)p0YpC4Vzz2Qvja
zqgy?K^|kQL>uKnyjf6phhfHbkHIN^SW-iL%!&u%>YZh0uPjlmm=G*aX#lx-Q1I-u`
zKl?81NIMM)*oBkqC7&#vX)b&KC4njBi~^SXSdhmKd0r~rUkNUB*g*_kJJ`$HGH5(m
zDhdh4%pjN>1mk5EBYQ)Kov@;jCE2(iPTu+{$++M0b88Y{4cz7gU7Le9wQa^b+{aJdxVP?;1f)q$ICq$YJ7Qq*S%hCkDi+=bqo7A
zUw4DAg3YB|G0L!BJj!MxObj(-N;+rHx$g$!4pCwc8PQ3YT+h*hVh}
zxSYqiRvHiPRu}2D{EotJG%5wI&NZ^rShx|gvx!U0v*@Wtt{_R9dHy||vdx?-3Y*!M
zjZ$tMJeioeR+c533zwsvigwM$eY{NN?I>(!m-2QU)ppggWb>JZ!jr&cD{uK>7@PIv
z65jk$Jib*!&X{cNyjn88K3bOWW8%jq8Ox|Zfh({yUg1(Lu&wo%PNf^!Ey6k
zE#Q5id_@@`g+=es4Bt%u3eBj2;p%{(V_8FLYSd_osI{yqD{HORm@3Q~jj2MY(U>-_
zG0)X#%nG<36`<(1q#rFq0tz_Bxkck;H`u_K${5GWCf#nfn<$q!h6@(LCecU*iY@L)
z1|qb9n_Mq?XYsW9Y>}?8
z_^iPLOc1uB-@wd`rzu453xCIJK7L^-5^b2<--RfIh6}B35jtL52b%32lT|sB^R)Xv
zN(uur?|{wkc|~mq8lM7>d!Y@8ou>^gRSoD{FV>Tu{X`nHxpP2a(+WvPS?w6t%mExq
zbq=V?B_NTMiPGOzqQxBW{Thkp@M`7&ktVsMIgIz`(ZKf7NU?p8sX87uw@Pe3&%2!G
zQHVTFb%@pEt#0SmIKW#`2)nV7OFTb(6|+EnZX<7Z1+PV6GoQt-yi=*HMQoG3)?Uk6
zraV6WhW%C6+3W1JQvpEQY_DCC8!<0c)-$?c0@@+1H+?pSJMJbFJS3nA{f)pb5DpEMGN(zw%OWncqp+F9!OJ%GPj;JiNlvxfmay%%+o+bu*$deKc5o$6dHS_@
z`YKqlp_RO3S64RFSk9>NZt!y${6y?Le#-1n&D@4KctO%X#I*uxq{5UsqZAt02q3qx
z!!&|59Cl&62A6p)%w+^waQanxdsSG#t_ml_xmU--=1MyoWJd)4RD;Hd^t7^({eTVX
zxSGI@BD|K>BfO4vAw0&8BOGPD2;)=--hxdFyX;MvBRa5I*<9ITZ?QMp?e^v=KP0Jz
zov=6CTlgxxZpMZ^J;{dA_D$?6gr_*1F*c6)An);do^u0_pXNQ@NTD97(AAa@yNOE^
zh0T>q*p2ojb~{Ty6EgXk+z(I9!xWvXP3nLMmkxom%iI0<@^FngQHT%N4+rr*#
zx40|M^H)6=DG_9w*vEU?#I_N@m#{n`aAbk(Jq7nawI}wx-S7*zdCrb}M^3dq+(hg0{qN
zW4E(+vUjmNR+aoS`xkbO{U7!`dtp_{d)OKFUiLore)fS?B|l_8V*kosWItvSppMA*1no2%`zmKv%WglaEYf3(j7Hw!D{_bIa#y(y{
z>7TQE*;d*5`xM*49;zuJ?tGg45B4zo
z4EvvmEn`h3e}&qgMakz7tCrBX^0EDE_BZS-`#k#sJL}~M9K96#B721WFZL+=TQ7&+
ztLl7-eTjV;{XE9L?A2OT<16fO_5^#9{crZfE7W+3eU*KUeVzRs``Rni_y+rX_B8tk
z@bu~B8tj?7HdHQU4*Mqi7W+2)N9-v&pxfUe%Z6RVR@Y)S8b7c%u&c2{L2O~%?toP|
zM(e?q7WV>H)m=fmw$9zJ^7SsQ?yxg}9yiXs4{Oclnfn&baL9L5nparWDvS$S!=~!|
zRo!>Mb^|Wp*R0cpJ?Ht=dL9O3LHNp7th~3nSiJ!2;@TF>bli$oHd-y<-lg_Nc4P|P
zQsUFXfVF`=%l)H$m2I#`+pxaA4C`y+60NT<<*>H_13*T28E9^rc^EV|&D?|a^;5j&
z!>B3BYp|)>Gji9ve4su(jIk!EQnMmo%%f!AR<*$yZ^
zk9YcwEUFrL3Z^K?At*PyK3MU^hE>FIh!5WsCH!>W=lF2or=uEv+XnTww%&K-yXD*B
zls)NY-!!PdeD9CHc6{PwJmII4I!E5+NaZ!;t(&Fmu5`IhTd;5rm53seOc&uTb>q_-
z%g#f$%*-8lTlU5nRpFD9xM@F1pa&xN=b3e>g|SBaSc277`&B>^bu4mg_~i
zo980;Y&_9Suth&ED7WlIw@l#UmL(cJRJ@F)DlTpgSYAYj0?HqMez4qj0a8^O9QcU=
zP58^B0zJw&$I$#79h@ZsSrZ4OhsCAgI=2dCjhENfLZvJY(#0L^$(egDL;;-=(Pjcuh!U@fq*YKqp|3HuAMgObMJ^jpg>~CJBKXCWI{@J6~Jso!i&GXWZoKl@xbDB9Vb241KOfhxjVW(<*Sasq^^cXVia!0PPsQ~w
z{(L8-_ZxAY#I5x{04%mFoqg;hfb-RkZ~oica{m(7H+Fw}Tk!24kL!Q@LHd2hllaei
zt^PKy-}9kI)cXwp`~|`LIU+B;E%#aL{ddImhqn-Be-hVU0DiYW{}C_eJ`auiJ;VSR
zT#}??R}JyVnZTfkNO$ZNkG{qTA~N{kr6SVw{S!m_;<@d;
z;ZNk_`ZXtCZZn^ri0i4o>w9+HI~>>d3~ze=dzZI+kzH~9<=ekFAKDy=>mBoBqpRzIWvB2lT$5WgmIyqxTNzzaGB#o)-pB4d_2&kA7fwYjHs5j33!MsB=5;
zt$l;SBK$dWc+u^BgZe4Xg&t3_5qbIU1If#;`{{uG&buGld|mK~0sYPgr-yF)2_hq(
zzwP11#~&QfD-BHxKGaV^l90bcdjQ
zP^S?+^4;}=IyWY*%AkG?&+-krWARm&;S$D6RDLY(!oipdONYv$)xX8{CmyHYue?@>
zaC|a9@MHs`B}Vy^o{&H?MRW8eB;m=pK)&=uT>tUpb9X%StDg<%VTe0{rkZWe*UQey?3PbM=$n0>5&7-ye|n{Jh#9yc;f@L
z#f&54!@J=lxa#7_B}Yyn(MyTIa35|=7`KB_#vf4dpD1`21$Q7Qx1VRptDhpDqZ{Iw
zm}y2ZGzu~l5YI8s^J7>#kVU+DE?XPV{t~y)v%f&#sry+^-OqaJe%4d>v!1%2b5lp3N7tBz8@7?w4oXu4~g3{J}|V}P2Upq4>%(Zq*eP6T%l3iv{Y
zWsLol?@#BmB?q4`mh!2zhGjtk!(pTfxDZbl@Ui0J@O_0;0iTYU^osI+aFbz5bCePz
z)C;ruyc3wGe*nLSuqrbdZ<`=4ocfRek=UoOi2yH4#CS~?jIltCePbiX62UTG
zgCxRbb9Oq3DLoOmVd&V=Lyml7qM1+bxVwo1c3j}W|13!OkB*PrmV{7o8c71m^&?{_quD+Y89SMDYlmqecf-)Bu|)l$q46O?mZRv%
z&o$`*Nxd=fc0
zI8|CvV>iMWJ~Tk7AE(@M%-@1f59k}nE<8PQv>}G_FmiA@j;Fy#SA^OW6`vn%3AD@G
z@pm&)?Y>R&CSRMplWOBQRuxCG@;0<=m0SE&U&Y4()V7s!YH{%wfLDWjInLMD;ly3w
z0vvr||8k2alx*jiuR{r?mg`Y1KOnbp$V79Cyo>7kwjg3(m^HG5DtZ7(XQxveiyED
ztuX>vP6#$*_|V~fj<1m3=LB#?q_k+9L5XpU0{R-w36^k`KV=&9L5vd=H|Y&BTinhW
zQOfqg3>l4-cbo#+s5x~LDKncME*3Ib-3f|+=^*SN(Q_d}o$x{N>C3=ns9~xI{LmNt
SL8@2$`Tf4I()9b%mj4fj^2mz-

literal 0
HcmV?d00001

diff --git a/class4gl/__pycache__/interface_functions.cpython-36.pyc b/class4gl/__pycache__/interface_functions.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2d3b80b04e14243152534b07def68ee19a067d8
GIT binary patch
literal 10575
zcmd5?TZ|l6TCQ7HS66q>{cd~Q9>4Wn`Zkks
z0ud|GqdIl!T>tZ*|NNKl%=5*f?f+5yjlX-KDF3XC{0!u8;|Xu5iXs%DHWWwYx8~^l
zHk=HSS*0K1Vb8>iV4ZE3l@+$S!8--@kDXNN_^Txb!lo*E^*JK(K%}HmnIps_>
zC!C4qv@^}+W@F|-I_s=6%cIUYbNud8G4IUt_ky#)??q>k-%HLC-r2^P=2_<~k7YH^
zHJ6=bRr!h{a>9OJ5w?5&PunlUksnsKgK!S{ri#P=kBU&Qy+v7YC}
zoR~+?r^JF-d|z=ci6wCc@2ACCaSrcignD0DEqz1`-CR`@E8K6jq#K6FXTpZ-wmKVY
z`=Kk>L^Z0e?YMrlw)0M~zP{dj-CAF}d+qL87;H!HR;9ak=d~~1zrR*%RKxK4PGfD=
zNXGgfmAc>BkCHr%RM}|+Th&Hl^(&LizF&`MTJ;JNvl&KJ{+xfU?z>fa-`#0qa#!35
z8i8z9TQwrN|I?AVjVCN4ahdgwCRCweiMr5*fwv*@!W3CBBO@%4Ly0MDEWVITNVgV9
z5mxGvE2~i;YZ`{3AHC_hgD0Fo605ONfzTj@kfFD}`4Qw!`RKp!Th)?mZ>U78xAAF&
zZev@Hp~0UhauTnP-$=Q+8#LXNo9~3R>({QWrLXHR4msK9%#fLNtY2*gW
zzz@r2hp)s*8HC(#?O&x}qobzivcqDW4Zv?efjhB=d-hQ)n
z%EElN4K`lHc%g+vQ48vnYG|jjGHrW|mBc|_8ErUG8AGKA?E|9J*kn$Csf(zTMI^{C
zrLQOO83*y5N@W#oK8??5D;(Z*f-LQjUv0XTN@78^WAdq73z#hkit<8JnINXLuCT%N{~x=>S7$
z473<+ZR0!ND=&|Ty$8)`Wzb{e1OV{buoc{nk==qqh8=qzumqW=i>Ze{>zxZ*c}t+#Pa(}S`!x+LD`|&|JqP6;tw8D
z`t65GG>3JMgR|q_yf{y`61mUj!T5dHOdN`p%|x&J+m3O|eePDG`&GZAU%h%YQ98x)
z*8cXkE6WGfW}}>_?{?l?tF~HUsx$9=6?(Gji-Rp$7d!4yXMSye>tO9#`Fi=<)$Je*
zuU=b!?!~ROt-8M^QB%4qy)f|cqESm%-O@^D7A7eyz5T`4Un@6)s;F$&8}8+2I`%WG
zDh|0yAQuFF1QM&ZXOP5}M>C*kurOuA>{hKr2
zz4ecGI+-$a&aBpDrP@mJ^oCh&1c5)$Or)aenBK<|E+g5&VSF^##leJ_X>u;YA@y-I
ze^+~KkWTH53iGA;EqMVwC|h1n)b@yuAocLEz0s(LQ7ve;Uar#cG?%G#F7CKdB{e@4
z-oOng-)bX#xjby2WOiK;3jO^II?2nFkgAhcDET=`o<#zceUfEF!#%L{ZI*tv#*HskUnA
z*L4f;8Op0mx~-bXEop{oXXZ3pwY52I2DM`va_SYsgt{SJ}Cdm(b_lz{U`#X(YBW0-4kqBTFMv7eb9RPw{lJI)|#f
z5$gp--iQo#8BhWz!}QOhHXCa&%lwTm)3eeZY^6*uC*XciTh>v=W@l=xPf>-$y<7|5
zAs01-hN`!d%>4(Ouijm?NdZK=i9s7Fe*xqrW<6v<&{`s~NLVZAnPj5s7Nny14>6K_lZJzvy6r}_-9fJtyUIrWJ7^h_{{bIRD`0mfv@z^Z
zXR<%l&CHR9-2xPBS<|&fbO&4U6w01K8T
z`%v*GRE$pXQaAgy;%i@5;w)QBMgC{3d$~HsRF!r~jAOioE^NLiz;U$j!0J+NLwWo|
zST5UBy?kt7J+N@7gB2EIu&Ab2Ao}A>`hMI-+ahclWCGGTwb{w7eSvXN$)eR5Ai^{(bpsU(4Pru)m}W5K2~
zbc)9covCA=+{Uq@-SlKQS)v6&rBm3h%dl@V60;UG_M84m`F?~+{}@k5hCxvmpl=qS
znF{K%3Z2A%8oo$N@t?Mk*6W?wJ{9HBQ@07@!YXDpBhO+)d5#h?k*R-7^Nr9j^ooH&
zD1U&5gDt4YqNEbqmBCL(?lzwA9+H8|I~kEqelj8zd8k4NEdqW8Km~{UQ0NmRe97Px
zf4)#Pl_ZWmG$xs2r(RT)7Vsj6E2TK$uV@*hd|@2a&QhP10}_TBg$@G!=uOWZJlylM
z<4O)j{+imeT6)qvPlzdrI-YDB8K|05V@eN)eJ>MUp?ab2!Ka8a@2L@~8I%koPdM0V
zGzwMbQ3%tIJask{_~O`8+vJ0f6F~u}q$8KI@SBvIqhy{Eb{NP_lnY4u%;qTY1hXm7
z`XQdsMnb_S$@Rh$Ska3=`%0YNBy-FK(i#KfsCrn(U={S$TT?%R&ZG+<5tc*RM~F>y
z8N54k5Euu@lBGiVkI-!>9gCloj$XTC7{Fk^JoG026ZECrR`&iz
zoa^sNhHAMF?H;~3_~?(qCCBTJ?U}{90zYxrmF~%19a|oqjN5oV7e2^v$1(9G=JG)!-kbsn9@Dc#-!VPQ1;ZsV8%fXp
z1W$UBes=I3tJ$B@M6?Fwe}~)+B$ZSJPS>egc8J_X8NfCJ&3dSg>QyT5onY3!J)H`H
zvqsb@bL#VI(tn{<_BAPBJaz8$)&7iuJQm}BCt5y*<0mn^&p0D|GKLZ~JdNbW|Cr{Y
zW88=95*=<{l5GakulqaqY8lkg?;;+OJC@{s!{(AAhL@c=GTI-K@+7?I$awTOK!wb1
z7Jj;gxJli!>r&nt|4q7UAD--2xRW@Zy<8m
zc*5AA<0RdK@w7gjQJzpik=--o%#q&yL2Q9H`lCN(vcs7;1}>oF-g%7;u^a>rUZ4i}&=A_IT-`5>I$jsGU91+UxQ3krBQJ
zn&#qJmY3P?9F2=M)A1awm}V98kRhAC!3iLo-FY$FooT<)oA<-sydTE%z&2;V*F~`e
z+_)gl(3`paWQu+d6wMR*i9P927SDAT;@R#Z-GcS_KT5$es5sv%FO5)k0W!UW{as?Z
zGu~V;M=Ka<2Y#N3=DmgZ4030|$#d;ad{(RgXI-Q-^~l4CTJD~Y&vWFe%Uy9%JcV1l
z8-%oWOVLt%9tZ>>zq|sl$Ys0H8Skuj4*j2nQsr7oG5&IN-YcOm
zmmTSkcd_=f;wt3wLTtz9L=h400^Vi37x7-(0M_o!iFKC2V{1SZ{iXO^kK!9WikIU_
z@4}(7mpwA%&AJj#5@qS!l!r<;G0%l<9XPQ;vVfg`j`!zGx&tG#jc}Y=Kz1HHR30L-
ze?$42@^#im-by?Pj;zEh-bL>zaZ}jh`450HW7O*`<1g^;m?Sqm%Zhi2WEw44#^aTt
zbU#!c&wv{l_%*jSWeCM5pGWesx|ZncFNj1v7S9vpx$Of&+}-#0?mb8?dYd{OAn6$8
z)?TA?2}y|{o{|XMu;d3($!!xtRC=|9sFp4h31hZaO4~AMB4@0W%jM4cpj*%ot|WNC
zFHt1SV?A361N4{f$OzB&p`YHJUTM2g-6_?>(gV5g(rm|C8^F+Qxk7XnOJv?qO&6Rm
z4MA+0tt`m`$P$u7U1v=5{~5|IpCSS&ni+V^WJJjixsN9#uaVL0LRXF8jU4tcY>rg`
z(HYS}%Y_TOj&|}nN`^+0-L7jW`*ZldY?}dGpC96)A+YLCfY$$t_WU~)v?f@Bk?Yf8
z`!pteg&MtzgzpbJbERM_?ClTjBBTc)iQsSpE6C!`oY3vg3_;p3-z7#5Q=z-u8CwOy
zB>Qkv3HT5>^7mwn6rR*+$T|}_pp?n&!%8ICOyug*jQhw23KENM#S!R*l47F7#Dsvd
z7%57@rnjjZ1#*c&;IzC&-y9CVNQD-7+T`mGz&d`divIv$EVNax?Exvh$!(~lfs1}@
z8EysS3pB(_l(7F#H^uS`l-#D|OO*U7CFHuKw+45SOYCY$6LkU_sC5u^68+5wcjT9;
zE=Na@!F^t08oLqx9ws?}uPHUF7daSoas=h_)diE!N`L_0ExOMcKXDlO+BwDWvc2fX
zDG(>NeeM{_f!=V60NEaRDk{wI*NKq>(UhC?`Bj?6Ob5n)
zZKTuTTBId^nX2DF;><#1_ZwAN@%M2zQm<9$#D)FK3I?(r@GJfIiXL-L9Ks|&a_HWm
z!$>fYqFHd*4b?Jiz#s+WW()(Vsb0a?3@#zY%q!{?Anq|hfE51~P(P+>=tThF1m_Gy
zQdC0y7W9HXJxVW(KP2eH5nZo5ozs90+<2to5=mG@c1KI*gN8Z+^w6rC)
zUP3>j;Vkzh>e9JweL3>tb(qBr{Ra>(p9p&mQ-L;`
zZj~?91nwgU3r=-Hc1H&Lt*rwb9a;55QVGCN)4NdUGx_^yiSrT(={97Ox}8wG-$mI^
zXs<~P6VwHs;<91}`!u7LR7v%Nkel>9J$vkL}|FELd;VB51(j;tVWl56k@yW=flWk8-CigDU9%e+F@;
z4Y9?xQ~BWD%EZTD+i4miAO5k?el6BP`rU3uV6#C!;v0&7cnJh>p=41>w;J~<+5*JE
zGtS_@(q^#HqwOD$VGHTstRaR;+0O6kvTV|NA%OmA3;nF&BV=wuPDnobc{
zik0d%biI$;W2cC19Qn-CXD@uEKTg&zpfOmf@S?TP3>h^

literal 0
HcmV?d00001

diff --git a/class4gl/__pycache__/interface_multi.cpython-36.pyc b/class4gl/__pycache__/interface_multi.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3df1d943e2c469eed04842d450f5bacef790654
GIT binary patch
literal 31044
zcmeHwdw5&Nbtmq<00@92NKq8uq99t5MOhYQNtSIzHf39uACc*Y=*WSsz#s=C0fGQ|
z0ZJqSBxz_TZW1Mm`%2O@P1+=_n{N8td}+Vk?zZXk+io{~H+Q@Jvh8kDx7lpdN4lT4
z`*nZ6GxuIRNJ>ij_^Xue+`02ObLPyu^*+8Tmlckhht%2?otb#>{QVa$Ow8ufrP7J{{KReSmJ^)GuV$Rkyq(Du9Lvrqr&9{N
zoXuRB1@ubyQc{Q3pafI}$6Y5U9cS*;%-x-)iGot0n8G9~0
zn@N?5s|A%U%$KIlO*_Ha%b9t{yu#<0W6sVxW@*_8UAdCYmz~gbshG_>=JYYgsvKQ%
zLh1bSLfWy;-~Z@r8_32#(lLzZK|G~!C=jSx)j%pxHQ*Xm6RugdatO;A%IFVlFg%zI
zzzxyOg_LExOeJSyy?7Un1(~`;pK0
zqh7whe=x97r~M${)blsyY4mTOe+blKZ$Xa(R>OeSQ{E~4ap&4tJ$#UTrMnhYBM94B
zjh+r%Ggp4`VjZht7en?lLh1GMHkOT4YWG$2f498RhPE1HOXqggVxY?&(#20TgqS{c
za4}f!&kgvYOy9R*bb((7nyJpjmRFUNL6P0+p#y?IRfz%5L%wk&1K
zC`FoRZjX%4XP-A~9kqJ@9z$G0wV{1F7R}t~MnLTYhC%xzXtuxFLBZStXD>p*T|5qe
zz8Jk0TsaAhb?dq%Hl#V=wh3bn)a$Am!`Y7e8e06;hCH`!3cqC|00AC?Kpszj4uUE%
zYiH8sOev8rB!mN}XUo~k8SN)GPRtc;M5goWC9vd#CrcBVwdIVRrMxV~VhQ)}(Wxh&
znLg!zGITjzUPvKv;*@w3#koXzA=8jOQCuxUW^c|^oGGhRQBrOrB0;h)7iqL5Alg-rZ*=LDlql*kPUK3_&MQ>QiJ+9ZY(7((a?G>S
zPUyn))T3t|D_h8pn@+g2IyaYHbGlnA48_c?x=saRmz^#zm7SR_+N$IXxSmuxpZ946
z;hl9Xs59F{GbWmzgMcp-iv?$FOBiU~R6R`y?UyM{Lr-giPM1edbc_Wj1R9ppjeqkk@tGGUEBy&y
z>q$VzP3TO}8Hct3t}<(tlXcKu(xAj#)vKy(dDEKZ7QhH`RwBIGSG5%
zc$rgKAet^0Z71T@)x+>VjPG+|^-+c{=8kLE2ZA|4#wVi~>?!GePI%5vFJ(&hA(ZC?
zNjax|K2uJ)c%U`eT23fiKy938o=si`PjO6Dc7ptJqUeO^U5vN##aSn`3`&-hv2q4s
zh1qNd#j5OV*`~VZSRl9)DzH!|qNJ|?l39Slm@O631t(ZuUk3PPbjP#c59f%VW0lG_
zpi@gPtuCSNpn_ezQgXV9Y(8^2lTTevLz!n;AeueNvN{TxwX!z?#^X+~l*!N84
zs%hU(=P;ddI;Y?`7HE*{V$YB&rKFEL*5$OFMDIc+1-%do%PRW-bF{f_^J>7(;QRH*
z^-O7@xRlW|rOTz+6UUBC=+}w6>a!$|Q`aX-bq#xIHDAsiSzdP{DGuFoDs?A-mtKJr
zh=;AP5jJ`{!e-2f@iF2?9AT!>6YMocg55??{O&vCeUDgU)|feB4Oyesh}mmI%(&5k
ze?yl4j>W8AquU%n%*bGm83`UjY_Bn73>ag^s5xXsj8UV<=tj8*&5_`MIf9tV-tFcG
zM?CCjEn<$pARegrTG{^{c&eCdFGlTws;S>)%)mE{cN!8Nv5!hP-Uv7S@TmPhh-pI<
zbtv^Yvx+>=Q&rMoArZ0KABN(fCl3_TArZK43=|rz1OzE`3uj#%zUadt>`)cu1_Rqe
zhuUG(Sb{)N0f_Flsmk#u^O1yZQE5E0NFbL!|yWeI_-EkXiMkelmlfn_Xm!N{)QPhnicvuW{7AgX(kmsc*r*Kayl=Ri_4D+(}l{R$%r8-oF!#1GNmQN
zazfxj1u(_j>JrvY5K$+Rox*1qczTJ`+<+0YPDU&Hw9{(=|Yf}zs^oG*Lrh!vzjNeVWugJxrn
zSqd!vv4VUA&7T8|wS6ifGiU16SPsMRLwkvzL(#y@IV>|H8Ja4LcEl`SG;AHhmNgeS
zwDmV;5sj-V^WmDgcDib!j1w{lb*gyP>JLbt@beAg+lSJ{c$A%kZqNCl&V^ZEL&
ztYdZnR%ioE;CT*o9R}Qee@}ydXM?{Ns|ZKERJpLsQ$}l=7E*r~r(K#B*Nl}Oml_CR
z&#r0g_|=22km?g?_$IW)@HNA}?1iX4gkq%vvt-V`qpH6@P>x}}U|_@S52!(WwO2zM
zSVK`mc!oDLRSCNfROM_npc)~Q5lV;}xoOC|TOf}B@~8*L#o4qEc_C`F335jZ&U;#*
zVGiuJ7qPCl7r(sr5Gwig
z3U`LLi4bfsuMjP>u1xlN`E^Wcc44)!=(L7vC9kI?E$U$JBP9mB2v;Ez6_XWs-I#E!
zqjxIDNL8W?(32v=-X;qmA{|_vZgfky{ye18RiBec1~B9UY#y?4X|nOS@NN4Nu?$l8
zbb?u^s&e2{j(p95v0T4(Q4$
zJ#~W~8moT^I)F~1w(wn%APKVDV*mlkV2p%&pjT19IuRTRorRV*3SBI2^cp?p34Fm7
z|G^Mq!
z)LYZO%6M0ip(NBYkuBBI22Q&OT(|49En-N7wg^4h4sH5+DoaDgf2^RkS5PJd^#}R7
zj-YNAv8$0S6Il`2Cw}Qx59SZwW%I0RomCzXcqyH1F4<^IL*|MSEA=gGxEGN!%mESADvCai4?&
zqE!mgRWCzpI#T7(!k;msfXJeIJxmKLof^W+tubKx?M6RUF18K*ZoAd6q2kvRN<-B|
zNhX#(`?08BhdH7Y(zjp%dh@6%jod-)0JrKO9{jht42h$PAVdVrQvt+K0014xc|(Ju38inZ^?Agzl?H;Jz@
z*!M7c?0OKi7GdAZgncaufh83St;xI#DKu9-0v}e+f+!}#r9~rYrvvR$s{uD3YmNvSp)8B&4~AQS8e=nFPu6G8YO2BB
zKtE+ybQH8D=HSBHI!g$)&mdkDKd=89HQM|@BFhhnyHI1dG2=*BxnAb%)udBtXpZ!h5-wNST)icQ0@JJYD{&2#fPg9m)JAM8@pU_-`BM=4Mi-J>!u=x_YT@E
z&}M^Z>=5aFR59TReL^a)jF!}V8qyY
z!`}Tawbz9M`vurA?AroP172#sn|g@04eH=uD^)Q55hY%-43p`iZ@#no}ppp9)n7cI&e
zvh3rk6K>vvYP-C5Zjm>^eNlDNFZZr(%DsDwyn7|@J$|{{Kid%4np*
za*%yNz0r+7zfJt(Zv3#CQcrNiwB&!%kAG^*_^18&3tPrdyYbH`#*b}+|0dM(h+F$W
z?$+Fda0&Vbaz|bN9sU$9HzoU6kl^75R?FpL;Wb+NyhgUj%{j>XQnr%O8i?pRVXhpM
zQubX;7J{RfGhu|5PtRoX^V*{8Ym@WGn=v
zzyE_@oSc7aFT;L%ay;x_)U
zq-_`ehbQMhKl;*NJoWj{PR;{3O86CQFWQ_Yeg%*Gxt^OdK*JIRmvHT!bb9IRf`i>Q
zXdETCcE3G2|DS)Ie_!>g`7@KUQtjgMM_%xkCSAh(-eiN4|#n5uai5MiriUGVM8K2x9$dZ&d3bXRhH(z
z`V?vLn_eUQ)MTamNvs`bi>oEsF@-{v?3|IAv74pRF@=3IKQiva6aC4h@vlu*b~M6~
zxMbW1B;S=z50Z{{QF9lZT*CZZBSvnNhmrf~V&p}3XpFQf<93`+PFA{nd~{5*Lt~@A
zD(!-cn@RVNuT8#gBwk~`zy^I}t_af$^l&Fyw$p_Y*OKg45m)I*XYEva`IKybRwC}#
z$~?OmXggU6!Q$f7!u%ub(+`8xx($DQvJz#4WSjrO-yTHE{*s$`#ch|Lm~^ZQ4?W-x
zgFl-*hqZ)%i!$xc!FkoPe}R7ALBJOn;5!KTMF#wm=OEyh>HihaLBI~rFQM2e`7aFk
zBAwr)^ILR&8xGR?MTrjqQk0ZI2LW4`*O)@egCeF8M|s>fw11b*@6q{vI)4BM&?H$S
zvQl1Sz}Dq8Qb5Y{5l4AnVgWSrwErueKcw^D;EW$jhL+dCn_$zME>r8wXJ?LF$ttL#
zCHtq5!r9RfvAoVCPS}0N>QPu|%+AB)#EE9GC&(Rs%tmO$q9-_~Ss>RSo%XcCit*|U
z0oh50a1%bw{Fyv#KRU`}YpHyC9UaB#a#O-;L*-UWD1=0g@t
zHMp5yaKf|c!sT=+*}k-#Pt6t!1=zkgZQu}O8cq~e68V|*>|%+g7CN$%k-#psP`sS6
zlQDeG=8+hN5_*rxWeV=@y!X}V@faD{PN&B|Q%;mgpH)|Qj
zs}^c4<=B6VNc$U15zN9uz)th~XX$XTgAkknq6(#!d4&Dri2hx4$`U(Gz=&WDhK+*;A#@u(
zA^NaaFX3CfCFK_3UiuKN4Dc`9Ve}$wWUEIk5n`xEW%ssIBX)1QzIxsrWpDL#lR9Pj8Kh3IhtHHhP27H*N<Gjg8+0f~^V9Ko$+8MjUU@>g}M?VaUx+;YITn>#bb4wS8
z?Q0)qCN3PuFFT~SG0SNw$J+B^Y!j&1nYaJb8K^Uc$w4*1Oj5cSnXG7I)+*^r`@NYkbh1d0g;8^jpl
z{Mt{&5rG(53pIpRkzZB?$lN@Opi@Ns3>~sDa1h}^k1{}1FEM>`6_{~t1tc~%HY9fI
zCZqjV$ks&?$Z8^Z%Fy*}YrNjc<>`M+?a>AQnQCvxQOO(D`s=9m&YP~a*XT{PrmXN4
z2KFYN3-a32-9X|78Cp=n!7X_n2!5STI3;=qgdH}F!m>U`ZWiQbGq~9d{9&wD;-nU~
zf?_^TYj^F!&Y(!WMzN2+@1!HLjS8s!{d9gFj>p~FbT6S8k;$I@BlK;}$<8n;<2y1`
zNY+78jB6zU@UQ`QVIJIU-?mbRwsUSz^fj#$$WI_|#8X;>I9nUVqEtZkBzv&bjKy~$
znNbG0R${XMZedv-q^VJ?gygVO7zcTJqA%__?lN6mYvON6uWe7G61!ZibAVu
z{B9R<(VD8mO(RW1f^Pw*(@n!gY%Yh#3cZ_C)nYtiH*ezT%E5BH8q47*iCe=yEH-1g
zd9ls@YPB85k8rTV2&{Y+wT$JutL^XSQh5x^+ig6cch%&V!V#rh4?=gU_QfEN>><3f
ziUsh20N%~XQ}yerpN?X1ReL@Kgj=K6J9YG)aX#+z%9Lk*C#
zA37&Qsru+kpre~(NYnkAQTj!f@|Jz7+DzY8Y`eFK_psd|*mYNVfD72wdQ^9{Cx=$T
z9_wNFI@R~79iVoPTccrpUX-Ur^CL^UYdEv&#*9{bi1ACX+NC>3MP*lYXKs(`!y!6N
zk3_W#1JUmfwceZSF~H@E)m||K+nXD!_Z775Ej$DUlQ5XF)q34h_Emd@bq_VSLp74y
z&&?%JH$hjYR>O6CSOWU_fqKXwHm1}Fp553YX{>{cfNNIiS?o}A4A8end@=i#@~s>#
zG-Oi=ly(rJXBReZ`tbCxsq%Prmu%MXc+voRC1FLePyGun1o(g_jvk4@;-QT|wXaG`
zgnHO*2PG`|QZO0D9o4S#8Kq
zbF1(Ip_f)TYr{A?%6}g#ANEt-Mlm@Y7zUk&12q~9>N0Py4x*$7bxy)lYzKT-SaUNg2N_zo~9m-WFY3_-Tkh%-y
zHkZwi=CT>`&6RyO4)5x+@7YxLDYtBm&C&85xnnr2+oifTEb*V#{yEG%F}^&kk5|PQ
z65j{dR=Q_0#kJttFMGY}TJX~6-G1fK)oa^&Yok}*xV6!OC)B;@ySv4AAAFfUm{ZZ$vMg!aMc^Kf+#JhE#sc&*J&fR~;m1kmk)t5$7q6YRSXvsdjzoda2Q5He>2|xbW3EzjttSczvNx!G||daGT!YW4^g}KB6Ac^_2ek=+*WntUXMc
z`)M;K?3K-!T&JI22NU#FACr63c9=-7ZJ>L^{?NBX_j}O7H%Ir!Tq@V;{>DvoKaUXh
zbgF!!bAV4hUcS3JsHVio`3WfeccW*>S8c3;i{4WmW1pEb)%UBX%5n8HdtY@Jz3ReM
zQ%xh}8TBT05&mbdQbF9@OWy2_W1qJBwi~0>0ZIa}RC=V5ISJ@aIFd+t44hHo^nrx2aj7FtwANY>9yYc$FKkKHeHP
zO#Y{1ha|W1Qw+7_9#WrZjT^?{k=h_+{Xyu#;G*a`xIF
z&8a!`>*Kh5Vjj|4$BFD#3%Y*LfKY4r$t@Sa^>1_iS@;k8xZb#?Kj7o)QsJAb=@0t3
zT`G7rRXOm0!)j4DLS8t+Z3;9~OA=CWL(m3pND(1trKMS;Wwgpeq^p?JfQN`)B{B=t
zSpj8MK%X}$`)ULWV5QpaTj13+Vn&)dxIa2HdQ-aJwo^Lc*#_EN#7o@pH){{`gWLlVeXvNoU&vM
zUk~SwX+4|$1H9ppme@!A?{!IC|E}QsvD~BTQ-gtdtfbeDl;4Q_J$`yZKc9O%*RS4M
zJ<=Ok1bsf>o@ex40O_|HUnsyYGKM_sDi)spfF=%19DoANVM
zRaLDi=Hc9vsO#HgPVsi|jCWiOqRyMKergM>f9M8SOP-s;`l;MjSWAAv`rFhyziF)5
zHe1p9hp&URpXY{H>y)in`}qayjRsmX=4NQkbX(E-)f-@abF|jbx5Bzk>lYhny*<`<
z`t89!kK8g=?yMe>cfI%evFhw%sCGy7j%z*keJ%aVO|xlK{K$3c?B}^*OX-xYb@ua1
zOTDDt<=1E1I=kcTY4vXPa@J7auHGZpzi8fgT)meychj6w@8g%cdIN5w%WB{@LZ3eq
zeWY>xKAfxb7@ep7&6<$se8%HBg+n)!f1aAp&GM%oy-rJfv;678zZie|H#flg=Ee-`
z_f&2xtm|XupOQa)_BvRz?S-e^u-EI9tyuf{|H=8&kKF+4o1-=H+e~X63aKer-xPns
zIO?3irlsUJ4FuG8dF6_RwmHwm_JI2C8>D%*1@4;x
z_j_)TCkeP8x1Ixa+h*%
z5g!d>EisG@R&R{!2@BO
zpUfj95O~Mj8C)}CzXe`5;PM%5T7Wx2E+eKJn4D9ea#q@u<
z$opqt(iD`N{mjI3mH7TQPwZcs*sl`%A33%E@k;QS=?5>_zlKtr*h10HR$#xE&cAM4
z!c~FRY0F>A$7N{oXY;I%oWs%h2maDCpM(u7
zq5^@Jzi`H$U?S+Bb)!<)t{SuUF4oR~fzxFeGSE%0*$Jlo4mio%v<2b~jvDEsK*>GZ
zCH9U3{R}`l#-eY7Iv%lyQM}+_Ax*Be{5p)}E!;u(cN0R4F^b%7{IeUo9aq
zI-{jB^De4intY8rk?0N~sjeh(%sec~aM-z=+`Aoh>xZ`f6Z1z~Xu4(bdx_E*o$p2-
z`@86T51dNqp=YO0B`U#VM^4OD+72fkf8y+^#8*5^=t?I|;&60^mTu_oA)9wKj+@B{
ztm8`Mb=>A-&f%7xD+{V7!&Fi<`k%SB?u6IY#i|Z>`mB{_
zi*n0Ua0c+~TiO0P7jCp#E+xDCrqr}>BFKVErEtqw_@??E|^^ovhva>3FeStfI$r_+E4KN5FB*4Vqrd8Ud3s(2nRTQ_DhI)tqTqg
z0ZH#=$88{TowRaB`~Xr`+T>mo&^2_&(IZENu;=d=Iv+oBTz@_>h3NR<#4Q(|cxLMC
zqf-xGIF+!gENis2EGePhso0Dzoqx?8@PRK+I-wcdnzaZdj}X5JI!Ec;K?k>+;Fukp
zW^-ZtBPh%MUOFF!b@Z4VE
zA-@DnN7GALoUe;66$?eNg0IAI{VN6&4duZZ
zFV!uWE76t2bBps6i}ROkZa>?1(mBbjL0T?XdT{I>H*1wval;#I-4o9(T(W-y(Mj{d
z6~}l4IOFxOW1fGgGMXu5?D_S?Tz+*e@!Y=WVJ*90FTM5R#L`=i)%IPg>~00+Cs{zW
z9H$mXQ)u~n&Y-s)B2C3vA!5zWjpAF9Z~Fy
z;Ido{T^k0x#lkFZFc5pHX+j7xxh?Wm2I7}lU=NE4_27HMP_Pm
zI1DNAZr|xwoMwsmL&_iH)jjQ}{$uTYaalvt(SLzuR@Zxk~KBEW4V@w*>rvFYj
zpTVWdN=AyVw9@8OJ}uwS76
z<8Yh~5qc2GG!B8av^|d#k8o+jEH=$s^h4Y*tk1=+*~u;;MoP`dS7DINMer>_L3jlPx59&)b@~>pWFlw*L=Ekqo0-<%?H<
ze0FI8T+IFlQt3_-l9D7DLcfEFdUOquZERB4atnpvAXetQtttQ~-jX#|&iM2t9Y
z&5D}?@X-pW1MV@icQ|1ra6A_mw^?zs!#HYnz&>>!(lN#P$S88lF<(I7zX60ZuE)g1
zA}~3RODr#l~lk5RF1N{@sHQUbqM4GI8fYeo&&bXXY$za
z;U<4$ERQDh7_kWmrg5tvz^mqlJ4=s{xDnpFnH#*@t#Rslkq+n3&;T$0~X2gXz9y
z;UKzVJDvyel*XXr!gR%4J6q;5x*=PwFz=Cx_z_IRty&NlkHNB%)|Ifaq{*0{iWj@+
z7#NieU=kfh3T;6Q19~yRFvZBspy894Tv;$TGhl3H!0;>rvmMywVS^3EW!OxDk$x`B
z3nAb}u#MGD1DaeE=2#&)Hyh&#=UTiPhv`=U({*0CPotzL4U+t?7{8FaySAg;QSHRG
zLllMtL3^~?4f8HQK-wLiZw_WrF!qA6KTNNB;R}nA{#k_$`xH-BYs|6PDt=rP!h5$D
zErExtCpZzlW)>Xe45;`^C|Ti90b?5=aQk{Q1f%phpO##_U|EC@Kj1gUb~nVruS;J$
zp!Q<1WK`}9lNRwz{7CC+5ce+|EC24IzM@3pb6p$UqTDJ6%5`&m4I6|WCcwGI?OY!(
z76d!&fiOx6or?jKzD(>N#O`PD21G
z$%p)-UWgjJX~?|mtPwJ90f!k_Gi1x=Cb6y+?rUzqc~=Y0G!JWpHVosGK0hz6F~J6{
z2MM>8srK3~Jbi1gRCjgY{%iYxRQvRM{e^lul-R!^x69Rb)_QCGwSg+{1ox?o{zEe{
znpWvBwz&@TsV}`y!qlGHu8V=fP!QX9i!tQ+zew_|)aHf6{nQm&mVf8M}k*-Lj}NHl?j%?*h-j&5@j3r}sYlAqs3=bdym=)4H0;o>lv-r4WcftbR`rC)OE$fm2n
zWOgQ3NKM=CKnB<=bF$ZPHQ12WPel_Kg@_6t^fJ2)Ex?J&^|Dkwld&ZnaLwpD9z$!?
zkf17vRs~g0IqlF7@;DraDYtutE>@$UC6@GYhSyo@BAs(|-b3fTbRM8X%~kYEn+&+&
zTCyd6UWGG0sFk`R{K*c{o1l?%rmv07(@ZG%QkjYcw<5Up$UPwi7
z?}fbWRVqL$bYv1*ps&bW7W6$$t!o4sn&UcxV?Ff5Ih!W%PFPRyTlJg&g}6_JAV%1*
zfhdlR8HXToBWBp@4nh{=O4&#UG#xXJ(6J!2o336+=nkU?dJJ_TxZV?t;qtR>FF*^9
zKpOJW5>q5CbR9sXt`eqHG~)3}Vq1G@_XOkC0`OUgLJSFrKozK2xY*y!8Nw#}(Ku820yG^ad8P#QOM!Ce_@?E9hr}pAL5UT3!v;61xnZqTMmkctfsU3vwTX`U2QcD*`mQQPmd0ZbTDhSNd^KQ!y6}P-
zO|=lH_N-9t$p)$kRZy2Y4A9<(4xD{I>g3*7r%O(_^t4t!g7J&|DVDqw_lDs*8XN@K
z$8!nStisC}9pg}HFs2b7U{B5U`lHng!7n$#rdy}!%i)JNWH$sqxselrSsub@!T7+K
z^>RSz85S<%LOdk6dhjuR#3Ia3%@GB~V?S*6dj%T48=%3`umRdGbz1|pUCi4AO+s8~
zyM#jS_{$B~`nDXAjQ2*Q_`UwqG$P+6JuZYDHI7KUfoTUW#yPNI!q>|;`cTNONce6y
zd{;f(9j&3#Nq4kvDyyNFg=`!jZW*m1{#XY_YsmhiYMrx?|LCzxc;47`VN3nVGf+Rd
zyHB{0%kf8RRDdL;+F$QY)qae`uzPW_?oXS!0IuuJjmpKvI>kRN9|*7HMJ?B`NAIoo
zbZ`XzAc|iv@PxYGYqy)@&VD|O(|>`n*tT+D`>}`-gi}b@+QH9oN=NNLPl6v)p2ISM
z(d~2#+Niai+Eza6QVOw@41TRVxx{d^mLKl*5Qc-Ae)#T2N?xDhEnGNaJ2EElz9}xu
zk~J5O*q?VfB*QcpW|;2X3=?T{4Ph-@IAZ_KrtuM3c}&Jf6jJl1xv*dVo9pAvJo{Uh
zbkUV5^oC|={~Qt^y7Ey>i4UpU(T~Pz1Neb0jQHvRr+b36wgiTsoZ@`S)x;>vr6YOohx+K=&aMJ(0MDJDxDggcfd);
zn{UU|Tl$}1G-s;V%K13`2RJ#}2M1e45|9)B)D*rYv0&%#Dk0RLZNe{AWQxtR(1f15
zac-ClXA3f~`wUCeyCT@bDdV?`N_L3|BpGpqKAE-3d1RS37Z@Q^#!K{lI~}Pa_wH<&
zf^zj)@BRD*eIKLa?;P0wm!G#0vTS?YMc+v}FEfI>AdT}-jk8QD+lVoym@uvYs&<8W
zWMa7vUvh`HE$(lF*MoR9sH`1mQYjWxW%jqyc|SitK!xWy+DAJ3xbVzaSx7dq@GDbHGal(NhUA{^li?BEw6f(y6wBF!6E|W(PHH=h16Zi-
z>xprBJ&0xM-~mAH!{rii&F=OQ_yrO$;etPx&fW5`xE!|zu%J8|@$Lsd8R{@c+RsJ+
z9SiURJ&0e0>Bem=ZJam{m;=^86lv<%g=9;>nEBI
zV9l%nPSHog*Slz;={}%wlY_12#>8AG~E
zLpQ4Tr9Q-Rudj3xPQ*C)_P_(N!B|(!j7-LM#jME1NL%FXfwu<+V>@C|{0qjyk^UG%
z_-`F?{Wf8|YT500)g1(MFu->Z(8++f=OBQV4R*KZAV3(U9{|gV4c6mDIxo?A7oB&*
zLE5d$lQQURTV7)dDG%T@;wbNB7VzzK-b3fTblwLC(BL*BD^>H6-L||&3P^cA;wbMb
z3wS@B57422!}Zqr&`muqG8Vf;4&8^_HJq8D>Z~i~93G6vyXNhZa4gWfBc;!^gikbf(
DrC9i%

literal 0
HcmV?d00001

diff --git a/class4gl/__pycache__/model.cpython-36.pyc b/class4gl/__pycache__/model.cpython-36.pyc
index 5e0fc2f4cee0c8fa20820b403d5cd75c7bb16d59..22ce5979fd3956be3dbecdc2b853497a0077c69d 100644
GIT binary patch
delta 6482
zcmb`L33yc1702gJW-=i$n-~N`fPeu8kPs3K1~H02uplC$C1Dv}GLy;RY}}b7B+`y(
zTb0e_i3=ct3)L#1j$5rL3b?iSRol8eMYLemS{16c)>`fP|0iZ3rR&#x-~4j!`tCdD
zf6smI?O$)-zuul(nVXxv=(=U|)|$$8zrxf-fn1Lc~j7c#j5x#ZLllLoaorBij(^tlT@EPo6V-30(XYx
z==8-6OD)v}=4My#eg#rHO@6U#flXzp0(Gimu+jy|rp#gL#3M%P66v4^0Qnr6FXn9d
z9Wk`OW31Bo$-S=ib~VrpWG^0LWe391*0`rhmT1|VqE?pI=ZiK)q8)mWjJ2F>w(PTF
zmOU2LgUyVb3Cc>==G3dc=%CBcE5`_EPoohGMj~P9bKh|_15mFg0E2>Df!8No7BQ{dPs6p$sTng+#^-Ef_4E}z(wY=
zf%9rAV9p0d3ou>kU(e1`dn2Z|i(ymJZZ4Y&BSKZ6HUu~v8pzwj<_&~@}P{)OKura?^G?19z
zy#lldtc*}VwjM^@h`J$SS-H+wJR&Z82{p6K@`}pbn_!koquZ&g%}B*Wb$)VdMaV7-
zF0Pu9NDFE^I&J_S2JQ!L29m&i00jU&>M%f2+yx8!N8b(P0QUlP^EOz&6u2`}yWBZ@
zTCgdIv=A~x_Su2B<&1hzWge>Pqso%5aUCkii$8*~j{;l)ueKG%jlfTUUBGT&2XK!-
z{9-Jnesq7-aSL!&TpyR)A6_egPb4#77psvER8N;cCTE=fh)OV04$S4C7`fGci^;g(
z4!}AHlmaHe#a$0;C9n>75O@Ok9H8ImD0+1*>Uf{@CB4rZTc1G71HcM^p1%RsEdYH%
z*YK9<0o+O|blBpSvpE;ql^_5;rVZvoE(Zv%`4Wy09LW3xGJ?raIm4{N|9z_@z<)4%Mc1`~tDLX|&8|`yd0wY(ZBy+nJ1?rM&*H
z?mqnTxs8iz^$=;EBPsYe$wsoHJb|#+w^UD(@twwZ4INH?bIw@hs+3;R$s9j@S;C81
zeZV-N8NdZ}0ATbNi6xRVP|*ncm}y#Sx!@l6)t%vwJu30J0m!h&B
z$J%xPR6RrKL6MPVh?sLI6^e%PdEF+eNhyNbpe)|OIBH`bEC!xBcpJr1fGMsX);xgm
zWR^jAQfUPnQSSwq3R_^&r6-GJWrU5et`aMuN229L;0)mB00N}X0ww{&6gG9pCu63f
z{3~EGFa>xCs1sP3O_6Yu5w~)*FXoGJH2K8r24{j-f;1#YFVXQM
z9)~t!4A-y$*6)BjfgFJA%EH{#{Etz$1>jn_Os3*O)LjQKM;62C4V(ZJQ9UIvLqHx-
z0K5z213WMez&Z%r3h-pz1nYL-4s+k!;?mn-ZWg#*sWZ~i>T7P+k4a5~y=jR)-t1RD
zPz_I>R=?QZKNp>G->GZzO1Q8)NBpyUUWGb|=UH*mIlonv_V4cKL?1`$IhgAnfF{0j
zsDkA71yfaj-o&x{@%=*OC(AtZRALQA(kl$!pHch<_!{UA4Bv0G1Fr6chMxkAixYJj
z00M!?N|R-8u8`l@277a*{IBYvL@RHxq1ys6Pt5#g(d7e>p7mUT+s;j`Cu63r`wN+y
zj8f$>pEmB0ryPId_*1y=Goq1p?ZIWc{rBw%IHSQir2-F(q2wN3yDFM
z9%A0&30wlu-;73&K-jSuMkoi?_tBYS>G>wqaUZ;gh93Yw
z1pWm42)F{c68Hjm3V7OP%T7HFxciIXt__?dr{BGS(ZV+ZBjr8OwRlISikN$%-b6hv
zu@Iv?z(!y-z%9wka`D_K^a0h8-lji8XKqvaaSpnxw?mkku6xB=F~0&H>E4N}|S^7>*%+&f3lH`nN5
zr@XsVSmrjlq}zN>9~{cX(=WVd9@)GMr^LD;ee>m5nY?U&5xdcmn1!iJalwyaO%zy;
zcp&6UWyW==d0>+=B*d~MJLyx@Xq~49XUqJL$wAj6!
z`X({l8L0>}7NeB;&Z^$Y>7CEo6O<;EN*D3W31dO^z&s!VECDFZbU5o!R}1j$`w-Uk
zzzqOn$nY`3e$=f87|1R!CZwEpUfQ8|qT^!#RX_ox9Y_^0_KY)kb1K*vK|N4L51~k9
zq(e%jJ&Oj9Ad!0dr1D-s=sH-G3#CCRY)2inOXdBqsRy(4NHtIKb%wcyS!Xx;q}BQVNI1vN
zH{EJ1Og%~VNIi3@$Nwtzw8D?~V*Tl)(}p@G9D6F^)G-P*X7v?M7PX^sDbN8>4vbRa
zG@0coSJ#KoETyM0UgMMAE0sMl2%UL;T!o9R09FEwAybcuBJa~mjrs`hjf&l
zK7!@%;09oN?{@b;cpI~FxA0x3$KL-0t=xT|!@62v*VmY59cYY4f}>OaJT2|oj@}u-FyK_67r>mJ
U-t-0B^#joYndtH6C9A!E2V_ckBLDyZ

delta 6715
zcmbW533yc1702gIX0l932s;7^NmvI`Cku{i?Mp?oSkKX|?XOTC2t7{{Js#a2jnt^L_Knx#uqLzH|Qf
zyf<@ngZ1bJYerdCmgCdi9gEiN&X}i6neJNJ-m)|lbh)*t=JI-iQJ1$hR9sx#`R~kP
zS7m9XD-vpowrRrSs;Hh^Q{!s%Ymvw}uixeOHN@8|8?B0}Pq5vp42r*Io1#p~vREv_
zF5yfuY#pAcW+=-=PdL-MkPZvJbLA)EkgzD}N{%wYR;Y-c@$*whDPvDrM+}hzL@tmG
zBrTM!?@Sw@WXGRO+hA4l^ajTgxua(tgZd4XgAKbc7;cFg*4Yum8VQR6{iNe8MU9`4
zxlkF10b)2tWokZA-=O*Zp-@nc+1_JP9{l1Apa3{i!kI0Xi+TbPF-BLirsYmT$5_B7
zVc0bxw038@7%y9@?#VilTM2)9)EDqXqMopk;aTpB210I+->Hc4@kn-awnMd@J9%2Q
zZ0G8itnR0OIiy6Htrz7^DpPH<=T_CA16N48={eFDQRGMdx5zOFq9}~7&wWA}fbm!#
zE>2^-=jbg%7fo`(IUgu8NBzKH*|`qOTcu%9sQ?^nkXBbMWo|8YO
zZ2pgIf72;*7Rotd0Z?(KjdZY?vTAcGhP5p$8ZiyG
zB-<&XIV9Y)MT7p}uo;c!q8NgG30`Oftnb5V6S$3
zP6f6#dxS^K#)#Bb&EMjQhzh-KR6)fU=@OHHDd*_s
z+S
zT?1=4tjk~xgT;V9$qoRwZ^l0#)^k9Egpm^P1;t>@_$_=xq-FHBN1`Dav!~&q>iX-w
zqW$%Xl7U%k;Tt4JIqhP+-du8yGBdulBw&?0?>nwKmXOwV47?H81>6njzz*OpfKq@m
zMKeHo+y<)%_#Kc5Yy}wbJ7N7n!kKDT6#LwB{f&NAmY{$p4@QA6s@lW#bk*Wta*fE_
zGrnDkaWlKI)*gU6;?=jIc@uCuun*V|>;>+SkO9+{Q(0m#d^{Z7CHKqi_d_`QBSxoI
zoJ84ChR45{Fhiz1UUl|fCB~H6u(pk|<)M57i}Pe2f%PVk53B~b*)_1P0Immi0uKNm
z0gNjH$*BJZKHfKD&Ft_-Z-}Ai9^i6-nYj+uI)JfZs2Bo91h;0UR+B>vdy_w;MJbBJ
zaY~3sRAQYX%sCx+0nO(Ds&E&t&pg}<=V9O>fPsGy)=R*Pz{|i&;5hIoZ~!<2yaGH9
zyapTuUIiWlUI&;P%80o(EEc=P>5x*sxZK^Z4EtlA63H4C|GfOtjM57*nHprqnIonL
zC(A28^6!&!voaYJ!FBBXRO{AbbFSI
ziSdumJ4;C`k)5(nFPU*s>_RM350n9m0S#yb+<+<}CAJ9m`LMe!*wV`lCGESb)^|TE
z_P?ej9v052rXG0}ij9||a_-08_5zF;Q^l|`b4&q?52Zj+P#%XZQWa*#U&3UjUFNDY
z_ssLFXfn@CY(AR9ff`^Qzu62uud108ayz5=LrcDA=e))lA`uc%o*~sS?gyDc7fFPMtGSUS5<+sC-w`!u8D_&E3_AYCgZ3
zgSR-@gGPNw_`GTc-xccpp*Gdt!GCGKhEOyb3aI*pvvMm{hdBVZrKWb&^MBg1&PV5H
zT+U%G+7|M=#VBlo$Aw78E=%GEW=&6t{TcS(fWH851562?AL1P}{|Zb%lj>ovSllRZ
zhK1>5>fXiRrNG|-3#PpX%Llvz9tPd@zGE{g-t(iMo85-X^SGC7@B`83a+GQeo
z%KOhvn8Q8X2jz}O;*$#_^t(5>IT8;4fFx}N>R&4EZ_yQ
zfS$lxKsLa-e*_jw{(4xufSZ9^fK3uIF%f@9aX?t%NdfG@Mqq=4GmQ^O!`9+yY7+NJ
zk7{ikwoLSbWov2)2FT{*)D^@_7cQ|D^uRhiD%>QuhMhVy&3EV={eeX#IZ0_B6#sfr
zi!y#N=5iXgB$9AL`7hIJA^jv*Rp8`h>0`6*ScO=YTnWbx*_LX4Oz;k5GTi!(90
z^Zva0daXd2ra!FRF5iU~H=OY^R)>^us7*|Ra=N1V-3$anwO5tk1=AaUqTy*W`Z!pfP
z$eS_m)Q&L>P%5l6;6sdI%8FnyKuxe1k#r2R^)%6nSpU%!1@ySC*UB^aU-l<65pNo3h7nT+@6RQb=Qh2
zv}_Cff#qsCT#**hq*^Pc8HHANi8SyjRc&ov&$8tDQm7vi$4Bx$8A67KuK?VU16Ri_
z;*jXiL`EpZ@e`4CwpcZGlz47Tl1;;g9}&zFd_B4k0*?VZ0cLhPEFHKD;Eml4YaMV8
zPz&q;aK~adum_+jD1!{Dl(|csjQcTWFYpks50Fyr6fH$~0L>!+<@hM9H-YuQ&A=@H
zN;=E7S-=?{riogRfr@pW?kyO2D?kOm1?wo#7svs4`rd=}KJWoR=^cYbtx-AD5XD8A
zQDGDkB}84_DQC!EtmOBoa`Q`A*Ebun4P91uhgd5OXNpLwZh&(m
zkO=%{H17kb77AqxnhZ46LXA)lED)wBFoe3H-0nq_3QDB@Abf`a!
zLY1cIJ5~-zb-??Xe&IE_v6C<1Y3jHR84>u

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 7baaa51..c87e0be 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -35,7 +35,14 @@
 from model import model_input
 from model import qsat
 #from data_soundings import wyoming 
-import Pysolar
+
+import importlib
+spam_loader = importlib.find_loader('Pysolar')
+found = spam_loader is not None
+if found:
+    import Pysolar
+else:
+    import pysolar as Pysolar
 import yaml
 import logging
 import warnings
diff --git a/class4gl/data_air.py b/class4gl/data_air.py
index 1c51deb..3860319 100644
--- a/class4gl/data_air.py
+++ b/class4gl/data_air.py
@@ -7,8 +7,17 @@
 import io
 import os
 import calendar
-import Pysolar
-import Pysolar.util
+
+import importlib
+spam_loader = importlib.find_loader('Pysolar')
+found = spam_loader is not None
+if found:
+    import Pysolar
+else:
+    import pysolar as Pysolar
+import Pysolar.util as Pysolarutil
+
+
 
 
 
@@ -360,7 +369,7 @@ def get_values_air_input(self,latitude=None,longitude=None):
             PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
             PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
             PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolarutil.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
             # This is the nearest datetime when sun is up (for class)
             PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
             # apply the same time shift for UTC datetime
@@ -371,7 +380,7 @@ def get_values_air_input(self,latitude=None,longitude=None):
             PARAMS['ldatetime'] = dt.datetime(1900,1,1)
             PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
             PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolarutil.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
             PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
             PARAMS['datetime_daylight'] =PARAMS['datetime'].value
 
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 3e483f3..27f44b3 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -163,14 +163,12 @@ def __init__(self,path,suffix='ini',refetch_stations=False):
             self.table.to_csv(self.file)
         
         self.table = self.table.set_index('STNID')
-        #print(self.table)
 
     def get_stations(self,suffix):
         stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
         if len(stations_list_files) == 0:
             stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
         stations_list_files.sort()
-        print(stations_list_files)
         if len(stations_list_files) == 0:
             raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
         stations_list = []
@@ -190,7 +188,7 @@ def get_stations(self,suffix):
             yamlgen.close()
             thisfile.close()
     
-        print(stations_list)
+        #print(stations_list)
         return pd.DataFrame(stations_list)
 
 class stations_iterator(object):
@@ -345,6 +343,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
 
             # we try the old single-chunk filename format first (usually for
             # original profile pairs)
+            print('hello',STNID)
             fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
             if os.path.isfile(fn):
                 chunk = 0
diff --git a/class4gl/model.py b/class4gl/model.py
index 8760411..e6eeb07 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -27,7 +27,7 @@
 import sys
 import warnings
 import pandas as pd
-from ribtol_hw import zeta_hs2 , funcsche
+from ribtol.ribtol_hw import zeta_hs2 , funcsche
 import logging
 #from SkewT.thermodynamics import Density
 #import ribtol
diff --git a/class4gl/ribtol/__init__.py b/class4gl/ribtol/__init__.py
index a21583b..02a6a7d 100644
--- a/class4gl/ribtol/__init__.py
+++ b/class4gl/ribtol/__init__.py
@@ -1,4 +1,4 @@
-from . import model,class4gl,interface_multi,data_air,data_global
+from . import ribtol_hw
 
 __version__ = '0.1.0'
 
diff --git a/class4gl/ribtol/__pycache__/__init__.cpython-36.pyc b/class4gl/ribtol/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef32d81b7b590483a5671bdd87c06a3b3edb7c2e
GIT binary patch
literal 294
zcmYLEu};G<5Vg~ULZN@a(5=c6mlBDksyedJg#jrYP9Zjlg&iZ?X+*_Oz+do7UYYm>
zCax60o!+~5rzhRr<#;@LeS6t-gpd!!Uqq-@2zQRe38#u^T2M-OR78AG3~ExuJiaAq
z@=4K>(r{wsy*CP)ewWO$i)^-=UW=An`8c^VofpDIJRRuasK%3!f`3kIBC@D9qrj%uE(k30`2inOQp#1IO{aF
zlkPfg;?6#xqv8QU^S}eN4}cI42&qB{P@iapKtiY>BtV6gP(gh6~xRF)qx
z&02lG*V=WimY;JQt1Bz3m(6=vM_F%Z)Zj5tbd)s|_fb@wd0J2DF~Nj-S9@CZbT1+_
zu4WaXmt^sxBmGE8@nRw>Vq4^wwr;=tReFA3wSh%XZBg}&t^IDpZM2$}Zax0-4=ozL
z-f#Bmex%>;S2~v7-`VU~+KvP9<|o!~%lek3uH8nt^=-w80O$Vt(!SetmRh@==91m)
z_LhPzh0W^aC3knL_k7)HE;ZV9*Il*S$AYuH+1qU|1t2*c5Czs#o}s9*oZ=aR@hI9c
zjH!a%Wu7S|15%tMPEpek(Xo
zB8hffDBRdlj+paIh@ljL>!tYuH$^&!HF;d5c_ORipy5eaF%gf;wb2JoiN?ptmSotc
zsSV|c!FXtw5pkZ)DsoOvn8P`f(k|=+FF())INk)}kQNi_x8^qBoh=Ldwc@cxB1=6BTiAj+i^vM!$wkQ-)
z+|iFz=jBs+L=hw7WY1?s9xY*|Py!Taia;$8$`n?cBBWr%%XIIf7pyL=gVlBK@u#%{jCZZ-&4?6E-Xvm>MJb7yoHCpZ&oQ$^C_PQgh-r!*MSx=OWyCbU
zu%jJm&X+|-P{j4XHn5~`pco?3On7-F65hpZ=xwkbY~_i_hzuC$z3cXCb1(ko$=CFn
zy(OQ&4XEgk1L5BTAy*5HUmcA6eIO``{r#Uk_oJ^(zg@L|dvNdKvhils?)>zB-+SSo
zU$5RH)()p;YxYC(N!IMAxl$XFt^Mp{1TayvU;6RCEfMjob|P*wy!&sI{fI)_0D#8+1EPz
z9jaQGwvQS0jvv_s&$lc|^ewP{zwcVwM=H0ex@GBG^#<4;HDrvsRNr!aw&$}a9g37C
z0gxBkG)ccBZSKGZkU;KPI=txl?1s-ApI!IanoJf!6(#2kvfF66D_78SZ=h0Coyni7
zt0`uvCNtRs_!&}8;e?jeqHeM>E3513CCy|RXftY&&9L{gtFZp0>QF`)riTmYP`+40
zac{!!ccq7MFD5C55&@+KeA3<+H;Ar?@aX_c$}{o#g37$O)afU53DN;Y4N?rOk|6o9
z9R`yB5;wgRk9*^omm(y5O5<4tbRU&eW{5OC9HbOVErSsr&0^hPlv<2yB25;U9Rqbv
zNHmaOh?W_mWd`$xXwaR3mKj0IN;ISbLW?W#E{F6vIpXmo#`428^2cj<1!)Ds#$2*0
z4y}qKR#Vanv`GhC{K;{9}{dK@zqh#J!zVP4s
z=f*}@Fh~JsvU-pd9vm3|sv4$=;(*5|YaxCAqiP4p|EVtO4qXqO1XXcVezfj1diCz&
zg@C4r*(y$&)Du)?sLE26L*=V`D^|4Kl}33CSYyOWoyM-?SWziff}z&&P^;oc?0TnD
zw<7iS7PwSBIPmp7cNhRM*N~b
b2Tnlsk9KzXe!F>%Y+d=!X4X)Rta17OSI+gJ

literal 0
HcmV?d00001

diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 719f9a5..b4257df 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -17,7 +17,6 @@
 parser.add_argument('--first-station-row')
 parser.add_argument('--last-station-row')
 parser.add_argument('--station-id') # run a specific station id
-parser.add_argument('--dataset')
 parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--error-handling',default='dump_on_success')
@@ -55,22 +54,23 @@
 }
 
 
-#SET = 'GLOBAL'
-SET = args.dataset
+# #SET = 'GLOBAL'
+# SET = args.dataset
 
-path_soundingsSET = args.path_soundings+'/'+SET+'/'
+# path_soundingsSET = args.path_soundings+'/'+SET+'/'
 
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
+print("getting stations")
+all_stations = stations(args.path_soundings,suffix='morning',refetch_stations=False)
 
 if args.global_chunk is not None:
+    
+    all_records_morning = get_records(all_stations.table,\
+                                  args.path_soundings,\
+                                  subset='morning',
+                                  refetch_records=False,
+                                  )
     totalchunks = 0
-    stations_iter = all_stations.iterrows()
+    stations_iter = all_stations.table.iterrows()
     in_current_chunk = False
     while not in_current_chunk:
         istation,current_station = stations_iter.__next__()
@@ -86,27 +86,28 @@
 
 else:
     if args.station_id is not None:
+        print("Selecting station by ID")
+        print(all_stations.table)
         stations_iter = stations_iterator(all_stations)
-        STNID,run_station = stations_iterator.set_STNID(STNID)
-        run_stations = pd.DataFrame(run_station)
+        STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+        run_stations = pd.DataFrame([run_station])
     else:
-        run_stations = pd.DataFrame(all_stations)
+        print("Selecting stations from a row range in the table")
+        run_stations = pd.DataFrame(all_stations.table)
         if args.last_station_row is not None:
             run_stations = run_stations.iloc[:(int(args.last_station)+1)]
         if args.first_station_row is not None:
             run_stations = run_stations.iloc[int(args.first_station):]
-        run_station_chunk = args.station_chunk
+    run_station_chunk = args.station_chunk
 
 #print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
 records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
+                              args.path_soundings,\
                               subset='morning',
                               refetch_records=False,
                               )
 records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
+                                args.path_soundings,\
                                 subset='afternoon',
                                 refetch_records=False,
                                 )
@@ -119,7 +120,7 @@
 experiments = args.experiments.split(';')
 for expname in experiments:
     exp = EXP_DEFS[expname]
-    path_exp = args.path_experiments+'/'+SET+'_'+expname+'/'
+    path_exp = args.path_experiments+'/'+expname+'/'
 
     os.system('mkdir -p '+path_exp)
     for istation,current_station in run_stations.iterrows():
@@ -128,8 +129,8 @@
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            file_morning = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -166,10 +167,12 @@
                     c4gli_morning.update(source=expname, pars=exp)
 
                     c4gl = class4gl(c4gli_morning)
+                    print(args.error_handling)
 
                     if args.error_handling == 'dump_always':
                         try:
                             c4gl.run()
+                            print('run succesfull')
                         except:
                             print('run not succesfull')
                         onerun = True
@@ -184,12 +187,10 @@
                         onerun = True
                     # in this case, only the file will dumped if the runs were
                     # successful
-                    elif args.error_handling == 'dump_on_succes':
+                    elif args.error_handling == 'dump_on_success':
                         try:
                             c4gl.run()
-                            print('run not succesfull')
-                            onerun = True
-
+                            print('run succesfull')
                             c4gli_morning.dump(file_ini)
                             
                             
@@ -201,7 +202,7 @@
                         except:
                             print('run not succesfull')
 
-                #iexp = iexp +1
+
             file_ini.close()
             file_mod.close()
             file_morning.close()

From 14043c58bb8547747d8673f53d32ebb454bc27b6 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 14:11:54 +0200
Subject: [PATCH 015/129] add temporary file

---
 class4gl/.interface_functions.py.swp          | Bin 0 -> 40960 bytes
 .../interface_functions.cpython-36.pyc        | Bin 10575 -> 10538 bytes
 class4gl/interface_functions.py               |  50 ++++++++++--------
 class4gl/simulations/.simulations.py.swp      | Bin 0 -> 28672 bytes
 class4gl/simulations/simulations.py           |   9 +++-
 5 files changed, 34 insertions(+), 25 deletions(-)
 create mode 100644 class4gl/.interface_functions.py.swp
 create mode 100644 class4gl/simulations/.simulations.py.swp

diff --git a/class4gl/.interface_functions.py.swp b/class4gl/.interface_functions.py.swp
new file mode 100644
index 0000000000000000000000000000000000000000..d8a5b3db951310f43ab0d7fddd8bc656eb41f3d5
GIT binary patch
literal 40960
zcmeI53zQ^PdB+Rp5GTq40$BE}j19Q(m#x-B%R~JHz@>
zn>QbItbAJSnr}7dYrVz)%812@EALl)z8|k4OpB8im4M2NeoUAnX5D|Nqg!g~Hdt
zC&7oo+raCi(*3^)Sp5B38;IG|AY47eVwfMu`=90u-wOrh{&@FVbb@LF&Q*bEKf1z+I
z_!u}FYyX;*g`=9zGUD8)1Gv=#~A2
zpw;U5Ng(yroARuGMzz`QE=9#slHh13tb{9J6f73~-lFG^%{5y=yS5mN`IXtFxw)WM
zc~01ASNyRs>J^*qMzCBBqgpR2IYcb?a(9@W-VvN^|(N+K`||3Y!vuQhwsTGubDhuIp^?zH{#Oq!%A8X_enAzT_Nj)W?_
z%H_xjGwpY4z0jXg(nN?_gsD7;ICZwCro2fd(~?Jm$xAEfElJ5s*6g)Pa*e2hRD-Cw
z7*wbNU3t)S%Oe{v8sE4$zOmtLJY&nov$ky9?rpqeB+0X(K1%5dhtW#c#4J(t9>j8r
z!WDLF`%=5t31RLL!N8iACU-)uMiq^kFVXsOrsFDUxAL>=E+UR&Q8oW+Y>#$c;h|cotucq8xOn@e{6&SCG;*j
z>8!1)tcoX{_l(mk$fi*&l`2uEDltaJ{1IP^{y}G`D#9)LBgw!vVh1)=$Rm9N+t`So
z;@mr7cZS6=-}4JnO6tE+PnXKb8E2ev+G(e4J)3K!++W_pO5PAFge0^Xirwnwyx!Gg
zXe$y%SA^ut;0iXNf|vbWjDCuB$PwL>fJ%nCQ1uKTXg9{`T$0HEAG&%8hZ)Z(BO#N7}CJUo6
z$*dr>phj&$@VQ%y7Sx<;%%5Nu)afnOm|ZQ=d$uE)@kj#9T^56~KY4V+FO7xFj+)Dg
zGlxu^q6%AR1asbe5LHdzAYY4dkg_8|^Ne82(qm1j{)+K&)UVd&aHVZC(WppF+L21r
z<1#&P7_{ap)vAc2S}n@2@^h9JyHn$vbT);FYPH)7cAC%5xa*S8OokQ3*0iG9T%IX=
z>M8!%>eSl4Qp>>7+a^H~-WW!*d)-f~X%RiFPwC
z1``y%)C}B4tqL2psM9NUW}n08EPT)PI*T54uG{KFt>$cnIYg%yddQXPZFDe`f#)-}<>db7L2?5svlY$7bGUj_54Ywl8k3Zp71CJ(tUYt^u`)NaV^
zz|z{a(pltpp(&T;w^mrnYJy*#Z*`b*m+iOm5|R!J
zXfG9Myi{9yf~YuIW~^J{ae^$?^nf?Nvokq4IiYXIO-`J;`P5|4J7w!h+qWm4>(U)@
znT(<`bftM4Nz3fl@))*)pljIgP%6t(Zt=&ZE)6aWEgiVDkIS`QueRdVLT|Ze-*w`z
zc2Xzb8*WdJ7(|GJHW=>IF|lUJczivDlc&u>GQZvzkfC;Iwf;5u~i
zBG`rA{RD6ay7dj93XT9DM5hkH(cr%Q_y!ihao~&S(^rEE_!7GFyFnd113VUd0e$%<
z@L}+`paVpgJ`Vg6z4;%&bHRDwAn-5f&bNS@!P~)QpboZzr-Hq~&(Whrzy1Ia{rW{<
z790ltgZ#b*yckH{KLm!?LnQ&}32nbCossGl<>i2jvAeq1>9y%xDzP$Gs-SX)j0^2X
zc;r)$nVhT?21GZn=|n?kkch
zrCyaL)+97CqEd3Se9`QU&Zs+PrWlRNsdP!F_n1YXGstieG2jR7dNT;qtdvuEQooF#
z%0k!TdnpEfVc=5D9yGfCyP(d9m>ek97Xws&@SwCwbKP&6@4CT0(pWF}$Jce~B{
z4iwImILz2KjRGXGBov#mg>hhEVi8dLeWe&+o6>
zLO+`or3BCqM^#aJ!lG?=Wo)EN?H%ZCDuiD`b5W{ardzQ*=g`j7nTpI2wWV9k-I$W{
zb8jhTOA!4^W@(LPuUHzBiK#8m#DpzxjXB4vbP=ZV0b;fZC5D=moyUI8-c9L9J2ukf
zHs8dy-V5lK3fb`4-zM|gYFbH`2NqTh?QgW?AuSr^xa>+yEd6%vk%h+PY
z0!nXh;tw=gOB(hv<6vUibU5xeY1&xs{eBd(6xZ$-vcuW5uF_S;WHmcb0+(L}m$U41
z4gs24Pfsv33H5vZM05j_f>uwfM%2w#B|~`q%jCoFUn|M-ZhpI2V0w{P#VWN>be~JG
zV;8$C(Iu4&&Oc@CDsAYGde7|icGMWa+l`8ML7VCkEw!;;w^quY*c?4&L~N>hhJB&d
z@|2?0VSjqO|sM;+KrnihAGn2C%)4vvz$Ahx*lejeUX|>X_
z-bELyt2*l|@%oJWE}oLF@p4^@GkICC?{cK3R#yr(z$$loyVS}M(xCfZYSnsGwC|wT
ztXE|POXl9IP_dtKqk2=*o>~Q4)h-C{I=CHnsyYTxg%
z0f_!@{qL_q@4p(H0Zs!?0l&uv@I&wea2NP*@O|(-@Llj0a4MJtKgUjR5BPiVGEfG4
zgAZaim;e
zBW?#`k2oFd1-_5H;U@5U@H%iZcr^GgYzkv1)j-tqb|6NyJ0IVqA?**@r!fqskyeEqD?UgRr$$GDd*a-4s)`Y
z&@U}KD&lx!Dfvm}^)ixh(yxGwrV14o4n|b73~5=<1c48+pK`cIXSOK+x$+IuL;0u*
znhHSlbyR^P-8!K2P_X>CcU=+2ro2tLpCu?#u6E|CDr89NVzkH&+%Fu7Zm)F}US7#r
z0#q30qLTMYGn7_Du@4vmRAH|}N+KkRUZ=WRxmzA~Es(DAL`X>`U1ue5>N7%9BTK=O
zv3D(fJVjDp`sFz4b(ntjcCo5c7Iz1X>S{jL4t9BqORcCmt|{5R#Z>hEcK?71A)niI
z6wB@uhwZo0?M?({v^wosRo2>hN^VH0*PL&*nY4C$9gzhsrP=FiQV1~WttbI@S`9Bm
zgKq`-5=5u(g^^cW3iTwG4kQsOIk%mL*9<+MX|Cgx;fk6HFs=Jl=^l0WW32$pZpmQ!u6@UKbi;9>@s3HD5uwirvnS6Jf@XEiO~UWYH{{u0qFF2bYQ!ydoear(Wa}5ksP0B6rv@1=U`0=|C`PR*}59CCftc
zg3n3-{a=&U)TSz{8_vvhG?j9OO-dCvDP_7au7MJo(Zg6hY%gq$AlenhR;6S-Fxki#
zUsh~0$}J_C?U~3p_qh_3s-au{z(RFv1dH_al>SUb+G;D<8MLZOM-8pbxa1PgXVliT
z48xYRlAYG0G|OhaNI5v0R%KnG2m4Ozr~{p%Mp<>benqe-4~v^!}Sn7*E21&!fv<3Z4jljt+khxE;I*YyiJTZ~qDq
zUA_UffyaSE!QJTYKL#HG*MgJ5qrlDR?3aVT0Gq(xU@ss#{I%dB;DL{#pT7c}0KN&s
z*Mqab0pJdF@ms-PgLA+U;BZg?H=vLIE4uf)z%}66;L+e~duu)XY21H@-u*@JH((N+
z2HeXQ%%J-oaPQ3{E%UJ0%qKmQt{B$Edwr&Hnc}fXBG!MJ#Ts^eodDsKbv)M}HIGqw
zmn?q}UHPfH8ey`ya{uh^qbc&uPG!4tNj~#zQ0cz4&uZ+FMIJ<{JPIY#1^YGcu45<$iQ{H;)o3wsB8m94
zQ>~o(GSsLzdVQ}Y3UnjOv&xAX(&l7Mha6?e
z;M#jJt`voA-0W{Rbw@1XKrWxw5RFiCB%uAE6OxIsQ+p*(;@BNKDr=WyUaiigw!IY9
zGE;xfZ4G6K)DdfGfPu(nKH(6_`H7v+es}Ll_6WO5FDz9mZ}n4qbh{L@&q-xee%Ee=
zUR^YK^&sEkWrMd^2TB^n+?60GlnffnI0Da61sQrVbo0i4ub)_#gq^DyHEF&{B
zA|SJ(+)!$JR$nV3A7N6c{p|OfMos-3_3V(9wWgW?R%eGLY-w4)C0o3$(UV&K*L@X*NCE1PI;q^TGN->Qq(bpM*igx$|>;t
zIPx}KBUYzk>elX7n^<^_PM|kp2FtipNsX}uJ+Qt=-mTEgy3Ih2OPRf$l0BP%sY&@=
zmC-=l%#UpG^o}x{cz%iX53Gh(N=Ac;xxq@zU_}2%Z7#e|^j!4+x>0uDi{AfQ@EY)H
za2xy>`@t;ucWeRQ1m6Jv2yOsx1y_R$z-DkT
zkp2620a^c_2OGdQunGJd_&m58Oo1)nao}$30-pdM17bJmf#-rQ=zuo35Qsm(1H1#g9lQt}4ITqNf^c37+Tc;(F6;<5g3Ex|7fu8RfL~!-
z_zHM8xB|%f|Nh_>>iImGNQa0+6
z3MQ9QdXzn^xt!V-WzE4AWf9cN3b-X>J+RxMyyLw|&u#gZ3{wpAxXv9@nw7+HpY3A>
zd1tK3tPS;Vm`W1s+b3mNr~9>!F1dEHIm&)!vaEMU*%$GGk7KeHj^wf)@9x4>9Ir+1
zkhCn*qH?|#6e;sxw4nQkTpv$qXLJR*-z0vD`1JTDZ02@gR4#Yoy&;vqg;rtyx+G*2wjrfHb)CZ^tzj`_Q2D^4c5sZIn}^URbSRGF9ko-0&NwcTFPZ_B2CG<*_2^0PNHAmNfLyPRa*w6E2En2_srBw
zMVF3|K}?(UXytZFZ-wO;P2%vDYf~|8P*sg8r0-{BD&d4fZu*P6HmXZQzL_GK!r{A_
zxFlHWu}?yJSaL=qMsc%phLDHO3LFhHbXJg7ER9QBYw(Sc7gE_|@p$O0U|WpaAZbLp
zi5Zy~Kz7%W%HT|vKc&+*Y)5$QdWjcjrXNw|Pp?_4`~
zsnuD9GWvg$QRz0L|J(EVzlDB(1CVq5jsi!3+tBUb1?pfcxEK9i&hL9OcnO#RTfwnF
z&hYzFa4)+3UEs6e1K{1@mEdx48K{COa1fBQ{B8mvmig&
z`v882e*YWr5^xnb0gQnCfcXC348Dji{{^rGYy<~@+tBCV1YQhI11Eu>q04_2{2iD9
z#{)U9PtF$D2mBP>{e19i^z(bct>EQg2e=UUV1FR;{7kI3Ywi6H`hPO5kXj?alS|zbTpYk`iB|K@u;x6}a*~iw;
zn=>J`><-EfRfLoAG#Wb5_V{A?tW!rM@iGP~ho;&XkdZq@Ux~*{_s8|AW569C?C5}C@*-U?M
z@=6tyVEV|oB!cZ-k~aiYW)y2xTgO5dSC1!
z6pK60J;ceO+G3|(8^$W1A{!pd$Qqn8wc%=sadWJg|5
zJFIQZ^dj3%*op|v;ifQfR77I2dmxi@Bx~qKg(Te8S
z#cY^(y9FmR#zggyrtQ1g2;!Q*J46$8TT?aFFnOJ-sJqx77!mp%P&0X^_sa1@Yp18)Peum5oHGj#VifGyxx
z=4kF?C%Gcg6D%O*a!R>dizh%*S`eb3gleC!@)Pv-MipuurH83_>%W4
zB>%%}{SuH0wc3@{cvcI`w`}nHaV&aX3Io(8YfMNqro71Z-9xu;B4yZzf4xZF!C{;yUX8s@Vw@4d?+&1-a*?l3d8p2n0A4F-m7sF}htdUs9
zEn{eL{MwvwSr7V$eiur-o!}wfi?JYXLS`37s?Mbw#qpkVCc|2S75#r3`v2`l|G&s+
zfWJWJ7axGHfX{*tf%gD87f|d17lI4G(ctl5Um#}!ei?ig+z8$X-T=gAa4{GM2Z95@
zr?3}@{oo29fB)|VU=heUfd_$q#%Az#@B$#d0^5K+Gw>Gd2R8%RCvX-x2Hb<~K+Y8S
z4mN{3!MQ;E0DSOOYzOZHuK{O(X8`g2-vs1u{e2HR!YhHCDR2px1vO9w2ZEnsPmsS&
z5Q1~Sr?D-(9vlar2IOpk+p#UY16&K94UPxD#=dYRkiR={A~*pY4t|BKmAu!lZ4X1P
zO9HmGuYID24QZA(Y=q%oLzGh{*6A@~WoYlizlNCny-jOjQm3|{S>6x72`g4}U)6IW
z6L!z~PUN^r4D^X`85!Ue;Re|QX1@U@KJDW4NcwAtefs74z5J}mSsqk>a`D0W>xgSJ
zEEAVphY(>n@#To!LFDy%oI%pjX@1HTWNm(wYl%K&)AcscYgjpsA(Bpx+3;cI6jBf-
z9fReA><}_g=y}#oSLkaSJ9o#^^Dw%iVAAdjrSwrKh>nyFphsOW8v6mUuQ
zMye{jnmdV9@!{WOq-#`vSXV6z8y#D30nRRZ#~)K(0ZPmwO1bhDi}e?^{n)NW5C0}(
s-lk)9`o$mYZ!+>%H0U6{KO>^OQ
zmZ~E4kRIylp|m6y+Nx4jQT4|XigMtPR=q+mQMIa2PgQ%Ul1e>P?V+8w4vJRu%{T97
z-kX{Cc0c(=(*Bam<@l+yGWXyc`==IYu6=3g257`bQXJaxA88sQc-cCMpIe`UXzh{p
zUl3&p-;V_FvAqe7qvZGyKXdHCC}tYYTSW)70K0?^Ny`Kwf?-M|
zT;TK}ABvlU4QwWx=#V%{Hjx$R(rOo)f~MAAc%G_|hYg`(shZahG$Ai;2xCIkJpZt!
z1^qCzquKbiQ
zMn>Q*?2Y!#81=JGI(iQIgi#VK&$({PsaeKKYl`tcy0lngD=C#x5iz44XNWD4sX3EN
zt89?g0fN)G8;xHaAeD_}Q<*}pnAOcHPmlGJeu%)Rz!73+3CZqAYOYkg?xf(0#q`o@aam#YljC=>33wG<@nrvNv`78)#3l(|
zrwt^QO_x|kZy^8Ds;XvJ%Ip%U-$0CCgYWT=_=_0rxC4myJ03I}QOzsaR8co6YDP9c
zy(_%k+2P(acI0A~EwE*3g6Lf?`Wbks`Zy852@cT*jr^1Nhyc5bPdevMlOv09n36#^
zmD4J3YHG{+8<<(DxXdF_?=i#l9x)?8kmzp|z`;p9fDM{~-NUa=Y((e+Z<0cY?3|Y^
z&o*T}T*gK**){AMOz^7(pg1e@8SpDXWFlj
d-uN$2cQYiQ%VWk_w|U@?(-I!$H|IP5
zlkajaH^=UcDTjhVzww!U{{9{1a}nCAUx{4+NxUy*IG;!pkiyZLR|1x7hYF3k))fy3
zxL&g#($&qHr=U3Eyk0DpGzr70DE_82Kn(v@K9(J+Sel#D8Ls-jhd~_Kb3%at_#qDL
z9EKa3@QXd$Fn|~sk){MeU=!7cf!_fp@osHKJHtJimrar|OUMy|&!B}I-(0bDkxgMR
zq(UERp_DejRYx?-#k|G(De?_+D1%umJH)aJx~8x?a!;TcvS0=ug-V={W3k9Mcdzl!
z^l`}G53+IhhU;}}5@{@i(|cR0)8TH|Szo{p>c4qro{9X^;YMAc&;~c2aEuV+&^$W_
zPgeE44FLUUM@Q?*q%wj^=%*Cd&M6uZ>}D1$7A?kTofPjRfJZO9jbUt0Xx+r%8(ri2o(<-!l$YljZX=7U7?TzXjH*O36P8sp4hx
zxZSLAJ(4)j3s_2|AdVj<4!}6xO%xQ9l5&Ux54Y@-7HPFqoo~4Z@B{WG4QZG>BX}#B
z;vMsAasX@$q`IfvhFKzw>Y#PFJ-~|GcI3RJGgf-rbPMB1ZYS-3*}K-d1~!M2WkZUStb+Cma#4bJqjB-LArQ
z{H1;7Fom!*$G8TLw>)d{DWo~t$)S&BjZ1vbPs!tku9M|%)kNkt@aFY%L!epr$Pfdz
zj;kFjNlL&R2}1JL2~B*yDSLtY*h$>qdGyLDZt@YfvX;r*M}|prQBRWJ(v6&M%;t6T
z@$-}T#6q3-MPM5|Mg!JLKQD7z$l~^p*5yd$tdYx_?y-(&TwW+Iu>)M&sfk;i0RjK)
z?1m|9?CMF*lcp1>nvN%5a{T7yi)B`t)lHMVhsCb`)^nu0qfdj&t`aDr%+1OMAL&MX
M&^0Uu<$x^z4}dvWRsaA1

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 27f44b3..ddbf228 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -7,6 +7,10 @@
 from contextlib import suppress
 from time import sleep
 
+import tempfile
+
+
+
 
 sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl,units
@@ -47,32 +51,33 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
     filename = yaml_file.name
     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
     #yaml_file = open(filename)
+    shortfn = filename.split('/')[-1]
 
     #print('going to next observation',filename)
     yaml_file.seek(index_start)
 
     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
 
-    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
+    filebuffer = open(gettempdir()+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w')
     filebuffer.write(buf)
     filebuffer.close()
     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
     
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
+    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start)+' '
 
     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
     print(command)
     os.system(command)
-    jsonstream = open(filename+'.buffer.json.'+str(index_start))
+    jsonstream = open(gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
     record_dict = json.load(jsonstream)
     jsonstream.close()
-    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
+    os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.yaml.'+str(index_start))
 
 
     if mode =='mod':
         modelout = class4gl()
         modelout.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
+        os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
 
         return modelout
     elif mode == 'ini':
@@ -99,9 +104,9 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
         #os.system('rm '+filename+'.buffer.json.'+str(index_start))
 
         c4gli = class4gl_input()
-        print(c4gli.logger,'hello')
+        #print(c4gli.logger,'hello')
         c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
+        os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
         return c4gli
 
 
@@ -365,14 +370,13 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
             # yamlfilenames = glob.glob(globyamlfilenames)
             # yamlfilenames.sort()
         else:
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
+            fn = format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
             dictfnchunks.append(dict(fn=fn,chunk=getchunk))
             
         if len(dictfnchunks) > 0:
             for dictfnchunk in dictfnchunks:
                 yamlfilename = dictfnchunk['fn']
                 chunk = dictfnchunk['chunk']
-                print(chunk)
 
                 #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
                 pklfilename = yamlfilename.replace('.yaml','.pkl')
@@ -380,25 +384,25 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                 #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
                 #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
                 generate_pkl = False
-                if not os.path.isfile(pklfilename): 
+                if not os.path.isfile(path_yaml+'/'+pklfilename): 
                     print('pkl file does not exist. I generate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
+                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
                     generate_pkl = True
-                elif not (os.path.getmtime(yamlfilename) <  \
+                elif not (os.path.getmtime(path_yaml+'/'+yamlfilename) <  \
                     os.path.getmtime(pklfilename)):
                     print('pkl file older than yaml file, so I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
+                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
                     generate_pkl = True
 
                 if refetch_records:
                     print('refetch_records flag is True. I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
+                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
                     generate_pkl = True
                 if not generate_pkl:
-                    records = pd.concat([records,pd.read_pickle(pklfilename)])
+                    records = pd.concat([records,pd.read_pickle(path_yaml+'/'+pklfilename)])
                    # irecord = 0
                 else:
-                    with open(yamlfilename) as yaml_file:
+                    with open(path_yaml+'/'+yamlfilename) as yaml_file:
 
                         dictout = {}
 
@@ -416,7 +420,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             current_tell = next_tell
                             next_record_found = False
                             yaml_file.seek(current_tell)
-                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
+                            filebuffer = open(gettempdir()+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                             linebuffer = ''
                             while ( (not next_record_found) and (not end_of_file)):
                                 filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
@@ -431,14 +435,14 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
 
                             
                             #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
+                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                             print(command)
                             
                             os.system(command)
                             #jsonoutput = subprocess.check_output(command,shell=True) 
                             #print(jsonoutput)
                             #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
+                            jsonstream = open(gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell))
                             record = json.load(jsonstream)
                             dictouttemp = {}
                             for key,value in record['pars'].items():
@@ -456,19 +460,19 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             dictouttemp['chunk'] = chunk
                             dictouttemp['index_start'] = index_start
                             dictouttemp['index_end'] = index_end
-                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
+                            os.system('rm '+gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell))
                             for key,value in dictouttemp.items():
                                 if key not in dictout.keys():
                                     dictout[key] = {}
                                 dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                             print(' obs record registered')
                             jsonstream.close()
-                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
+                            os.system('rm '+gettempdir()+yamlfilename+'.buffer.yaml.'+str(current_tell))
                     records_station = pd.DataFrame.from_dict(dictout)
                     records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+pklfilename+') for station '\
+                    print('writing table file ('+path_yaml+'/'pklfilename+') for station '\
                           +str(STNID))
-                    records_station.to_pickle(pklfilename)
+                    records_station.to_pickle(path_yaml+'/'pklfilename)
                     # else:
                     #     os.system('rm '+pklfilename)
                     records = pd.concat([records,records_station])
diff --git a/class4gl/simulations/.simulations.py.swp b/class4gl/simulations/.simulations.py.swp
new file mode 100644
index 0000000000000000000000000000000000000000..2979bd721914c462302dcaf5108d3deb10ed2a82
GIT binary patch
literal 28672
zcmeHPYm6jS6|NOf5mAB?_$8N_#Oh{xdS=&MRz`-n?kp>^EbGql(AlA^>8hS7sjlj+
zs-E5fSE9y8q6SezAjTN7pb^2h@s}8*iLb<{gqUa|Xb_DtC>SG(5x;XERky0EdwO;;
zCZ;O$O;z2;IrpA>?&I8hs#j)jxcXYQySdZAb)I3&zV^h@&9D9Vwx`~280&Fs=Z+ma
zcT?1;Yt{Ew?PzvKQ#>@oxIFc{6~UP}Y$ts?a%Wn;9mhLY{p=z3Iz8V`yfBDowC84b
zqnv0JT1lXiz~&O@CGF|gpKH8s$M#uDJUMX*d)4{ZZ=SD8QAwbZKqY}n0+j?R2~-lO
zBv479lED9s1d{Gq#_g!hi)4+K#q8sPbt
z8pc%K!s-9vc|H13m`a0d#;hU>f)v
zHZQ&dd*g}CGaRVF>VDmfTO@4v8i$&a1vMtdO!$V155z4=|Xb<
zzS?X-(5YTbUU4FPCoC^lMDt!%KX7bZkPug&aeHEv13V%eq!Vr1
z^}WPe+0Yn~P7TQmZY8nJV?8(8sF{mL4qm;$=I5C?6(>=xAo^y22GQ7fQmq<=bl{;h
zn(KYjvI6%w8l@&=(O>g?muq2B*mJy*ilLx)l?_ehkvYn;HY!Z?ktX%EDD20Qoz_N3
zKbz_S8+v6;HZ%!~+_syv)|8y*c?lb15RZE+v75}BvQf=(^6-Z4LQAYT>;(>VA#NPy
zyh<7qm0WcbRj`slL}oSA=%8zLetaH}AdG0T`_`C2aSJ3?(tqe(tu%A7H$5KDK;rX_SRc0j@ioD`wv{sY&&Y0m+qGfqHg
z=AOOg9J|eo`Vs)+nauloEg7xf8F$v-T*yC%E^Z91(v<
zF*r3R%i!>o)Hcj?V>_Yys@>_>ys-Q!WwS<+Jp33>$!Q(vhdrL_trUCs%@$8}*ju63
z#l9DayG(RM6v^RKL@{`omdw3eQhRyD_EVlW)4v1!-#+ob8*)Ym_Cf1KNE~{;cv^Q?
z`2%N&f{`F%H}c%r3Il%w8h#X^ll<8WT7J)Q87juwq3?&tw5sL6+{NQMq2u}tCGYkU
zYE(QicijzGe+~aAH|A#6itKpJjhn=D4O2hE7x#;_z@3Vg7C5amtMmq{SO!!}RP%ZM)@K@Fq<>
zm0S%B4fVjM3tAR6Fusbpj|Oudl8tHBhTZWS*?U}Gn0!GRjK&2^R5&f95pRf*rRfD|
zk)$@;5Ti{CUa%G)a(r(E0}~BFXs50nAsNOZ+$;*i93#VQ$Dxp32aBbeIXz7jrisEd
zD46xhhU2zv)PCNa>4C-!6=!A@iDy>V!`azcdAodeW?{!d=90Vjz@Ej$%paH28pcwJ
zahfkQ%#`6Q%8zkl>}!(sx};`ey)u*;tvjYyHZnfb+p#v2Wnk$=yNIKC*=Z8oLH<{7GKO-R>#Jo(lW8mmm?K=D$kt;&>CDt
zb~ln+)iVYN^@w(HY3OEl%$!7pqDI{f3JSU^2p;#6jTfq?E;JX3@YjWnKgp&qokk@|
z1api#15=1W_Wv(oN8bS3n(Tjd{{ATJ_@4mZ0$ku+;C|ThHvty{KZPy-5#TWJQeYeK
zUD)#P1Fis`g$@5OkN`E{SD=3
zXUvPX7UoSa@L&{Bmhp26qusLGFxrDK3}96|?s3Zv9I?xJ+LU0iL`j2{6pI_|DP!0N
z+5Q0EFQ~P#>8y6pvl5olDpmv3)OJM5L7Y4A4f6)Cr95F6zluW4_&%7<_4w9IGmYD-0;5!?
zr~2qMr`;wQjnQpFV(>O=6eJmFly(@gnlAT-)OezYjUUxUYVWO<1_WMRoY4q154$+2
z;=5$&z6#%%2LssClEuXAE@z@XWoDhONgH_BaF4Ke0fV@_XO{E9zRCy=q*NnlBf5dD
zRt#x!Ov}nYnl81-2)bOxI^DjPtfh=Je!QlI(=wdyQP1Md!R`Og;_Uh^oMn^!ul)PJ
z4150sKxh2hfq%f}|1t0$fX?;L0UkfsFdhSb1biCsfXjh1fhVvB@D1R0;2Pl7z%$qf
zcp7*J2!RRU8SDc*4SXN?0&oZLc7V?OcLC30Z{i+c9(Wq(`JV>v049O2;oN==m;xTh
zIsHlCHeeAr3|s+R0{jVeqxvla*8pz;b^(u~-gg5x0%rn$#hLw+z)9dRa3=6QoY8*?
zI120sb^wp#y#5|wL7v-F^KSi;Js=z3#nh$uz#>sG=%hJ@^~5wmL(E%6GDXgyqxxH#
zRUaz(wrp*TuIdeLEs)1ETln$;%UZs4BiHh&Vakm)1*}IgXDraHY
zv03h7d?~ffH9>M8rUx>*pan*e`9Oe3V6qxYKtIGG{3zy_iDQ$9`wA#^ksQ=2r(SQE
zbqulPP0z5WIx@R*+^A29p{-a@IU#eo&^E2`8SwBcAsFoWc@*|>%|d2v>e2OBO+ZS7
z+9Xt{lqgkp#$PNIV``y5ek?5ZZgn9l?#`em9hxXAdbDT`4;cYYZ!qbUS<@NP0a26h
zZj*logp$sZXci-*mXy@(8+hcI6NIK2IiScM|0djx|BAl68Yc^ii9S9@DS6dk!GO6aOPma7Z8OB
zxG+g_*OP{cm4rzIi&~YE9bJxe8ZVYZRQ2@B!R3|?+Ddv{4Ti6hMpreZCL~zWPO8ty
zwmL0tl2(zI@Dod=oIa4a3&pQbO*Gl!Enc@vV}h=~IL?m5iICLpru0%%3BR+=R#~k8)tC*leuiA-H%<0rERH(vMb@^MLt(U
z$5F8srE(5Qf~GFSY~7`bsx#E3iU!1NWQw!#Pp-#93^W^Y;&y6g=N89{*mM`8p3>;L
z7zWfUN}Fj3I-WXx_zd?g2a$m
zGv9zw$53L+^qf!LB?9H)gXy)D84a>56=kK<>8hsbBuz3Nmj{Un+pV0WvNu(B9OBvd
zsjYyeY?Tf$zW%2}Puc`gq3|pS90t!M7DK*}8Ma39!!wDyc;!2MF_zwyo0Y8ZxSnXr
z4=pYI7@laE)}%8ToXM1+Q%aCuw)+z6lY(+vE31bH+pNB)R9jV7sS2CcbaXwl0e3_#
z<;q%jpp(5&>&L0jEN3W(MG?fmx-7?ahsN##IV#zVYAU7;3|7=Nxj(g>
zf%5a)s4^^Rk;|E`E|TMHLyJS%Wpl>D2NwO_;;F>0$b8yjs?|LQD@{88ryuzHAlb68
z|F4x>&|iej9{}Xne*y3??0q`>zW{hO@Eh3pWdEN8J^)z26mSmk2yFd_fCqs-a5JzE
zxCnS0Hvgx9`CGbt)tH7s#>wvR?2e21l1Lpyc!2bUrum|`pY=7DZxC%HM_&eAb5+({I6Y#hN$ci>VL>6h2$r$_-
z&-PU*OXp%L45vm9>e3cgQ3p#ZVYOsEe?&U)983paEw>Y{yUgu$lMQZ>#fBn6_xxC-
z$Bg+r$aBn$l&mvh&
zlx_>tS)cWD6V%|hDkZd3ZN|92@GWVK!F5;?)sRr@O&3tZXz=j=eLJEG7_m;h9bsXL
z%@<)98hUl}djQRrAI7fy5=ABBNt9VKB7rkJH)@bZmKzbY5r?n1*!EoMR1wr&?t`H(
z@egu@6K%Wa`-L4Lm7ebI@s`_7GMiYBQ_tz%6b>3LSYLSKB8G?(_47_p$q&bhQ_b
z>e6f)J=D>Tf0G=J#c49_osG^gvgRWr#o>Zdl8k5Rq@v4JYtgY9jWhCYB{Obhv)IshoQ}0mo}$r
z*p!J{?mX7_w1d~c#(EUf+c?Ur6)MMHocXD}FUzqvu;;aw)SLXXN-M@^I)Q`FVCpBv
z=R&rxtYx;ar^51WIU3GWKKN|d(2ABm=F6C`zuC3%&4StB)xXWeSU@D{OTYYJT}n9R
zFysMae$GQ=NU{;ZVlkc2t`M(B4
zz{S8TfNj9Tu=hK_#lT~*?|%f4&A$R%3p@w9^nE|ke-eDEtCBz^fl30E1S$zs5~w6l
zNuZLze_sM47RMu^hOC=$-jRM1P|Pi?gw)0;MvwXa0x;G$8@107(vmgGt(+p~p~o!J
z)mnF{%Z`5aY8v{BdM*vDTtk(3^t)^kwJ#@3x6M_s%w<5+0~uY=f`Mik2oNQ!TtkIp
zY|%AT9*L^v;~U}wE|H=^2YzEK^~@L&ul?j$>S1;%b{drHg*=hcJS4JYx
Date: Wed, 22 Aug 2018 14:37:44 +0200
Subject: [PATCH 016/129] add use of system temporary directory

---
 class4gl/.interface_functions.py.swp          | Bin 40960 -> 40960 bytes
 .../interface_functions.cpython-36.pyc        | Bin 10538 -> 10781 bytes
 class4gl/interface_functions.py               |  25 +++++++++---------
 class4gl/simulations/.simulations.py.swp      | Bin 28672 -> 28672 bytes
 class4gl/simulations/simulations.py           |   2 ++
 5 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/class4gl/.interface_functions.py.swp b/class4gl/.interface_functions.py.swp
index d8a5b3db951310f43ab0d7fddd8bc656eb41f3d5..95717cf2146cfdf5077234636933be7e4d0072d3 100644
GIT binary patch
delta 998
zcmZY7TSydP7{>8;S2s^>yB3O==@g}fvD#77@<>=xsS%1L&?QE
zcWWt*jlyLY87*nvU*?Z!vYX2EUi(p-W!frIszS64t8#22*~mgBGVpnk$W?T}g;L$A
z%~+M@h)|~-FXG${o*K>Ts;P5VRlBsRnmSK|SF3b+jYkz#4a%7`Xz`dtnp8+~Zi_!u
z#DjI14G|eY1vcSfut+Q7@hC{-GD@)x3vhpd$PHXaA6&>s624kQhERze2*iUx5gj*i
z0p38#lu{NsSc91Wk&pO*VffI964>y(5QXmyG5}++6$r*__I?-RD9378@RL1XLp>^xg9yCmuXk`8<me$suL5E5)$3DYm_r3Qk~0PuHV{T
zU|Rg&>onDwo2at#qV=Y{j?|VMzPyJ&7z1zRW?U~RR2T9wOPS829(C9S6E0H66WEI%
z3pobd!*Lu#Aqubz(_tc4;6^Oo(yI}uP-&407UhV?GzA++D{Od2$$VIgPn<(P4#0tM
zyd?We@Sqk&_(AqYv0jn2k$*QfBM1-3xfo0FjiHCk?9Q0JlN6qkZ}WVSBkDjUUxaf3`Vk;-N!i73qtMaeyT2$zX!qQVyY
z(_+g*81;}*(=8~oP<)EY!XkneB3dDkET{;w@0A|}GAINWHQ;Qy2Q|B3^i~4#oJ*>2363yYZx6
z>HKL^U9QN`dX{p!qEwUXslC3jrMa!$*n)3@NkC6Aa3Cdno)vG{4k5?XoU|m@hDW}4u)_6-J!Bv
zs<~K$RG9EBL}U`z(TClr!aTe-iJZV*1aQ+740dXIXUbf&N_HoxtC=&@kbAYVWc*Si
zfoSDf6rq~;+f;1!0^OV0r*YGz$7X*Dwbe56KMcW`d%2pGJLNB?Aq(D9{Cyn4K@`D_
zI2cWh!Dwm~G<>AyL7c`Z^bzI2SH{Y2%_5X)v;O4@VG(uVm~jvhb7NFo3PdK|DS%{wwH0
zC(03m`TT+j#yyUH7*UGD3r5|7bXYKI;>AYyyU0wE_HVuxmFsn?l&pB&urw?i&voMKeP!+q
zIMLlK(&k51HQ+L+Qs4aOk5ZvNL{+K$K_Db5(IS;fNL*EjQmIuDKZ?{pMHT8fvv#~|
z+|pv}o;mY4XU?2CGv}NA$;t1}P3#GU^q-z-J(@o)Nq?4l{uJQvg3r3=n-gDdQ;90o
za4k}b5XtbHQ8NZD-i}NpDAj7bmMA4^$x^ZwFU4yEr2*0ItET3#pH$POv?vElgQAp5
zM3k9QMwCOPAyE#OhDAA28i7))j@HIXW1y=oQGYdC%aw9Ox?iF?4P29Gz!>j%N_m>4
zAsW6$N)yl%p;73Gik?ZR$7metaZxWoJ<%tbqiH$_l2bH8hptJbX*x_tpxj1B=@^tn
zN=`||@xMv@agqraiL0MCY}=?c=@NVCGP##%5%ACp5{vWK<;O{uzmYmxh(lA6uox5!
z`7TKS=W6Bhl5MbZd6Mr+PmvIR*;5HbvBy(2>MX*il>8(Lx{7(J*4#q8Zmihl@-(k1
z^9MpO5GERef`z;MaJTpmb2pjsM7cZzb7%RGSBLov-tFxq$`2qxX*Pmn!*0Q$@hk{r
z3_eQ(q7lujd305@x}I8tE?~gmA5M-#?uE~)%97+rrqog##Uzw;2p!KzEibgm4%CR$
z2G-+1Wl-u6Q=uNJbYv*KYo$tk9l532UR&)zD5WLGn}*OxOSTUc0Plx>pW`z%M@5Ce
z`8yH>4MYiy0x1}?m0S?oL+SSBA>kg?k+js(Y~9gNu^ljjYh1rEJrW(#^4lRZ?D&Dx
z9e>vpl%&zEO=;sAgxww*Tb5S|TeKq`sTFVnX4KKwC;p6h-=whWz$wY`&^Tz2*@cxw
zN2iI5WX3wuVd?DYuga}pE98WzwCsl&e(nTmepyA%ZpY1pnS`#Pj@%V;f`=ri52G>G
z>%ff&v##0$W@=-^D!F)dURpWr=w@0>@<1yHYA1x+4^|F1x)Z!%z&0=-+={?a&0Uka
zJx(|Srgg~K_39xBRDL>-^n-cH9t0mmo#_ANgXk%^Zs5dD9{+^wWOzRKo4!{`j$hZ_
zFHS(UD7*5GT`pPaiEs?=PxM38CCgrD)S7oJ@^ATf`no4d<-hkQ3v334upL64?L>YT
z@)=jDS89gqwVKr>oBz{4>}7Zecv9a(e0)~V7H)#7t64T{+DkQq-GZ`chpQ|bmo4T6
zwix5z(D(RDKI=k**^71lif$a+IDg{ah`_Z|X2wE;(emX=t-2fJtiwPgqI$v}U7nJ2
zL?@bO)}ui=hCG?{=tKo>k~adQ$|PZPJeJDv7XmxUG=DenYjP*&!NdE+U2z6fF~|Ut
z8)e3#VJ}=LTXw}>YSgVwo?>`i{P3k<=$C}d^H;-}_BND7U$d_ODSFsE@)U`I1h1y9
zHzTfczS=m)#B3>Ob`=ZO?CPg0)l0^Gj5QcLD4KDRu2yYSXt`3Yii^orE6t`+r|eGD
zw;##vNCqE83IE}WT{*&FJBt49)q9#-5hK~}NcI?veUX0@d2n_bxFba`8$}&>t+6Z;
zaT(&Mo@H-bfe%B+58$(Ko59l|_OS5j_oCPL3lE5ESY|km;k{mzyT*uyO}5}vzv1QB
z13B#2I%9}?`Ar@f$Z6fg!9x7w*we>QH$xA0r#g#V8p$9M5oS5$GC*&q=o
z{RX*yRC#Ld_|e1f@J{lPJ?Pc{Ime0I^+)j5@d**tV%m)f1Se7%2F`o@sex%z=-0Q5
zf&sKwq{Jq242ejNDBa_V4YhUXcmqBQ!vHd*N0W6GsH(}V4uNu<1o`8sTlzu;0YqP@
zh=vWLg}oRmA0W8+yiob;8m{;_mf_9xW3$2oB2I+bFB&2r^3>oL*^9rgLydcdpM9U>
z=l=>l{=~zXeQmrlZOJ5%9K5UEm2-$AF#D`}&(1-PQCq(Akg*+^l4Xs(V|yHx${m?{
z05-r!70Bm)TQR++>JSP*!trDBWmT#|w$@CatvP-Y
zJqm3YDDcPUDSv$n#R~O9hS#ZntJKmjJ~1!V_x2hKy1@W2xKFpEwbv|^*DdT~6RIvN
zEV-(zJmhHL3+3z)k&6R>v_g=RArHgqLS}#}Gz4HN3@eKCc>%yghd{RfnjNHJ2Pa0~
zKr00Vbfb=7iAK9<1?-R1PYWfXHCKkPr>{k*U51Qqy$6b8HxzwVai3mC0SprbiTGxT
z_KgOZtk)DLvMO8qAu_^_7ieV7Bw@gLa3w?wxj1G=dre)HpLw|zU6NWc@JAfJ#L5mQ
z9+Y76YffUVgqIG)TGC0<#IoFK$AL)#lUS86o|%^bpTwbiU{zkJI*C=q`WIL|;0)ly
z$Qfv*P%(^2I4ObSqIME6EV>rWPXW;MV-N6Y*p@U+wvsEe!UPY?2@|kPn*&Y~HgAw-
z+5n9Mbckj;B0fQNuXVk~xnNw{Od$|nH_}P92EnKdK5}4cFa;*w%9v4TAN$OREVQ)O
z?ajeK8nT1VkXV;urq{Us6{5(R1Z=^uJ!oc}Vc7y%GoDqBsm_q_Sns9u_T3IJ6
zFsg^WMe}q5-fsI4B(`$)u#@eOwvz*VsW8>aI-}0mA*sI08Ke854Fv2FbJQFI1vdeb
z6}?Ddf~=i0$2+9kw(7a~7Wig_-VCQ=9Kh!&MZ7x*wZ_2LsR6Ug39m4FNAE&YM%cCjvyo
zNpbQUXJcNvm;}Fg0X`o%&Me3{u4mUi>XPym?+(OcY#dV+0fH!cvF>^oE?lZFADIJ8
z^4Qof0B1(C`9uK+WR_n(x&f^sy=I*v8+@?w-Ny29IgQzT7kbo$SV*JxDgC
zy}i)(8b6TR$KUoRZWaJsK=CWd!Xq<<+Dw7w3bO|b#|{=w-ra2qgEDnr1|#z2zi*_pWm&DbU)AAd6UfY
z5A!Dnj-y=HImkVT85_+B
zeh8&e2h0Y&$}DiYGRDtLj%FrdENUMSWA>6DiGsc{66a4(zOn~VrK{A8O5OEXHZ3YW
zyXrg78kd^qF1vo4Rq7V*EIa{n{Ha1JQ$siOJAi!$+TQ0sD_lJ~3EUjStf#qPL*#4(
ziUNw3VUlm7_;X0Ui)1Tp*zpm6V(OV~sLdr*?C5#mB`NpfU0}%#wd`hSWB2iy>4R;t
z&0c(tSxoFzzmcwP!>h=sFDx0>KX8+D)CaE7Nz}8Ku;-x$2s(#_N1SVB1AnBr-Bp{F
zI;~h@TGv~LUz{#?-wqwi0TvT2jJjgzLSo3?41CjCiB;?`l?;%t1!j>r4w
zJL6`P-tAI0;sI3Za(O5%l$Hk&s63!nsPMo~L8woqNJzk_pbCTp;((1|)R
zs!+Y95(UOUDPB$#66IteS&kRt(7trjU_iZ=qL?
z*+N#1xk3)cu2NsQztAsjnWcg9U}2Cb4>lDVq}DZsTJ}&&D-6>?8m5tJq%iWJLZdVW
zD>1n;3eRzxfaipK9)suP7R@1=rWw%OL3?TTno=02Iob!~1ns8-Fz%$}oRS~@2d3gw
zp7^G_TC0gViM%Pu{ltvI^!F5&5Pw!LkUnvbc5*xcQ+dMTFfioXBng~v&dn`(Hk+Fp
z7C+BSkg)ijRt&{(MJw4A78UpEqdQR1*PYdJ?FQ-CpMF38QcGf=Q-bh|Sdjz(k4>KbAFUZkZoEN(eb9_%<-
z*V$qpGGU!lnp~rCP*ho}zQBVtkyV^nOF5=I_4t!&Bh(15`sX+-G&9lWNrS9l}~p|!~R)3gF6-C%$x%~#8{BSjIm_J#8X+X)M77ZCB$
zNGg?A*&dwO3nZWM^-8g9i)XAp@rt#d7~(h9(0Cq};8pfm&08wlY#)lFS6^SYuec10
zI3E|?q5a}?$bNX^J&xnTIxzWnB^k+(<_?rYdl3FN$0YWu-bwX!H#Hi7o=u3#sDB5?mQmAZ!$CHM>ID
zG>Qb=T4~8+htc?XoCqE(dd1@m&LSUdUykR57PnJdlgpj}+3yM-KYd~xxa0W%>qip<
zNCuI}iwdPSC1c}yItvS5ho6f_2>Fk&voh1SCZ0bib09BDm7yHNn-_{jW?fEfiv?Z%
zQv56N&@e9C_)eF1=C8%KdWOyR)eiURbIGqfinbZ%uuGh+q+v@lwocd+obJlta^D6W{6mG)M7I!~&0Q
z!UC|3eAg!=Tc&tp{~L#f*x&>4YW6_$LwNl>X!Xzt0&9BxeQfsFY1Z_o-hp;vG`bd*
zhRIb*mQD7d2R)w~h$K0o8gyx$8;C5ScL$(%!wmNHM$ij5(Cx9EvZrYmG*{Cxy&yCp
zi<-}?*kmi0aGo}UjZpm?Jmh|GBZD4vWeFv2X8gSy7}lxP2!W%}9njTRK0m8erf*^!
z0%>lK{JKGPJyI`(kb-tC>nydV)!*PIWJi1I+p^P#vq~cZ-5c60>@wnnsX?QFd!jVh
zh;_0MZ4v14Ux3`jcoh7_Aya^AXdJH&!_)g(E!DA{`FYmZ49(Z-Zj;zZ5%q
zTnBjJgGBdDC1POX-*CiH9smjP~-#?y^n-qXn9_d@3gG7AvHdI*CM#!zQrc+>m{RCLbMcuqzhaMB$P
z_pnJ0QrhQboL=4sTtDP;pnjG2(@}tDS@HTnPxAsDqdVXO=O6;I#;}*;LjcSW%4zH+
zKEwz3;IvYi*T!78?(>+jwBi!Nx
zGy>SN7skCXW?|en4XFH(lctlh@U{d^l=@LV;AADdJP5D+U}K06@`N+eQkJ{cbT%n2
z4~{h9h~YqQlUeU;=N0O7-69@kCygipCGp6tG7E_DjB-{vFE1EplqX=@qkPmEb9T@}
z)S~;Jhl>QPPRZImAZ-{}4bsb~GmfUIRd4P{@l
zrBd}{vd`Bh4^W@XT@CC-@WTk+lz>qeDt=)8(rRV-_zZOZXNG=AM#Y}t(Zm?6vtcwr
zCMC2Kj`>0t+kw*);_czv_u#z=AC$UxrDg*Hn8cOakjM*F64_dAhq<4M%*cU(>oAug
zer3Cn+=}EDAmTzW9d1L6<2d)A`0~gInGru2Io7tNMOs>_*sycR
zEd%7Juse~+u6;LhN08iu?6QZW+7Xkj-{R%#S(b0rB40*{O%odOL5(J%Z#>)HT{b-w&a=
z>Z<42@LhfimrskC9d|aj9+n^4_%?9QK#jP+0#QXZNt)=y)Gabd!oa0;9jHNe!&6Gt
zNz~X)l4_Vl;akDf!mt`8D(K)hLtJvYYP3g~>(#=l4q7@%PND*;sg`QNgboVmDoJwJ
zS4^DOr%*;uiNoUq*&M8+t9~hiLA0G%qBj1si-`Jsy=)gNzUF!~uWu@Xu0>W|tzEq0
z2R&A-xL5-K(Gkh|TJ@sKzJ{CuTFfr7Wq8!%bNw
z0sN|R+h@7$6fF`F*2P*JKoGWdxFW*>M!_$g*ch|n7@}2RGrHX4aj~Qr0M-o8N1;p=Os=yzM7K-!s
z+`?*QzWoKbfXnkpj))Wa2@(|5e5Q#3`bPDlW5atF!qqcK<m~%LEe4eLI)0A-Az<
z$>C+=o73<|oYj(j1n)N&-$<;l_5WcK)}o{rDrtSDJ*LBISdFKw<#;}xi?3QuC2WRG
F^FP>9B%1&L

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index ddbf228..42dba38 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -7,7 +7,7 @@
 from contextlib import suppress
 from time import sleep
 
-import tempfile
+from tempfile import gettempdir
 
 
 
@@ -348,9 +348,8 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
 
             # we try the old single-chunk filename format first (usually for
             # original profile pairs)
-            print('hello',STNID)
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
-            if os.path.isfile(fn):
+            fn = format(STNID,'05d')+'_'+subset+'.yaml'
+            if os.path.isfile(path_yaml+'/'+fn):
                 chunk = 0
                 dictfnchunks.append(dict(fn=fn,chunk=chunk))
 
@@ -359,8 +358,8 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                 chunk = 0
                 end_of_chunks = False
                 while not end_of_chunks:
-                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
-                    if os.path.isfile(fn):
+                    fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
+                    if os.path.isfile(path_yaml+'/'+fn):
                         dictfnchunks.append(dict(fn=fn,chunk=chunk))
                     else:
                         end_of_chunks = True
@@ -389,7 +388,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                           path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
                     generate_pkl = True
                 elif not (os.path.getmtime(path_yaml+'/'+yamlfilename) <  \
-                    os.path.getmtime(pklfilename)):
+                    os.path.getmtime(path_yaml+'/'+pklfilename)):
                     print('pkl file older than yaml file, so I regenerate "'+\
                           path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
                     generate_pkl = True
@@ -435,14 +434,14 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
 
                             
                             #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
+                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                             print(command)
                             
                             os.system(command)
                             #jsonoutput = subprocess.check_output(command,shell=True) 
                             #print(jsonoutput)
                             #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell))
+                            jsonstream = open(gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
                             record = json.load(jsonstream)
                             dictouttemp = {}
                             for key,value in record['pars'].items():
@@ -460,19 +459,19 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             dictouttemp['chunk'] = chunk
                             dictouttemp['index_start'] = index_start
                             dictouttemp['index_end'] = index_end
-                            os.system('rm '+gettempdir()+yamlfilename+'.buffer.json.'+str(current_tell))
+                            os.system('rm '+gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
                             for key,value in dictouttemp.items():
                                 if key not in dictout.keys():
                                     dictout[key] = {}
                                 dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                             print(' obs record registered')
                             jsonstream.close()
-                            os.system('rm '+gettempdir()+yamlfilename+'.buffer.yaml.'+str(current_tell))
+                            os.system('rm '+gettempdir()+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell))
                     records_station = pd.DataFrame.from_dict(dictout)
                     records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+path_yaml+'/'pklfilename+') for station '\
+                    print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\
                           +str(STNID))
-                    records_station.to_pickle(path_yaml+'/'pklfilename)
+                    records_station.to_pickle(path_yaml+'/'+pklfilename)
                     # else:
                     #     os.system('rm '+pklfilename)
                     records = pd.concat([records,records_station])
diff --git a/class4gl/simulations/.simulations.py.swp b/class4gl/simulations/.simulations.py.swp
index 2979bd721914c462302dcaf5108d3deb10ed2a82..fa508cc9ce2f5a68fdff618e8ecc170da0d80236 100644
GIT binary patch
delta 395
zcmXxfOG^S_6b9gT(9CO1rAW0Ii9k165D2+Q7cD|WD3UH%lq06%OpFG$h}`u9zC|tj
z0TI|xwCVB#T15Q;EojrG3+|@HdQPgTdp(p)H2>-2-&*_F*
zU9aedR@4gNqNe8SA@eQi^_JclXoI1nT~mwJX^qnK)dQT<0(srQ1+{2|
zWEW5&n_L~Zq+?p86`H3wz4E%Bph!6l@-tIb^)Y+@pfN6HvQh1?6>%plN=qSG$vV39
za-nebjrX^h(SIxAzsLIN19oYLmgzmn9MqsF1xca%08pnWy_mog9a4$%^v%6@
Date: Wed, 22 Aug 2018 16:34:49 +0200
Subject: [PATCH 017/129] test

---
 .../ribtol/__pycache__/__init__.cpython-36.pyc   | Bin 294 -> 0 bytes
 .../ribtol/__pycache__/ribtol_hw.cpython-36.pyc  | Bin 3455 -> 0 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 class4gl/ribtol/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 class4gl/ribtol/__pycache__/ribtol_hw.cpython-36.pyc

diff --git a/class4gl/ribtol/__pycache__/__init__.cpython-36.pyc b/class4gl/ribtol/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index ef32d81b7b590483a5671bdd87c06a3b3edb7c2e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 294
zcmYLEu};G<5Vg~ULZN@a(5=c6mlBDksyedJg#jrYP9Zjlg&iZ?X+*_Oz+do7UYYm>
zCax60o!+~5rzhRr<#;@LeS6t-gpd!!Uqq-@2zQRe38#u^T2M-OR78AG3~ExuJiaAq
z@=4K>(r{wsy*CP)ewWO$i)^-=UW=An`8c^VofpDIJRRuasK%3!f`3kIBC@D9qrj%uE(k30`2inOQp#1IO{aF
zlkPfg;?6#xqv8QU^S}eN4}cI42&qB{P@iapKtiY>BtV6gP(gh6~xRF)qx
z&02lG*V=WimY;JQt1Bz3m(6=vM_F%Z)Zj5tbd)s|_fb@wd0J2DF~Nj-S9@CZbT1+_
zu4WaXmt^sxBmGE8@nRw>Vq4^wwr;=tReFA3wSh%XZBg}&t^IDpZM2$}Zax0-4=ozL
z-f#Bmex%>;S2~v7-`VU~+KvP9<|o!~%lek3uH8nt^=-w80O$Vt(!SetmRh@==91m)
z_LhPzh0W^aC3knL_k7)HE;ZV9*Il*S$AYuH+1qU|1t2*c5Czs#o}s9*oZ=aR@hI9c
zjH!a%Wu7S|15%tMPEpek(Xo
zB8hffDBRdlj+paIh@ljL>!tYuH$^&!HF;d5c_ORipy5eaF%gf;wb2JoiN?ptmSotc
zsSV|c!FXtw5pkZ)DsoOvn8P`f(k|=+FF())INk)}kQNi_x8^qBoh=Ldwc@cxB1=6BTiAj+i^vM!$wkQ-)
z+|iFz=jBs+L=hw7WY1?s9xY*|Py!Taia;$8$`n?cBBWr%%XIIf7pyL=gVlBK@u#%{jCZZ-&4?6E-Xvm>MJb7yoHCpZ&oQ$^C_PQgh-r!*MSx=OWyCbU
zu%jJm&X+|-P{j4XHn5~`pco?3On7-F65hpZ=xwkbY~_i_hzuC$z3cXCb1(ko$=CFn
zy(OQ&4XEgk1L5BTAy*5HUmcA6eIO``{r#Uk_oJ^(zg@L|dvNdKvhils?)>zB-+SSo
zU$5RH)()p;YxYC(N!IMAxl$XFt^Mp{1TayvU;6RCEfMjob|P*wy!&sI{fI)_0D#8+1EPz
z9jaQGwvQS0jvv_s&$lc|^ewP{zwcVwM=H0ex@GBG^#<4;HDrvsRNr!aw&$}a9g37C
z0gxBkG)ccBZSKGZkU;KPI=txl?1s-ApI!IanoJf!6(#2kvfF66D_78SZ=h0Coyni7
zt0`uvCNtRs_!&}8;e?jeqHeM>E3513CCy|RXftY&&9L{gtFZp0>QF`)riTmYP`+40
zac{!!ccq7MFD5C55&@+KeA3<+H;Ar?@aX_c$}{o#g37$O)afU53DN;Y4N?rOk|6o9
z9R`yB5;wgRk9*^omm(y5O5<4tbRU&eW{5OC9HbOVErSsr&0^hPlv<2yB25;U9Rqbv
zNHmaOh?W_mWd`$xXwaR3mKj0IN;ISbLW?W#E{F6vIpXmo#`428^2cj<1!)Ds#$2*0
z4y}qKR#Vanv`GhC{K;{9}{dK@zqh#J!zVP4s
z=f*}@Fh~JsvU-pd9vm3|sv4$=;(*5|YaxCAqiP4p|EVtO4qXqO1XXcVezfj1diCz&
zg@C4r*(y$&)Du)?sLE26L*=V`D^|4Kl}33CSYyOWoyM-?SWziff}z&&P^;oc?0TnD
zw<7iS7PwSBIPmp7cNhRM*N~b
b2Tnlsk9KzXe!F>%Y+d=!X4X)Rta17OSI+gJ


From afdf5aaa3c5db03008365bff021cc0e9cd3d623a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 16:58:49 +0200
Subject: [PATCH 018/129] make interface work

---
 class4gl/.interface_functions.py.swp     | Bin 40960 -> 0 bytes
 class4gl/evaluation/evaluation.py        | 303 +++++++++++++++++++++++
 class4gl/interface_multi.py              |  82 +++---
 class4gl/simulations/.simulations.py.swp | Bin 28672 -> 0 bytes
 4 files changed, 345 insertions(+), 40 deletions(-)
 delete mode 100644 class4gl/.interface_functions.py.swp
 create mode 100644 class4gl/evaluation/evaluation.py
 delete mode 100644 class4gl/simulations/.simulations.py.swp

diff --git a/class4gl/.interface_functions.py.swp b/class4gl/.interface_functions.py.swp
deleted file mode 100644
index 95717cf2146cfdf5077234636933be7e4d0072d3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 40960
zcmeI53zQ^PdB+<>1q6wpNjxeRGg+#e>FJqWdC1B@cnTg^kOgGVIP~f1uAXVOr@QUy
z+1WusMM;c?-~nIcp=j`}n2?7GsHh==F&-0%0SzZU5+kT681ca|$?v<5s$137J+thJ
zNV?Ddx4P@z``zz;_xXMIyLaZNFF4~Y@0ik&IX=IU%iVC{rLz|ue)V4WUy;l02&+eK
z+H~YG5;W|y&}=MJy3?CV`b()DX2-tF5}XR#_1;RQ8%$N3l`uSVp&5T^w0c3eUa1D<
z`f{t}^VOhQ{hiCzpLiC
zmTJj;d*e07!$y~7%kohUIiN92v7ida0vJ@#)#X&72vg?
z3C;ss!HdEFVzl@s_z(!dN#NrcI<5p0;0W*_hKXCjwcyp@98dz^#UQf-EQ5_;8XN@f
z!)|e_v0X@m^vtJ<6GfxcY*%XK)yh({Tx(Q&`EF2ccWdfEdINw+R*&f^N}Y
z44TchA4gK5-i&AcGs=xtXStUz#2Jpa!&0~!_JXCn-(B+jiF%_Mv?@!%gkPFpuGfQZ
z>Ef{6D)|#(ubXeQYQfHO*sFAV1&4^`UY-uKv)hB!x!GE!SCKEUG3&KDrBmyQiK9_tsppcw|n95uv0Gp|#F?kf_xueot4zg&J8k9237dH2i
z7~ir*OVh6|2G#A303{Qcg@2(q>sK1xa;4)J)tC7S(r&l>;#`uXsTvZ+H6dIYDvpFo
zyvpRri8JeWD&5eZQ_@6;T7-!*h&XkzXJ))OVR9SNOrYe38@CX
z#!^tC33TL(rdu4_ux)a~(&UDkx8d~78_wFiVXL>{lCikVhWa?QD;)M#J0@j;s`nt4
zQ5CMRn_HJ#-9{K7ZuuGpJ-%6Ht8Bh#WA*LgR$d3hpmKZ7RhVk+2)ezXCP@)~qqPv0
zyqCaRpx>DYQk12fryGi@xute3kQ(zFt%e^r^={Bx?zTJ$ZMK(tam(6`A;pT=5?$9&
zXd^oV9YdUuq9P6RK2LbpOs>tK48ECTo=}wm5dj93rBb!o4g-^!(~00jjhZr$=i}&_
zsrWb1+MS@4--R((@{ifCwwIPFt(vB5CIt7@xQReU{#2#Y38xlJgtOmYWYH;)N7X|#n^;D=4;VE>I{`dxOsmpp4i6h#HK2FtZ!nQ
z81oaHd&lp}v^e2=eoksh{WtFES{Xb2^wUp0_0%)Z<}+68uWw-`Yl;;@65CA0ZgX>9
zuXLH(^2E^%Ar2Ybzy>t%qQAoIr&xy^$vugvWT-1uuKC|hRK&y7j|6Lq
zFdmUi3qlKO+*Sm^ol0*}t+^)rDON%4?ox%-)iR@Jt0yZS$$+)XQc(1#kE;2FiICM%
zW2fTGArs}O;^t~W-CGEHWivKNXg-QkbR=lLBiN#hSTkz8VtyR=%auB=v@I4IB}qv?
zQh9oO%m^F?&3dU^7IBozdHGemczLNaGr3V0QW9gq
z)rhLWM1?Q40=G%a!bYXn?&jO`7c)Bx-}P>L$)nA6n(bb*F<)X0(e8#`W2w{b_PjG2
zt)SA~8Z0c)9D~})61Y_9m}FtK(OG46R$(MI36|8aocYx;Pia88ag`N|hs>9ia@byO
z)ns)bwAK?x7+hIxFOfzUBg6UI@^yrcVHFbl+WyuGN>MH7%L~mmOYEWzD=s71Fo(8M
zlFmvqrRVqZ(?#aCWxh^Pq>7&47Iw6!r>Cd%
zP!KxOnJrs%U3pb&bZrDyzcw&z20_OZr%kQwlzNK-mpgR0Fw7YN!nOUnv(oKWR=rB-
z?d;lj?I_eP>Lk3@v*9foze>{T*jF7UaN_W?9HJe&DXyzn)_d|lxLbtu#
znP1KOThH5a#wmUwDp!w!g|bY``(cmuSUg=vB&g!nvz(w$NJYvx
zgzn<`{jFOvNEb#e{uzmTWl?s*yzO~KY@^Gq?dWSNhF?HqQ7T_*V6jYR&`!0Qk}SEj
zga_RW#-$nr1VxCuxp{V3-TDg*WLB@6yQ_+ov*VRi6O+x)%KM~#0q_Wkwp$T>ORoM_<|1#z9`}a!Hyqn){5m;zsHM3GB
z6wCi5*s=4S)!rqg^UphZ?PhK0k9#j^cehuV%3HOPcYcc|(pzp}y>6}+J+V1@%81xd
zZ4LWErRhm#UYn&7c6DrqgtHUnohJroqRm&T+rVb3JM6HWVQ8&KmOyrUA7oIbdd21Ck-Y9S1tzUs;$U|3e$M1C!tg@LX^Yc7->B
zE5QOd5j+DN3=RUfVQct1@LBK~umI}d4Dc+l7x)&ohi`&6gElw@h&|$a*d4?caRay*
zTnLT_zYTtX{oy(w_J}dCFZdcZhg-nSpbGNfiQp&L8a@nO25R6W@C@)D$or$n-(b)~q}GyN1hkI!(Qmn*C%|
z;!w4V8k339W`!F%m{_NsxM6`Epy_0<&$*1daZBv7v0_l^^YzwDy`|T2Gptrsc~a}n
zdJEQJPIepmrG?8ylx`v+KPkN4XA(~Ob(Qf%rQ+kklxy}WE$gW$@F50QZvSY{=jA_F
zzF~5%A6HQm5lEg?MKTL@gxdym8FH2%_o*Ag#EiEwGgzV`<#M}TRxv|z7yCwL~JB|w8=!Ky9Np{O4zEwJ7iFk`G)^*YyK^=^4sSrntSEY=fJNw!&uoCro}
zYU;^ZGWMyZk1I(cq+gEXUYqq@ik<|J=ipc=
zIt{&ZIxK5TF6KqHQ*Ej6uZb<T_;s}0wj>-1>tQZY!?L%mYF8|H
zev}Cdf^>D6DyE2L%XA1G+Zas@>&SFq!8wZa=-!@Ff7;9z?{fS;>zO20;=1Pso9bjPMbYE4i
z)^~Ir&BbsIQbZC=AmG@xIj)SJF^jU&i+=^qN-r+_tSvH;;IONL~=E1kfKE
zzos>n+2?Q;p5uv}Q*2VJxJfC~g>em(*i7Tb>S0G=YXs4*D7GoZ^MNTww)oOwn^SHn
z@nX&-#<|axpfnBbcQLS1-4?;7JtL(*SCYQk40Z&~veHpQYdbEu#IqT-EiJ{cC9Pzq
z6)DZK**Q`U&ZbvcSLngM(>m!ur|4)P*d3?nh=>T$est-Sty`OT8QAxzQ?%&+zk|Mh
zArSo^^;UguMBlFi@$VPC|0Wa1lX&0@=<=6>XMrD~!#@bV2wnpY13yD=zY~ZqUjtjf
z)4?I&esuTmgPXxs;3TjQxCNd4mEc8SBiI}41w@Cx3Ty)&_$2!I)!=yWH5k4YoCOX5
z_n?d43Vt7)0}chx0Xc9z`uM+~dy7B7mEa=qWN_Esx!ha8^T99ByT1h90H(pI!2Q^Y
z8T8x(?!8&0WgQlo`DA3$4a2&4ug^R#OFTA9WCwt5v4$;QXFxn<9nbVf&0`O8v1Ouq3rmBpjksTMlxiXKO#JgiGutyl5T
zDJh@-vL=x?{;9=Kj#B8YEiadqyL}gLR+yy2nI1|@?;@>>j)_D`{Pnu#MxL;VYRvM6
zwS%zQC(Cq)O_0bsDUu|CPwa3hj-%D&^9|El@epJQ!Wk0}A14g_of7kJ6gsH`i<{Ij
zg6UGh>(_H_?Z`8pD&4pCyPCUXlLt{Mk3uPQ&W2_^bqd8GY21PiO%@R+k%&(_)kR!&3~2p>rJIz+@V3&I8-mv<2G)^T84;0HQD!W4Jgcu4
zQI0UK)PDB+EuW@+j(c{>N*hZ}1go>d61KFo2NSQP1|shv$0;epn~5n>YJn3x;yfOd
z*+S`ORq}OXknupnxaLtA8l*`m9~x&9#4G
zpI2{I7Q}WboRz#Y5RzqCP87w;q7@<~PmypO{cR+9S~4P=$3)4650R9I$v5bVmqJT5
zH1(2`743DDUJ7P+7|^hn)Co+dp0Z{ewWlwwC9hKojqHy@D5Ju&)5zOoix^JDw5?rj
zHj(gZ?LZ&I40hsDB`wBQ^uYEaWw%0WWNphh92Zh&JEeFw1JjbShbp6iIHV@VHhcO+
znaw=A#`-5#Ln|Sp!NlBX1y(Sk|D!hNZV){e{l99I-4CPpzYV+!zhMjb8u%*sD7YT{Ie0xdA8Y~#0Xe^aFOdEJ1#lSn
zDmHW@O1Dnc7vaT&w+2H5YiPXy{q;Awl8kl^P(xdEQjh%^O
zQPvz>QWinIt$>3v)&qMS$~(@R_S~Lt$?WopDa}gk!jHi}C-00^nZ2R@15kPm4t4ppGAC9uYESB}@DElJX@NrDm!jXKe@4LG(6{Tx3I>bH8^r(!`oFZlZ>n-Z>
zA=A%O`WZt(W|*W;5TBjgh|SzijLL=3jsjt)+2}>vWo2Ph>3@aUjs1=d*@U%cwfN?#
z-9fW~W=%)dhdU>U(;p*=taQeuGnSf7E2gU02YlA}-*f05Z{pr^O&!W_w-)sG)CCAa
zBOw{<74h;*lNr$gT&84AAOp8LZ=bWA*k>Z{!@Fb}!Iy*Fae1jeb&}K2jhi(tGjbC}EJvHQ6D7+fB_kSTi``QCnCxS44>7k!uG0hZ
zl`&H`L2NnHXw4qDh*0*pZS|;f=-nQMN!eo61HHK2EAvvca}p(qz6cE`35O4=;SRG~
zBesVD5h;;Mm1IwGHe(o!QugaRafZ;bYRiCpWmdD}o>_XSxiF6k;G8;K7jGPtX+ct105Cc;@
zq5T>V2xh@Fm;xp6Oz;i#{!fDya6UK}$T@&tpx^%-ybfFejt664KOnyU
zw}3C9%l{+T3^srRz#ZuG?*P9CP6a1|2hioe0^SAYz;QtC>l2^;Cx9QKyPpSshJOAK
zxD~t+YzG$rAM6iAp0`JOyVl-+ssG2z3TY*x92i%TqCCyf59>D&JCwwp14#nL0Bf(c
zivwAtMMr)HUTPOd*K|l>9rlbTL+rz#c!>@O#wn#3W&=aa^O3#^EzxDoZFX^dSv~4v
ziUu+KF1vBpb5Amo&ydUp1!W&@i+R$7#a-^Pa*nN^H)la=*&S3|#Qq3w^-E+q^iID-
zOfhmrEp4}N^ov|u-IHYEB`Zep;qL|>E{oZPRnx;9JA~WDcEc@5=VH1|d^k3vj?#TO
z^Tt0-ZqMEK2R+k*(zBAaXT`ISSOPLH$J0#3jy;jtOb0k+r3p$jePvvnz>Y5Q8zL$*
zN_iMkKMg6PZJXcQeH8A0nVj)>9E0#Za~x5P8LzUJ(^eNMVSg~+NY@h`aE4g
zV+Quub%9<)ci1I*hOVt|0wbC_a5Un
z=-J6ynG4tkQcEIncQY@F>AUh${z}@?JGyi)-dEBMM9yg^jSCrYj1RS`J$^8&9#f^IMHznZf1;#>M2dzchd>P
zHGfx##_G1FYMNpEI?+&9IQ|#pwd}HTmTdra@T^dnou1>L;#s6CrfwCY5*Kx(Y*}X!
z-jyOna*A2!xLbk~errDX;@F{oDYX@IVH{h{i@2vJ3yjS?DuKr*tH;a!|4HbnTlPjf
zb?*PW3w{3&!HdAd==#@!mw^rt-G49e4)psKa4z^U`um;WlVAr}2IqjY!E?X^=(R^fuhTQ2wVedpb8EL51_N(3@!(9hW}gW?&AAj1oxq{za7kh5_lT83!VL)pbcb?
zUhduhF*>{45qLNHx}3=u{rzb0WN;h0`vu@x;C{;dejsHRfB%1aH1+jXh*$@@VQ~oiH7LTb;B-Wv`A(K7c4-uAPmbQ=y27TW^>hu4j!UE^
zf(x4d3mEQ&=lzlN8%LAc<>L~=>orS3O4MtnhBTaJmfDo0IXX~bjvBjn-eeXGykhU
zcM=^S*yE=!2DTi>cwj}7K8vv^ZbIf1M}khWiyY@UcQRO7@=P5~P+}JSe+zo=7mfbE
z&FH27jovRl0C$4VgO7m^0l61Y>;e~n^TAQz8DL)^cLIJHd>-5g-VXi@Tn1hWCc%N=
z0B{>NgAaqtf&BfySAiuU_XIu_{5>{<_kdRc@fFwt?45z1zWpE()AvT4Z
zKnTtOpToBBC*WA{d?0rVd=cBid%;!UB5)k|88(L3g4ci(!13TY;HSu1$-CbMFnY6F
z60og(?HfI8OtZCN6O8^EB3th3^%$`-w9nCBLyZ64rZu0bTU*dGAA#S56{~p;^_)n>
z-Lt+E8Ez5-eIi^&26#odQTBjAaDe?%yEr|b{u*MRwz+;UKPz&!2i2cke02Ug;@S*5
znz6gPulI~%UyjHfM9Q%qXAp@OC#U-#A*GqjBpa!0AozC@|p_pu{Yqluy=Xu?|tkkL_Oc=x;J+9XeLG
nU;NVkCL@1E!~QM5_%<^8rqQ= xmin) & (x < xmax))
+        ybin = y[in_bin]
+        xbin = x[in_bin]
+        if len(ybin) > 20:
+            k = kde.gaussian_kde((ybin))
+            zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+    zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+    zi_int = zi.cumsum(axis=1) 
+                 #  label=key+", "+\
+                 #                    'R = '+str(round(PR[0],3))+', '+\
+                 #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                 #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+    axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+            colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+    axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+            colors=['darkred'],alpha=0.5,)
+
+
+    latex = {}
+    latex['dthetadt'] =  r'$d \theta / dt $'
+    latex['dqdt'] =      r'$d q / dt $'
+    latex['dhdt'] =      r'$d h / dt $'
+
+    axes[varkey].set_xlabel('observations')     
+    axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+
+    PR = pearsonr(mod,obs)[0]
+    RMSE = rmse(obs,mod)                                               
+    BIAS = np.mean(mod) - np.mean(obs)
+    STD = mod.std()
+
+    axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+                                  'R = '+str(round(PR,3))+', '+\
+                                  'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                  'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+                         s=0.1,alpha=0.14,color='k')
+    axes[varkey].legend(fontsize=5)
+                   
+    axes[varkey].set_xlabel('observations')     
+    if i==0:                                    
+        axes[varkey].set_ylabel('model')                                            
+    abline(1,0,axis=axes[varkey])
+    i +=1
+
+
+
+# legend for different forcing simulations (colors)
+ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+leg = []
+for ikey,key in enumerate(EXPS.keys()):
+    leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+    leg.append(leg1)
+ax.axis('off')
+#leg1 =
+ax.legend(leg,list(EXPS.keys()),loc=2,fontsize=10)
+
+
+# # legend for different stations (symbols)
+# ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# leg = []
+# isymbol = 0
+# for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+#     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+#     leg.append(leg1)
+#     isymbol += 1
+# 
+# # symbol for all stations
+# leg1, = ax.plot([],'ko',markersize=10)
+# leg.append(leg1)
+
+
+# ax.axis('off')
+# ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+
+
+fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+
+
+#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+fig.show()  
+
+
+
+
+
+
+
+
+
+
+
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 83148e5..6f70487 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -445,7 +445,7 @@ def update_record(self):
         key = self.frames['worldmap']['inputkey']
         # only redraw the map if the current world map has a time
         # dimension
-        if 'time' in self.globaldata.datasets[key].page[key].dims:
+        if (self.globaldata is not None) and ('time' in self.globaldata.datasets[key].page[key].dims):
             self.goto_datetime_worldmap(
                 self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
                 'after')
@@ -747,20 +747,21 @@ def plot(self):
 
     def goto_datetime_worldmap(self,DT,shift=None):
         DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
-            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
-            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
-            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
-                self.frames['worldmap']['iDT'] += 1
-            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
-                self.frames['worldmap']['iDT'] -= 1 
-            # for gleam, we take the values of the previous day
-            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
-                self.frames['worldmap']['iDT'] -= 2 
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-        #else:
-        #    self.frames['worldmap'].pop('DT')
+        if self.globaldata is not None:
+            if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
+                self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
+                DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
+                self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
+                if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
+                    self.frames['worldmap']['iDT'] += 1
+                elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
+                    self.frames['worldmap']['iDT'] -= 1 
+                # for gleam, we take the values of the previous day
+                if self.frames['worldmap']['inputkey'] in ['wg','w2']:
+                    self.frames['worldmap']['iDT'] -= 2 
+                self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
+            #else:
+            #    self.frames['worldmap'].pop('DT')
 
     def next_datetime(self,event=None):
         if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
@@ -824,28 +825,29 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
         frames = self.frames
         fig = self.fig
  
-        if (only is None) or ('worldmap' in only):
-            globaldata = self.globaldata
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
-            else:
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
-            keystotranspose = ['lat','lon']
-            for key in dict(datasetxr.dims).keys():
-                if key not in keystotranspose:
-                    keystotranspose.append(key)
-
-            datasetxr = datasetxr.transpose(*keystotranspose)
-            datasetxr = datasetxr.sortby('lat',ascending=False)
-
-            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
-            lonleft = lonleft - 360.
-            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
-            label = 'worldmap'
-            axes[label].clear()
-            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
-            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+        if globaldata is not None:
+            if (only is None) or ('worldmap' in only):
+                globaldata = self.globaldata
+                if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                    globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
+                    datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
+                else:
+                    datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+                keystotranspose = ['lat','lon']
+                for key in dict(datasetxr.dims).keys():
+                    if key not in keystotranspose:
+                        keystotranspose.append(key)
+
+                datasetxr = datasetxr.transpose(*keystotranspose)
+                datasetxr = datasetxr.sortby('lat',ascending=False)
+
+                lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
+                lonleft = lonleft - 360.
+                lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
+                label = 'worldmap'
+                axes[label].clear()
+                axes[label].lon = xr.concat([lonleft,lonright],'lon').values
+                axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
 
         if (only is None) or ('worldmap' in only):
             #if 'axmap' not in self.__dict__ :
@@ -887,10 +889,10 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             from matplotlib import cm
             axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
             
-            
             title=frames['worldmap']['inputkey']
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+            if globaldata is not None: 
+                if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                    title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
             axes[label].set_title(title)
 
             label ='worldmap_colorbar'
diff --git a/class4gl/simulations/.simulations.py.swp b/class4gl/simulations/.simulations.py.swp
deleted file mode 100644
index fa508cc9ce2f5a68fdff618e8ecc170da0d80236..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 28672
zcmeHPYm6jS6)psn6;Z+?pg+uICb7Dio}SrdVPRm1>&~*QEW5a~EK6pGvZkwgrlh*6
zx2k%28(fJH#YByw#z2Ts0>nr_g@F1;)M(-}1|h@*5(UIZG$Dc^zCirWeblR~dwO;;
zCZsz1O;z2;IrpA>>fC$JJ+({I`*&W+w$wLk_?@b0w;wpN@aTI!ciamf)3nvFv3b*`
z&08pI*l*c&md#*#Q(fHD{jj+7M-;)S&~HXvGq9%`t{H}#m)+zhbXpzPj2zz!r&9Oz
zcCVOdnOaJql)%~&=tRxQ3s2F`+q7|-3XhMS!%jc#>b2{YB}xgD5-255N}!ZLDS=V~
zr36X|loEKuNFZvTsNIC#yh-+GL0*r`T~CnD${BDlm;Yw@e5SnKmdig;K3^%X_vG@A
zm(OR*>x;R3`B+{`36v5jB~VJBlt3wgQUav}N(qz_C?!xzpp-x;0fTT!1sXrfiD7|1a1OCAOMyC6W9)n0bhN)rhORL3!DZ#a0>E(
z7{GPFE?^6A0iXe|!GZC6;IqJIfEKU%
zdGQ0_6MzL=1-u(L4tN%hk$ZtlfhXa_xB=(^2Z2Assd5i+H?RtH03Wyl7z4=ZLURAH
za<(ApXn#!p!brqU7*1pdY?fu7&BzZbrt2CiwW_Il?ZEM(OkO=Qms~qvVr<@a?MCEy
z%ZfBx>am@3W1^tbWM;4&)+GbOv6vGw&yUz{-?L{l+YRwI<{J5t={8n6-t{4ym=!lh
zqZ0(S7pX=jrKzn!$5R;wFONR*mJ`=y*IzPSkzQq!EKNSqN#exZAqR;Q!y>pJu&N6)>jDku|^mPvdl49dXwW{UHfrrv)
zuJ?7r@a)4Fl!}l=cg1mSu7!DF&+tLYr-I&PCN!02W+=D4ju+wZhhf^I>B2u-4jH;S2ZewU<
z+Hi%iS=uW43x%uZ>i5
z>)d{Qrl0Xhb$9RK&1PBFYgfHd+kW6GV{ZFp^Nh{XF~K;xfZpQIrWU`T^-S*`;(wzR
zJ2I0yJGtdmx;&q!-rzP|eUVc;6n26pl?-^*Af)@462IZKBd5iyb|}9ixObo&0e?m_
zI5jJ&VDXYPHcYoeGotIV*=m`*vHUJ(vqG6X{SYt7Y3=Iz9bW4Ll)LwGgO}RtEYah9
z*YU(v7TPD8WN|8@8N5vk`Zg}9ZMDwLR8d!5^xPxtTQrGhDI5Ix$_r)Q^8ATQvj6^djarnRxYiQP729ZmjUSf-aQ+
z71RlAy!(#pvToo+k?m<(Gw@qX#fvhh)%JskEx9W-m1Z~|HI0|U^lS%Bvtb*ECXF0b
zTp5O%y5Z9WEsJWfuVU^aW6o0&n`ZRPmRn1n!{Wwd8&YFr7YxzixRF}e6PBg!co>nW
zGF=narhzC}gPR=JS%P6A6NGVUn*oZ!7U5!08s-?8W?B}dbXr&}RrJY8qA*DmCP6{3
zj@K-^X`=VD`cwxrrl>hn%P2gxyy{O+Ps__i(^GSs=8}-yZM(M4&nNM?oYo+gQjC*)
zp`phNr_p}cjgf!jtkWhn6YG_M%xK*)xztPeOm4*5Otyi=6O~XNDm;jieYlO6f#eqI
z1I0;GdN8?SdX|e9B4MgqomLyKkr8$p4Lck{fsaMptaXnFAFug6SMQ$NxqHX_)ZmtK
zKAAqPbL=^%ZJT%O8p1>kwPWd3##h96vBS3AaGFjd9t6i4By(CFPbwlPRMM~+s!STk7f%^;yQ<^o6JrF0q2edwSe)%B1H8l
zl>HHl0{-UJx5}69D*kTbg&fDi21`yXRtJ|2E(nfOuUEl)q8}r36X|loBWW=4NQ$STluPHX|hS|iX-Sd49yK2imY}lS9yqw2O2|Jb`soyOULZ^>}u?bvyGO{F^AHB*oC)~mp1)v>8Q
z-kM_`lZ3|bF(ENHYYhsD^bJbW4_GBF_nOprq6f#18YAU3me>hzlks)-sjMH?xPP7s;O7Y`8&7GFvbdTOFJ{)}ie;v-Q_v0*^_Wvrr|C`wF
z9|7o$eBVd=#K_{gZ)bPSLdA0zU)31USG&zzM+f@Busm+yqDA;6dP4;Cf&W@Gjt;z{~JYd*bl$%R>;ZmDgHT9(RSWQ4mgsDj=
z*D2AeLCOIBDQq!>#(1~DP
zD`K+4+YwL0`F4n|9(y~u++w4vq{r1@@G56`S7T~If(7HGbY70tF>#Z$^1MWxS1jfD
zhQys~esyB3&gQRo+HHEL)B20!=s=w4NbRo6C^JP)yaZ$TpM*Sqtbs#hI>VvkoOa-o
zY}N5kV0z1Ryd9`>9X6J-6F9y}LayT1kDZI9X`zO)OA(_*F;fHWpjb;%I|mfOQ0H>i
z9}-2~85j~p1}!l+VPe1JdfJDf>4lNqs_3ojEhk`;ZP-qw(N$pv)FWD(XbE0Cb$ZHI
z#(1WqFCG{}Jr;&U1%WKc>_v^5D<5dhk6T3sae>K$;+0AgCb6Pl46CClu_b!WC*J~r
z^7Q`lsgel|+8HX!N~hyhN!^O-wE0`?Cnok@#U#~!QB}twoQeM`%VRNHr2~ww-|5hj
zoERzy{1fj>n4VNr*jiCPMA};YJ*wWSzDnV*+0)_ulvIZI
zQm(Ai0d#V-Q+6PZndJ|0p5Fr~uK!HnQSA5W?Eg&Q4B%Pp@oE2mH}G-5049KwfhVxne*m~2=mOUP
z+kvxzXRzPD6F3a)1&#-3|Nmv+W?%u(ffqJt+OL3zfm?tbz<;pc|26Oc@M+*`U<+^o
za6WK0@Nf75o&vrNJOtbc>;X0a_rVun0;d8`VE=zBuoZX?dw=o+Tn20a{*HbB8e&1&gIk?PK|EVFSRfWT3C{aT~G4n4=D$p{pAp=Ww-oQo7t^))Z;t4P*X(cnzcpg
zoDuK)d5&0-ish$Q9tNG=wyuL36;0+|OP7)7>!hHHX+OB7X%7f~C5AqA|#wb6q(GwUN#Ovn+3uejL8OzTvxx=P0t
z-X;D)jtHV{c3d~-QBvjUY9DUc?I_t(r}NZ(?Asy(qN&uO*HN{7JQqGcn|mJlos*&V
zB2Zm!n}(Y@^7z*&;8>g{lkaVKg`qtknkfz!l#(Po3n!JdTvH=D(r8j$-Y@7tPEXTh
zqo7wj;T|zp#8W+LaP&_ToRNar$B8UeQHxM6O!^?-SCTJ1Q
zol0H$Fo8JP_`I>n)6tiZb?soV>a9~jM^Vn#MKRZ>l7&$<^mP>LlcGBkO%lSfyaQ{!0jAqEdm)9N6Q}%-IAc0e->XzIQm+oKzy_^
zw<>VT1Pyx@>wEG5*5H^ALV6lSc~gzb@E2!(%6Dd1W)D8Im8e?hpH&(mKGX3md3wvrTck7WkkNM!UVJ+3P_%_?dZ2$GPiEkG48t?v%I&1-vq%Zxl#=4Mj
z%wfO{Y36v5j
zB~VJ>e=dO`i{l|#1J=zr?}$GM$k*mOt9;^7JRc=%Ct2iY+>NOZ65{<&t$3f4=REMB
zMFv}^gX)sQ-*lI?^nq4CpK?uDPg=-AEXD8Tuux6hNUk-w5$vc
z6``^D;81xes)mod#QTCO#enwxFI#bJ#(@0Pe>xV&sGSR6g9^ToXHuyciKJLMBu11&
K$OK|0wEqIHX#qI^


From 8ad4f2484b8683fa0a8200923753a8b74d24d267 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 17:02:31 +0200
Subject: [PATCH 019/129] gitignore

---
 .gitignore | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 113 insertions(+)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d5a1137
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,113 @@
+*.o, 
+*.pyc
+*.class 
+class4gl/__pycache__/
+class4gl/__pycache__/*
+*.log
+.*
+build/
+dist/
+
+*/__pychache__/
+*.py[cod]
+*$py.class
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+
+ 
+!/.gitignore

From fa082ef05f7430587faf2eb14facdfaec1d95a97 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 17:04:44 +0200
Subject: [PATCH 020/129] cleanups

---
 class4gl/__pycache__/__init__.cpython-36.pyc  |  Bin 376 -> 0 bytes
 class4gl/__pycache__/class4gl.cpython-36.pyc  |  Bin 28782 -> 0 bytes
 .../__pycache__/data_global.cpython-36.pyc    |  Bin 17867 -> 0 bytes
 .../interface_functions.cpython-36.pyc        |  Bin 10781 -> 0 bytes
 .../interface_multi.cpython-36.pyc            |  Bin 31044 -> 0 bytes
 class4gl/__pycache__/model.cpython-36.pyc     |  Bin 36319 -> 0 bytes
 dist/class4gl-0.1dev.tar.gz                   |  Bin 74685 -> 0 bytes
 dist/class4gl-0.1dev/PKG-INFO                 |   14 -
 dist/class4gl-0.1dev/bin/__init__.py          |    7 -
 dist/class4gl-0.1dev/lib/__init__.py          |    7 -
 dist/class4gl-0.1dev/lib/class4gl.py          | 1611 ------------
 dist/class4gl-0.1dev/lib/data_air.py          |  473 ----
 dist/class4gl-0.1dev/lib/data_global.py       |  936 -------
 .../lib/interface_functions.py                |  506 ----
 dist/class4gl-0.1dev/lib/interface_multi.py   | 2061 ---------------
 dist/class4gl-0.1dev/lib/model.py             | 2214 -----------------
 dist/class4gl-0.1dev/setup.py                 |    9 -
 17 files changed, 7838 deletions(-)
 delete mode 100644 class4gl/__pycache__/__init__.cpython-36.pyc
 delete mode 100644 class4gl/__pycache__/class4gl.cpython-36.pyc
 delete mode 100644 class4gl/__pycache__/data_global.cpython-36.pyc
 delete mode 100644 class4gl/__pycache__/interface_functions.cpython-36.pyc
 delete mode 100644 class4gl/__pycache__/interface_multi.cpython-36.pyc
 delete mode 100644 class4gl/__pycache__/model.cpython-36.pyc
 delete mode 100644 dist/class4gl-0.1dev.tar.gz
 delete mode 100644 dist/class4gl-0.1dev/PKG-INFO
 delete mode 100644 dist/class4gl-0.1dev/bin/__init__.py
 delete mode 100644 dist/class4gl-0.1dev/lib/__init__.py
 delete mode 100644 dist/class4gl-0.1dev/lib/class4gl.py
 delete mode 100644 dist/class4gl-0.1dev/lib/data_air.py
 delete mode 100644 dist/class4gl-0.1dev/lib/data_global.py
 delete mode 100644 dist/class4gl-0.1dev/lib/interface_functions.py
 delete mode 100644 dist/class4gl-0.1dev/lib/interface_multi.py
 delete mode 100644 dist/class4gl-0.1dev/lib/model.py
 delete mode 100644 dist/class4gl-0.1dev/setup.py

diff --git a/class4gl/__pycache__/__init__.cpython-36.pyc b/class4gl/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index 75cd64362b5997bc8f56bf6abf2c3fb3e5409324..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 376
zcmX|-Jx>E67{|F6+OtjL11yq2ML1G1j#YvamEu%E
z3?sl|++qoCmjGuhoq+-Th`eP09aNfe>9U2Sb$wMzcPJ{$&5|xSQH`|1WsF)%s4%Yk
ztxBoqRPJGspXbH?^p;o5h;{!_H{MtGhpV5)*5AjaqC(~itws-n0|!h9Y?6R900y)?
zT$hlIcMDw+;yQ%T#;&ySy6v@;guDUM4X*<}@6meKtaXJ#AXM_oqH?2)qWC?o3pBl$
uqFOKQmKu(3?k2Mt`uif1VvYzAm9T{5&pVe5{SX?-M`0#)$a4~>et!YuvuOta

diff --git a/class4gl/__pycache__/class4gl.cpython-36.pyc b/class4gl/__pycache__/class4gl.cpython-36.pyc
deleted file mode 100644
index 6cf8a9fc010af32a30fb2783f7c29d2b54e753c4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 28782
zcmdUY32+?QdERtS&xwN|2;w5hZIVj@SP=lPy&&!ayF8ZMnRgohpZ8NjY#ZKYsz}S
zGGEtH$E>5)vDZVXAFoB$;
zGb;Q2`Lp8?2LWehh+M-Vxy_%Qq`Ic
z>f?ux#F{tXFfklT6;@q#f71h#&_~r+CtcM_=K)Lw!r5hHV`sj-l%?E8uJrbkujCvjlLu@d8O8Wr$q4o>
z?UR4tgI|5|v!6OXn`NCFKTHrssrul_g;E-*m5!OOV}6CLFlLeAb>(yAoL$JOp_c_o
zlONyAei4t8gkx!LC=GIlO#0kBTdPnm5Dml0l@@iJ2`u5sj`xr0^sV@
zhLb&i_N@Op|LBxcS*&hm?A%mi3hwqyuHN?IY2Z_+rqfqgJ`o9Nwf;6#CmTuTPz$7I
zbEeZ?GMz@RVz$HInNF{*XQZLiX}cRh1j`nui(v^km+?qBaZSEGZN<}`t3{UcQ_K0U
zYZJBT$~kjo;#@7fGPP6-KL;l!aZ^j<`#o*Ri(nR1GoH5Q>DjDjWH)koPv7Ko){D$L
zl|sqW=g)fK+UXT9k}0h&WxVk9r>><8Ysspn;+ABG&#>0u>QBFv(l1|W>&-AwoF$rN
zI6-d?TVcxpl?+=ED~f(Kte6$YJ7OiQB;HX_;0~)3DPmTa)s2uiDLdW?YXp@20AiBX
zVN!MYI;9qst%gx*Hv)2Fm%i>1Z`3?|6Y<30Je{kt(?z!Wt!`wf3Cp
zPgd;w)L9I~&1z+ei9s7Dm#QnJ
zaUmX@%c;mxu2ibnM}hL(c!w8RwF~8{7xPnliF~elU5fFdsyHu>(yw#w+6FRt#%jiP
zJi{tvt8_DYF9tenXLF9XA1Tv}PFuOf%zCNnjEC(GWU}7@2ko3&SkI@04(uF`9H$l@
zhqE>mjcc7+QiD4j568n%-Oy?S?R^oQ=b
z>TdW#Xzj;t$q=&6VgQ}Hqhm;%#=v~M_D;17u}2jzVCGgMRy?ym^EKsRLO;rGguFSE|N7r5ey;klt(p8k{0IE~
ze8cn8Gx=})@E`xj=NI1J@Z6ZmPh1@N>hC@>Jfq;eR%^hyGMhgqzh9ooe`nv1KJz=Z
zkI&@a`#oR$^RaJSo6Udr2#NavKk*k<8;K7!vfrNd^rDmhof@wk>yLi;sn`DN7iRLm_$B_n;THkIq^k7(R~k40Z?;${^yQg+=IqO-ANt{&jY0=z
zQ+oECivGgF?A-XAr(2kK_IjC%Z7SkyrdLX-Bwo%^E@N-zQgQef3tNj-`yfDi`V9w!
zuCVd4-H%VuIH8`YWInTkwULB470*~3iXf_ttUFaO
zI!9PM7J;Qy6rk*k<%LU`Dx6B$3tN@jUbM<|ReO;2)LsN#zfw!ZN=~|Zdo^bdBh)@b
zXNb-SoRmKA*oWcqG%P<0p0=8bTzm$cOv>1#8{S;ZrXri0g;Es}XBZT@c-GD>+NR{r
zUrI%62TWutVLM!>;I$uMlyPe%vz5|smAvTn%*uk5Ng3I5+4EAPEb464(n|=+lrpJE
z)?TWh7}Tlcczk76)hp|{=(wY
z*Y4MY@TO0t`WtcMdK84eSFZ)^w4!E{yOx%K?sFODax8TJ6g*o;WZ}g^ydBymUG1!6
zIWB6h_C#o{M^RK+UN!P{wTB=86&%Vn*>!8(rYwZ{*CG}{X7WiYxh|swNylbko`JS@
zgvA=yRX>oU+R^%6MBx1yVjL3OP`D3^Tdi-$awdtg!t^0c3=b1aouc4NU9IRd81TuO
zAt8#9(2HPfUA=%BVr_}sff&B^Wz^Uxg2h1nzxZCg9vT#v8`3(fc+Je!S@#>
zRoDf5SKG$Awf4VTLmF5Hc@xDXa$RVoDRywBzM}=XFWdq)=&g>-cc3wv9hQ?0vmih1
z*Y}|x2
zVQ$)mDkOM_#cc%;7y$&;Jp}-it0^gyJ3B==9<=Hd1!yvEX0Do@3}DoZiHV71?c{M&
z1o`vNTvzh@JE=v$N@H#SCscOYJUw|P=|!+IEBOI(7G
ze1*;9#i|wQlku2LK+m9V!oJ4xI2QpBNd=2kjCF??ykmaaFCg7t;&D#G2_?vOCc}N<
zK0U7YXol8}+1jJ`!i^ifI@!{0qjq4&>?PWPY!CsMVDkPPK3v^CwFglO3sDM#DD^do
z($>c@u??As9}1?ehRN}ok{3Y6z!&5Kr|t-_$4q0CSMYhuQT(WJtkkNlChi})VfU}(=sNslKKJGsinNK?X+n#$YWCC*%i^;TlDNGk~7}{#Ga+3JJHgxVk
zZSOx}Pno&=7-9^KVn4K-#G
zXU1~OeyBORtGX;+dSgCqYs+DK3-bnZu44JIVc$;e^HIQMlm7mh*^)Kd@R3>bZ!(gI#Nu!QO>-zPu>cZf36J*
zO1lfafRBy(>Tcu)PDWAtL{MrPOEE)=9b|v%k@BTxEdb@|)_l8iHS+WXH8WJr%IDiK
zLW=;_hZ?Y4W63Z7LP$#Y;Xs@X6+4N^at!G|ooAgpUF||>3ycL=lZAS0ix_pPzIydM
zdMaK|p#38QAvcC6u^dIt&q}^HSeCA499!$Iw)R<}o`8zj)>^2=)lv!poD5{8ypbz&
zl@r}~FdG$*0xf(YU*_yYF#(`px%VMWIk~b^s1`N~5bJYWSpFa-BFnW)jyX0;x)Nj^
zZO9m-r_52{p>*Ot?Lb(jeA|QsgjE(xD(a<{ajf8{K(-55(VMyf(G7z^Xe}J=0nI*H
zgz-spehI`=xDSvf)2X7m7C?f#YF0WxY!eh|vrt{)R>6*m>~kK_kk6C(M#@vdv`~|3
zX0c_EDIpBmDrE>IJU=UAHd2YENO{2XO81g^pb}l6BWVJ>#e9fMgPNL$c4KBf{;!HGk#?0ig2)Cg-
z#kwt$p<=!HLCIz+TPdxtl-rRU8{N#0o*KRJ(&d|Yakfx>0aUQv0k#x#gxUS##&YO=
z)X72nVK+&IHU};nJJ?QQet*GCwxKVnD3Cp2ekThj-+B7kx!6XN_w%M|I*^0(4e`b$_tz~ey
zH;Gpx`{<_H-sCV9axjhh`BPBHf=>*l{Ryfl#q^6$J&ozgIr{4Efj#=n^~+B^KZ=HL
zz?3P@-kz2Tws$r{YwJ(q*qjDIl<>8+q)+D&P|Hn5UO*yJ9!_W8tH{m`^Oh
ztFNwdQvdq|_!ayb2^`Z)t7xP0>2{r5x3H`9F
zQ=)@vXR_@}fP0;sH46xh^+%FgGS;v6X?=Q6#E8HJN8JO-wi_~R5B~V>*1C1byE^1u
zt<&hyl1a$E;K7gRwS$c%$k)Ir-YDR?x!n@RL-E<6Mj9L%)KrU_tA7eS@po(3tl-9}
zHV2{q0JfFVKFvt`L)h0{ri^PZJpHU^a1j-EW2f}c(USR>SwYc~>9ye<2(~1%-wA{Q
zf&4f=_TyoXI%C-RRQtLvSXMC0&=S_eRcy=QZTRm9gmCD?xRF
zP#=Q6%nBD1WY^(q-C0umO4w@5chqBmm8^CYJF8txiQN11n$_C}w$?gO?Jn-)_T!x>
z^m5Iznk5pW_T2s0GaAT;xkYIW<9+B(7$z_gYmpcg_aij2trvT(LEwC#*vGx8I~r2-
zA@<;Q=o8QZR{M(s#lh-OakzL0jPeoa_#W8iW@XqiA%cXhqYz5M)-fs=iX-qH9|{e^
zZ#^g>hpiKb7$a}%)hUo1h5wiv
z-_|7EDS^na<3ZR2@=n$h)>J)dovwGdN$Vu?JvbObjt=_zLJ$ZH>x|?*VVxC7=OA))
z7m*V69z}bczoYFVmSa}ma%47C?}UHc?L;d)RPU<3!|hV)jpC_dkK4u8kvcz2m=**K
z>k%pAQ7K~rZSXMSjiQt$^*w{{#G%juVr5v5NxsLB@1*rOzDI`;KA_^Jf!QR>1184Z
zu-#joB3^h-<2kw=es37?-w6m|>w>sXNL|h_6cC?WRDClOgkER~MGn-PX`g5bRS?b+
zqSOmvlxyK&DXzaCOfZeO+GE2jZ>T?ei-^tNyu>~XccWwQc
zm4`t6nfgJu$2|z>z3xHeIzT^iqTK>Na$!~xljdMNZL){F}n9cvzT61s~QP^(9+
z%LiDy$M~kQ!R<{Sm0ml5UhAh1t9!&u=+3t`q75E)PC6Q
zFFu0tGUN_Zg%FH{LkLHGcZH9*BkrK}^3R*HLMS$UwS=a
zJzGC)Jy$=1+CGf7i&*ER7ddiO$YGcCR{E-sz1c_+xnBzFQcJ4KlIl7jhpp$u|GfC;
zK!IOC*bNDLQNmslzlm`&!>=w#?VcU2wDc37f+Ix{o)-{Gu
zIj3TRu=o05OxIm}mZKCY07s?wX)IFtzE>M{K>Vwc3TX#9?!(r1G~!z8r_vRl^WWF4
zmk+7b$X$HiO8Z~0LFom0Q=Hr8p4>xFbUjcvt1q}F`u~QN!5oU)A8^C&1IYCvOoHIU
zOo^|30Lm-SH0yH9--7QyfcFCavY+g!AKjXEkNVIj0sUb87-$e@|2g*3L
zEXS8=-$rp1+kHu1^l74-+m-L!Bn`d_iv?(NFtC;0x1pKYp>vc@Hs7j~A(t<5Q@LG%
z$cmny6nQ_uAr8?ID5c*fOduFiYL{N?9wc*dn$H{gHi^7X5p
z@%*)Szl4yNE+XXSb93`4gGx7~ygGkb+;`oBhCXzUlJY%nq}qf!9`T}^1sFWR%!b-o
z2S$m@1=MsC0Twj}UXlTel?pb@@6q1n9t{Z*nRBu>Uh69>x4noA1=wnQCjq-~Xw$n-
zG}m@D4Kr*S1=o6BSb!FKBcp82q1~)?oVa~r<%BhL;_8X#VbbGkTx#7np}523Hj#57
zhhlEd&t1Dz+usPz!PHR_cv>xw(qJ+6Jq-)7H)q5uEC0D;U;FK6H{O`>j9WKvTvDmt
z|E`8*+2X8y7TN4`a8%q<1HzkYexk3qv!3>niVeMI*1mwWsFsm|NxprW85jBMT|=^}
z*pB6CT6g4wzc&NJE82~1h=Exuyp&$2Q}SUwILogEI=Pg-l=g$;jVi$YP1*nb^vpeq
zzjeAop@^#=LVR_1nzRMXqv02u@+WXvC#wI5@JB4X)(kE)aL
z9_c8%0>AzD=&aGP={R(%bk^xmxngh7*`)I!Is12;AKlV=C60D#3DL2UB*!o+Ht^*
zIIFaKGm4c$*^BeIMtX5`(bEeRFMhKMW8?g_7rZdmO;0bwS`MZnumm&;r3&oFExWSn
zC1~j-qs7zao0C(q0*(!{IS?7xJJ-DMdNqqNQh_mEp_a2JiQRWGIu7uw+?G(SE6ph_
z$u@-rNkLi;SOCRvw&JMPq=h-GS)6hPq1ZE14NX%LFRFGf>|^*)-Nqh+0b>qH7fMU^
z9r=XiAaX8%CfTIMUYMYam0YH*T0UrWFM1P{2BR<{$3W0%OBv8G!&$S@f_A11Gw%f{
z7_=#i2I`OvcE4E?tQrpL!16v!<8Aq_u0vlAbC{L%ZHz~oyWy#hXL8kGL?`*7#kXY&^kv{j^oFbIRMrJ;lAv@
z&!Qt1SIp)x1RUvnJHyx*YKX%MTR1kbn$lmg@N9VTf|Eu&7c8$!UaV~a)=8-
z5rgIvQ0^k+L2X3u)*saSHU147!#Y&J{0l>|8|gJ@!~}&eRKCawNJ)Gfewxlk+87Kk
zjAL3v4~!mJC7KJ^~dK1wBZykF~!Kvk{vCuwd)wRJB_
zii$yM?br^^tVQeF&@5+3VEG%xLoGS=)Xvx7tI{$fRD=m5R0~G)s|r&|36EMDMOnl~
zVAcr549pr6_G~plRqzM2Y7)v?sC31$0!m;v3cIT%3)UQbLrn|?m8chWo^7=6ZXI!>
z5cc}(F*oMwusqp7nr;{8A7SUQ0kMnrSqb~IZRx^*^{JM0pSl&=x*>3`3LIdt9Id=b
zOxzg6yM2UFdD8;EjSva%S&llNN3Hh5vMYwV_7*gpm!vV~=SAS7k{HJ!iU+Blh7C(6
z>kW&F*wzS3yBouS1mZ2_!3N`06Fy61$X;ba5!Y{Zsg_dsDQsX_LracN1W@{+F6-v`
z%^*Z-0x|eWU^~zZEe=ZG3{jD3?OTRAc}M)P#2*Uc6Zb8jws1i!LB(w>Q`PUcBfcY|
zs7wO>VK<4=dLWkT_V?o3A2%E+)g%5`*?))_HAYNFDXPwikeYnUk%L290p&R9=Nd-M4B!*Bt55?Uel%1c26RO`9@=A-Ikacl
zZ$0iu_9z>z=pI4~M(sCUZ2mnKs*eErm^V3mXmHFPByNMijp^_nLCk51
zIq41~*EsX|Er(N*^&=K($U9tYLs&iao~_s1L9A-;u0McvX$Zcnb#rUhJq+Jt^`mYN
zNZr-PSRr-UVkh@EV0u-hH>RXjdS_x&WtonC5?Z?!*
zPaj%`tLX`@pH>*FW-u~jrDWRst%z~^GePQr#zGznYcziA4Z#6~D6X!{uYHmI7SydB`e-VWvM
zuovHg^7hn^alT@%_U3sU>k?+`1^0mX&tkqF3&#IGpH>XF(h5+DV*#b;Si6h4+OhUJ
z<|;x#BLueMaI7%uJi*P52Bjdi51)l0Oj`~J<)HknVo8}#NjQa8F8A)^2JzhMAweooJJZMfBTQU#U
zPq+^v^%-tU!gtt;(l;3D2vr|O`6pG$xCi~X3372Qag)e*vNfN7
zbI6w=WCq&XU=K&OY@l(WrR^#-5^9s9dGtt3ZK>UExg$f8%b6-h&}PjX!~G%-HWi>k
z^R?vTd+b=W?L;tdV|TjJX0MVkpXCgsK!t0gS+-ww2nko@fEV!zNIV;~&v$zZ4_@LWCw@zE--O
zrYRTfIpO6+E3g}fQR6C90-~*F_aExG%|&xfra8psvOFrUR%&3$rGp{evUPVLYw
z8`@B1MhCk(Wp3xA>BVvD!Ya0TO6mx$33+&q>Z=XUzuJY31jTruK&*8`uCCJTnW&n_
zYWgV?CnfLulX22AMxcU0Z;fmAe@0PC3mSSOI5^!13C()3G7ei}O9_QDDfCV`TxDR1
z+Q03HCuRBrc*NF>=LlZyu4^9>o3m~0qxu@QD@DPAGO4p0*FAmmbSlp60%~fg5w{9B
zP)2=#%~L5~PhOxIUAD5a`s9y8Lm*~l(NOK&9ts9kGiaoE-^~S4IXX|m)5wo|4v4*|
ztwl!-EIxt&zvgewG}>0q-%zpzN7+=8Tq$czs#NVPBXEfYRII74|3LPjxJC
zhbf0EEs*#pnM7LuE`8jHus=#ihHcncU$OrkKA~04m7pkuYk!vC5z)-rKhC601$ts5
zV;6YE5_G_unBzJ7XPMyN(4h`noy+=p`aVrZPQL#Neg7Da*9V<hxC;l1c=WDb?9ZZ&F<{<{QnN>TUakTDP*QTJSQVL_p@N4qrH&
zk&0q}8+19fzb%g_M`ISZigS)MQ7R!jcPgS_ZP8=DE_hzLb`u+cgi5zkPQ~cwIed63
zW#5AkoYz+&Jh;y^qAI7iQf_#IO7yfL1e4rUXQ2_vF9REjX#Ffy^|&3NAjR{A^6^5P
z&GyDnv7UMw_oOUkN{e{GhM;8sE+QJlMh20ba_gZgvXxStCRO2uX`xOiQ1!lEsA|LA
zw6aaW<`yc`Zm3e_O85_YF)m|uoJD~~|H{6fhBidQbXz@|GDq*w;_I*^ec0&nFcLcdd
z^drE8s@XpI#uwDL#5N8L45TtpE+76qqzxlxP!!Yr9)oH(4j8a14nrXf2*i+Bi3?S9
z1FJoJO{{?1NVo+vQ5Hk7tUb7&=W~tlAQaAk9YGn9K`5d}Vn=$TXh-$b9*{8*9N?ea
z*+YSeFi#K~crM~`{s4}p!CV$6ccB5+mg9E6tLL?{?&>tx1-C^FGS0Nwv+zSBh*N;n
zpvoygn$Fb2Tc2hso=F48`|)~&$LpyZ1{WBGe2C-s%WO5}2PG_AQ$k9BYGViWJ$#6iG;0Xco8}?zGD>E47nkg5~7;t4^3T;t7bS
zT!?;)6~hZJ&+kIX&f)Av*)iwsb#R5Yc?C;p(-dUB7P~2?_GWD#3~|62lOL07?ylHr
z43N3;PPOLz06x@#+-K;MRRs5h;gsD!;x|WBlkls1JfhfT1RZHR5pZCS+F~hP$-AoI5|+nPy~<6vl|$Am}i_%(2-gC
zvgVwI_QaoGmQm#y^bjtw;M{^SC$%eb#$D2HYtHrH3ufi^-)+eo_47i{-OLMUn73BU
zk3*hjT=|QL#Tj^YMHI~IJE675e0P4xhYCEbF3hJd1Z4s*`;C?|_rzCfkYvu)uLU`=
zvw)qDBQ1HmeVh=}g43j`KleP4{<@EU&tB~!uy~=#{@T`COlI0f`K)?ifv0A8p(&K$
z2p?8NFeo@b+G;>X;YUOQ|9npCk)?T`-JP4gf*TURa+;(Lg|g6xX>;uB+IX!OtM-oI
z+Cffbn9;PSw#l-n1M9cw8->$gqyH5Cxmqt7-u6tvO;RlgR4Zcl1_Z`C6#pn}94D&%
z$8>&xj&NAQp#2Fyg}?g?`b;?fJ!Yvse?F?_)vyH&Ctv>R*dObk%Bm2Mzdp+MjlYEBopoHd+Q3qkkduRyE!X(r0<7#Z4>$7ilt~DsQz#hhGuFJRGmoEn_{`M8
zdZA=ZWF{ct6|xiR#=nUMRKY4Zn?)+iN2u&PJmgu@A>K{MJ#F~Ia@(NMxPwsWObllB
zh%e%e6mf*%HeTF@aD%(w`~qe2?>Uf@UB+B(wB?c*8-^mbwf8MCwE?A_P_5OxL1y&p
z+9+1Rmw}=^2M3l`yeR^$?RCp_5pw$_I~VJdGO?ALKV%Gf{f(gZnW4;%O1NmmegiPQ
z19O$?wdRc&a(u6GF(FH)$q@-5N@*G2Wa(v=m{_kIPMi?#rs@P|Nq(DuxRS7K{O!Jy
zkgPu+=?Aixt~wDX#bAcg%5g`LatkO1a60`E%jMJv?OBqf3D#AX!n9J736JzBE{Z}=
zvhR2Z2CE-eP<~*EkE;>oyfGc=1Q_=Vw{
z#pCqD3FrsT(t&!Aia<3q9`mBO#tZjViq0jNDQq(Wmv*ClmT=*mC#ucBzl#r60^;M<
zSux%wdk4OBT;@S|POW>zKcI!70oigD#7U&VQehFsw7%UIZc2jsNK7)lFfJlWh3C(%
zi0xS_vdYtu4bvlf_3PKL*l~cQ;vz&>bE_?FB%_lTgFVtZN>mL+%4scmPH9s#^)hz!
zh)pmppqtH)Qvjid8}M;QEWdvr7~%KEjPLpKOcp8R$ByQhkhbhW%YwIqWK@qrE3e@Z
z+OrQ_oP|47cs!DJFt``hurFZ$QNlo{Ah*=xW#%#JxRBG0<4BU`#^n$oj@2PN;_5gK
zfn%3nWIy5?5ak$DLPFp+LU;rU
zu!3(KvkB4)aE!
zu4XOTaEs!YUI*@M>2SNbN9bd%>eJO4Z?yZ
za~(J2%2@_%Y{>Bk6z7<)ti0Nm5N7A^nl?8Ib`@79V>^NuqGPQ$Tg&$gG+%kHJumIG
zacW~1W+-IJd9{h(UZ)hEzJ67m+nC5>m3Q9i2nLd`PeaSwJV*gq{4I`cAWvx;JWcbw
z$KH9QJrn`M+--PfMFAgJXa~CEIq20RnKbQ0`O0hwIUP@kqSn3#2=>3A^AmJ_3XW&s
zj+s^aU-A2A>HH*|@qH<)
z#82Z%m|u=S1J|$H%pd5968H#c94%d`CjyN#Ab&-mSBY+=pxcQ+lEvXG=wBkZKru?I
zGQ1&qy09v-;(a0PY2x-bYo8nD;XT?5z`{cL=z-#kyuEtAzbb{!&-$JE#>_q5#HxDs$UpsqtCQdS
zqnZ5Lk9_3UmqNJI7g6{%JUpqOuIFtr)K_V61VMVe_Q3Uv*Kx0in#|x-MMVN@VQ{he
zoOSeQ?f6SqOWBh7gn42S+eh#|K@UX8YDRo(aPudkfBX9`eq?kuznP6*`{#eP&Ak#X
zDPnDng%p;IAv=(Y0{smunM_%}3mL
z6}|4q;Sz#FwDQt<#GE=sj0j1MTl%hWd{{VWjJ}V|NheD$HD~nq16}ZAKZqC2=JVJ~
z&E~uad}mI35m?Z}68>ipDZ3D<=rg=m_!g#i4;sLxlF-Kg;{oZb?0^@+%;dhqX0B@g
z67r;CB$@a^7Zf0{VSi_5(Ub1l2JVOh=%6why69MO9QH|)lOfkm>H-B$$9bES2kC|8j>m9cA$*F
zAf*NaVtMm_j=*?hM@z|QpJsJ?gE~iU$YAP2fENRU$I9-kUx(CB75K}npzz0uo^Ypb
zA7-V(z@I;c2Q-9?HY?`*B?S$XbcOvo+lvbJ(Z!<5en_Naq3S4opC>n2sd
zWmC2R&AeT}{(57ZkpQJd*X>0NCoIn`Aa;$yH)gQ0`GcskJ;b&RM%#FF$L5v&KbYu`
z;BZSQxARUA`@bRPzp#<1xDOefAm4qOsY&?nGbjXZJS#l_KW8n6k_@A#;gMFxu(3k;
z(?1B_5Y(FfF#Q}%xa|(}4i<;xQ&kT|w!x8-z7BGZ8FMndgr{yW)~uNqmAheaA@w{4
z$#c^6ZIg(j3cE^ZCv_@BCRGOq73pHGypc-!BMg3fgzePSx&(axpRn?zL?Ia4^G;G>
z&T!x&Z!2)le!(>6nvv)@&fx2FP
z)VIyJG8ar!b^_a2+V>4$j2lmBMZ>X-@hcYI7}35DJ127KiB6k(XmoK`A;z*?6->PD
z;QC@*NgUaF77!Yj5?|o-wg#}naf6|9dm>yM>5h>Ka?b?X3fNU6UcPbHA`d$w#oCQq
zgcrku8&0imr1=TLuoAHI6!_R(!75r$Y`yL#K#N_J6~s$P*mb#pYg_2I1~FFRe#v+@
zONN{gXAGdS>_!@Y$HvF*bNufHUZ+gym(TRH=_YjG`Z-(-!1m}x{>Ufv#s$dM0pv(3
zY+-q%!HQkJE!fQROY+17Do+G-z!K?T^ZI0~e@E^w`MF{Hi`-w5+|>6(cmc}J+!BlZ
zqyU$0jG3X#Z=#zqHb7IsAp0l2cVEQpo7+H91})tGD_1a6M3^u|s+MC|vc)tVoQG~k
zn2^#TvY#?pF|p-olO70XkDqU>ll60Bd(7YJZ_B)wSU_O^pWXv4MmM+x+I|*vKyH|A
zQQgN$#A0+#Al6fNfu*TEv%e1kijoGpEb3%PyKdr<{|`j0eRzpGEFl7Dzk#q>tOE_}o}&#?H_1NT%a%uN5zG+Q6*#EUbM!w+f9zY=%s$FG3ZK=f
z{4X%;bvn<}p^mfV%H0?FDLQ#pL$wgqwT+EI(N1=$+q-#$7Gp8BuQ@zr+)Bg4(gin=
zgZFUc1RDO^8+=m5)Zu&Ze}7d|F@#_+`ZiEaJr9xmCfAc2@PxX+kYR@(@=>@iVrV@E
z6n?=Uj&NZ=kl|3q$+4~^7TzB0^Y>tXKMI+tPo$`*HW)UcFCElt%C-c3EiFE-U!mf0
z9|~&OjR48__aK_x*Tm06>^F!XSwZYR>NZ2r>j`E{(y!V5>UD&#aa#a#AlV#AseO%J
z_x~R(b;U72Livm9Su1uGmU3Q)9A>2kI%lUkz#gyQ_#Z!TPL&L!I#n54RsSKE>dfOT
z;}treA+!$B&R?vQ7YlhA+lt%yE+&{{f+!3at&Af(!d_JUH(;K=W!s-(5-#ZWS0$aY
zQ&j&iQ)C_gFO(xnZ_$2=CQby?Cg&=QhVVa1p~og_nG8W|dhrW5(To2p>PfN%&K4Xa
zM!5%nzBvkvIw1e}e_yLTex|g^=V0C`~g!353%6CD8Cq^HSdX
zpL=IkE7?g5q^sZDxu55rbM86k-h1vH?X9nme)@L%&71#5lAe=letyIU@tFS^0h1CE
zlbMp26AFL(5`O*;B!c`MN`(14l8EwmU80V^V~H4l*C*=vyCKoQ-;Iez{%%S%X-&Ci
z=41XlWm!5Ru>cF+Cb6K_vgAvwK{~|3NQZg4l__IVXC!qUkc$o*TFTOxS}dsJvzj_u
zoKw{UYHv^f{@(tcy=q@i?}2D^Fg0sU7L9)O8m+*L?38-CIBRK!8I7KtHA+QO>sL!g
zaW2a=Q%&bnrl}gJkS!pYDH^JdRDr1}daUW9S(?-gO-&aIENf+p1yh|hfdOhptw~KC
zJ~lKqrcP(m8rzdkEoz23U1VB58ol03=~{nO#V=zNr&X4+Qb|2uoJi%>>~yJUShZQp
z^*BcR^lBw9oy-83ynQn1S-zzC_yXP>{pfjspB}M^p
zL1PudQGr;ttf^T`ozGg6G;o=0UgP+5si!rypkXkqqRNV@HJQZ-W)0KoQisykY$~5$
z{3KWq?<5@JoIY<*oHd50XNr<`=RA^RqTHCGWQ&!fRWfUtGbRO*lt6W$s
zY3c$9vn!co*|e2R?pEg~v*}3{*cjT(Y}ODMVknqeDrO5};JH+|&AmiBq~jVv#vbs92*}!e$|z|dkj*#2DE0Zm-BrmQGNwrn&K!7b60nSxYr|LkSH`HCaW+mO0Ne?x*
zH-VSS5K=z$P9wM*Oms2zw1*TyNGs_pA9fcgZ_cHPcO@FQQnf74ozZCY94wqPT7K_k
zJ5y$wM!eHh_mHx_+V8TR1xU)p)y!*iTK?jzPirO_j83ytc7h;lI%TB^MepZBWDt)z
zfIyQHGR!l~a>BmiO&XHA{&0E_EJ>{5?f>SPKHImgZHbT*$d)RM5Ts!9@@
zoX)s5VYfPg0V@`uj=j9Sx-*TTg<7O%4MT%M>>#tVLx@1J0ChLHe4<7QhDxRJ=~Bl@
zzs=Mf>@*NdXN$9@I;~9?jm0ii9Zl2ZML&AeGmOW)2Z1H!WF{}kwv<$CACsmchH6VW
zCFiq!OR_9k{+!oDq01nP4Pam{dol!td1<8+K9&GD0bI{|LL9lweB#qs5}g428$12$T;
z4%1XIVi-lk3225nwH~7)O7q)+M?O%N7q_D<0zSeu^xmtbxfO>)stKs?v;!!$b0E%H>Oadd$KsK
zb?X@Z?m08PudmO2?K{wI7Bkj-%Fwz!GrJtAF_T@TMJJquc4uL92LagJgg}yGa#)V}
zVp3FTkk`pUDJTc!s1J`^t|z&Np%!h@RGogr_aL4Vc+7(c7P@RXCoM^bq_=cb6kD<6
z9Em{`Z(oLnR<<#KS^3=<=$Ytg5+R6=jWh;Q9_kBFdW7>{`^
z0*f!-ZiAHNfIbnk1StnOqzQO(LKhBG3>pBbMZV8Xp^Zp$+4PO3Nh0azbz>rK5|y^}
zmX@293IPV_jB*2%XGY=8sM99~fXWk%2DHS$2)F=O;o;Pg
zfV*V{!X%17Qrbv%!1Hp`YK+pv2>Q{xYHxmq1o1Z~P5I0g8x|VY=0&7ofe_nHF&Ycl
z2jVtFiRT~W)gXJF9
zP_(EZv!iA+>b$1HCa9@A%}R?jGf=7M1Cs`yiE5x>I!6oO!mFWo%o!FI6EJ9e#k`Z-
zBz4b#ipcVMk7uN;-RFVRtaO){p;mS>?k?V_Cz?w0W>Ga~W;Ly>v92nVQG;Zp+%m@L
z0s_PbPFY#$SDmo8+=S6ik>xHHIu)Y}O`J$|{pkca%4FB)#<5QDGox5C0;tqkZ>+&f
zh<_lR$0o-hbK&^2g-j8)#xjgLN-GP76XUb9J2x8}sNHS~xE_UE)IH^?f?cIB#yT{a
z!((nkAcb3$s2o*-Fuh^K!v3JF%B}t=3~?(Aa*NXB8DG*fqWc`FdewNvkRU>?!(+Cg
z$X3YyR>&O5tN{wK2{A=Xt;|>PGo`|X+Lqg-O7M)lVA}zUYWtVuDXDnK_Q#|%vhfK!
zUzSF(&D<2cuOeU-+Q=
z-XPhb4cIMXlK%zzhaxhxhMI=ZZ^RC9l|`B_3P1zpK>a1CRCoiF3Nh3tD61f1hFLB{
z5Av!s%G)Fs*@&=0>ghs3&Bj$Vn^vWpy_V~xN*zoO^&XuHoRN#~a8q?tl9>UYBQu%P
zk{tmjKMF2kx#QtK(`N^md_bxMX8KUWe?~SRLn%=YGr{RoSc@Bz*_tUIVEmfbh%qdp
z5QBLEm+HAOBgmjnaH+8F{CWh5&nc}!llU^}@HiLWa>p@jW=N-&cXg&(%&3@6wStLV
zk=jwN>!2L2aUEr)qf3XR(QnXrBf@gWNwP9zc`~$6>Qcuv3qFBmHQm!&ZaTVS8dc`W
z^i)-s>)?$=nfEhnLvd?mouh*ZX?r-17s)-GbhD0GnY)wrC30~GK@LU1mXpT+Vk
zz*kY7F+QO=nBV5HH8ZwSJ!V_v)r;KqSy=?SgwO4VHYk#r9^~wO4!fkwL}mm
z5X`|lwQzwY$EB!#Hy-n91d=c8Bi;7PeoTe!atr?a$O&RvT&J|)-7ZJ{>wNukztRrv
z529Wy7{fcJM3tDlQ*kHHnk7)DGCE4@B$+e6GeYY)T4*MdPBfW>2W2)-@mMlBgKfN9
z5(2-9>15JSffQdB8G9(k*-9wam?&Cj8hrUg(=#pUOfoTHA82R|w}->g9elN&MrQR#
z%e6r~=Gzb~@3ELdJ1p$3u)o5riFuRuRW$#xdhEU$vAb$!Eo=>IWoy|www|@I4R^J(
z_Kd-c`d*rH;xO&96fUN$vuaTj&&bC
zH8eba^u*|xV8-0uUSdW0s?5v3dPc5a#EwkPO_(~Z$Gj1YPc|Z%U51B
z#42v{X38RDCNH|_$f=<{M@Pp;P8~hoJ+YXaOBu;Ry4$q!y~(|k#aSc2m|RF1Mrx7g
z?ea1SORDsG^wj48t?8+6bx(a|?RTWfN~c$Mgsc_)AYH!VqPtnh8=OHOXu(kMq2Uv8pcz#EV)lRV1F|BySX&DHT$x-^3dyf6g_zbRAyX@pi&YxTQZ+hX8ojWi
z=w>W2`A$5e
z8Fs(!J}EGo{agr(U~)SchY^%_;Al|nY2ZFLu^J?2zLv+KlsN2Ub8wJgv(#A+mveT)
zNEwSx2q%n2njFBiv^2Dg8^bQ%bQEUse12kPkOS2~j}rGh7!itXM&N|u+bodF#qm?j
z^DJEBRj66E^A)>FjqsSX1(NoV`iR}L!W)|#d9vNx9;3iP8lt@?-Yn7a@N
zhaC2sw3lxoca;U++O+%hi5(~NkBQx7VDxY~a$4Zqua-9okB)FSWeePs>dy6(Z)9C9Mavbd{t=>y{3D#E9z-zy
zpleAscUy4s5iKYYzsb*!ycF)7=A+aZFisNE
z!l9@sxihfLC5EEgF;|2+QwTLAGskvg7CHz?=
zoDc$DM2NZdgHU~(kgYZft+jV2l1$5_aL&4ewCD<;KxbKSqrl`vl`z~YEpWHA%MBFw
zwZIr|!U@+VxLaFc9Q|^W+=_S`o^tJNf)&>@ye_!=tU5ETP{BV!zYs$b{sxGORFM~M
z!)X|-C&YgNs^^0r76JxE@WTfy+dhaL.ABMm~WlqtV)gB`rtR|!?ZB*0MFNMXBM
z>O*M>r3#foZ6k%4fC9A&RLJk@Cxw9L7Kc)}BNV`SQkyG?fzg-belBQyVxz-yxb;){
zH0&3jP-sXo_N9E*;=YB}tVsi^vURMm$;aYAIKXmk?DO4MZv)p0YpC4Vzz2Qvja
zqgy?K^|kQL>uKnyjf6phhfHbkHIN^SW-iL%!&u%>YZh0uPjlmm=G*aX#lx-Q1I-u`
zKl?81NIMM)*oBkqC7&#vX)b&KC4njBi~^SXSdhmKd0r~rUkNUB*g*_kJJ`$HGH5(m
zDhdh4%pjN>1mk5EBYQ)Kov@;jCE2(iPTu+{$++M0b88Y{4cz7gU7Le9wQa^b+{aJdxVP?;1f)q$ICq$YJ7Qq*S%hCkDi+=bqo7A
zUw4DAg3YB|G0L!BJj!MxObj(-N;+rHx$g$!4pCwc8PQ3YT+h*hVh}
zxSYqiRvHiPRu}2D{EotJG%5wI&NZ^rShx|gvx!U0v*@Wtt{_R9dHy||vdx?-3Y*!M
zjZ$tMJeioeR+c533zwsvigwM$eY{NN?I>(!m-2QU)ppggWb>JZ!jr&cD{uK>7@PIv
z65jk$Jib*!&X{cNyjn88K3bOWW8%jq8Ox|Zfh({yUg1(Lu&wo%PNf^!Ey6k
zE#Q5id_@@`g+=es4Bt%u3eBj2;p%{(V_8FLYSd_osI{yqD{HORm@3Q~jj2MY(U>-_
zG0)X#%nG<36`<(1q#rFq0tz_Bxkck;H`u_K${5GWCf#nfn<$q!h6@(LCecU*iY@L)
z1|qb9n_Mq?XYsW9Y>}?8
z_^iPLOc1uB-@wd`rzu453xCIJK7L^-5^b2<--RfIh6}B35jtL52b%32lT|sB^R)Xv
zN(uur?|{wkc|~mq8lM7>d!Y@8ou>^gRSoD{FV>Tu{X`nHxpP2a(+WvPS?w6t%mExq
zbq=V?B_NTMiPGOzqQxBW{Thkp@M`7&ktVsMIgIz`(ZKf7NU?p8sX87uw@Pe3&%2!G
zQHVTFb%@pEt#0SmIKW#`2)nV7OFTb(6|+EnZX<7Z1+PV6GoQt-yi=*HMQoG3)?Uk6
zraV6WhW%C6+3W1JQvpEQY_DCC8!<0c)-$?c0@@+1H+?pSJMJbFJS3nA{f)pb5DpEMGN(zw%OWncqp+F9!OJ%GPj;JiNlvxfmay%%+o+bu*$deKc5o$6dHS_@
z`YKqlp_RO3S64RFSk9>NZt!y${6y?Le#-1n&D@4KctO%X#I*uxq{5UsqZAt02q3qx
z!!&|59Cl&62A6p)%w+^waQanxdsSG#t_ml_xmU--=1MyoWJd)4RD;Hd^t7^({eTVX
zxSGI@BD|K>BfO4vAw0&8BOGPD2;)=--hxdFyX;MvBRa5I*<9ITZ?QMp?e^v=KP0Jz
zov=6CTlgxxZpMZ^J;{dA_D$?6gr_*1F*c6)An);do^u0_pXNQ@NTD97(AAa@yNOE^
zh0T>q*p2ojb~{Ty6EgXk+z(I9!xWvXP3nLMmkxom%iI0<@^FngQHT%N4+rr*#
zx40|M^H)6=DG_9w*vEU?#I_N@m#{n`aAbk(Jq7nawI}wx-S7*zdCrb}M^3dq+(hg0{qN
zW4E(+vUjmNR+aoS`xkbO{U7!`dtp_{d)OKFUiLore)fS?B|l_8V*kosWItvSppMA*1no2%`zmKv%WglaEYf3(j7Hw!D{_bIa#y(y{
z>7TQE*;d*5`xM*49;zuJ?tGg45B4zo
z4EvvmEn`h3e}&qgMakz7tCrBX^0EDE_BZS-`#k#sJL}~M9K96#B721WFZL+=TQ7&+
ztLl7-eTjV;{XE9L?A2OT<16fO_5^#9{crZfE7W+3eU*KUeVzRs``Rni_y+rX_B8tk
z@bu~B8tj?7HdHQU4*Mqi7W+2)N9-v&pxfUe%Z6RVR@Y)S8b7c%u&c2{L2O~%?toP|
zM(e?q7WV>H)m=fmw$9zJ^7SsQ?yxg}9yiXs4{Oclnfn&baL9L5nparWDvS$S!=~!|
zRo!>Mb^|Wp*R0cpJ?Ht=dL9O3LHNp7th~3nSiJ!2;@TF>bli$oHd-y<-lg_Nc4P|P
zQsUFXfVF`=%l)H$m2I#`+pxaA4C`y+60NT<<*>H_13*T28E9^rc^EV|&D?|a^;5j&
z!>B3BYp|)>Gji9ve4su(jIk!EQnMmo%%f!AR<*$yZ^
zk9YcwEUFrL3Z^K?At*PyK3MU^hE>FIh!5WsCH!>W=lF2or=uEv+XnTww%&K-yXD*B
zls)NY-!!PdeD9CHc6{PwJmII4I!E5+NaZ!;t(&Fmu5`IhTd;5rm53seOc&uTb>q_-
z%g#f$%*-8lTlU5nRpFD9xM@F1pa&xN=b3e>g|SBaSc277`&B>^bu4mg_~i
zo980;Y&_9Suth&ED7WlIw@l#UmL(cJRJ@F)DlTpgSYAYj0?HqMez4qj0a8^O9QcU=
zP58^B0zJw&$I$#79h@ZsSrZ4OhsCAgI=2dCjhENfLZvJY(#0L^$(egDL;;-=(Pjcuh!U@fq*YKqp|3HuAMgObMJ^jpg>~CJBKXCWI{@J6~Jso!i&GXWZoKl@xbDB9Vb241KOfhxjVW(<*Sasq^^cXVia!0PPsQ~w
z{(L8-_ZxAY#I5x{04%mFoqg;hfb-RkZ~oica{m(7H+Fw}Tk!24kL!Q@LHd2hllaei
zt^PKy-}9kI)cXwp`~|`LIU+B;E%#aL{ddImhqn-Be-hVU0DiYW{}C_eJ`auiJ;VSR
zT#}??R}JyVnZTfkNO$ZNkG{qTA~N{kr6SVw{S!m_;<@d;
z;ZNk_`ZXtCZZn^ri0i4o>w9+HI~>>d3~ze=dzZI+kzH~9<=ekFAKDy=>mBoBqpRzIWvB2lT$5WgmIyqxTNzzaGB#o)-pB4d_2&kA7fwYjHs5j33!MsB=5;
zt$l;SBK$dWc+u^BgZe4Xg&t3_5qbIU1If#;`{{uG&buGld|mK~0sYPgr-yF)2_hq(
zzwP11#~&QfD-BHxKGaV^l90bcdjQ
zP^S?+^4;}=IyWY*%AkG?&+-krWARm&;S$D6RDLY(!oipdONYv$)xX8{CmyHYue?@>
zaC|a9@MHs`B}Vy^o{&H?MRW8eB;m=pK)&=uT>tUpb9X%StDg<%VTe0{rkZWe*UQey?3PbM=$n0>5&7-ye|n{Jh#9yc;f@L
z#f&54!@J=lxa#7_B}Yyn(MyTIa35|=7`KB_#vf4dpD1`21$Q7Qx1VRptDhpDqZ{Iw
zm}y2ZGzu~l5YI8s^J7>#kVU+DE?XPV{t~y)v%f&#sry+^-OqaJe%4d>v!1%2b5lp3N7tBz8@7?w4oXu4~g3{J}|V}P2Upq4>%(Zq*eP6T%l3iv{Y
zWsLol?@#BmB?q4`mh!2zhGjtk!(pTfxDZbl@Ui0J@O_0;0iTYU^osI+aFbz5bCePz
z)C;ruyc3wGe*nLSuqrbdZ<`=4ocfRek=UoOi2yH4#CS~?jIltCePbiX62UTG
zgCxRbb9Oq3DLoOmVd&V=Lyml7qM1+bxVwo1c3j}W|13!OkB*PrmV{7o8c71m^&?{_quD+Y89SMDYlmqecf-)Bu|)l$q46O?mZRv%
z&o$`*Nxd=fc0
zI8|CvV>iMWJ~Tk7AE(@M%-@1f59k}nE<8PQv>}G_FmiA@j;Fy#SA^OW6`vn%3AD@G
z@pm&)?Y>R&CSRMplWOBQRuxCG@;0<=m0SE&U&Y4()V7s!YH{%wfLDWjInLMD;ly3w
z0vvr||8k2alx*jiuR{r?mg`Y1KOnbp$V79Cyo>7kwjg3(m^HG5DtZ7(XQxveiyED
ztuX>vP6#$*_|V~fj<1m3=LB#?q_k+9L5XpU0{R-w36^k`KV=&9L5vd=H|Y&BTinhW
zQOfqg3>l4-cbo#+s5x~LDKncME*3Ib-3f|+=^*SN(Q_d}o$x{N>C3=ns9~xI{LmNt
SL8@2$`Tf4I()9b%mj4fj^2mz-

diff --git a/class4gl/__pycache__/interface_functions.cpython-36.pyc b/class4gl/__pycache__/interface_functions.cpython-36.pyc
deleted file mode 100644
index e0ce6f4e3971aba404bf655193fbe325c5269556..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10781
zcmcIqTWlL=cAgu_;Z35byJX8_*-or%Oev1nxzx=%vDcd{vf54L*vX_WOYtAd91Sn}
zXXH@qa0^61Q4~n)ws~o{=;f^i7Db=?xLBYI6y1jw*oQ%}ZPSM$MIZXmr(n_cJ7-9W
zl;yY$x)SEUU(SEd`Ood=^Z9({FW&9Fzj{Ye{zV!2Gf=*c&wou-6rl*!Q*4!U&DJ?L
z>;&gYJIQ&E*suNW73{f6({W$+$r=X4mIvcdd0@H
zJ>8hGXBtJj*qF6vxjp60-Hsucx953>1$%+>0Yr=TBG;GfCC-=aWzJ98r;wZ8>Bfq^
z!fU0yGY!kOROK6r$cXGmipV-=4|RK0oDq33@sVnuL(ilrpr^n+=TV;$MbwL2zkvGm
z*i1{ziv`SlN-T<{j}-f9u`Es@zbH{-*302-)YV*F`0$tnWC@V14I7Yhz=hpS`rPe)ICpb-z^$KB!1%{l=TG
zZf&hsy^8N&+40ubjA&v|Dc76r{UFNHO647|wO#QdbI=(j_M7#9fKZ=7l5F@vg{$*#
z)|*a6ZaF&*Z0?8~Ew3dTm3Ea5F!<9^xQ@>+A#s=xwkA}e;lR4kg@N1Xplv*W>KU3rCn7VCD#!
zb(~+;DpjXk+izBbdaLP|+6R$Y24?F)xqJp2_;gbgEva@FkDsbE7*{2T>5p>y_;@ri
zq$8=D4CVPp6tTJeUjpb#o9a5|5?%;f<1
zbv0K%RCg0{H84Kx*J<1$jnLdgT&`&h>RM6~T_W9cnvCcuQEv*T6XmypR{ISmC=eAH
zb+1)j)A&e{;qEuuCmgKlbb!XEv7T=tQPiATR1NJ!RK{b+P)Qi%lTDyNP=*jTLi?B?
zH8vR&KEksSFUOhy4K{-@igD&WyhCI>FaZL}kUNT4o$wSC`ut#DZ
zl5<`OQZygsH2PCv$~D6zua|PuhhSxrq8No)7<1CR9NP1bjF=c%d2(LqnL#ErY26^}
z=K7^^SVm?Xs=aiOcPGL$N|`VnLqan};qj0#)Vi43)gG#{7EB&0y=<6u3t?uk?<2Fv
z_5=>ITS};lB397k{Z1{+i0MVeojO!*D);XElGe-h@?l;myJ_t3mtjtE{b$$9RjNN-k}UH#IfOOevem$vt7HAj{XDh;m`sULLTS+BI)ek_A;
zd>eAN(i8{VvMzR)Z9_xgSgur9p*@`Z1`d|}JF(A`n~^u?L)z4VVax``5FE?KF|a-|(*DTkW&TCL_#
z(virgYrlohC)u!r>wIsnr-smoyPBK}6jyEHa{rFDXON8UuL^ajHQVwWW>B%T5viSH
z;^y1z9L$k&tj@|zz-tiZ70-XAbTmE|sY$tpDe@UgzCy{f
zl#mFOWEwyLtW$Y|%8QXf(hycp+p7nptDW7*kWNLA8gF}0w2{$jL$AVl=wix
zNQe?uuLY7M+7dcB(pr9$^bdSkOi(Q#XJioVWs0UvFdGomWl^sNP)``EE
zqrh!;q6!PIevE|C$ffLh&
z>DI4$~M>}M(2~SHQ3%KS4)%(L)$EeEo!XIO#PlgN3ODzZ56toG@r#m&+>d3&PdrN<5d|7;q
z*Cm*mkPW2#VH7~AlOosVp%QvppoJRD1Re6@+Hj09J|-l)5K4YzY%_q-K1c}fPCJYCta?mM(eUk^d5Hy17N!?YN1r@84W7bQ(r!=*9
zl`zHXQ;|OlbvIqdnyS*V#3a^R=$Yt+!V67&x+qZTnzHvJsIrW!y4ldcdGg#hwWx>*
zsMw^NBlyEaobPoomJA2*1h})f*-fv1jsD6TWynmr_L8jgZmQqhU1_$g0kW-HYriS1
z;C>~r8WqqE$|oBSe80SLul%{X8mUs=#R5H>A@lSQm_v7?fh7yT^y_O7~2f)$M?cc5hUC-)J7^?q9~H-^b^Zwouds*!>HTN;ya;$RqyOP$LP&|Jp*_uXkq$
zP?SdR+$Qu43z)TpJdG74sYyxhS!{UIej_lj;mzjuJLvc$eC)+RJmw{}RPhb}NK9SF
z=ifpy6nMv7-Kj@iU5XOeU|EqzZi)#pi99V}L#-8~;T`^RK5)!O?9^xsGCO5`cPnl<
z@-FzK0^09GoS3RvX}EW7&`EwPTuD+&MNJ}
zn~^yJL35(B>f);JCj4ipU#PnJc4}$*hrVH4@NEmaYmFPJ%#qaN9Bk+
z79Zt~{$FC8pyQE-Lh+=%81C#*zpah^$s5GESVn~w|^)D(E5B#G3
zF`Ai76_|LBEyn_4D`G-+y*
z`o@k&H{&`DE+KiWG=5Crk>e<1>`Tbyi$-WUA=Wr1XDO^WCTF?f2$w@S`zZ$gyyJo_
zX8)Bo>NCiR;m0RQSu*dLQ^`DhY)CLjxO8LtlJ!s>m8;a=zrl2W2Qn25XO4(d#?+UU
zr2j^z97s~c2i3Wg1E4?W2qClae>?%>$Ag1UxFcT}2mEK+i>~n?4jAb$@}g9uC7bo;
z&aEmrtn_ydA2At_ENCYO?XY}t|@;K=wVW5
zhnmpA0Qdxa0EE}zXMyCZKo~GglWi|Okwmb~bW?#DrV*q{eGKugD$V=UF3euG^GTTX
z|88`>0OL4#_@uRec*44dNFzX(5t$d1US{tByoD>r*2eyTcWmw9I@$ofIIt*^yM~;3
zXmq|4n!to{?-qyBvIvsq5mJL!fwSe^tT03#{>lW-GdYF>gaQv$1nYhy$cYK|qX;u-
z6$oDr>j-p;0*!=ElkQD6@9-M=J|59~sE)Pps=Elt`9HOpxPG?^DOy_wGI41q2!hJf;UcQ%{>bgFNRM~e8IWM#lVCZG_LWCQoEd@E!VOvqa^Vu6%5dlV
zrSbEP%mDtEg9UdnTt?{>Fn+qz4Np-B-(8}cd(XwaUFn?(&#-r_%O8tXaSl(CU#0MF
z&kB~qGl+6n2n-uC8J-DGhbz~V=0><8uA&X&gH!HlcLnpm0@2OAlw$oef#sgXTyA@)
z@BJ8ILMNkS`)HT~olp^J4uNhQ>|)lf=G+YAH&4N`p=KupDRp5Ja5D|7~`KBy(gH#_a|IE
zx-oZ^y%}IE0SoKJP3c1fNBYKBMWhbn31`t3dlv}7B0!j^-rsNT-ntzd79UV=2T9i`
zwRgSl1tb>5nJm$Ae5=_CET=;k1?~97!>X
zrbP}pul20uw=iEiBP%>Rh(Gn;EUlVX*|F-rbzAN`wAeS)$n@UA=aZYu@$p%9KX1`rQ+Yewwvsr~u_?e^ds*mXRBVlkd!T9gQKsZ^c91TDD
zAs}`x-=IO?M8Yo;-8rkZ?e{TEbO8O~+X(hsaDo(G^(j=|ouSY;>;Q{{kTKg_>`ts9
zP$dUZD+^&J@(6CpUquR&?4)Qt13840!5u`Wh&LI?wWm3NBt0}lCcWFkV?-1ldm$JW
z$C@}?Wz&pZ8f8#;TmAyo*`-|l-AS#Iv*p`~6e)3x&l4<8&9w}G$tS0H`l9J`58
zcI4*Wp&R)FtgL7z%zr~QjX4Cw4RyvakS5`$wTNK(1j31?oo7Pl`AdrWFKUZ*hz#Rjl8#v&w7K}xNt
z#Up8*^H`4{IK^|XAOKXro-1lmUr^=m;S^Q!W9g4fc>36M|1A{AbSLBdV-=zFkVEMK
z-t}R`!%SC0148cqc-U49_&u0c`dSSX}+WNW{@eSm8tD@~sS1ER4O`Xe0zO@2b9BxXs+
zlMR}7O!EE#+Wv{?Ozjjj15SympwkR!A*p}pyptrLkMTwVgYkxV6Y!>WE`}=1CaZ@Z
zgWy*WFe!iq9hifgBMdwiaT}kJekwv|WbjzU-Nu3Sb~S(qJUX0#Bkkh2zmJ{bq2Hs@
zNyng$f2U&%Jk|_Y7G$UP;rmw-l_A+}R0w?7%0}nSPzUHYdkFzz1AN$^O@M^-9-P?Zb499PxwwPf6vg`fbx>z`!1&pg#3qFU
zA(Vx`mbCMDnk-jp5cW+xNZWakaip51YTp-)Wod$LvLB0qapFdq0J6tvR_l)cpGU}{
z2g!ndMU{Vook0YPGMx=68K8V??U^Xq#`9^#XIP^INKjCy@Iube)Qnm4is@
zNHFJtA7yZi0LF?}-A-{{hPefM@^!

diff --git a/class4gl/__pycache__/interface_multi.cpython-36.pyc b/class4gl/__pycache__/interface_multi.cpython-36.pyc
deleted file mode 100644
index f3df1d943e2c469eed04842d450f5bacef790654..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 31044
zcmeHwdw5&Nbtmq<00@92NKq8uq99t5MOhYQNtSIzHf39uACc*Y=*WSsz#s=C0fGQ|
z0ZJqSBxz_TZW1Mm`%2O@P1+=_n{N8td}+Vk?zZXk+io{~H+Q@Jvh8kDx7lpdN4lT4
z`*nZ6GxuIRNJ>ij_^Xue+`02ObLPyu^*+8Tmlckhht%2?otb#>{QVa$Ow8ufrP7J{{KReSmJ^)GuV$Rkyq(Du9Lvrqr&9{N
zoXuRB1@ubyQc{Q3pafI}$6Y5U9cS*;%-x-)iGot0n8G9~0
zn@N?5s|A%U%$KIlO*_Ha%b9t{yu#<0W6sVxW@*_8UAdCYmz~gbshG_>=JYYgsvKQ%
zLh1bSLfWy;-~Z@r8_32#(lLzZK|G~!C=jSx)j%pxHQ*Xm6RugdatO;A%IFVlFg%zI
zzzxyOg_LExOeJSyy?7Un1(~`;pK0
zqh7whe=x97r~M${)blsyY4mTOe+blKZ$Xa(R>OeSQ{E~4ap&4tJ$#UTrMnhYBM94B
zjh+r%Ggp4`VjZht7en?lLh1GMHkOT4YWG$2f498RhPE1HOXqggVxY?&(#20TgqS{c
za4}f!&kgvYOy9R*bb((7nyJpjmRFUNL6P0+p#y?IRfz%5L%wk&1K
zC`FoRZjX%4XP-A~9kqJ@9z$G0wV{1F7R}t~MnLTYhC%xzXtuxFLBZStXD>p*T|5qe
zz8Jk0TsaAhb?dq%Hl#V=wh3bn)a$Am!`Y7e8e06;hCH`!3cqC|00AC?Kpszj4uUE%
zYiH8sOev8rB!mN}XUo~k8SN)GPRtc;M5goWC9vd#CrcBVwdIVRrMxV~VhQ)}(Wxh&
znLg!zGITjzUPvKv;*@w3#koXzA=8jOQCuxUW^c|^oGGhRQBrOrB0;h)7iqL5Alg-rZ*=LDlql*kPUK3_&MQ>QiJ+9ZY(7((a?G>S
zPUyn))T3t|D_h8pn@+g2IyaYHbGlnA48_c?x=saRmz^#zm7SR_+N$IXxSmuxpZ946
z;hl9Xs59F{GbWmzgMcp-iv?$FOBiU~R6R`y?UyM{Lr-giPM1edbc_Wj1R9ppjeqkk@tGGUEBy&y
z>q$VzP3TO}8Hct3t}<(tlXcKu(xAj#)vKy(dDEKZ7QhH`RwBIGSG5%
zc$rgKAet^0Z71T@)x+>VjPG+|^-+c{=8kLE2ZA|4#wVi~>?!GePI%5vFJ(&hA(ZC?
zNjax|K2uJ)c%U`eT23fiKy938o=si`PjO6Dc7ptJqUeO^U5vN##aSn`3`&-hv2q4s
zh1qNd#j5OV*`~VZSRl9)DzH!|qNJ|?l39Slm@O631t(ZuUk3PPbjP#c59f%VW0lG_
zpi@gPtuCSNpn_ezQgXV9Y(8^2lTTevLz!n;AeueNvN{TxwX!z?#^X+~l*!N84
zs%hU(=P;ddI;Y?`7HE*{V$YB&rKFEL*5$OFMDIc+1-%do%PRW-bF{f_^J>7(;QRH*
z^-O7@xRlW|rOTz+6UUBC=+}w6>a!$|Q`aX-bq#xIHDAsiSzdP{DGuFoDs?A-mtKJr
zh=;AP5jJ`{!e-2f@iF2?9AT!>6YMocg55??{O&vCeUDgU)|feB4Oyesh}mmI%(&5k
ze?yl4j>W8AquU%n%*bGm83`UjY_Bn73>ag^s5xXsj8UV<=tj8*&5_`MIf9tV-tFcG
zM?CCjEn<$pARegrTG{^{c&eCdFGlTws;S>)%)mE{cN!8Nv5!hP-Uv7S@TmPhh-pI<
zbtv^Yvx+>=Q&rMoArZ0KABN(fCl3_TArZK43=|rz1OzE`3uj#%zUadt>`)cu1_Rqe
zhuUG(Sb{)N0f_Flsmk#u^O1yZQE5E0NFbL!|yWeI_-EkXiMkelmlfn_Xm!N{)QPhnicvuW{7AgX(kmsc*r*Kayl=Ri_4D+(}l{R$%r8-oF!#1GNmQN
zazfxj1u(_j>JrvY5K$+Rox*1qczTJ`+<+0YPDU&Hw9{(=|Yf}zs^oG*Lrh!vzjNeVWugJxrn
zSqd!vv4VUA&7T8|wS6ifGiU16SPsMRLwkvzL(#y@IV>|H8Ja4LcEl`SG;AHhmNgeS
zwDmV;5sj-V^WmDgcDib!j1w{lb*gyP>JLbt@beAg+lSJ{c$A%kZqNCl&V^ZEL&
ztYdZnR%ioE;CT*o9R}Qee@}ydXM?{Ns|ZKERJpLsQ$}l=7E*r~r(K#B*Nl}Oml_CR
z&#r0g_|=22km?g?_$IW)@HNA}?1iX4gkq%vvt-V`qpH6@P>x}}U|_@S52!(WwO2zM
zSVK`mc!oDLRSCNfROM_npc)~Q5lV;}xoOC|TOf}B@~8*L#o4qEc_C`F335jZ&U;#*
zVGiuJ7qPCl7r(sr5Gwig
z3U`LLi4bfsuMjP>u1xlN`E^Wcc44)!=(L7vC9kI?E$U$JBP9mB2v;Ez6_XWs-I#E!
zqjxIDNL8W?(32v=-X;qmA{|_vZgfky{ye18RiBec1~B9UY#y?4X|nOS@NN4Nu?$l8
zbb?u^s&e2{j(p95v0T4(Q4$
zJ#~W~8moT^I)F~1w(wn%APKVDV*mlkV2p%&pjT19IuRTRorRV*3SBI2^cp?p34Fm7
z|G^Mq!
z)LYZO%6M0ip(NBYkuBBI22Q&OT(|49En-N7wg^4h4sH5+DoaDgf2^RkS5PJd^#}R7
zj-YNAv8$0S6Il`2Cw}Qx59SZwW%I0RomCzXcqyH1F4<^IL*|MSEA=gGxEGN!%mESADvCai4?&
zqE!mgRWCzpI#T7(!k;msfXJeIJxmKLof^W+tubKx?M6RUF18K*ZoAd6q2kvRN<-B|
zNhX#(`?08BhdH7Y(zjp%dh@6%jod-)0JrKO9{jht42h$PAVdVrQvt+K0014xc|(Ju38inZ^?Agzl?H;Jz@
z*!M7c?0OKi7GdAZgncaufh83St;xI#DKu9-0v}e+f+!}#r9~rYrvvR$s{uD3YmNvSp)8B&4~AQS8e=nFPu6G8YO2BB
zKtE+ybQH8D=HSBHI!g$)&mdkDKd=89HQM|@BFhhnyHI1dG2=*BxnAb%)udBtXpZ!h5-wNST)icQ0@JJYD{&2#fPg9m)JAM8@pU_-`BM=4Mi-J>!u=x_YT@E
z&}M^Z>=5aFR59TReL^a)jF!}V8qyY
z!`}Tawbz9M`vurA?AroP172#sn|g@04eH=uD^)Q55hY%-43p`iZ@#no}ppp9)n7cI&e
zvh3rk6K>vvYP-C5Zjm>^eNlDNFZZr(%DsDwyn7|@J$|{{Kid%4np*
za*%yNz0r+7zfJt(Zv3#CQcrNiwB&!%kAG^*_^18&3tPrdyYbH`#*b}+|0dM(h+F$W
z?$+Fda0&Vbaz|bN9sU$9HzoU6kl^75R?FpL;Wb+NyhgUj%{j>XQnr%O8i?pRVXhpM
zQubX;7J{RfGhu|5PtRoX^V*{8Ym@WGn=v
zzyE_@oSc7aFT;L%ay;x_)U
zq-_`ehbQMhKl;*NJoWj{PR;{3O86CQFWQ_Yeg%*Gxt^OdK*JIRmvHT!bb9IRf`i>Q
zXdETCcE3G2|DS)Ie_!>g`7@KUQtjgMM_%xkCSAh(-eiN4|#n5uai5MiriUGVM8K2x9$dZ&d3bXRhH(z
z`V?vLn_eUQ)MTamNvs`bi>oEsF@-{v?3|IAv74pRF@=3IKQiva6aC4h@vlu*b~M6~
zxMbW1B;S=z50Z{{QF9lZT*CZZBSvnNhmrf~V&p}3XpFQf<93`+PFA{nd~{5*Lt~@A
zD(!-cn@RVNuT8#gBwk~`zy^I}t_af$^l&Fyw$p_Y*OKg45m)I*XYEva`IKybRwC}#
z$~?OmXggU6!Q$f7!u%ub(+`8xx($DQvJz#4WSjrO-yTHE{*s$`#ch|Lm~^ZQ4?W-x
zgFl-*hqZ)%i!$xc!FkoPe}R7ALBJOn;5!KTMF#wm=OEyh>HihaLBI~rFQM2e`7aFk
zBAwr)^ILR&8xGR?MTrjqQk0ZI2LW4`*O)@egCeF8M|s>fw11b*@6q{vI)4BM&?H$S
zvQl1Sz}Dq8Qb5Y{5l4AnVgWSrwErueKcw^D;EW$jhL+dCn_$zME>r8wXJ?LF$ttL#
zCHtq5!r9RfvAoVCPS}0N>QPu|%+AB)#EE9GC&(Rs%tmO$q9-_~Ss>RSo%XcCit*|U
z0oh50a1%bw{Fyv#KRU`}YpHyC9UaB#a#O-;L*-UWD1=0g@t
zHMp5yaKf|c!sT=+*}k-#Pt6t!1=zkgZQu}O8cq~e68V|*>|%+g7CN$%k-#psP`sS6
zlQDeG=8+hN5_*rxWeV=@y!X}V@faD{PN&B|Q%;mgpH)|Qj
zs}^c4<=B6VNc$U15zN9uz)th~XX$XTgAkknq6(#!d4&Dri2hx4$`U(Gz=&WDhK+*;A#@u(
zA^NaaFX3CfCFK_3UiuKN4Dc`9Ve}$wWUEIk5n`xEW%ssIBX)1QzIxsrWpDL#lR9Pj8Kh3IhtHHhP27H*N<Gjg8+0f~^V9Ko$+8MjUU@>g}M?VaUx+;YITn>#bb4wS8
z?Q0)qCN3PuFFT~SG0SNw$J+B^Y!j&1nYaJb8K^Uc$w4*1Oj5cSnXG7I)+*^r`@NYkbh1d0g;8^jpl
z{Mt{&5rG(53pIpRkzZB?$lN@Opi@Ns3>~sDa1h}^k1{}1FEM>`6_{~t1tc~%HY9fI
zCZqjV$ks&?$Z8^Z%Fy*}YrNjc<>`M+?a>AQnQCvxQOO(D`s=9m&YP~a*XT{PrmXN4
z2KFYN3-a32-9X|78Cp=n!7X_n2!5STI3;=qgdH}F!m>U`ZWiQbGq~9d{9&wD;-nU~
zf?_^TYj^F!&Y(!WMzN2+@1!HLjS8s!{d9gFj>p~FbT6S8k;$I@BlK;}$<8n;<2y1`
zNY+78jB6zU@UQ`QVIJIU-?mbRwsUSz^fj#$$WI_|#8X;>I9nUVqEtZkBzv&bjKy~$
znNbG0R${XMZedv-q^VJ?gygVO7zcTJqA%__?lN6mYvON6uWe7G61!ZibAVu
z{B9R<(VD8mO(RW1f^Pw*(@n!gY%Yh#3cZ_C)nYtiH*ezT%E5BH8q47*iCe=yEH-1g
zd9ls@YPB85k8rTV2&{Y+wT$JutL^XSQh5x^+ig6cch%&V!V#rh4?=gU_QfEN>><3f
ziUsh20N%~XQ}yerpN?X1ReL@Kgj=K6J9YG)aX#+z%9Lk*C#
zA37&Qsru+kpre~(NYnkAQTj!f@|Jz7+DzY8Y`eFK_psd|*mYNVfD72wdQ^9{Cx=$T
z9_wNFI@R~79iVoPTccrpUX-Ur^CL^UYdEv&#*9{bi1ACX+NC>3MP*lYXKs(`!y!6N
zk3_W#1JUmfwceZSF~H@E)m||K+nXD!_Z775Ej$DUlQ5XF)q34h_Emd@bq_VSLp74y
z&&?%JH$hjYR>O6CSOWU_fqKXwHm1}Fp553YX{>{cfNNIiS?o}A4A8end@=i#@~s>#
zG-Oi=ly(rJXBReZ`tbCxsq%Prmu%MXc+voRC1FLePyGun1o(g_jvk4@;-QT|wXaG`
zgnHO*2PG`|QZO0D9o4S#8Kq
zbF1(Ip_f)TYr{A?%6}g#ANEt-Mlm@Y7zUk&12q~9>N0Py4x*$7bxy)lYzKT-SaUNg2N_zo~9m-WFY3_-Tkh%-y
zHkZwi=CT>`&6RyO4)5x+@7YxLDYtBm&C&85xnnr2+oifTEb*V#{yEG%F}^&kk5|PQ
z65j{dR=Q_0#kJttFMGY}TJX~6-G1fK)oa^&Yok}*xV6!OC)B;@ySv4AAAFfUm{ZZ$vMg!aMc^Kf+#JhE#sc&*J&fR~;m1kmk)t5$7q6YRSXvsdjzoda2Q5He>2|xbW3EzjttSczvNx!G||daGT!YW4^g}KB6Ac^_2ek=+*WntUXMc
z`)M;K?3K-!T&JI22NU#FACr63c9=-7ZJ>L^{?NBX_j}O7H%Ir!Tq@V;{>DvoKaUXh
zbgF!!bAV4hUcS3JsHVio`3WfeccW*>S8c3;i{4WmW1pEb)%UBX%5n8HdtY@Jz3ReM
zQ%xh}8TBT05&mbdQbF9@OWy2_W1qJBwi~0>0ZIa}RC=V5ISJ@aIFd+t44hHo^nrx2aj7FtwANY>9yYc$FKkKHeHP
zO#Y{1ha|W1Qw+7_9#WrZjT^?{k=h_+{Xyu#;G*a`xIF
z&8a!`>*Kh5Vjj|4$BFD#3%Y*LfKY4r$t@Sa^>1_iS@;k8xZb#?Kj7o)QsJAb=@0t3
zT`G7rRXOm0!)j4DLS8t+Z3;9~OA=CWL(m3pND(1trKMS;Wwgpeq^p?JfQN`)B{B=t
zSpj8MK%X}$`)ULWV5QpaTj13+Vn&)dxIa2HdQ-aJwo^Lc*#_EN#7o@pH){{`gWLlVeXvNoU&vM
zUk~SwX+4|$1H9ppme@!A?{!IC|E}QsvD~BTQ-gtdtfbeDl;4Q_J$`yZKc9O%*RS4M
zJ<=Ok1bsf>o@ex40O_|HUnsyYGKM_sDi)spfF=%19DoANVM
zRaLDi=Hc9vsO#HgPVsi|jCWiOqRyMKergM>f9M8SOP-s;`l;MjSWAAv`rFhyziF)5
zHe1p9hp&URpXY{H>y)in`}qayjRsmX=4NQkbX(E-)f-@abF|jbx5Bzk>lYhny*<`<
z`t89!kK8g=?yMe>cfI%evFhw%sCGy7j%z*keJ%aVO|xlK{K$3c?B}^*OX-xYb@ua1
zOTDDt<=1E1I=kcTY4vXPa@J7auHGZpzi8fgT)meychj6w@8g%cdIN5w%WB{@LZ3eq
zeWY>xKAfxb7@ep7&6<$se8%HBg+n)!f1aAp&GM%oy-rJfv;678zZie|H#flg=Ee-`
z_f&2xtm|XupOQa)_BvRz?S-e^u-EI9tyuf{|H=8&kKF+4o1-=H+e~X63aKer-xPns
zIO?3irlsUJ4FuG8dF6_RwmHwm_JI2C8>D%*1@4;x
z_j_)TCkeP8x1Ixa+h*%
z5g!d>EisG@R&R{!2@BO
zpUfj95O~Mj8C)}CzXe`5;PM%5T7Wx2E+eKJn4D9ea#q@u<
z$opqt(iD`N{mjI3mH7TQPwZcs*sl`%A33%E@k;QS=?5>_zlKtr*h10HR$#xE&cAM4
z!c~FRY0F>A$7N{oXY;I%oWs%h2maDCpM(u7
zq5^@Jzi`H$U?S+Bb)!<)t{SuUF4oR~fzxFeGSE%0*$Jlo4mio%v<2b~jvDEsK*>GZ
zCH9U3{R}`l#-eY7Iv%lyQM}+_Ax*Be{5p)}E!;u(cN0R4F^b%7{IeUo9aq
zI-{jB^De4intY8rk?0N~sjeh(%sec~aM-z=+`Aoh>xZ`f6Z1z~Xu4(bdx_E*o$p2-
z`@86T51dNqp=YO0B`U#VM^4OD+72fkf8y+^#8*5^=t?I|;&60^mTu_oA)9wKj+@B{
ztm8`Mb=>A-&f%7xD+{V7!&Fi<`k%SB?u6IY#i|Z>`mB{_
zi*n0Ua0c+~TiO0P7jCp#E+xDCrqr}>BFKVErEtqw_@??E|^^ovhva>3FeStfI$r_+E4KN5FB*4Vqrd8Ud3s(2nRTQ_DhI)tqTqg
z0ZH#=$88{TowRaB`~Xr`+T>mo&^2_&(IZENu;=d=Iv+oBTz@_>h3NR<#4Q(|cxLMC
zqf-xGIF+!gENis2EGePhso0Dzoqx?8@PRK+I-wcdnzaZdj}X5JI!Ec;K?k>+;Fukp
zW^-ZtBPh%MUOFF!b@Z4VE
zA-@DnN7GALoUe;66$?eNg0IAI{VN6&4duZZ
zFV!uWE76t2bBps6i}ROkZa>?1(mBbjL0T?XdT{I>H*1wval;#I-4o9(T(W-y(Mj{d
z6~}l4IOFxOW1fGgGMXu5?D_S?Tz+*e@!Y=WVJ*90FTM5R#L`=i)%IPg>~00+Cs{zW
z9H$mXQ)u~n&Y-s)B2C3vA!5zWjpAF9Z~Fy
z;Ido{T^k0x#lkFZFc5pHX+j7xxh?Wm2I7}lU=NE4_27HMP_Pm
zI1DNAZr|xwoMwsmL&_iH)jjQ}{$uTYaalvt(SLzuR@Zxk~KBEW4V@w*>rvFYj
zpTVWdN=AyVw9@8OJ}uwS76
z<8Yh~5qc2GG!B8av^|d#k8o+jEH=$s^h4Y*tk1=+*~u;;MoP`dS7DINMer>_L3jlPx59&)b@~>pWFlw*L=Ekqo0-<%?H<
ze0FI8T+IFlQt3_-l9D7DLcfEFdUOquZERB4atnpvAXetQtttQ~-jX#|&iM2t9Y
z&5D}?@X-pW1MV@icQ|1ra6A_mw^?zs!#HYnz&>>!(lN#P$S88lF<(I7zX60ZuE)g1
zA}~3RODr#l~lk5RF1N{@sHQUbqM4GI8fYeo&&bXXY$za
z;U<4$ERQDh7_kWmrg5tvz^mqlJ4=s{xDnpFnH#*@t#Rslkq+n3&;T$0~X2gXz9y
z;UKzVJDvyel*XXr!gR%4J6q;5x*=PwFz=Cx_z_IRty&NlkHNB%)|Ifaq{*0{iWj@+
z7#NieU=kfh3T;6Q19~yRFvZBspy894Tv;$TGhl3H!0;>rvmMywVS^3EW!OxDk$x`B
z3nAb}u#MGD1DaeE=2#&)Hyh&#=UTiPhv`=U({*0CPotzL4U+t?7{8FaySAg;QSHRG
zLllMtL3^~?4f8HQK-wLiZw_WrF!qA6KTNNB;R}nA{#k_$`xH-BYs|6PDt=rP!h5$D
zErExtCpZzlW)>Xe45;`^C|Ti90b?5=aQk{Q1f%phpO##_U|EC@Kj1gUb~nVruS;J$
zp!Q<1WK`}9lNRwz{7CC+5ce+|EC24IzM@3pb6p$UqTDJ6%5`&m4I6|WCcwGI?OY!(
z76d!&fiOx6or?jKzD(>N#O`PD21G
z$%p)-UWgjJX~?|mtPwJ90f!k_Gi1x=Cb6y+?rUzqc~=Y0G!JWpHVosGK0hz6F~J6{
z2MM>8srK3~Jbi1gRCjgY{%iYxRQvRM{e^lul-R!^x69Rb)_QCGwSg+{1ox?o{zEe{
znpWvBwz&@TsV}`y!qlGHu8V=fP!QX9i!tQ+zew_|)aHf6{nQm&mVf8M}k*-Lj}NHl?j%?*h-j&5@j3r}sYlAqs3=bdym=)4H0;o>lv-r4WcftbR`rC)OE$fm2n
zWOgQ3NKM=CKnB<=bF$ZPHQ12WPel_Kg@_6t^fJ2)Ex?J&^|Dkwld&ZnaLwpD9z$!?
zkf17vRs~g0IqlF7@;DraDYtutE>@$UC6@GYhSyo@BAs(|-b3fTbRM8X%~kYEn+&+&
zTCyd6UWGG0sFk`R{K*c{o1l?%rmv07(@ZG%QkjYcw<5Up$UPwi7
z?}fbWRVqL$bYv1*ps&bW7W6$$t!o4sn&UcxV?Ff5Ih!W%PFPRyTlJg&g}6_JAV%1*
zfhdlR8HXToBWBp@4nh{=O4&#UG#xXJ(6J!2o336+=nkU?dJJ_TxZV?t;qtR>FF*^9
zKpOJW5>q5CbR9sXt`eqHG~)3}Vq1G@_XOkC0`OUgLJSFrKozK2xY*y!8Nw#}(Ku820yG^ad8P#QOM!Ce_@?E9hr}pAL5UT3!v;61xnZqTMmkctfsU3vwTX`U2QcD*`mQQPmd0ZbTDhSNd^KQ!y6}P-
zO|=lH_N-9t$p)$kRZy2Y4A9<(4xD{I>g3*7r%O(_^t4t!g7J&|DVDqw_lDs*8XN@K
z$8!nStisC}9pg}HFs2b7U{B5U`lHng!7n$#rdy}!%i)JNWH$sqxselrSsub@!T7+K
z^>RSz85S<%LOdk6dhjuR#3Ia3%@GB~V?S*6dj%T48=%3`umRdGbz1|pUCi4AO+s8~
zyM#jS_{$B~`nDXAjQ2*Q_`UwqG$P+6JuZYDHI7KUfoTUW#yPNI!q>|;`cTNONce6y
zd{;f(9j&3#Nq4kvDyyNFg=`!jZW*m1{#XY_YsmhiYMrx?|LCzxc;47`VN3nVGf+Rd
zyHB{0%kf8RRDdL;+F$QY)qae`uzPW_?oXS!0IuuJjmpKvI>kRN9|*7HMJ?B`NAIoo
zbZ`XzAc|iv@PxYGYqy)@&VD|O(|>`n*tT+D`>}`-gi}b@+QH9oN=NNLPl6v)p2ISM
z(d~2#+Niai+Eza6QVOw@41TRVxx{d^mLKl*5Qc-Ae)#T2N?xDhEnGNaJ2EElz9}xu
zk~J5O*q?VfB*QcpW|;2X3=?T{4Ph-@IAZ_KrtuM3c}&Jf6jJl1xv*dVo9pAvJo{Uh
zbkUV5^oC|={~Qt^y7Ey>i4UpU(T~Pz1Neb0jQHvRr+b36wgiTsoZ@`S)x;>vr6YOohx+K=&aMJ(0MDJDxDggcfd);
zn{UU|Tl$}1G-s;V%K13`2RJ#}2M1e45|9)B)D*rYv0&%#Dk0RLZNe{AWQxtR(1f15
zac-ClXA3f~`wUCeyCT@bDdV?`N_L3|BpGpqKAE-3d1RS37Z@Q^#!K{lI~}Pa_wH<&
zf^zj)@BRD*eIKLa?;P0wm!G#0vTS?YMc+v}FEfI>AdT}-jk8QD+lVoym@uvYs&<8W
zWMa7vUvh`HE$(lF*MoR9sH`1mQYjWxW%jqyc|SitK!xWy+DAJ3xbVzaSx7dq@GDbHGal(NhUA{^li?BEw6f(y6wBF!6E|W(PHH=h16Zi-
z>xprBJ&0xM-~mAH!{rii&F=OQ_yrO$;etPx&fW5`xE!|zu%J8|@$Lsd8R{@c+RsJ+
z9SiURJ&0e0>Bem=ZJam{m;=^86lv<%g=9;>nEBI
zV9l%nPSHog*Slz;={}%wlY_12#>8AG~E
zLpQ4Tr9Q-Rudj3xPQ*C)_P_(N!B|(!j7-LM#jME1NL%FXfwu<+V>@C|{0qjyk^UG%
z_-`F?{Wf8|YT500)g1(MFu->Z(8++f=OBQV4R*KZAV3(U9{|gV4c6mDIxo?A7oB&*
zLE5d$lQQURTV7)dDG%T@;wbNB7VzzK-b3fTblwLC(BL*BD^>H6-L||&3P^cA;wbMb
z3wS@B57422!}Zqr&`muqG8Vf;4&8^_HJq8D>Z~i~93G6vyXNhZa4gWfBc;!^gikbf(
DrC9i%

diff --git a/class4gl/__pycache__/model.cpython-36.pyc b/class4gl/__pycache__/model.cpython-36.pyc
deleted file mode 100644
index 22ce5979fd3956be3dbecdc2b853497a0077c69d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 36319
zcmdUYd0<>ub@#mYX47c3B+J@tkK|QeWLa^X#BnS+UPBzmaZF5xgqgAKXf$IDpOK73xqqKa#-?{J2Jjn}zum4o`
zJ@4FoKlhw_&%JX+eSIwT^77MP^$H{KXu$Vp0Uw4bz8*jYlrbJi81gj}mVASWpnOA#
zkbJ|52)^ccbfP9vGZ9O~3>~jFQLC&(oeHYZU4cZs3abdd4JxW?@NHBvRf}(vs#Ep&
zE;|%Z4XW|3fNDHvCYoVuQp;dl23rek&8h{q7T8*qaciKj^>LIhaJ0{KqVt(jIyqJx
zbfTj(lOx5Eu}pUL-9NeIqiFxu*dFK#
zoHJSjNIrBli$H-u`?=xlcV75kng7_oeK`A>Uv9kPrOngB+4mm$-hAu7>>Wn#=H!%P
zW#*>(497U}&VU_3q+jpbHdD;l+r|nLnQht3WNF)(;>eD{LHD!cs%^!>XlXWWXSR)u
zr;EiM+3_keQBaxj{;9K0FjGvI)*`v0$&wg$4d962K=@@}Nrjq#utH@ie%a_^70jdA
z=e%m)i^hNS=un@I+-KThgmjE4DW(Qdx&)_@*9Kr-UbVnSf&l!B!7!ra^{qnc`~!v(Tu<>7cz!nF~Q>ErcecDhw>L5IS$nsghX^mP74GM+Hv@&l~nT
z%E5(DIZ!sr!7iq30kha8rJ!~@XS4@^Tl7C?tU$N9p-c%C@`VbOk-840tQK@^dtf14
z4ys@|*h)O&;VS|Q(Xvq+C`T@mC)_G|3VzW%y->{44W;vxbfXtZ7jBhw1;1#zUMQyP
zhVtrCj+CPut_wA2!*IDq>K9~-qBTQk^Ml6%(+5um%0Y~Ts5_X^g?dy6o)~hGz;O0A
z@BH~!Z#s2MT;BZ3&WY>2F!y_59_S0(91Bilbi9x*?cCv5=~G3=8ZTs>;8Za;CUBxJ
z=2*vbryOg(I1Vr{K``bRM;!CWfMbr%IpHJ4Qra##!6TVT<%CA(RBm+XPz>9-Q>DWA
zX5?4g0uV4_`kxs!f=1X38bPxS*d0b4{`DK1V0IdHW*z+OE`+blc}|bYur0wN!_lyh
zDpO%-au}#!OJKnO2@1=g#XzPja1)dP15AZmK-$U%zw9FAcJG2Y*{d3mUL$D-2cc?W
zii}@oy3H73X4ypJw;Z)ee7r{W8h&8db$Mtg`}41V;nx4`sqtLUCy0Q*xQ9eNq;)tu
zDu3Vf(pfv~M*qK_OW=X*Q|0f6_h;AL@ba}cynIU~*8SN(e;5B^!`TN1>OM12w`RC6
zsQWgYwns|oNxK0V+a#5JmJ=jtb&NTi1ZGJ`iQ8eg7fBuhM$~9Bb_RP5abMI26}p>8
zIJXaWT+jz0_5j@kQ
z|L&G&$?@kk?`7KgS6=g?!F8m|x3k*?Ll(b!8wny6K%i2DfKD}NmE!!X590%T<2PME5nZGD%4_QXJI_aiT^q
z!HJk&e6gIjs^wU!>r|Usfp5KPR~`5^s7}>|Z=>o~EAeeotJG?Im#Mhw!M9oUsx|nw
zsI_VxzO8D#+JNtJwNdrq+om?D&G@cRThvy3+hu{@=7eM^JUYT!@sH!EGM0|G%aFpH
zEeDe5ab=#avEyaTT`U|}hzu+wbi=}O81vo82lJtGfzx5wcVmvdrKGK>oC)>y6B>oD7`xjDh>^9^WkQ`OvUbp-YY?o2J{
zbnEkt`KB^bD_bjcT-OCDff^yzONQtGp?5vve&RYb~u
zSjrx)ls&pTFj=ST?$(@iDmH&(yqGD)XQtwy>)DJQ-&o9KcE!g^rK#esZQE2~qzJa8
zG*+1GFWA{_gJ5rFONDI=oSV${kCi6I`(knT&t8qrI*N56Z5N$TaW|WPRW#(*>E+?29FN`=fV7sPN
z)=r;sf`ok`Cs@pkk2|PGnGA
z4-g2|fkf5?V`ft@9BPO)84X5rxXI`=4SE9BI86#1sPXP6s-9q#&ppM5(A@?nRLRXVkt9K
zbn5hQKuIv>?CY2`*%ZBqhSIj3KHC?yuVVT&XVVkoI!Oxy)d?!(m52Z(;)Kqm$7eDi
zs~F2j4&|CkG|;nZUplTDz<;S^CE3>@mtrSCz-Y$kYQ_=_XhqFtvn|*F_Nc*_Z`C7e
zam32{%(+NWVjoPg9{@dwQDp??+GSLLnw|~_Rki5KK8Jgdxb&QHl=FmR-7!k`Atc~L
zOSy>*+sdiQ%;idwJp>T8$kOUq_RQo5m@|iWz>EePtU7DHMwb|Vm10sH;mXLRpg2`W
zY3BujBo=!>Ls@{P3IbXx1c(I=Fr*@Y7_@*9RRb7RF~AyCtLn0h8B_JDUa(p&d4kn(
z(G#p5%U-i!4XQ=83f8EWt2V)!D!$9q3fH$83u3$Ywx|x(DOjuOQr&_rS1Z*j!P?Yn
zwOX(hSR&(swW}VKs6+JvcB(aiU1}|0w^|3dQmqGEr8WSrRvQ81F9_s&@(c|Fj4@z<
zF%}Fk#)JXJ*f7AD0t_&w0t2iM7!0sYz+ixF1_lFc3osa9TYjwq{Y#T5bU<1Hl
zfNcl1T?GIK1zw>>f$tE%D}i09n9okdJgySgtAW8F?lr(*fb9YX1MFI0Fu-;L+bwBc
zr&z8%f)5EC7Pwd7^#b<^+%GYi?*XwN6nIGBVSzUYe2Tyu1s)N2lfa__Zx(n=;4K1=
zOS-orT^N+@1TYw2PXz`8>^5L9z)k`?De;~r@OFU-fln8Bhrl}pJ_B(&VK7Sb9j%n}
zFy4_$5~;udO96ucmIej`>=ZBFOT+*>NEt4a%8II$RBy&?b{QaA8x^ukyUq8^+RDE0FLqlVgrl1J6Gy5v6h-Qs?o
zTkkzf>isJnQVmJHhuu(n7ljJhtK3l6OQ?N16n0nURCYfY*E=T{KoRrPWw5Ra^{(J(
z8MHviPM`%2mP080A(mcR;#my65ca|@Y7Z{>Y!58v(jFoEy`?|OKU*C}D}<#LZjcrM
z+kiBm;xoTJnj|$x$m7^+81_b#c*D@HDg(&mRmqr8XCbmI2
znloM^qmb^}t%IRC)=2NB^RuTz9APNI>5hQ3=d*OU-GTF=>Gdbs!do!z{{r8wbpc{%
z;iGQLZaE*de*+(m4ig4@QGoqq<6wEi*HN@Cw#I?FH0xT8IA+FYoa`>GlTllUbW2UF^1>FeVwQ1)_-l;3l^
z)pJ`RDX9}WL~DS#F-^~f_Aa_SOQ*J*xNb$A`WCMh(Q?ldKh{at%*#zmaL&3+2r17w
z2un=C%jtY%dO+IvS?Z~7>)$5%doA@F1xo9-6kD2op3$-Kzl%54X_tAsDs({(-)o^_tiKVMpEI{yOwRd@J8-&qZ

Nce z-W?S85Er|blKx3I#vkkX>#nIU6l(q=fiD(#kHD7*e5t_m0$(QZ46yeCg8}wFU@*Ym4-5v_2Y|r<`yenFU>^bo0}Q>DWRLaaF>{lgy*>F41A_tf z5nwRD{t_4ru#W6_@|Z%K%U1%8`U_Z;abSN5L#w}HXP z_d80#_V>blyZ~+=2W5+%ueBc%7q;2QLD53zYl}Y>e8*{ncnwd=WzHF(cnu3N_&!SC^L5j`LNzub%zn83z>TvB_B&wz zp=*E4{r1A09FMq*pfjoW&>w2vw^#4~jlFa@dxiY{+ferPujAjhhbjk8I?NL|D}uxl z2s~UK&fc5<h7O>z;pS`Q1;#j`ZUn*Zq_4?@V|N^}QiC)6!6O z`nuS+-ZJ%N9sSwifAZ@7#Lz+i#yJE_W4R|v0V+U-zV8>N1kN&ig7F6Gd;mAXjHdgkLN^=8Yi z__3l}+u3xHq0Zz~M#VSI-+nCJwPJ+$6U5;sbrmeJY8>;njyvuv)H(*3{V+L#rP;@;rz~cajY;i{v-xH6Y}O(|NOqn=)Tz4X4v2Pw{iY! zr?c_X*{$0#3!c>9lbk9~Dy@smC)tgiy7NrhzN0#;o>XG~sb}5T*YAXL*~x;P0i_J9 z9Ot_e%BCkK((`vBllVx^9+??WmvV*4cyTsY!u%~vj-QQB6^g~&DUie2F-(7kyzHM# zJd1HCT*R}vGuX_v((2570hDA>9#e(fWQjyb=!uZmxF(*LO$4s8JujKp)q8ueBO31nF7_(>!b9TzkaeQ+=!*1DQ z1v@XeJ%$e#82>?ilec7E?IQ7ayU4RgHS+Ac`uCoAoP@$%oSwKJ8|0NaBy-i4dBvj& zdoe&r5($}Y)sQ-&_FoS7{RqeVXN2rXAsyqiV`S~y8Rh8!PViJ=eBKG760?qZ>wpv6 zck|##VPb0Vnya1gE!ziRMXyiL8r_S&pCcB6M|Q#qp4cvM5RguL#Yx~Gpg9V?g0#XZ z()iR^8u*QKIJ}w2O*+Aek-{J#28R=#oPhFg9nFYF^T;ys=# z**$Cos_@uD1VW?-nOP8{{i*0^oJh@%!epc~kj=~~**TB8ND=cwkBpei3r+Fa_GnfP znDDIjj_5B+FiRB+$2u_x^^0kO3_zH=M6}EhoB^njc*Ple)QJgtM)1(Ie5_+hoVJ_* z2u=?IhS5p<29F;mRP%t=_y`(8$8{pQm}eZfAZKuhqP;RQa2!)jJ7KBAw2XRm$LzFY z%}!?sZnC+ zjZB5Ek;pSrh8Y?2DB2m<8ApFe@r*t&lJlcIZ0g8JVSHv{5}g5MEl@05lwm@kK6Xaf zLnkapIYs**gU4>;xsEJL`hYQn`8-*2tYXQQgC~1GD|>)pqZu1qOS+V?Z=f}to|>X+ zj8&YOu&)po#LU=JdTp0^ND|b8vvQnwL@Y5d#=>5M~2_KJ}mpj{)5ag$?lbcCQ=Rj{21j=gw*hEeIB zr-f8$WX5)CgufN9hP_3o7jjwDgX}PSfhUomUKmRd5OO0$ryePhNabV!MGK@Svl(X@ zT{*SVBblTuZcY=O#=+Nn?oK0JWc_o4)Y4&OW@2W1rr1|2jICoKi9}c!?Xg5?Dp?p; z{7hzML1EH%qDJO~IJ+pl^yGysf`cA|~X`;2AMuXXo6JaROAx13BJerMF!FH*8v%vM? 
zrnYvRAhQNMNp3<~O~xv-$>Q62isS}C*t})2@??b3YpAU{$#HR`RNePsB8T^~`Er6}3 zsRjy-ADRUUu*-RnQ24&0hQ@Nyf!YYH4W>E2qHJ{F0D@X`Dk_S&32|?sJFwRFKp8^l zf>ml4O(pXj+9e6SJf?8T81TC=4q~wFVs6PhU^h=gKPuS?tP}M(kFU6*Jk*I$f>sscJq-8NzIY8@i^A_0@QeHW8ePB6^QOHP zs2yv zh;&-AnI`gsuM{ZD{AxEk84V~tl`;rl%?+C*=9c)cjlTKr3&Qy`?9v%l6Y+T-lx?jO z^UYZZSXpj3O=jjwXoSpE-x|$KVF?^{u+m}Kwfis^fm6}SUI)S7ehD1x^~CNWwvpIN ziS-eC8L`d8{+QS%V#0HOnO5QAWD8GN@E*E-lR(%g3!H%6%NVbr+Ztft;Y(;1`zv&i z-I2CRr53M}%d76bwbFhqBlaMrzK~uhWAiyR2SEai9&{?P`{eR&Qd_Q<2{8F=k)>NI zw;-j5L63?oYXF22Dr7?9qF8o_`=D6|5(xz}5Wa3G|8Us>`G!_>yU_sEvj+MZ(J;y2 zMv_EmObgV66Cp_~NoqY6G*64CLMFKxL?dMKxu_ooxZWMZo)mD|7sJ5El!ZDpiH>&1 z87EFwp^rlsm@v+Tp{fRqCPKAp8v1(f+680>Iy-PD(Tc&;N~k)Xmcpl=G3W}@AbVt< z50rxQMhVk4Pptl&?4TuuJSq$7ZC`*ZREm==D!5=wLYWgP;Nb-*$wG;<45iV?X`}cU zzR}a6!cRf+paKO_aZILq3ssb%W?a)65K=&LySRKUqL@kIwYYTJT~zTTSP2 zG34Jhe`F}T`{-9*cE^AJZ0K=rD`r={{l|y6u@1{4nIc_;vAhqg&$hK(8YrVu)bVfPKq-dkgPFw)VXQ zA0hYv0rl4G4-vedU^@XfIP6IRnW*m|#=T7YodhBtzMGiHK<_2?5CCo_kZp8AQY>os z3F8iRG_ax+P(vs9Svs>wR<>cQx3~$B17oaatC`22C9>|WrDdOqZqRSI7dU6`aKwpRN|G< z?uLt9B3$ic9mXJa+6Ze)uqmi^m+GN-%=8*%py#Bc>bu zV6O!5*Sw;WQ6L~7V%q=vsi_*(q zM^Vr=fudb9$f)GO@<|VaVQisKNSX4r`0c_)vlWE6aw4r5c4L9*TNFz|d$BKwC8I=` zxx1vdqo>2gS0i+dq#!)IKwO&;oFye)q`QT&6n3d!nD@1yUhXoI zEMp!tOU5S0#&W0z^OhEIGLCwDE|+jCP|oG7YqA|=FEV~BB?QU~`>Kk4wbcu8m>Tv`vG7U)Fb zRGMoSG>amt7Wc~Fs_Jl(ZQ6H0Zz_y315@=2(bC3zUuhHkH-k}c0P{+Gi!iT^nt7dk zK^Ye^B*qU)>v;tx--7(YqS*u;*2|{R;`)^d%|fvuM*NwW@L zj+Q|saD@Ul&J-|N&9}id(xDfg|82GNRC&+cc+Bub;Dfc3j;ko^!zml!*RZ{9A!^0>a9TjN`wa$Mz~$@;~cnn zxN8tjE@>pMMVQ^t`FlCO*U1RmBk6{v94iGM(sf|%nRb3ya@fllFz^M$Z9PC89w|sT zFZ}fqejohz`$GB0Ss=fcHBBBsn1h!LvrobtLYTvt9YN?jz90AvFfsI73r14V{!-Ni z*i+!%DzofH%v#PQXuslgvCTWfuAD4*x&;-6A@ImhNx~jhlL?I;p#1XZ0O*xjcxPyxaVR$ajLy%Aa=|^EXvPJOvQ0To5leOj-+`WXUE5L z6FB$1^dE&4ITbF&v$%=@31e3rr@MAKH_7_|^i#QF8s|ky&hl`0UBum%nNxipi`zec zuXhUF6F*kKc3vsRgYl&$L$QR>zucMoqQiFVY2wK66i(JBa2ix7utvH*A_uv8@|Uh! 
zeALF-(R`31(|o@cnr2RU#on^0n%D&$=fR^_*W4B?b^4Sh{)u7tO5it!`i^KJipwZ2 zxNtX{C$;wX36cbl68wOmhu{?ivO>R-*oz2wH{AXUf=?3sl0f)3Vc3MbdllVeu@|0B zxIkIm-$0KK61?`PdCBeh={T6|UeZqta>+xZ_eVgC`f(HriC-@G5L-6+mFDCd2!50WVOYnJue<1i0 z!9Nmwj^MilUnF>hK-dvsR)xL!9^Cq3!m%bIv&qs}aqJ5Q`e1Vm^p zWD@n24SumUR5$s>?r!?)pyNs3u(x-y^1}EC5blSvah}}I910S^KHE-XXQ|3{YpjiLG&V97W59$B`<9*N{PHS)<&&E$Jl0AStr13etOrqUP%6#A;BGl^UUfn( zWP3>Ni|0eQ*$qw>ce+7oWz~)1UNm^31xk&v1+F`_3tEoiI$H0pI#>;0qf3M90%^>3 zX>?t9M_q?$a$PLBPQ_TuTo>NJ)?sidPpEka_p`MNI1+IQml=bm)pe;=2Fk=$5v#oY zQ~C?%wj5m2ZlUt@ZF56M%B&Gxz$;0L(K2gCmv+~srYxKvTySSV!Z1Xq=nxzBcgeHo zJ7x3lyK{qT19apzEs(G2D%ZBmiOuV3(0%k&6X5x972<$9gkd29OE zNzTr$d>?L7iSGVFQyKq+8$_R>j7_C&;P#rp5icWl4lamcEMTJ)vl`s-0(LkSmaFE4 zwz&t&%S+o)JG}CNF%T>U%gf8Ha!=zOYcJy7US3hg z75s9089XyaUz_klcOd;M%FDoU!xg%wxXXdE?m!wBq|B?%_n0WFZON>}E0maak_u|2KYz<(3JTDb8QVnayYX~14syUUet z%U@G&MC&a>DpJEy$dmb9=K}d_aht3~b>4&d%Z+Gklw(}t+vV$N+H_xI>wgrtC>FSF z&bit_-8JAV%Pv*8L_Ej`93u$HS-TtNI*Eta!ddQdefZs@y4pE9yfR=`V6;`sfGu`6 ztuER7=*q_#`%-j3BsNV2BKE2ZodV8SFc&Y)kfgc?cO^Q@o%vzhiKxTwS=c_#)>qx` zR{l`FAH5Dv-0gRBp>qzp#+U~mlzA|uDV|y>_a8cCE_rUNJU3F){E%7=UxdJ&2;}B> zpf--=vxh|=fs^o;b554(Ig^+xC_kk&RDV`5cKTYP_CKY#Qe2s z4x3-6T3GhvK7`+Z+bj>vJ)*XtFCJc~M~V0Ii2(O*&=%M!@yP0g@vmq7=88)|ZI$~z zo8@kh8fadTG@x5;bMFQ*C5)Yg?sB)(X+5c-TT{VtBS^1J z(mVOJawCXq(*SP-ACgh2K%sl8a$rxHC9^0)ZI9xheC z^d{;UG4O4H$HBO1MPxTvU|9Ha$55^gc4|l;v_NO9PAk^o7CNuFUyu0n>59m8!Y9~x zn#I()rG$b*naouGWkeFs>!t#ZGR{(tag*2;pZ{unB9q23m0ZLw#v#2y9dZvE@|r_b0XG)=`b7%| z4H`_(<%$&1I_&9Q-mR>K>>OKc)94kXO!0lfDD z5QI!QfS20jW6sbOz&t}l!2E!{6~(u&VIAb{h4fSs3XrN|DW#!;)dWq-Bz$N|7VKQs z)jdmMOT!7x%O{f4+OBvIpj4Pjv{h9W>7B8YvX$fN8z$LYPIz9viO968LAMRXNW^qz zvrs9r`&qaMD=i>O>}t4~=)YnZNWk>z+f;RL%y6XHYKC@9<)0O#mI{t>To)7CFQH9l zuoapv!LYF&2R+@8V%OHfgrks)egC3O;Q?r{@W^x*UhQaV#xarT{y_GP1rZO&L_z4( zSaF1aENwM}Vvvg0TMT957zxKmX0L@qCUX!jYwOxA>g2c@IYunvYUCgmv~M6+qdYOl z+k#OWrwy7oF{5`yLv7Sps_WALU7z_bEmdFiOu8alhtOPXMv<-aXgu#3JgASD!tGi5 zIXoqAr}5+fK2Ub4_;b%!o@SzZSsu&+hKF`88Ul|w@z&px5R{f9k{jNGqvVA&@vxRJ 
z4Vk#gk(0g^oIklq0yv6D~D*9pf#DPx(IG#7Bf3eE`i z1Gfz+3_kLB4r)i)zJ+_#^aWQU=Nxo{t?JswC#U3$!cvXy=?%{}R={*rT&Th#XCqPy zaP_cv3g_G<96;02>?_)Xw>4~ayt7(wSM|Qka2A~s2t0JgMV{>;6;*C%TD!NXI=x#| zAM@^K;qX^qK>MPXGTwKqTuIZ&WVs!gAV!r@zwImW&QKb`M zD2|S2(h2hfjlFcEO@+%q) z=RrS89RKdw9na{{V;Jjv>J6Bj-cdHgs54-fQHw90g5s$d!z61kF7^3;EypIGmDB$C zLK^|&tsW1|VNR8?TLL$ZSDf+d-G(GbRSnN>@s%AY8H0^-u7}+w8J%*DixI8QizzkB zlXHC9if4)_15a^V@tQ)sv>aHA3hVRT5>L}GwgEX#+s^@mx)SXj5|85*vPDY(EzqsQ zkuw%Gs!n;Gkmp@1_=+OFojg58eY{*Lnf>0eaTD0ftDSiv4N7;Cn~S$E&QD4jb^8di zG}|8ah$HkWNE%|(C+ebbRdtj0O7ycLP@0S*0Tq?B<=Z(poXm^Y} zgyQ*bWE3ioX#0(&KDcnyZIs@0#}`XW*%BwcjL|7Q*CFSc9BCH`i^r+-&aI52GU&Y+ zeSTYY!-*$S{r?*S-rTLUGEX#xHnT^$A}s3Bitk1Gho_z>&qZ3>_1U0U*3Gg$tx-68 zWBB|URqH)3i`d*NVcIx@SF&Cux2gc7#0Kx=4cZeVarkpr1fINARDCo_2mN0{_Pw_W zFaK(7W)Xd?I_0Z#+;0o9|#en9Z21ag>jAF;O( zycHl3#_OYad(<}RA|f-H9{bG%ZvyCxXpN!ZOlEY{{y1C`=E$_&OeBa0geL@!EnRx3 zbU4GM=P&{W>NZ)>t0U8ALY>-JAEfqN8=iODV~mR$cWdyNam+Yk91pjdaU7uZ;?RW3 z+<-U)!7+%@X{|NpS5%j)MLPK-EHnSu#+4f))Z1-xrG zS1w@+mBVD4+$VKkTjmrqaL|G2C1lin78ipL3lUvO>cxh~cxfX$=7yBVci|UY6p|0{ zx*@e<5mYB4~ey-42f^R{l@?gO_M&AS?sdAlKX-jY6^ zzr9HMaP`$qY?r88H6+t_Lwa??QA~|IwHrg*M#?z0M;$|KX|yt*+FgaGc2{9zE`$jj z^(L@)_F6qL$4q`QPwj^6di1$gbCP)=qo0$(y?7z_ZDhPIoj`4jxPqXapo5^3fQ*k% zeB$K$G{^E;9 z>)B%)p8X+zL#Hx-Yw}Vx_yT|#chk~8@t{$2E4sIv8XqgvXKP5KgR3n zLIgo*K~AVI8iK)d^q#AbA5%1y%~BYWFl7=zdBhbQu5f*nM8IPpH_;K}{|!D zgrF5Od#Rt`zL>c<%A#q;5=w_}0fO!`=XxPZLNQ#SYtNZD&S(O0GD}b=Ai-KNr(3Z; zm>^cxQQ4xmpN5Edj-MNc_I}pG9hhW)aMdr37eQ_4AUVe5lB(EAgtkC+i`)qdfM7#^ z!j6@~kqT*J@CwCF0xcgaLWl2$Y>Z0NiVZ5Hn;-lEqN*A2tf*8h%sVmbJzp)~&j<;X3#B8h_*9O>$#CsvGCP_R6g7Ml80U zbYdCZiTa^sa5ta{jFmAN7d!6?#O01a6WI%q%L7{b9{zYzT26Z~XG)x}V6$)rB$9@T za2}q+tdjjI>=@w8h+%3j76$DY%h!s$0O~7g>i8H;K0nH^_4JptT^hnQP=O`Emy)TD z-3V!|gYy|{B!2Ip7I`E0psG7(%I=qhL(gI_0rMZ>kWT~r>WFh^1MH3LpNWKyl~!_K z&GeNp>b-KW3%Ma@mP_10XYeW@*(0P4Hk3Hi7L6S4O>jp7lBDE}Rs(Yg)`}wsF5m@3 zTc@3=N?UahTiTw!_ULqLd#SeSqW_|{sx5V^Mo84uUy+k#sR#Rdwa8|;*d`h|J$N?U zgbqq~xo)9;auWVMs!23)cR)7+Hk7?VuD|uD25~>ei(hnCU=`ulhBHX+1=3~t3cT9F 
z9~W7`Z=bNI%MGB{*d5}$f!@CBLK|AMzFg~uvY_9i+orY)asd62Z=>ogW2bYWNj0E0 z>=W*BtZ_#hOSe|q#cS6Z^s21#NV}Q(1zd?F&*n1pm$Zv!l#yynne|x56yWFchX#a> z8xbtzlv^cDH?FKl@FVe)F$8~IeyPK9JUq`nkWnMGZUbYj_p6XI^{SaJ zv^$UkjBi{c6!*}mQ!X`>F^4fl#$LmPcBI}`hEA018*D@f?DIeyqNWU4TuRng?w}_A z0>|Vgn9WEH+Sf2!#hv>V{fLL%Tg+|t4_<6k*f~PVEYpA<<3ifIprfL9K)9wtTT74c z0MXs6}{D7UK#I= z=O%Gdl2etV$fc_+R%`t%+1FeOTX|_L(Ch6b1co2y66O+T>^1lW6L>u2E%Y73?mJ(r zhGogh9>x#fy!TMO`&&4l{)QJ2Z+yw`QhX2(i=BA%ZEtw+zW8vq`}5QI|B0b0@|)-X z;yYKVHxKviu)9Ft@lXJsZx8aBXxm1%%$hNnB}OGi``HBN z2!v0Ny=7q*%5+;G_yoao2%bwYLvWU0p5U1T!gmN)vV(5+S~IfVWLX@xX**97~vck;HM8wofOl zV|$M$^z$eqaL+o%S$yy~Lwb86w4bj#BtqE{B$>bq2`B=Xr`bgCcn0s}M_5V$Tq*Hn zPPW*{rS&%0cyorgzQbmdwJDgwSr{)+wPHJ@m)E7Nl(m)@6=C?V3AfGH%E?%pwaV(n z$=4BVlC+sSaZ^2NwIc2on0CW^`5QS^R%&j5un@9Re*%1)$dQyE^&K1VxbA+v+`8d@ zy>QbZ=upa<3<}&NH&Y^$ZDperl|6dgePBGH5E0qK$89_8%f(96B;ha`S zV9Qhu+@TvTh={cX*7BI)6ai|>H?ObG9F0%xCI zB#lbTT{`w=DaG&Zu}i1GHFUGAv0H(m&t%N^1EDeGog`L@Bnw1<4@CI{fp-lW?G zJ^rw!>aiBbce^(xf%``;XRNe_$05MneNSBm% zz~xGqoiJC4|J8E3ehv6;gDpYn8=JA@*|vio7@%ak@D)k{4hnjntP^Jt-nefNzM40P zm>!-|07QDAK;V=mr6^pei+L4{#zif22j)G~9~xn6taRG5#- z5AAusuUGxWJ@1$795Y|E3s`pNdoJLQ+()>oED{$g-_}IsbOcA~l@k<(2~>Y+3uhtw zHJ@=i*Y;fp87qCnmgyY)@1VMgEE&awsDF$az|y!9-bn8?T>3 ziHksZh3XavRa)#C0%0-ghzVy>PppC97febH-}{LPXEIJq&S4rEM3@a>hIqnZx6y4m z!3u&Yx-}DPC1@e&Bxom)9hMGazhSulA`rIdKZ(f>rf3cPUs{_O4vQFc6bXJ!H*Qwi z%V@oq*hc`oz3T*Ap{2y<$kYk@4%&hz$0z0zQ9AJqBw_A5gvXDtowJW-FcF;1W~P^L zBl7aY2)U6HNIYP!g-#8A^8^naG#LXh)S!`n;7@Qb#SC&I+iVU$Y`!`i13z-u*kfFe zqnlQPw^{ZZ33EOE1&uhkkUhqLg3Eanjz>w(LK5TO<*dcd4M$HJb%E zIXbGYRj5dYI`;6juNxBtRaXS2@^1o zCP3K~5`|fAd05!YG{i>2XK|ZEY}_`ma4@fJBxV*>C&Z5=%;IK=*tlp~cmP7%$edYV z(X@@^-oo_{ZR3`v#Z3~4M>20wcoZ8+odsr0+sKAlxa_QL6q_x)pQCLQfvtSAYvVz; zMNw5!p-^ve>qBf5{jB_Q*VZOBFqz^<(a(YasBL7{EU;|aM%8=^@7!pcuxLVLalu(O zjCd$-;b>m_bF0Dv)23~!T?W&_La$xo6_>c{(o=Eiab0>VF1@bH8cB7HYa~#e1Cy;gK1H}G> z;DZFR^nQpK7a;pD2|h|7OSG)XvTVyj`W$-5_JHgb{57o)5PXI}1olr66Ncck#2zI` z5`2N+2Lyjga38^22;NHYCW1E;$eJvRrYy`t*ky;~&*=Mh0%0b8NKCkl<-|mD;t66u 
zC-_%_iuRmlL}cBURb19{S?FcSm(^Ytby?44jejo^z%P69Rg`x#t;&-b_Iv1Ks(E?B zsL#>|l*br`cZruSDLdizNiHd`MhIRe?QU$q6LS~+2Sr!+pT@BC(C#VXil6IK328K7 z>Gt_}DTvzaC43l$>A3mL>)-C{mlvHXYZ=N-hDER3zXDbcq-d~~rLaDyEEtISDXd;8 ztXV0nR4J@aDXdB6H+(AfEuJg15%jz zDa`lOF&NDB)Nzf(V<}}PW%mmCNOaW#JW*WwZDZ-RsW?;_1%)-pUoQ0W(c_vKRzC;EO@eO`JWTLy0*Bxq2);}3j|7hpEM`WRs%W(^NGrj`D4Ix6Kc??b3I3VjUkIKc zSVHQ4Mz?<_5J5yF9ubAAH1*f?6$wSekKfSxUj+Y6@LPham}0T>gldYUB@&MaUZSro z$Ie2dMfwuvTcoOAFqJ1uXJ5*|FCh@I@x{bm2yls)iH^f-a|Gm+72&z=1^*zQ;Gc$z zF~93#e5uVJ&r8YsR&!jM(^4OQsu#xgeCe~;7borFw%FBQrpifn^`|4vqeuHX_=~EO z_>oTh=4&jOglIN1PP{Ie78kLdRVHxlb& zkS2mg0+G?>^qDJ$me%E=LCR1zS3R43_A$~cEqC7>XZ6^hF=Y>3!@GQV zw+~{t;SNrg5eE2ROeytDR?ZdD0;d^}ekPjdB;d_1f z^*(%`58v;@5BTtdKKzgmKkUPA@ZnGK;Wzs5BR>2lj{!L<+l{zFm$7jjr$q8HggL93 zhu0I6m0VVBS*G7dH{mLTDG**l_<`SF^!Wsns>(W_qV>}RpCNdVpvpHqN^6qfe_G(V zJfmD0`YzT+*<|WeGTAk z0#^uZ7uX>X?@+)OclH3g1+EmhO5kdNae+Modj+l$xK`jgf$Ifs5V%obpTJE5@kR^M z-y#rqKY;fO+$L~9;C6w70--5_FgpZZDR8I2s{~#x@EU=;1YRp}x4`QJ;xaHxL`*2#c4Ds}5P9iNVwaSX67>BLfsnzfKvSh%pQP{o1P>4hqg^Fimlt=0HUB=N zU7iRDAOB;9TOzIejBY}Xeo5@51m_8=(#jtwqloH>2r43vERj(}JP|o)iHt(M8vQ2r zdr*u^2pc2-o1&P;Zv+yk;RC30B*0?p diff --git a/dist/class4gl-0.1dev.tar.gz b/dist/class4gl-0.1dev.tar.gz deleted file mode 100644 index 8a1ace0a61ad7780b2a944794b28c087b83b9899..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 74685 zcmV(basceTe{r-Ij+9T1HXi5Hsdb&*bg%q+GN_NnBH+Oj44abGPb#oBLhvr+VED zfCNC0qMXd0bF0?1JF!8a(P%UpjYgw;5PC_nKMp%xw>R{cyWjl@KizJ(|NQyEcmE6j z_>cedzvM_xt?D|Cjs>jQLOebTN15kKcIyyN8GSrSren?`QMhd%h3ze{k44_>Oh| zJ^z3D&nS+jY#1czA`QaC9YnFug6TYpQ%0=NXwW~c1`|IsZ|Wa6CEGOH*@vYcCqXnj zZgNgizRbPBoj3NA(C>g$X1iwBJ9X1^|_Pt@tX*Zn4cmMwRiuHf-?sezwho67>SFZoVZr_mq z2fgRd|GobI6+iF&)Ej!K*ZG|<-y_y@yNwU9$d8!hjYjs3GfCs)VmkHW$0PRJhj$-- z`ShXjCYt)4Ijr%#=wy*jqS*Yq<4?UH%uCPt(m!I_(r>)V*ZL9L$`O$Q;dJ6pL1x8| z?A)6TJINv*c>|xFq|+#wPkfMJAEO{-A4Bh!O{1Y7vexPO$;Xcl`=9LM5fGV4ag~A0 zez}?i34{No6QB7HDWC_{STy2Tury+~KJ#uvU;MgA{2^Nf>4c5L=++Av{6CDKNfIq) 
z!(cW}+#Go0BJ|Qx6i;&!{P$Y_Aez6q4)_H9@9^N?`rm)q{NLOJvmm{>`BrM+I{Dw< zd!EmKe-GyW@cHxZzvchGO#gGG%15%YEqS6H%u=5u>&DwiW@J8`7d1DH5xZJ zLYCg#uw#bmpWAI>u}31Gm%j05!#KEOpP~i8OxO<-Ua*3{|Hl|N1<*aGKk{~ANR6&P z|67^+SFe9i(EoeL|NVc<|NpA_Cn@j^Z2*<>f3MerS*Q6wJlN|#Kg9F@;oknW3U`p6Q0 zfNFZ1eFF7=2I{UqW53M;+7eP;{nAf-FCI)7DEDdX+dAs;j{X+K!+`xBgkIlK^!5r8i@5d_P`!UOeE$oLZ zNRd#xQH(oqFnWacPz1*ZKc;gCY?;#pJHS6)fB22P_GdtzkX>lm0dw~A z+p7;}fY8ssT(T2(adLU}_Vl;&lS_8-+vUYCAJ1IIKKh6h@?`~SM>IJ=x*?pH(NxAq z`*)bm1W*fyY~n3_nAU+0%s*rv8vsYGpEH2rp?!-VA&Gc^(%TW6MQIyWmk+8^n$C}Q zcUP;GJDx4vC?4+$lUcI+BjC|!2=$G61Rfa7Wyx$Y-{CMZDu3Y3SVCl!1 zHw^}f1o8?G)hTef_?!3AXpCcT33}g4_eHNU^heB3ytH-Y9MSPP_R~c?V_o;K*Yo$- zHVn!2ALgyzbGLu^avus>SFFSO&-dKkp~H6hpL+-Hi$lk05WIVW*Pa(3_WRIW^ad^G z4PKw))w&FB+w4mc!hgC`{LjQW;zxS;0{8+VqcHN)!+osk&=;t#e}Mw=r8_mgbf@ym z#QZYhU$*8k=;DM3;(=&n-+~dAvmap`VqExhj{iAkJvT4uQc0he_+jE}wU1dxuM7pE zZdLEA35vj$cbl}%f!ya^hcM}Odnz(r{_in6&&oy*vJA2B5W#i<5tv4x0Ps?amtaru zd7cClQF89E9q~614F9ItHd;+ZtEtgyiuC5KwzvDrh*1wn2}fv1BiPQdJ%`hZ7y*DW zzm6V2Xw%mgYh^>>0BekgDw>1hg*dW5vVN+6)>1(2CnJOC2WjD0yXwG@H~>k78bhLr z=8$;dHyx7NPc|gc4<8cohM0>EeRK3h!sMI-NAB{e%qj@ZKW~p7cIivr7M2|`J@R|c z@AVaS$DiBS_wE|1m_c?SslrhOIVzd}B>ap0aLl?2lm~%YtLFl8y+ar1POY}LTV1z* zkfVYCu_g$<{ocXL{^0=(q>ruoE|-&_eWl*4^?;NTBt@`0?189+Gs^$uG}~n_+#LLt zipdW2atQTt?1T;5hQ@t&ch7bB?{Jb(aN8o}5I2qqb}azGe|7-@!qu?|B{{4PK}Q^k zOdWRM?snb111H}DI{3Gw4+;}rvDNDmV$cOvP-g>yD-$yL))O+}vbHejhiz@d7WzJS zUv5a>341buCr>0W??B*{m2VRt9XLhO6E+6@Ck@u%Hg!61a=dUpr2Vm#_~EF{ze3R* z^uJ^FA)5K^p?|v=--MtIhR0X{vy_eM*|TSOb;%ZU(nvr*1vsF1qZXBb8a0dt2^?<>J*To(lTdA{LV_u%qEZZ69SuR3 z77Ab&a6iY|Ev2QS4-d%`!13z~>8+q>k4AnBXDd(uL7|$#Z`omi-+o}DhKiL+(^9PR zix-xpk9g9-`dSqL)r7-O(nJkk0qWk|41)pMknjA*q@~VJ4t)jad3SJX0j8h<4Cqb; z(S`#-$h(6xR-B+F9ASMtISoOlh4X_2pf^hK^cN44!bhSo0lXRDfR0scOOPXV8nKD} z@gnqMJmz^*pX`@}mCHW)?9TV+REvle47|`nCk_)Ye&Fl_`gM*D4vbDj(BUweHB)|$ z06GjN9-OB6J=81>;NV<@$ftJz|8*;d45J;)-60@~Lu#q>gp=n~jX$Epp*ZCySd8aK zvJCqPKia|MEZ}sH$A&nT8!Id4M~0pVh*NwJ6tJc} 
zb_;EhC~&%53_zIplGi1-@}afyIB6dkoji&40MsD#fvLQ)2OR8i$Z0SopvY;k!U=gB zb{`{#myK3HVXG84?R(a-Ahc+a zS_rxYLKoCe5Q`y^}`$PAcQzT9Wb&SJ73j4r3!NY++YbnFI zZo-s0dfkZD244^GJ1mHn0>d$RUL_9lX#QuWV;8t3QkS?6<1;rkqWSz$!_=te&=9kQ z8ZTyqrPAA$dLH2l=@RA_U8ItRArFGIKl)6&!So8nH&D z91U)X6m@@4r$Z%-F}X(X`T40xORS3E%mDzta3lalcILy{O1$_nA3h^f_|%_9@uN-( z{~<_etSvs#8G(sL1q$QIVmvbebNQ;RQf)F&GXe3P8q;vT%tnSbiUa@9qW?^Mj~49G zn@4f*g>Q1#e`9xJP&RaUgSi5)*X=}^mn1W%~-M2#e9R~Q$gA>{;BOJT#7?8#)B)q7wK|;lWEp?hduY84%}&kQo0)QZ4`dVARoAg z&;8De0_2~dQZ*f6ZdWSLblvK?UC~XWeRe-~RsP#Md|~Ck2v6`iLZ1LoU(t59*h%Mg zPLD(=UujAgWO=QN5SyoubJ%Qzf&6r@BYxTM=$M`U(npcffB6D1W7mM$K^s5ryL;X}$qTkw6cTu5t-Kn$d)=3RQjb8-ldq>|aV|&M zToOl^ggwy+`=a+m!mjPtMq;h+y`Cg7{PJh_JyHc6yY9L#UQ*xii?Z$z@M6kOAnbG$ zdxIitdP4(J_ql-7J5WY3M%xG%2>u8*?EsA%y6E8W?=HN;QuD(3#Otbk^r#d3WncCo zv=lh_t>A2cZa2tr0XH(@GX7fu?z;yAcaA#1RYZjU00MiEnz&z_T-wALsFW(e{(;}w zYcuf+2RK-y!GNs4EvSwZcJ$RR#hDI!I*9r??EQWBugqq>4Sf!KXK{JbI9%U)L_P=C*YEzn~gh;12mQfP8Y ztAut$>xOrx$&7`{evmj*m{dy`XD}%RgexQ=E*Ku z-AqH-n`+g}%YiwA2Y=WJy+=P5=R6Lq04jkqLcH<|pkz5{bNCzR+0iPUu#=z8%OH({ zhf!)lN|tZ2pMwW};}rXTCyk@Id_hzKaTQ2U5FMf{-$IE#o<#|W4Mv4!gDjy}lF>_A zsyW3heRiJhf$HNi z2^6+O3s7FF0mW-hy80XpqQ#8P!ysJ8ekTF&pmD{)aOiVK+f&8J-13G?ArUz(ZxfKX zAZHvl3jAmiU7mE_ez-cjeEXgiah_Xoo)>Y(dyv!*LwW?Km>I}( zaTKJf!1#jSfF&TcGZYGs;+zas-JGJ@l5%;WV?9w1;R@apRkleD1Es@+Z-&=XLr7jE zc0fw`R^h^kU0-+xoMdi?qm*xD4-4tuv(_zC)z20>%MO9h(b<$pj8aTIK_v5d5zS$+ z{LO}@Qoh9|Xzqz-DJo|QOAgVcdrW&89>-CzT^Dzql}b?bD6rWxRIq3yraSYcA4UUy zhO_Kut5FTO>i#0OhT($LZy^Gg+A!=L`ASEfo@>7w#3OPrLyK`K-VJr;mBWBxFP@;s zO>st)@|=^^@y#Q=aKuveq@kW1iobAH1(}ihV$J^yyy#oMC)`xlFb&+p95bDT^70&E z6}5k$*9HiXN%43?ZtA?f22&~D;%@X_{=nBxfY6&KJ|kQ1^*hjv3TKnyQX;B#rIdqK zlnGhjbTU^0tRk!2W>ciYQp$B!;i@$5C(9sC7l1kkUQjekXuMKsECtA2F!O;*(y%%T z7Y{Ua8*fv4zj_g=2(N){E`54PBW6}o(R)SYr_sFZV(sE)vVJm5SUH2tO$NDo3Ow&Ni9&qMv}U1Yu zk90F{R^VhTd#c|L9;b82^ZB%JU-RY(*vEe8ElK9(b8X7};g=eKuVN1`L~v&k&?mv* zZicp!0>E$(uAjp5V1)M@WYEek0l2R!>dFcu{w|h+GPnGS1UChf9r+?#)A!S~8bt|N z)`Pz+VqT?;tm?onZob-%zJ3#4`cK$|YxGf}{Z;$|EQ>dPMfLQ)3yu%@7RV% 
z+qDppB0?3Zp@&pa!%B=H!XI9(k&q!i^{oOvSi~R&MHi~0L8YLA9XfZ6@&!}wGh&e1>sTRrEmf)Zg}c}lrm{?SDa05wUMH`Ik) zUY^6lGDW-yMs5>ixqE>w$Yi-8$mED^5};(%RK88qXp>*~ zl}mtZb}WETvLmP6OqKyg7pkc=q}GFdSr@i?JyQ>~zlpT12lD+oY`ObHCRdBFly&wA z4{gS&u|yeItWyTWF%m?8UviinE=rVug$1+#rz{N{NLmDol6`+svhQn{s;eZit&l~@ zR===s_3CN5|GC6Gw=5y@@*5C&SwiIH*AaPHLgZx$k(V10d09f_rJcx2JCTvfTkNN^ z*Ka?tPKUkugb_ZTcMrRV3eSJ(3?h0bg4mFmP8z4@@QXM-}CE6fa?MHZj#jjMzpdSZ#5hNcKMLhb&`C*s7D1ch|We|U7m663^ zMQ2v-W^SXb_^`#F;PJDdr@wXB_^v{{eH&nO2t$D8$V|$Rnc$^_7mjKmv-MTz*r#j0 z$#jWZFgabVU^WOB^z!J<$?NmB0_p0_)k*u)$BS2O_VL53-`bx(y!xG8T%LVouIoA_ zGd3@qvAMc-GiDi89Hj3MHA={}-2BJraBx!7nKb=(q4u0EgmWwXTB~-lF8X`x*YnzCZ4nr9CX|TLuRD z0m#nZvD-6zt=4)*{|L8m$pzW-w87(%kWqEwFQ{^i%|KMk(5L=ASyT8s-NZ-JQ5|?F zl3?}3wJP(KE}nIoQkakL`@%yF<>vTa_{XQ&lT-#X2Euyj1tHo`8~R(dC>%=GxJZm| zGk;~U&HP%OGX0rjVa{6|y7I`{VgJEI>G#LH1IIGEJoLn~PyG4H5xWrA2mcHuen8WvyU5F#rEn^wGvQCiK|?W2%gF~|T;dd_~tp&&$VpgVkD z0iu}VI?3net!MTQ1xh%u9qinER8#g-lzJBl6C^SpZcO|{xjERy07J8JLXVRvOcUKS z&7|YvDvIy8lopYw@X2(;{xEnkAEE+)wW2r}gF2u*XeoPRzNIk8b@QGc+NnJA>&Z_a zcgfGa(I^*Af0REq(?!;$Dzd~&g6&q%?L4^m8014d0@%hsi^Ggz4F|C=LW|}50{01ApLfNx z8xz~&Q%nn~NRDKrl72ayMA0mKWQ~nBPkh6cv*KFW{*ck5^&kg%k>DU7+{bQ_Cl@c~?@c+e+8~kIK#ewF_1O}O z!}e}jr8$5TdPuRjTk;I~3ED}zit2`jLOKk6Tq7x)-&QN{-_%a>FW(>M{W>L&p^X=| znZ{z%S87g&P-`+goE`Q|nC3y1sXM3jG@iN~S5%Dn1bl`G_#;=3YJyX>D5eaSD^2fe z>rB?^Epii6=Ft|>Km_7Zj*cS4ifZeyw3~-M?j*oHBCw~W*{z?he4zRl)FZEh&v+KF zfD)f~K+XUe$H9_Ml#qX~LBK;>8}6!@#A^JAHb$pEpl{3EB^SOk6L%(dl)?X}V+k&zFI}bK=Q`&Dt--Z! 
zsF<)Hk2jyRqC)${!EFX8_gVE0-4YuiKPQPZBdp`hup~VB0y3h8!HCC>0%2Nyer^;_ z__&{3keFS?(QKTB1rxDu2&fj3Zp8!GN~+;#q|&pp*RcFQf8IKt&ZVzPDw- z+LrV;Y>UeL-+NPCtvLS(VUvl=6~#YhaDKt4V1`q8TRC*&n+7=I`Zmc)iU%CxIuxZS zz`y{YQh7ru#-rQBkNG>F#LW}jM79;TjDjJXN5Hx`jU&)qln7gT6r?y>q=3;Hi9A=A z(X}L8=C_vrh9T%b|6zev`m@6}s$S5;=hu5jp9_bLl8822(saNhqTvCmEz}EcF&LWR z!}v>))HE~atA#ideOnV?^(}Vx;gxYYVP4B|n~*XDr{s!(%x$RB+058>?cA`d%U2(C zB+U4MYqU^vjU34?Owb-fTe+gnl--IL)37B^uTw7ik!>T2>*fzUS~S^>2TnrvHg#xP zDO8D2X0q7ISz7~~Mr7-u=@LR-*hKTYqS8AJ5XY;g2(-fC>+IM5W24&(<+9|f+O8Zk z49b==V!^~y-HFjQM$<03y6#!7HBBegf~3VTABwEy($}!O<14~r)j|Rk!0k)|Hcj;- zQI{_U?RbMLqtygH$U|~bEvID3lcfZMGX_J9&N#yBMT~tvk6=m@iv34>9KGj4%88>X zbmrOw>;QfE!pAmeXbfmMA*}?FRv&6*4n8u7oizX?C_+m3In+&UXFVdIn=Ga+e(_N2 z90xSh1Ogwhl!C+`;+CL}QZd4ZUlcG?r`Bzhrm%YVm(B0+hG3MPi}(weX|mdCyGs~*x{j(nadAx231v*+?^M?^6}=A)7N%h zB53^VAwi`PKHAEFG|I$)67&sLLA7p~9mrK8=M8t7HxEs;KEM=xUdIPJ>nfCW@tAet zY_EZ#fykozucV^^a8gc%IbGTo(3IGkWGT*i_;O zV~y}EFX}x2ojf$R8E%y4qP$kt3vCFfQ^S2+b-k!4GPtJck``Bi0uh@sPvsWM4)i3O)GYhoSH)p5s&R(&T z*C%g3u#ay)oSp&Tv&%CEz(2ly{}%JpV3${Cum8?|`v_GreT@y$$vI}TxqAEIH9}w~ zuYNx}y?Xo02X=A!%g=Am&ps9!l_g8HDiFH1t*_I|3tk<>c6|t%RrKurg=5w5Q8zCfU+~oaFP=>+ za!G!gkS7oxzPI;z)8oKu{b6A3R=S$g!`Dg+A&h8Vc_Q4{K=;&5#kRK~Zci|9Ue+1+ zB@KHM_TOv_g|)ExI5@Mf9f_2Ix{+&F{qu6y@BvtVBe$NtYH=4~)+ut_ZgB{_$YPt~ zR;K2}55tII8EZN@3dYM10bfh1uxZ%F_+12;oe_u+5T*1LO zAAA|=r#URIKF4i+BO7e7+cjsqt#5R?+X#-Zl;lvq#{XJ)w7P2 zx2Bk;r$3$B-RfzYIkb9^>b#+~y$HxVs@IPlw)&dZ(#!p(bgB1{)Ha^Ht9m`vc~5HT z=k%9;nk9s*HPgTJl>yk_g$aoD0qSqVENCt^CA=2z!TL;RL;9AyBC&;&*c8|0I$W0< z;<~Jj>+s8lpwo(X`^8tSns<>F1J0OUHQSzuYeF zCec!kyD(Nv@3|Jp?IOHmm%(6ymu)b+v&Hn*7A!ONYGCB~u}nFOFEfKgL~2dsLs$D! 
z%i<&&fi$wG-$rP|yc|QCr9rwFLMKCioa4;h3q}KGg`kvE`l28T10{l??LnLdYG0)M zn+LnC{=U1tcZiQxcv%vNYZ1U~xA&s!Zc|0a%w;#Tk1a39kfyV-ZE#!`MvqnqN;xL~ z$`T|PzB)ifoU$n|6(-6e%o}hq>&F_nP-Pr#2KRg;e0jdH>cnSG*u)qG_9 zbRC@G&tL4!mzT>4r#L*;D1aO=D-{cB)KcTo_1HcPrF@Gm(bwBgV~hq%0e}b5Eak6K z<~%MiR|&4CdzLW&tA0rr{W@J>(k0ZP*Y$EmHHy6`vAwxpZeujuBs z*v-78oB3CCGq)%Vud}EodT@)pd`pesfpa4wpYv$-s5@XRqWbf+Qsbzo?3|aYU6wNT z+3EXFKjUAcU~Gw>nD3H$FQY+?MMICE*ua~618bzN)hY;WWK+s3pkpPJmE=sJ0bfhJ z(_YDZ<>f|$e(aBI4f1j$7}Mb9)*5+IDF@D5pf4e=+Ig78Z10Ge?IOHNtLwTwJv6(q zvz)xO&XH1%nZ&#iUIu#HQPdK_|CN2EDCJvf7R&E7Ni_oVCKo!s_1A|Q*=H*IUl0PmgqY8!=j`y7@`X zSfD^s%C~gyATCw-RtXSSeya9VID~x)?F3QExA0m&SbCwK*I2!s2z zS||dsQeqvs&-t^rqR7b0$*nfV^eXtgW|8oHD!B`!;l;+~q~)x$wh|v|Dv-4ASZ$uG zaSam@!OS9`#GIMVeJka}AV$6zfpR?e8_sxCPvk04))cnOg&j*Nd}20B%?T8lZ?W!CM%I&)kADvULE<>l)|2B1}O`~ zeJac#`0BrOTYAqngQNr(<1&U-go3OCuPVx9`H7tjMJ17vvjnYp#XXz5qcDnMH4$~Z zm{I#(J-WT)m{(_(8B5JNa0=6A)~rDfP^r6PnBs-O=9xI?>S42^DMnVDQ4qz6Ke$VT zcXJpFjr`1+gM@p%99i7+MV0dKasQtmM|7^F4e+n9yUfs-hq3RXnUY2Tn81%y zt=}IJs?6jn%6>T39JRF>{LpZx`cM^VT1s zL_w}dnobq3D4YU_DJDo_y&8Q$3$=$Q@DDR^&=W!FcvP`i1szm~O@cHXpNrvfZR&fsVhskPhrTF$qkCcu_rrLBV+%%XT!t1$T(3JKG`W=&P#LNSdB zsN~3u@yJH8669+IihajzSUdot?cqyF6g~X;5)2=1ILNYiYV1cDM~z3ds1CFB)$W~` z@jCX^$Hel51{#g>*akdrVD@SwOsw8ylVFrTm68F>+=eCN{b)214mgSvQO9s{%gOcG z_Pi^H@?+WCE)ta@c!*AC6?pde+1bs3mcKbCpvP#ZYGv@-`4@ewu)nI6JhBP;tPcJP z)NeEZ_4Vzfo%h-nrtP-=NWNhamkNZmo$HA5*EaRqE*)Om3@li+(NQpplH^av(n*o4 zs6R5>n6u@}8|f1>OlST>n(MU)rlWvqSZIxPj@3F(B8<2b7XFm!wy3hpZ@<;;`^~2# z?kGuN-EKi@;rVomPqODBKPG6guskN|gCfNDhrSwMl_)ASQzocPsyq5|k7ILlF%*1@ zh$evgc3(c9vJ-uC1kpyJV>l_Coo1UU1w=w9befJDra{Ho|EkS9gHqUxHyBUp4rn0y z6`Ns>r?w@s^?Tx4G%FX0#`k>16u+xyKs_Iuxoi`*181g2xj?n8vtosmz_4mew9{x2 z5jvBW6Oa1?q!A+_flZo@HLQQ8=qoC?Q1umyKz3kT+Op7;D zI_{rZk^tsd%f?m#V0B-To+#HwW7i30GvTd&iLiHH(<1)|Zh-axxY3 zJ4JKUJW7&9ZTJq~9$j#wkQkW`l?8G&VrXWqS`@%K6){JIS%6DeYoaP`uElW1GMZpM z7Q-*AKnMzbXIJ|4YYdU#=D{4Tsiux)CZ1h} zc{f_JpSCUb0j4ts%7x`=ow7`};bTF5raUOI?I<+J9^mVd87|-OPhIz*LN4XCUcP+! 
zLXER64)A+jrAQMls|heltJUHleh@HT@)*gy@JGBz>~_07hpYQR`UAl&8wfsHIL5`) zq4v%;56i07o!@BbvuZ39sINh|vOuV2PZI(koDL!50s`OWcp9N6Q{`<2ZWrn_2Q9`C}r4U@+o?ml-4t3b;CAMHy)M~ZTc-BnPa`afK!?G(CWHvlr2DD_;v4)`?h-V zAoXUjfDaCESCl(vr&K^XQu5+3x3>YH|-BNy0{X8Z>`Z;C81CSrX4IBIM7AcG*MTCPP^aW>W?V5UZz zDp(=60LUPk&Z7ilo#ERAk32;& zz01liN%mBf5`eE&`S=?;d0-Y1v1~EJFu2?zo`E8Vkteh;T?Vi`Xjh)YTmn>Lqu4Y- zEhX>Wc;J>m%w&^-S}jxAsBd5MQQ8is0AiFHv#`2J zCpV)sTd+#L>rD6<4eqJF)_^-klr@0$j7^RU8N;#$tU@Lz7#bUcf=b~_)@RpcjS4cC z=WDO_*)F7pKhE93l^k$#h`EX-j$V!Gxi8C5z1n6~%7SZOV=2WEEu1Ja`f~u;4kiBF z#<#<}d+yqeTV&nF7eAfDzZR+eyP7jp%_&A3P-$w?OfuZ6+$aWXy9dH0voIC1?4_*r zwhB;+SH6{M0)nQWP{fO34x$SaQKYpJTyAnU&b$QFAQVQ4rE zrN3gg==+-NKdt+qZ?4{L*dRFX&#m25io$dD>T!zUKq-^%YT? z4Aj=*zBHMgt+tx4G5dwg@;CF?*H1SqKU1WW?6;9TFuk|1o%vciv$(9ZvzAH&`IE84 zM0O=Z1Lg2D$b{>+fUS(dG=4X=-kNRgPse(h;D^C9m?5u8rc*S@!Ni|=<6Ky7O_Y&r zL~pI9!35nEXsb1zfH(5H0gZ0f?X6{EkoW7Xi7l-|-;tYCkwP2~G2&4e$-}`=+T#pF zA18!Tq_aqV}LGT zX%yj%@%bWqyC7~QJW{it?J(t*2(3XoP1L*iDDZI!kLNL97vfx$1k*V%b-?+5YNjfr5Pcv) zKf#fNTckjDaN8L6c@ISjx1jXhzT0ye6(Kt`p57jsg0Y;MX#CU~JKKsDZ|tgOKVQ{N zOWQ*$c1p(+Zi6ZFTne@4z*nlHNhCmjncp= zx$?n}dmV!in&Q^ctdSN1-l?}iVCHO74_ulS zmgPzVSwl)&fcX+|>&}6x0u8p4J9L^K_%8cdxIWRno)trqc|(5eNs`4>KZtmUWQbF-vpr~N+ zBwS`+~KcCyEplKkw&arO6*ejRD@UogY<(k^o6?*#$ zGq%wkVYVy{zGbzOZr)=Ka3?c|HcX3|5St4;hhj#-1!ki%No3e&(atD!1lUU zbD{`emKmkhR!%3oKt^HmysvsjlsTM9c)~=P-5c87?h3d03BD@kf$c#oEQ4r~aDO4x zd~%y%SDT1Qk&+}`y1KW8VGHW!esS8@dmVVL9}mTsIowe_ z6}J@)L%~aPw$5ufx<~HZa%km!Np7(9mF+|3sZMZ-#RFNkaBRnM>QHi+t;1olN8z-P z(dNE_HuuGr!~CJWqqV+gqOKamV1#zrQ z`Z-xAhhNZKMFBD>J9nrU^APqiJLnc&C$Rq?@Vi|sl8+0vE_q#7y7%U@xTu)LvR!78 z&*Y+lDkaUAdFRWD&X;9dT;BPzqI2H7QUAq_HNR3tnqpf<9?gD78Ef;zoR&UIa@*`Q zv3~Qj*1{=d8>56oAOj-G7q*zG2{PnMpw%P_Gp~e=LVC*_tc4;QwF`MxbX<96%I245 z>FX4w1E(Ysa32{CleAGJEV*GM(}TRAKVW z9B)()kmuQy=R^0S9|wM7&9mx$am;K7#ob2Q@Fuz8j&BHBSB0zzx*0G3fUy>GCxf-= z)^cwcL2j4*-A_f%FnJ50r8LeO_z$Tc&w%#8wF8(Fw<6kkArHZX3AZ^~WP%br5w1Sc z6VTBVKW{Pz;bS;@H-l`T4wr9|nnaGxYzG57Cf|+t` zwd??_DyxfUcqcdp@*XUVKLX@0KN 
zwqK*I($eMY{`#m*slC-NVw?`o6x)I>oN9&ay6?7&JzI)CSDa!=I2b3{LXS@x0Q9kL z06MC8kgH4w_SMzp*;UCe#LthPtB=N&S**X<#$C#Hg|p|b8R&Xu^ps3M+w;BBZeeN zyy+Zw&1nSNel!!uZ`dGX-=m;?Bp58i+EQ`5iQ-_)4>-BPp?r7Bo0_d0+xC8gLf2}A z=as&Q3b~26vO|@Ulqq}IG6y52R67{YqS(KoHFJzEekOtxbxVlwJoZ)_vq+mjox(dN zp(xpV7f?UbY^yla*G>;V?DXBE5*7A|W0t+zMqaSu@V!LldL^d8Tb3AKWBD`8U_wb< zmr*dp3>>h+fWZncz7yDkg3!zq`egp9&s~2WTl{H|YpjPgt3fiW`Y4*e^Nr?(LMcxQ ztZX$?>1A286d`w2XJArxgdY#CpxhOsEj)<|Px<*NG-D@ma)`ZT;wLWq$wvvEVCZ7% zle%f=0hWpaK?xdwz_`;8tYD9=yhn~8ro>Cp4HP#BIFtZ#=aOn0_G>E6wWOsJZ$L4aTN0=V&S0jIlo(qW`JB8jPWpU zPE91zGs98D_hyt7Azbj7{-6ckl34(=z7KVEET57anzf3th&rrOLX)jYthQv>AHOpIN*5s%@*KHtyu&z}b~}bhrQj z6n-q3Lq03E4){#{c+6VX{i}l$YDMhGn!Elh_n^Y8tP_q!5yt^EN?&~lQK@34@LE}j zcvZq`YMI`wNE0IO&T-FTL8mthaspTh`McIQb6-Pg=$l}HF62ZO$oK3a z*Z!2pCs&H+subwxpo(u90FXa-YZ-pu;|B1koC%KbNkg5Mh&PS9!+OpRd&YoO(e#R5 zJPB-Zn2QEZ>vMlt3mh)}*e9>qAy>WwJZ&stjAQ?4L#amrk{#3`J&TaEq~lHtPL24! zrqtHS^(@#W2l$XiId!&cTHO`yd_ogqXyWCg&yM4w1u^B^+;Uu`eyd4;HQP^22wm*_ zAxMsU#TB4jd=wYKy|60Qy~fec^6D4U7;)cpELn@62J3R2VMDu56Jt`-5zC^Y4B;0~ zlVSjzi5mXq6=noT#RcnCf7%w6+h&BSZxDp4id1{+X*F-wvY`O(IhJ`Ckxtj`t(ms8 z5_+Mwa1*a?p(lvn6&P0E|Ecc2pH416y#4U{Na1zng`hMiAi3@r0ftb^P5-3c&!QD2 zlx{S>ZTK%=3G;@w*AiPiwEKEjlg2k>Im!yH2TmQhqN#~m~^LlpNO#JK-Ae>W&Swq?&NI0tfI7m>`P?gZ5lI z8)!?VMLsE3PH-yywQA0)`eV4#NQu3%5~B&3uW4;^nNxEhJO8BvX5VVWk4>xh8^~tz zpgU$DrH-ED9%pZ3GpgY=+l;}PE{-08DY1E*#~9~V7{b<*aXeoFWS%X%@)n$pqUxSv zp+-h~H@iN;fW|V@vx%6BQhvuS90>T$D4b#* z`ML4!JeOoJ(V{=1)qzu+>eamYx#%00@%Kg?`W4KrF*f4#-^O>Y6b;a&ng!n(rn#BDW5wO^^ciY?u9!YTp;MgF-6F2yX`$_cvN`d?FaoUr7^!LT+{#qzHoJQB z_9J`s%h|^d&6K@<`}-M`oUyb2_u}mI>g*N!{p9?&Gt{U}5S+at(?p&$y3@$6dfyul|w5XmxThm#sP`dLbPx)Ub9-ZHeMv#^&zMYug>0I z6fc3fBQBm)3StS*sDBh1JY_~VoYTVX;tA?nrBc`8=xn{HZk72dW)FxB`LeoJ?7mz1 z?%iA^O>1t3)f`%AC6{JFY8Ko>fhhwh+A`k2)0QpvRXe7V0rmb zV5_dKwpC9<$IdDa2U}l%I@kn+I3^oU%xbtMKEeAY(+k9DBMBqS5J*=qPIgkjnMA2)e}renAL^JlK~&V-C=wpSG719aUx`YbGP zYoc~li#-nP2)!hvg|Z&7U1r6Nd=tM0H;b#Iik)M>Km)ychzfd=GC%ZrSdCSH!Kb-~ z4Kp>tK!TWuL{dOeuK^}y+$+H 
zYjJ=W{G0#LCJ%;g&k_Rh$}?i?XVJjPx{L9>%y_G+u{R5s0kwIwW|`z+mnlza^6n_m zYOCuWQW_2lyiO>}WFFY%ein9iP)zOfaiwH@|Ja49S8)c#tjZH!Dhb`U*R(5%sBtbj z?Yb`{h}$UqfsM0$tLg6U$)HP&P<-m*!zIW&eyV0WOvc^s_Sz&q?qw?1#r8IZHs{ZY z)juc(F%~#lFdM)k5jLrQxA&s^e6Pz^+$)U!>rt9AwAd#Y&JZRo;f_-HXpz159&gov zGN3o{)O*~oDfgv9&n&SiO(W2&I??UoZW1lgDPxQm@<1c>u#ZM6wA{T7p^5hStp7i2 z>GPH3DS>+Mbxf+*AzBCbS=I~8k_5eMXBakYIiLz7rvhslCH$gE z>n}-|p|5TuMv9)`HjfOWNAQLg2xD8#0JYQ-IuU-j1tB?bBJ)|ao_%1ETVf9g6tZZX z@HcLleaq4LT#P3UZqq1Kssp0()o*v2{WftZ*))og`C~ePB_d>-638-x{e_}n93v=h zQ9HLBg3s0d-TG+2IU6k z141j1QqrA5ZD_Nd51A)<=-YCpB!6Q5C#CYK!@ zLvkua5P#g^iWl>e=LHedLZqisgXny|>rabFH=`n%%gZdVoj4!=mm;~4NE$yKl^-@j zGG8y{NLH>K3yW1Fx%?B5luNNbreDF=Mb916j!Awe(*ePugQnvqh4b0mT8 zW*l!%@vEj(-E9_~U7OV55fkT>FIf(=Y$MA?o%Lkfkd+Pdi}@~Ow3toX@E`QQgMS9e zVB)uo?lu^nb!${NsO~QINdK{JXZE_2|EsnL%R=%W(%7RbIEwUxQB*VslKl9h%fZ=G&*G zJrvS*3hlLH72(efl!!lB>tKZ2`bf^RNU-O0N-WkGJq*!xoHEh3vY|@LXT9-KZR?=< z7aaK}Hx~7%ROmG$mFSi138fu(V7MHTq*C4CQf=#S`IkRuxP@M`;fh}QaEZ&r{B`J? zQ(UD?@(ghLE@uIo=5TH#V;%7|h0Ceu_xhIWw%geE?(zeF>|^GGyZ$0kf?EskUZu|zXS>bSPwPr z2ZwO3N}DE!*`b~g=P!JJK5%!t?%siu?<*brTha`r8n4&_O?H=G&tL^}?yv*P^iBT! 
z>6`F-0`Gnb+u&`iB=U zgX9KuzLh7`RqftDjp0X%Ai%+3NSInkEz+$n?4#_45oOqd}&m5|B(00|?rODjfTSqyM5&i~$SZ9qs37d4c=V2;ZY~!E&ceMfM3X zYpC8bv;4t#zkRo3>UV0x{P%5fk>CP#jk0R~)^M3E(@NoZSw|B7D ze{tA7?DD&E6lM=7MhFi@MtEaOPV$dDLJNN}gM*ug9=d-#&Wkym2YZ8kazG#KnI1R; zjY#;)*le#AZfZ>l|w)Dba*!)Y@n`2K} zi~*pv{J8jxltSTaW_hTFwKCO7hLpxaSQpoW&-#r#&LzL|?RB9Vd~dh-#Y?MTgvE6Q zqD}P?)6&ZNNi?54@;DH~#eigUR&zYnFQ@oopJq5|n2e|8h;$?xRf_8_ll!5NF0c>f z$J8Fd*KURAQpj;KRm#$=V2{4)6C6$4o5LJWx!mfy6a;K0FK4;IW6F|Fk8Zm0mqtly zTt4;6Y~$T(FyvR`q3?}YYY-@M5rkQ{Nacs(35Y%U#AYf-U`V z=@^AD!N69&cq%u@pWTq(-9Q&5R3>F}QOxaTg**HxF27V$*%Wt6_4UumSl?QJ<~y8L zT)gCz76@+>jdrz~<~;Y@j)KKvj!Euu8IeXnyEzE_fj5ihk4J3gV|MXqMLGawNubxb zq~f+tlykwOz8|m*A)?*82Tr#lcKKLm)nj#~ve42X$|IAL30F9VJSw&Wd+lsr$JzxF z^*14r(2k_Si9XQ$6OZbh^Hfh8ARh}2E8q4VMJvIZlB_mp8Bp8Hw<=A+(9jS zm!BzH6`I%%Bj6-OwsA7>OjYD^xv6mt1aYj|2MU%F5lmW%yf*OSTaYiqAbJ3evT#5) zh=szC#;2U(IFTyFZw+yrsi>t+jPv{KAyoZpF&`Y-m}9!k#_?p|*{ zUtppHIy~W9!HnhU^HAB$36(oxqe5!l#vzDHewl>hdMXHo796P^ z1$;yI7KSMwN|#}a(GPfj0W1@Hy8otet-avRV41t7{%7v6S@3s8V8WGP!T(vd^EOVy zW;{$nP1}$RcwBJj+}>922~9mCZ$~5A<@=~h>R^6V%P-Ojc*mZ=Iu`y2RJqN)NEQ0= z1QLo4nCjLLe3qpHP8b%>ybSK5l9esBqh z>*QTkP0y~5>2ZdB-z+PbH_m!PeeOkiE{?@9zhee~60huKfjo(}DsSC^!CO2q> zBJ0sVlBRQHanmr&-pX6N+yte<@y1EhmI@0^W6qOpQKE-}IZBjkJQe$LRgU_??hevX z@zz-Ni)QWdlg!$qe(_*mVcj0rv~G{I?FvoY`ld%a#gE1Tp1#rAxMKBI4q%~Ex{W}a|p#=P+WY2>% zeAIimN))>Onk67pB>Z45*F)@SIs6RU+fJ_VXIoh|2*>gS=zOVf-io@BE0S`NZd=@W zNaToi_f7#F6$LznXotJn=+N%_EznV^lTU6nIvL%KpC_Or-~V{i+o6{~Dx%Tps9JV; zjR(HfL_2w&L_UhO6S(oDY2eGIpaiPH@&*)6N2jh^w>SoU`DpyOVtrR?m+BepZ7ab^f9GKV?` z^%%wEcZq{$^~<>YUb!^9cTD7H-5nP>pbD#;8@?qbVwd@PM*m8rbi6)=YU<^VN;X9{ zta8+fAkNRUlP@3Wj>;W73Ru1E8%@h9--+yFEO7rDLrs?J(x$||)(`AbR+>F=v1yKY z9;NwKYEH$kvh8bU{T|!0%u=r7nDQ)Z%O^s^DTX7~e=$y(xx#+Sp7mjKmp*-6Je6H#=tUQap<=$ZASdxS57s2&$#{*GhC<++<`xFQOUE86Ag>$#)-YV22)U>!ZqEZ;cPWlPDYmRpF;Zzp&u8 zYt5O;+~l&R5?5Q(PqNpBc*UJYu`d#8M2mDT9?&EC>61zA)b5%fUni7!+XG}5BidZ2 zpVc;M(1%?f=0Oks(9mXCxM%Ul-XbapE-o2-CwouNQwGw=)X-5{LU@r4Q+nK$;Sv#j8DqtT4@s*) 
z5~QKpL{=}<(`b?fZh@wsiE+$0kNBa2;U)&R8IP{D_znGT?|HA=-J=YcN3b4}RIpVd zgK;HnKv4oH2<$CUPUOxb<}=aGjX_ySo6Wqb4|A5af)RAWD4O#KI7bzO$+LOjsHFc( zR#?(tAxWS%Sd>&=+A1$gD&N~G-&&yt84+i zvVmTefL_@^uc|@&w%UFvY*Af@?N>0%y^ZrNlQYyRO0rlgu51-o){3f(&nnu+cK_a1 z^L?XqP@r(-Espb(U$Lrh8@~ky-v-jpL8>mW*{K86mF@hhXg9k}DuPGX!@>}u&@vHP zRXGvXsw-PnenYj5c-mX|<`SgBsuq{F7ME2mPHZhsM2ljRg>8~7vT3-mF{!2EVr8kg zSXHc(r40p}tXbMnuvA>yP*_%>aMhm{c?1eA{BNsyZ>zbm)m+$WE^RfJwwk`Jrf;oD zFDnh{$qO46HXu#dZxS{Pi@3IwGi*zwtUWu^)7?kBJV(9!$x0pwkpUs zEVNaH|C*!!s09JbakYUxw}L&lfjzf_{nbW-U#$a+JquZ5i$)RK=pS3?|Hjt&8_R&s zECA2XY!$=czA*BlMU&TE`r~3LJL%HV3`ElR{GwC=X=Yh^u@L*eYu$wt6~sjj5VR@k+Lwi z1q}VrPknwqR8Eg@E`%K%M1F&7_+Zs>Y(M z5Y#M7>7Au)Y$>}O+6rx*V@yIz@yD(mrQ_|Mv%y=OdMA6^&)Y~ zKaK_!A6JP5pOtShQ|H*~cM8hTN4yQu2HI(5sk9+vhVmUg)^Nwo+9Is=q^8&-Y1FKblxVNuqg z?8CCGf%?I+n59MBv5BrR|6=*{DQ{#!p1V92%U3qUZG{*P!d5z|5VRPPsN$P! ztKinQ2YGf*39Sn(jApi}h#u`*6UOamSg}S8+_p{1aJlAq?A6apsvF^rm3g@%CYn8PAziR$9A3~OB{ zpVeDL6|oWz@yU9~CqJEwd^7BN`nf7%ay1+rDtfM~SOE`6e|E*)_4?gs9eS>5|7py4(z^979d$eid{IBM{6e3CiYp`x`^9gS+8$x?ZfPY{{?UaSV%T zFvOVr*LP!fH!ciec)u2|WSSyOg;DI#SL>k{CpLSG#|a}K+b7`OUkBel0UP!4eqA4f z#dHxaU{8XLsY4L!_GEuGRCkhbxl)em+SBcC#zsqTj^SfON=*_sbP?8SJuJ!1X_N|xeA=huQ#S4NZR{l;Db@h0J%6tY z`?4Nvn2320ZwA`0zw+vWd|wTcLVl38SPn}Ex?r{Co61d#j*@oe+9no;i`sw%Fom~l z02iy;vT_8|#u>8g}SU!~o%bW&^y1q$day43VRsT@K(C~_T zbuB2hW%i;LI6+`RRtpoSRZ*zkZn1@ND;Be1>7+`h+8U>TtA5p5 zu{;*6S$8$Atg9wQ;{08G$jqbltX(HH4<=fknres}9(nH7XI0gZ>o18s6vaB2qd4Fp zo0fhU4G5-9@?bRHHH=O9FJ~A=NThS(kt2i{x}FvwPM!o2BYSvpQUrK9T`L@iu{UOe zNQRF|4HQj)$uoZ2GJDlQOP8VQ?|mBUJm#A6Y3tHZK?7qp+bdmv>DX%4R8%3e12XF zXnj;`1O0V93t+I5A{1rnLh^l{LP=OD7e++|wF}UoL&)?+u51@Re+G8p4NDO+<45|Rs3n^9*F!lW+h=H7Q zFP?goekWnAV1xs0V~$Wij+HF!TZlVlcO5#e7A|<~&p>$&1;xuYyv0mjTOG3xI40E2 z7DDm>1Mt>Rjqa?BivfjGWm|$qe6sl+SF*a9h9<0Vi@sG@4f2 zEShB4>^^QwO-p|D=ITTR$-^pn;cj-L#h*>$_{u>Wu)o|us{#pq5q39Q5yj+D96C^Tg*NO!4NYFwfH2Q9}<3{efKX zq!~clB9Gi=)2p;qjj<#&Nbqa-P=)PsWuwt%*LxM0SXG=?#}L5z$%Wp%R|ppr@3teK51!pze_X@Yi*bTI_}9{OWr0AVzB&7 
z(iX$$(L!qPQL49(cuXn~Diw(&i^n_B3?W=k*l$;-M$3WiG$Ab@a2%;HrTl7}Qmbj# z*eQlTe<|yeOhvr;Qacc3kjkvY71JCh#W_=2T4!2Bmo2pF6#5fp3f97KwzLS0jB8Lr z5o+n%N#}LN#bDmoG71*}KE{oc46*|_FXAdg3Xw^aORwwc9uQ{F+}!({fE@b%90lPz zsLCy>!exrSV&Z11LRU7lQ~(xC-j(Sao{jxZVZWJET~q{{|rE#>jcZH*XU(fM$W@UTlr#vf^A_=pr0W|*+LvtYr5LU3S zDv%3mUKYJgv-sREf~6Pwk_!d6p+6T|V+B-$A9>_Y6JI~Z6j{SyG+HEF)KHjhA-Z*q z40pcLU78XKn&@9-ISXNP9qct}C&i3|D%ewTZ&b1W4;BRhY&MdcxJ?(pC-DW`u4>-v zY_XrtUcddoIvw`r6XQ|B`p?y43=JQ&W}^$`@{`Wsnn1t)wA*^OJ#{`80L7DNum+Hn zvF!S9y8vW+8}|7vcJ|?wiR5?x@*jCI)9Jdsp}*V>gWFwM=FT6#NgrMKIXv9|?tkGQ z|M7qRH~rgs`v;bH`@Oy1VgI|{-rnBfVYk2E-T$uJJAA&s_Z{p0D+nMCp5HB#!G6EL z|Gcc;c-SAV8zlT>|NdX{v&A~w9bok#Y~Dv~k&Zequ%yv=_Uu`saSB@>DkQMx-oyX@ z=FM3DCF}K%_V$hrddRmg8jbhSa1p{G9SyQ5;w1ZEGF~_|GUh*^ijdH;h(31sRaHts z`Otf55?gE`P@^Z+P#b*dlQ8^Jw&VC{*ti;SW#NU!|;!mQS_&toq zW2h?sgwyE^A<4hzkLeen60Dl3m!i6GEL*rg%d(c!kY%@C5)4jB`)~0!$IZXAyktPJ zO))ljRW4fk`AZ8|McumOjj~CDDQw5y7^$ zW^-ADaXZP50;R(=BNTEGiGul1mT}g`vWJZJK_q4_x${>P$tzDUl+E82sG9HEul&^o z9#-1#gNMuh@2CM^;EAFA8cnXB+CzVQnnv&1>@v9-dTn+($A9vAc;|#oQH^Qj+#=R^ zKALQmjbm>K6a3PB(L>TuTv+%YU0ZqFApt0*iZJ2+AwF@2ZShY|xg&_CO5rCAY#c9q znh*fcDYESM4c`2i{b$Y&%}Mj9Ic>I; z5*~K?sQIqhR`Lxs@1f{!yn8p+Dh~$DBi8KbWuPV(OIK9JjV1brzWu|NXj1Hyj4Sx* zonG88mVN0?3A$EDhoT~eaF8q(4kyDDNwhoBi-}6~>7DuMo{9-=jPUcnj)Qu1o%Je~1X` z6F&^2reoTfsjpmK!;$Ws$_k`$41h{|N6m?=3=`!7x^CfUEm^jBV;6>fjlS;;PFtf@*`8aYsk-~-&Aj1I86;*bofPPo4>+so6jFO}P%tk5 zql%+ks3xasFwtfLSF0~(K3}mymsH&VF^IP(g25+>4w<<}V#c@7?~v1>Vv5L5Acj@g zBQIe^Mzx%hS8EC|p{yEiC#JudHwMTg-!zbKXI#+A{gnd1-}&ey8iO8`T@(c)@@r1D zQQ+ph;@+2dpAn47<%)lBO+SEW5f6OYlK-iu|6>JtYLO*{IxR&!7eAWLrZ}N_HW0qb zK)p&IRZ?e*9i|);0%+nkk5t0DkhE07q^Pcxhg~}Dv`ufyyt?MKdAN-^3ZOdHr}x3= zt(k=xDcIh0mr8y_$mpedVB*e1c+~R3R2?vX_CjE8-ZD|0Fg0ha0)X5PQw31h7gKY? 
z?01QX?HaC_nipoT+uMVt?e(6RniD3UG2Y!%9Wl2wCrnIGx-THw95Kt>W;8F%{)-m{ zDs1Mlr1>-Ulu_1qSkl}XyKc98C_sF*zmn$7s7}FpcO}i4vFrA;rW-mmYQBtJx0k_r zqSuq=%80~zQ89gWd=51qzz#y}=>RwL1JqmqRg!-LH$cq;uyi`tcmZk-fG{C`XRmBR z>ILh_MZf9?xZm$ruB>9%2+i%Uv%gF?KYnn%2)r8dZ>eC$hN@vAz z>XVI^hRl%4!{%gJ+5l}#mSqjHEbEeGxt=UbJ6SY0y-b%`2=+`0*kMA)YE+Q zfQo$yqQ0M==AwrnwrOu^Z|13|`l2c0FLg)LJkbt%?#t%_@e*$|%@M7$@4noV^-G-5 zG(WWdVXm80`J!oVXov2LY)00(qG?`eDmSn7MAMwm_7zQXjU$@ogQl`;T|cypvZ|*T zxqxLe@vXhZG)FN|gnF{8Cp(L2eqyyC*6?Ef7haE_pN(O1R zk&l?>9tI2Wz|;#YE@GNL7M;BMV%xE&2G?n-NPUecD zAz%!z0s(wduUfsvaWEVT3{Hc1AabFyR<6NwxB1%y1q-Z2hf7MJh(v|sN?4y=o^;-R zxH`Lh`(C48kg6<{Q%|{=vRTLFM0BIYWcim`)CSyCCk_U?+>I_s*aXQMcFttlQWpp73yq1>#Q)EYV^zmjf zwY4!D7lhf(reaPX!qJf5bRx5rq3;v=Sl5cZa1Vm5EkZ9OZyGY~QbHp${=B8Q5v<_6 z(J^wJwlHi(Jo7^5NL)-13!+PsxQ;+?u9eSynKW>}C=tQ+E)caNb|S(zC^sB=W?4xm zy6;)VxG2$OssM}*KytWk;loAyJj`>O@dxpkB$wZY`02p4UNm5H7yI#B%}%HD$4rs9 zeE&Y?b#UFP>p*F@#T_N~U!U_;_*ZemhKY+4AleaX7X(=iyn$`Am>cs*2w-ta15J-^ zfj}vU;yhjlj@Tu}`OhX{*F2(s{@E6E5Ub{96h?5SPai}7nAW-8H738MD`nyOMqSQO zVDYdW=6Yz8OPZPf*M$L0wRFJ*J{Yl9i`)XRm4msL6;a~g#>PMEtZaZ0DTkqP6KAxk z+%g$*>OXk}c4TB1=Ea}0f`3{8MC8TrpT&=gC?Jt$rbaf9#X4NtmEP7{x|Z@WYh4^= zKINj#`l(i%`ynR4)@B3AQN&CkxL7nzJNkJhZ+y$otI}=eiZqyrO)yHCSJGC*H$qcZ zz+dR&LV^8j1?x*DwFY6mq_t53q**rhrnP~xYH)~0V@HV?$ za5xAw!Z1nm(0?el6+XYmMfRYv|Fca{k9*y2-9^jqIP)Eb_fV{38OJJ$?>vfJ=CZdU zJN(@&TCoZIALv0B@GK=iYv3G@Nt(#%zI_DE6gN^D4f_}o7^DMGZ~lHqZ7-)X9KGfgRsyx|R^!?}ZWrtHr)oK5|de8Z}BD1>7A!D-&t zii#86rXD%J(I@iZeS$`n;kdC1#oN=!j`%+u(_4Rp+>8xyifrhVYhUGhSDPOhTIIS^ z+p()ps)GqfxAFh8_paS-8(E@gf99`1$m;GES7Z*f= z651p|6QmyQ?HbqQIZ%5@-GS0fs!Q2W|UyReh=JB^Z`XcU~;#V+Ezg*hGny9J= z2=6m<4h$h#w5cNgj z#BaJg;E+-sc^@TPR2mjED>Fw)*CqADlPfmg6` z`!_OcGRC{*Bh7Qxr-aG+fRBt>l;#$(My#r3VT z^!P^ObBuxrM3>H`tkaqB7p9kFH9?+^ORG2E0*GYJeCuVA1IGaXoWZk%3{RnJ1SX>x znSGryKSVzY#&(8Kktd4+!EK9C@8J(L-<0<{7|d`ryOr)kKrnOl0UnM#oxFc=|cB!UN%EGdqqlN_ETT&Ruw)mwyjq+pQ2ML;u_ zp?i=rq7rc=^#@~=(Jyo)9kj^z?04^vPvJxQdqCU1&rMTx(VP=T6hM;AJp~dvxr*o+B{uSv+d6`4uTX6_!9>KXlAlmYD5z?+E-C{t$dl*YnAqo 
zWR31I2r8~Lr06@i8L!0&5}@%i?8ED%*Qhx?B`2+75L`S?ZQ;I#%aq7LFWZ7DG3bd6 z%9Nt_zm9%7!_sQ_xbW~xk=$wm+m`U6;PY9W&gHxy z7swbvU2?^+puEm6w2()XbXG`r@X~FP#L}s=5N=!(ILh$IUgW@$9suv`;*26DA!8sC zSW+?#0#!DZF+_0caDT@s89L=H}k1xaVM_#={_6PNEb9Zgb0 z60bCCbX=B&s(9`oijJe*4fsHEisCeWQAEKhMK>oEbzOsilgH)^Kpf-9I5uO9DA~mF z+HyHcKUzF->1-G7Tqx3w7P{hq7b>}A>Z5h0`X`WYLZUk-1f-v01_GBb6wl~tILU!K zzttq!yhS*69`oDS>Lgr-i%Ehi zWGI}&I%SU<`Gr_DmD^-=C(`yx@?Nw-QZ7kQja(-T+>)ee{4Tv#lkiNGFA-4-;>LyC zM(K&9^=-h#C7F;v8)fGr&XON2ad<0RYLv)4w{H}q0Mt0Rssmzvc+q85>bDF4b-vpW z*(ARzOp1@Y_qjIx?d{R>C|85z_E@%}@`4(JsJy0f;a}SI9eP~e z(WFF~b9WkDuSc~A`A)mZJVRyMDBBC3PM@mxO+DV@pN6NNWuCLj`DzpyHX%SkCzX_I zqj(`C+YZ;%9cIz;lwMR-nJUSsmuUfwHxE9qa;T}Uk>sN4*fgjVJw(`fH0}Mr=c*O{ zcx^d0{A2t7h@*ugN66i`U3+VNyUB77(b=u#0VgiHZ#VO6u4Vn2om&E6witTiz$X^> zDoS|>noSl@9JCnG;PCpTh=fqszV#OZ#?<1u9= zxIpZsJWW}T4V~r{No6WPA$n;*p_lW+Zlx=n7PcZ_5#yJ3O2}h$4ny4lErk!&E>JB4 z8BGzbJV_0uy!wvh*;&d1`NcGIp5DtC*_gY&4%ew{waZjV8V0bPom{(wVNWVZmmSMsm{QjV&gI z{cM!|_ziHYU-V=gkWsp!Z1bZ9X{vT$$nYMLq^{n$SS(m(? 
z!E23d&*Ez|&W`30OKd3cnc|$s&gqpLG_>`6gjyK3J2DS}J*13Z(Ta30{F( zu}mXoV!6?zT9==Yk|Gy)epoX~EB3Q1D-{TC z%SbEV#Au2m2>|mv3JSWp=&NuI+ZUCt83EF5fLSEBX1+-VE1X7aT@I_Hi(<#Qex?IZ znoH$sG%-YQflr1%YOOdx&8oa`gqmk2SdOdZb7$ZIbwkL1NR7{}mvRz%$AAC35>vla%6~M`qNn)tm(}(j$sA$J8sL5X7PCNR2#3>+c(FfXK!Eq`udH63fat+Ba?*7 z{;K)+WUraEJ?MB{@3Hq}Xc*^KHY<48`!K;iPW1>A)8cfhKkl<;suz(KX9-BhcolrU z6Hl5RVSZZG)Z3)r1PzjdAM9E=W!e9;nQIUlg>qKawDTt#K!&>3u0^6h5 zUu&jkaFL)8G`#U~78CMI@GPE5Ov(9Ns4KsH!qy6kotoVAa3P|vMU~^;-cIZ6$zzz# z!_H7FwSheelkp187B=(wEqoH9_R!@;)x|~SPge2LX;O)%*L5kx64~s-L#*C({WG78 zT~=!$(n}oqd$E~gut&CTWT>5CY9?E_!+$YND-dfmh`kP@)kM-)%-acPg^foAhuu7b zF-+FMd@1dG1qv(#P$}lbPor|`dJ-DsL1x22wr98N&;AA`22dwwD`&D!>-#XmOZ?)9 zs&CJe5QFzqm|OzkS(u)9Lu`R{8hAwh|?f_q)CpKu^rN5^{o0*N?#0uvyzdVn+YN={uf>j{mg# z;xbVEd$wd1e&h7_Ekmgahzne8%4F(c&YfO zlIf7a%OQ&kgpQGDNCi9--n$0>!Qpp)Q4g*66V-M8uiyD5$BB!8P2kpnNElTOQVnwQ zn)Fh|c@Tp3~JA!Y9|-2MqS~XJ#nN!c-vRvIhg}Ys~){8YiFy zwMU1{Y91{@{aB+Vh8H5rlT7`wgT<{ar9hdzb28#0ee1tq!lPsnEYvOL`cTaYZla{$ zb*+EXx8G;$N7yIIzEhx87qqfa!DQ4(hbINeSVqh6fN41#!I!a`M7|*hZ&5P9Pbixf}w}yU>HD+bB2EZ*GB=3^Kv@ZD!)G7i>WvgqQ2QXj` zcDR4|-DAJ|w4@dVD<07)yaA7!RbsM$3XClLJ!*D5ni`wivV+gS-zo)ArYLQ z#xe|?8n`2WD^uqB5{`QLb+l1`%xEUgXP5f&^t7p=;qvmd*e!S3NWq^Icv#=_;Kp$j zY`QpU<~kc=9YULGSuItTgn`(qg&MiO768vOz-Y|0GuTjq1&+kgyV2GfcTvnsauEr= zT=rG4X~U!si(CvXv3$hAJ)5DiB;EDTMJ5Ka2VyoXr-vmSKo{urIKtQpNi3i+Hye;g zH#>Jork3r{OI8;T0JW42ti7*DDPWAV`7SE__1OnlKus%Vv8q70rGWBU+sG*q&kDLm zBn9P6VQ5hPYde^hay)85FySr(++l?GFc}iiGY6w2^KD`sO}jqVnv^!dv6ZBz^2+Mg z74+gz8IhIb2m?c>$7kUX`#~90eK%_qQGr!)jLA}kl}H z&qi}x_4=7mL=Ou&yI8Rp0h`a-p8lXaG&V;%I>UJM`=$ZOns*BAcY0m8P#c>t^#+$9 z&Y~uBQ<)xP4SBWBHcdH-SiVum9$l zZKrM->r+miK5cow+4BCX?7bfK`)9P-upSGIItFh_G|C`I(iAPMKZG)-dm!RX!d8oQHrS4Zvum;B)$+s*GXBIUW(9WZ?4x(8Oog721 z&@xl7>Xe1kTA~P)4&NHGBpwua26bGpq&t4sHQ$uQAW1n0jLRwTfMJ670m`@H3=VzI zz`_B7u4@KGqU2$wAjlfnZVg=6PEYT(A!ddXrV`N!? 
zhI3oAtx65NIUKWOsun2fVQo*gO0dUT@W0C-&!nMY;x(10umaGta|*jjrb9B~m-FWB z8M~u#*diGw+;5Z{HcMjU0m1QrsCY;?3iQ5V2#1iyby5}KwAsmdFuxTIXL)YTW(FIA zmX;Ypx2$Xr(&7+)YcRw`u2^P76MS(I7Mt4(iv!rdE4G{CKz4!@s@=;&KnxVz9c9VM z$3jp&+`6sWTT7He?+o3o!pm(1RNVc~^|7`L^=ja=w-uj^(A~9-{cQy*26S81t+SO| z$AxVK+gR*G^nDT8n;IsGB83?-T>!k4s*r~eUfEQm4ns&MTJu+NG*Qyrp@+ySNK8I; zxV(KCEMMN4;VkpA7p{TKm+$Yv;n3ReYul#sSY&xGwVixgtsV8%W^eD%+E<@k!4p?! zapuBWhkVnJfl0_x2X$}Z0*NSgpGQO}f)sZJq;$zTIFDx0`qsotjPTgVYoC?(ZV3ZY zrtL1WDRR*ZBhP%>ano~V>&`vNk7NgW^tp%YyIAxozUm;dL3g> zmyYI47-9U|2;J5e7opSf@QRbiFdxx^u~F_UC5G?p z8Xe^BxOmD&r)o(R7Y0{01*S{ao{Et&&#I2av>puyO1_G)g0-j%Fi6IA*zR@;f}Q&e z8#HCycAA7KwR+Jf9;nJVUW|jaqtZ!FrAMw$5oCmR+-1%AK*YHldO}fAqT(LM%Ui(_ zi4-)zbg2qna~MzEsIsNiHV87M3^NsvO;Q5}V9upbGw+?uogA<=O~w^vW45}C6?LG0 zX!75)CKN-148}>h_VGfG`<+E67kMR(3cyuBK{$f(816$dg0Dyikefr&t7VcARkm1O zvI`|ueF%mzg5K=`dV5A$TmiyiJqVbgrK>7eb7BTy!cArYGC;pV>$3{tO;(G5l-kPR zjXI=@&gXE2>5In!26ep-IkqcWJXlV$S-6(WVaf)34t|?bNH%hoURW+s~egp8<`N(C#87=in4aVFT6oV-gHA~!FIBCE(-lmM=g6sZi2aek7mkB*)EX~8G-#5;ab*o>N)=vR;IUGy=+W&tC)?+_G+$qh(1U2&6@Qt z+5qtL|C9SaJKZc+Q9{N`$|Il@+9puOWwVB(7Y0XE_!ZqU zn|0+&83FJhY^hI$+(s@ifuMGaFn}Xk$qY@VY-Gc+Szw{fl4D=eN$i#qsS#}M8J_7i zH!)r?qeabi>xk=UeWzu|FbRL(eG0SDuUXlx+e&_Z>Q!@h**`5dDp$OuDW&6OG-W~T zuV>HFX)iN6)TedVL#VcFrexBT)@u%Hd&l9=y5g1|#Je4E%J6>(nV;b-t1B|~XFE@p zK{7{^h?JQg+U1W!MOdRVD2tB?ISNp0HwwNcoRLtHF^S-o=Kz83cPI)R$7~a}SU?jM zP+1lsG*8aVRD@dwrlrnnu?TyOk2B1We;IF9RSTk$C8+|;?8yOeA72A{5@MWQWO5Xi zuwk&_WMeQkn}!*bc4CzMI&M zEA`1mZ>_?sYuY%=NmSy|#OY9{YIU7jp8zFXZI`UyKWveLs4KmT>=oa`;rpj-SvE z@&ln4>ob+IKiR#f@Ij`<^k5?(QK2jN@uktS8bt+?C(VW^O$GPVyeL(5Yo;qP zJe68ShUPYka;RMW>_NV2W!?+vj8$?l^U~~_{Tbw9J8 z?Ox1cfFkw@2NdQ<6nQCL%x*{MrQgSx_+EHJIR~;QVn&Sdg;OV9qET=kSUO(fE(8qoB=zKR(&&(W;Q(~aa7GErL3e<& zTR@@x-HL0%3O=useuj+5N$&0fGaoX8B8y-~&(w!fJq~C$h@D`P`G_zy{AwhFtJ%{J z=%qLPiT)+WzUkRB3JZ%lz3|KD&YV9k;iDpmgpH}>$mI`HJK>k{n0{jXM+*5Wg00%u z)A2S?n8J;`3r}AYg6r^RG>%~;!x1ak}OTG=qI;XVJuo3 
zj*c7pl)d@syPD`_Xl6r^x@O}Kaa`$qK;ApM`GNGX9JED7?-s1zBo^j<{F_EL6imYzU5Aygw+mCux$yOm*d8O%2*Gcb6mteO42v1bS}^Waa9KQ(xB zYmI?>F=oyxT(3e*gom}!fZxJxF%f}B&jS=2!)XU?E~6W`+2N^%t63dl$Cj^c3ayHf zr#L<)FZ}eOKG156K!jlDc=(gdBcB6_ra4@NYK8-M&42;UjBT*Xm%(h>)_wUX8W3PK z-OOhEDpT-q>NMS(-?5XW3GY@B^CiNx-O3_}k>0(NX?8+VKj_#^qjqpaEa`#-c%%ok zI`HYk@>D}g-UV}5EIn>)#s?TrB75gdgq81~%3$2Ti&;av z^iaQ+;WmcVv${pG#sy!d7i?52Hym9UW&Ad05ii=(G|Lb_vPD;E928iT7<_1h5ma66 zR)?XbIl^v`hUHSD>?*)Gx-qBeX-~s7RGVO$Ef_ezQER-=xgZU9i(aZl(p0L=H~%>0s%@vNpr<8hZ0n8giV z#Md0a7Cll0@hyM}t^&ATXqDDTi&v9W% zotrYlNht)_>}%xd9N}PBiItD?gZA!e5`2l0YyE&8_HG3_@nQ#b;Iz6EI&!?W;!`p5 zYy-jR=vdJ(!K5?^rIL}PViw0AQK_e0Qb@wG#sKA$P3DwJl9(i#i_E#@L-ZAvJi#Tj zaO}5?I`#F(etn^WzD>~0twsYs7(^s)$sSWEw&1_j&gK8`jBCjF6AhP6MEw=0Ph zrV`azoS@DYc4BpE5B70{v(@3DB!97v-)l4)JB^UAkgPYn^+iT?r9ejU52dNdi+~Oe zeRt=;*4yTgP)Ouj+`dq@0;AQ`moRr(H7JWupTzPOdBbr>#A! zTe)~nCLxQvMBM32X9A2sFjJ!|<6Qw(s?vF>`KVf)E7NFM4rL8PX|rdYy!h!|`}xVK zXC1#kYM;D0JAQxiS|moRfGoW8M7SA6F-3QE=2QeXlTZ@Yt0*C-y;ypM;C((`eXI`m zedmiK?_9Vw8j_=xQ@)lV)gdl!nU9pZ$?66cuUXEaychXa3CN@IS_n1yCNojzIl*Ly ztWaW2xX|5A0~#)I$$30c!}*fKE$QJx5{XiNI*#YT8Tu_J^3<9oM>JSnO7v>5Uh^|MV(-8wd<`o8CNttuy)-W#x8#sA-EA_uh!@S{w0qpSZ$C+;!savJu6^!D+i~-&J)| zu7MvU@!q~<$VnO6W@lvqz!TN?yY4+?A0^;}8&5P-1%7*1dDMQ8glzjD=W!tbo~VA` zt{TBpSE(5QU)`l`{rxJ!Xh95WZTW#M90bI@yqyy`-*GBgHX<$k?!D8MOOYLAX^PXY zUTGhd9jtY9kk%b~h8t4Fl1Dl?K_m^?Pk981yasDk{v+gYO=ItZX3o^?-W_JNWgt5wwCQcyVZV*h@IR z?QPpBN62v8os9HE^{2HpC9z`|%rzYvTBcn^@dj_k%{bSNlldUhVS^NA1OK$`owOBUK1?l~o~#yoaGoL=q3)+4Z%) zJcV0NdpRHUc|eu6&v`9-If5wM?Y?|fh3W$>=Rfe$yz}~cz(sZU1ywp{UuUH~fU&v3 z5|S*(MIKVLsV_Hnx7~c4>J!dinQ%=mjmq>HD!^{_s6g78o`aJrt98p6H0nxuPbkQ!Sb z{4R60lbYz>>Y+cRT4;+p=19iN7Da?RLyKr z$^0%N}xL_f6DJ?cUJZ^?ylUqlTxRu3pJHE zbtj{>CS%#riY#zpg3(4caVV63zif4-3yX>s@lUTH#>;E@?N__X3aVwb(rIgKy~c_S zhIPa^m6Dwl$Nm0JrlzXEte%WycQtA4f-Nnb5>KTf$gcDk!atUyH!1e(GR8A-abdkU zdHwn{8y|ELrH(Y+Ezq{So(IWCUSle{_XOHnh07VbRfoa&GQ*W)JOx(PGq}uhsG@3k zJf2MW5Md;9Sz-(H5juRjhcD8H+r}9G#LQqlku-)8b10j0uwKLq+;`j}G1BlY-*31wI{Re+`-_WhLA|5fKZTs 
zRgpCLwM06(1HBVX>z)B9{uu}|G*pPsKZ&R7FehjrY94RJMxkH#h_)BsySJxXM4I>K7(jj!m>7st%@~Cm^E~39BQ1x4{fnA7+IK% z15Ok}@~%QuBmf((&8~I;2_p%aUS;B0BY)s1TrDDA^dQd{_>nRgnQ69YEUS?&LLJCX zM)0a4YmT8lvVlmuooTcvgL;4w6&DkXv{3>SN0zS5tp|1B3{pu`Z zO0@}%LTrPUb$a&x=*^2`&pLZ~a%w$)dwlxFK7%vtw`1%5@zE=|;ha3Pemi>g>+z{) zot#;3j*p+8T4!&qpN*&SN$s6nR8v*3t9dj-Q>KynSQ6d;j+5lUK*5g-VyV zz;Y(IL&oUWGXA)L6@{CcSRxK-T1-C)PnQ!6qYty+c4)W=JF)bo#SZ|ELifkK0m z&v^T|j3=KDocGqAb!Ht{G*hObPB!P5au@fz^x@-3OCk>dXT8&Qg;FJ+764)i++*Z> z6s>U?!qRIgi4`0EGHT-JtAQyl;3j4Qzn_>*&TFa)I8k z;2Yd3*M(^?ZZjCg{L)4s=>kmak;im?4K;Ql&gD`jqZ=6p3?)Smuih^?iO79hyDV*v z*5X4qmv1c{V9{#&yU=oL$F^H_I*aR0w2WV~&@LEllj4(%xFwFX$=^MGL4lk^6e2CS zCjK>>&Ak5})yl#Ok#i#E7g6Fk3fD`hMyXsCf6bFO?|wbY>07CAFZIflwK3I2u%J{K z)TCC)fO?M@CW`D3uQ=8rT}MgZ+|^2ptK6tn-$bR$pQlV8Q6(X3rtyz z^(3&4$v=9E70}Z#iP_^&2DHIjGN&D;2tf^f5JoXx+ObtrY-Zuih+fofRCseA6~n}7=Z*|n7A?wF7~HIOkgyRD(GiOGXjCK6iqr2E zqRAmGI+Y}vZ((UWG<+sNhAUl-mX(O#cWpR*(lFj{<+O|Sox*c_cMxW_49oxc28j2X zL-4{T5bf=k#gi`l+49N_*rmbin8uZ_NF(sV&f38e+dO?vI$Q(Mui=&z&dVGYRFo`$ zSup^hc_+bhuM&~k6YxtY40w@{H}5h7O+-nYESp3Cn}DaGMnPQB&w!KlCk3on_+f3Ztg;Q|1Gi z#B}*3ovz$e2*>Nw)E^GZho}V{tWA^SWSd*-hD#K|{5D>=57#}!tGaiHjovvkBxH6` ziCqm0kk^5kVoS;;z(c1*=E!Xmfm~iBc$76LnyWC2oCv;La>W!i zEC&o3T)&k6qF(BkoSklTwHVU=z~;NxZjg+B3~BigkZ>aMl2x%k(;+ehKCQEEb8~Ps znNT_dAhR?)JzOm^9PcDd;D*vh;ZLK6Y(71DP5X>?wR=aTADW&RI)Os>ZGC_;U!e=w za&-@|-C_rb3(Cy`YF9gly zR(8)+*MJ6Wt_^^cunfoLEZC8T7^cIyd+g z>`fBlLBy@6$dWQ;7A-=@RkECt25qvL&u{fcSw|u|s8rR22};3l`j*|Yp1nFcJ$>@x z6)aR_9{*jYNP?Nem{L8|CjNb4o6-ZkiPtCdB*40eK*Hb6;tCziA6~zL;5< zbTPBaq{^KmCG%W%5#FqeWlfV}m|8!r@79?xUIGLHVLF9YTobY-z%VOx3mNEfuoU|< znyyDM?PJu-VW_T2u*8dMn!M-qV|aZA9Lo4(AWV=1!MH$#0!O6m>1o-A5NbFPX`^Yg z$ndazP0v86zB)mx0aGoXN8nw;3eOn6pB>Hz64o$#{3-u%IRX}mzj8JU{24AL%l^|2 z{D~N!jTZHj`q*ft*pK0`!F+W~k3QD?FZ03MRW9WLhN9*FF2^N#Bw5)f~>psvfij*f;!?(0_#42#PC=~=)aHw-x{@(3~ zO4JZP;OcHxUz=GH(8Hwi{PL@_5j^QLq_w-}U}FaY?%tmBJ{e7dgJ(w*VIEJ29<}=)J2Hx$Xmy_XvRBhMs7X%Y=QbyvIck9RJ(H z!+rkYO0=Qf{^66y{?h}e`^mXo~`3wvXMBAXICL{ 
z0aB&7lH$;KD)QstiIP|jtZz46;JoRw_>EZmCQai2HzfJ((3MsAUKAwbkTT>@(v1}y z4Bv=(MKaJAwrw!!)TcI0h=<)zyQ&nUI`nqXLj|-)#reB3-Mpu8X7dY-0J!STqf&Bd z>FbJrah2kl!rhLQ1!@9TIdWMooV%)8@IKcivG2J1G$MG=AUQpsT{pt z*)-MxG3pgl1|-&CuFI$q>z;KLtuP)YkjOQ&ty=r?Dn9r{uZx}S88CxJ4w7Mp-&@KG zsU!H)tCu}koyod=u|W%n;yC1sZ+aG7_(Rmp%_B-fw!O}*hZl}5bu=cJ{%Ii-98r#F zD@2A1VKoFy#mg6C!J{ox3wH2kg_Z);4!w(TWJev_RjAVHmg9vRo_ASf79H4^;jam2`ZoC5%3fMF> zl0j};a<3^(;;OQERl$VInQJIEW=*KovzoX&-Td>hcrkW!U6J!2sZ)wb{76XTXOl|i z=2Fzg4|Nu0>-Zd7$4^(Sg!)z-f&spvagsl1s1OVJN`A!`IMg&gG)&p!eXYJp^tp_a zBs!nT)vxD37Vf!|XgY-w; zH~?;qRB zNL+G}#lrwq;UZyS65FD(tOqv0j3pTW6%Q?OguQ>szkylFs9Yo*b02ra&y>PmU&8tB zI1X3)0qcDaf2{Q2hd=hMt`FCMuFK}F(fS91ryz~R;BOyRb=PlGHFvMuQFDtgN1_n^ z_2s3n4WJKURpWUAAIOHKC!C?e8YRF$W{l+hJ%cpOj z;NRvW9&Zmjm0ur1Stu6UQsr7{l5vHRTx1@j$(O#!P=+Ao08cwonDH<-VADOHaHziN zBT9Z_+fBdv9{;eN5XeYFFNz>V(cb~DU`+^H&M$(yAk41Ab>TL&T*u4wV#D*#1?ye6 zu@zUVkL^o&g8VJuv9F+LKsW>byLb_YY&E#p`? zCs$>#2xhklEU|c@<(VagWUAS3(S!s44IjTav_rk+N=C(L3CRN#cphHAL-~_~a)-lA zKH*4&#N@jBQeq&M&Wmtuoo>$CNJB7Dw$SUhl&fgJQlzKwu}U2&kSsl7)b_;=MM1(^ zMFIwqrd1&&N)tTB+FXeWVwTLF0!#16`$@Sgt6MBfx0T-k*e%^DXL zc|KSdt7ww3fCx~vn$SSV`yg5W!eAZk|FoAZ8+lKU#Bxw)DLf!30SDQP0t0vfAJBN3 zfX14laXLk~5q2dlOil$w6G~A^=XF+`TW6Pw%+;5zk(G_;iH$;ARw=@?pO zVi_tT#WKq3gIehZR3M4frxc_-JhkY8b|+L|Ha+D>v6MiDOYsk_ka+AGb_CXIE89b8 zEQM;rAp=28VTr2qwOCjzH?(Z7L}hK^a*+}$oU6%ZzRVPRUbNs0wWNaRlR}l17~v{{ z&4|_uHLST2Xp*JE!zA9UFu|)IlGRjOT7Cy$0anpAtgRb6S`IA@Ak40?_Krw!iV1g! 
zVc@uo!sJ!>;tj*7B*Caj5XOk%`1GtJ*z6*& zugKn;TK=YrS+;$-hHEQKKk!BcUu@$x^o43YMSV)`m1$4)49ZxS zazN9F#z~~e*wffztd32qkw%(2@=u$vvwQUM#S^0`a;}e1-9Adzx3iEpj@xJ$EaArS z5`BL#c3Zk2vTW4kVkpc8bz^?^X5#r`jX+jiFtr?c6urfGw`J@Y>dTRFn0lG*a@3i2qy$w!fnFLHXG9C`>g#IQ6Q~`O2Dv0yvdoL zh=*|CqrxRyK!f#oE(Y-eUSFoydn?y1OsiTzBu(RoTsf70909q@pWW9S5oHGpbK% zE7z<@Ua34&4qElZBwJFeXZ>?;Pni*qf8on7Up&De=pOOcNMF;liA`d1Tfo(OnuTQ0 zOAiFFd^8;uO+n8A(bB=o4VkDh^w%+)D8;bN7VEw}BgZb;D{T;qEnXIzY;9}vvUa2C zNG`XIWcA=J3xg*kGh70T7K_jfeB71b=2nmWa_6yMZXE{Wg2bf`O^L;r7X8bge}AAq zd2MV16F&*B4rbB$fxPYb%i9NO1Eme$e*5Gv{~Le&+yD3f(x-Fs_m22GeEi*Cx{n_} z{;tz~`sDEGUpn1yzx(#-U#!mWF#sEsJy_OXu9ESS!^0=vm6f}g98R`%68_T9|0aKQ zbN8ifA>C;_xh6ci@S}8YL;)4ZeHBH9{Dtkj8;#LO+ywDFwQN{Oe#geg!3NIs6+e9$ zE+(t!qxAtu0p6^Cxa0@d@cVyW%!6o#_R~Ldz1fT^4TgUbi+{!b&!&C4S@6~FUZkLU z|35u^{Ouv_|8Ji@?tBO3I)~lE?;iiT|9_7^vGIjSAV5FthIqz|Iy_n|<)bv_DLkF5 z<1OPnr4YD`9B$xL6u26; zhy=AKtO*E&)LW%*uWw__Vj~9!`6c-sn`>0e$3K#IA-_j)I=q{65(g*gB*0bbGh>;I zqvfrS9-wLabNbcb_w!)AoW(dJKzwKMDlPRYT7<#sG`wK9hiCY59xSC~oaJ^&=FL+u z($_s%MEM+Jt6S-dG8it2zv-_d6jipnIoIsEt`%nA3hy7V!uth(0cuKc=r{XV# zyC^zs*&`C_E#i1V&LnoKT;f*AJ;Wg&5uv-lXk?_c5#KS77%~ri8~(Kg0$3IUnF(hj zs_WweR?8@_af?$U`}Tpo&w@!ux|j$LW~%Agou>eQKiN#D(M==0>y3opQ@n&3k$|NZ zRa~GhV&OH8!(hedKt-+yvh8IxLPpafd>L9kpVOt+uC{q7)v z$G7LJXmSx|H2eML{Py6`f8rmur*V?B4?ErO&JWI`#lZ^RSnU;3Liiv#b34R%3r(Qf ziS@Um*RR;036XT{=6;H7QyZpc0o(m1j6Pg)5p;9kZdyND_I|Oe_}VbY`?e(#wzPit zzB)(mU;NA~(ABzNH!_it@WQDgQSvo{{qAc5O zq)xs9dsH1nHX5=iIw{F|Ngb5P+(UXU60EW-K8KJ>C{*Ag^V$hOxK^P+=2i zpz~5|k3*WEQA0Ks1L3t!!cepJY7%20Qx5;d5jt2v3rxmlzDQ)Ep(~8|vq}Q8sx}e@9==(FYul zWrNzR_2!KXe2ifX3@+=4nFxPFath-bPW2Q8Y)wN!x|B5d5UK)1lx-aA8>Ba4L^*8Q z4dHNOYBPnR=w(_DK~u=RzRLMpoC8=)1PJhiNZHkad~GtAqk`0CqoHa2M1-X&+F>LE zo31Rd5^Z_!$Z>{j`(zKb>?`ZT1*WsnK%0wud>>i3|a<2vE@J<#5?)n(h4z)Uc#u^`~2d zrrfG_U36&n+OqU}F#uwdi0oqAvhr8sLba;GBJiic-J|;w%nE;jxpEaRWf28Slx5?3 z4gr-bBB96&7<@Gr0r;{Ae1#(UQh+1ghq4FR)iH52I3BS!ZXN))P!9lIFcknT0f90u z5R?iCIA*peTSOyW<5Ck}0VLZku0A5AKz(U(m3y2rBfdELx1y!gg=L#cV-`^n*3dXz 
z3LaRC9vJ2%U(!&jq<6Bc-g--wWTeugOypD8V}!#R-Qcbh3U@3eP;yCclRV*kdA!N{ zwDi1HI1Sh1%ajfX3o^xRB%Nrn=o`ieD`}~H`yf3+P%G+>lPg0+Udeh$54z9f4C(z@n%`lV57Vpu8MK&*gjbvrFrkm ztgsUKSVxiwAfu=V?9`E`TYn-_g!uT6qt4R*z`s#vWDDsL@G2y^aPz){2B5^wcXFe; zbKB)D8({fKyecwqCfx3bKg%V5Lr#O^+SrRYMFbA=4Ott)O>G{)%0$USp+|#JXISVJ ziXKXOrbmaRP`d?O$$p~P108oXgH}a;#R{IivWu1w7vZ81`bfgkORrbA#ft%@(G@3x zB>(|PQ^+0Ki8teLx!wYX4b<{CBdthfa$lYJ1WOo>iSy4d=8F;E!1>O_ThX&+K1WW4 z!#M@*l3YzVEeBLCE(>>% zbGemMs7j*pt4w=YwIK_)8!ga#u4wKktF28j91*1)U%3wDUQwy;s5x;QlkP+!D-H`* zp|Hq87XQKOSZ)oqN)a3MQI01PkXO>EtKg}sOiZZd6*KuVM*R$)fWX8Eby$3D965OY z^|7E`&v?*RBPLlw%TfgJKw$v+^p?truCf#S_+fhFew}f82z6Q-Ael*q=Bb$t%AbnV z2U4Wg-|=$n+&`A?)iP*bTh<-)uc-*2TpcarUBSr4I(ZK>jk|2tH&*&_TSsb$rKvZ)5PWtFIA`Nh4Ivn{Kx zdN$u!ZVBE)McYc3-oxoKTjiR})*bYit%uNJwzlaoTVGFuSy!!TcvhesVpB&iE|-ti}9l7%Ha#^8Ohb~9-1H{X1-+i;Y<@`D5ceukyvpk1F2 z?dIgoyXbpcJ8FAdb#%S??8l_&ZSAJ!)e*~Scw6_?@3ww_?QVAR+(T8G0x8w*wrc8a zTYsut|5UmDsdD{O<@%?}^-q=Sjw)AO7LhpmmcRqYyFlILX7RCa!+y6tOz9i0`{}Hw zN>>3w*STf_D@|ISMOLD-K<-3UJ)|Jo)1m`R7lckO8(~EKpf|{q6Ry2R(R^{e;+(

!sF?se_b~}W)#fz8g4F>ynU1K zfw~Bh*!XB3HK&>Qw7zzuC z7WK;zyk_}vwoUR}PVt~}acAie)y`!yT%IA1AsEpjtkuf<&7Q^LNV#WWy5=OIP>Z>S zy(ge4h*bbnN||7Zj6UHA%9WBhku5GqTvW(CbJkCB$41y3U`orE>{};k`zYM33Us1& z)^2hvIH;nHsNM8^-!{{#TtAJ(h{R5mwz7IktRJkB*cR2J->Wr7zz~EhjuxrJcuQg! ztX25094^zc$njj2Jv58-BrAj^o7`NQ~s&J!wU^jK&{x zTV`m)tz|{2$&P^|DLrvXXiOq|eW?aXro8GkDtUo41XgJ&QZEXT51Ve4QH&fDAhdVY zWtov8L&ZWviV>p%vVysx8066+)1&Dsyj()Jx}9}YkbSEV?r{4M9xy6`GorPQ$-LYMFYZLd|Y7e>H?zQDA{FHazaChFq zY1~v6(W69OS#ve2g*15ChPyldEwZPRC(@51g@Rm(Yy%_Tw)v%Z$Y#L4YgMnzf1PoF z&W0Nd1hhSFv=A_p2}Q@EgRm{zszGTrPw6{NSc%tl>HH$bBA#(yiSi**W%C;e%1h9d zuaDf4yJgvMcQW8Lut@C=qS!wro&3q$+7OC{3%gBFHT0ArEB7|v!dHpd)#_8wd1Z{G&@`Om%#wjiY$6nFXuSVlxj{(RjqcIr{?+o_XV@8qYz;<4ENlzcU=x z0iCxNprJFgR%7T46DQTx1iTIax<}s0;G(W+MP0*;x~}Eb8`*NwRf*}74MS6IXff!u zza1K^Et+oQ?lW-*yNje!uK(u{h_ zLUBz?uh#7nHPzyX1*#pv@UOy&UH8DR#iZAp*RorlTP63AI?bGB(MO>a+sKGPsRe}< ztmMl`8V1KnJ*ap$d14#7UbLWV8uMdSayPv5kySKVozSJovG+9@nyb3WS$37|jfO|~ zWYp>ru+9eQm)Y^FLcnFV4c!W|QaYDIb*QCnk&X?dnoib}RY*x%JhT!JJe1gR{4%dv zmV8!Ua$K3NIdhN`uT)=CgrzvH73Tu6@GszqTAtsgHcYaHzh1$qf|?G;#sdbXwp|<; z=)(>WTKdF==hG- zwakzPpluDTuQZocbkPJS2{8?4viJB~nb9&`Hx&8u&zQg4;{VDp&%aOn@28Ku-KTo| z-|lyv&XYgm|Nf5gf7MvO92@pLc_O31{uBnAbu`_~PJvukMX1e%2X3VSX=lMXm7Eph z*JidM-s3;TvPhNbwlTWD>2?=Nvdop7I={r{(=4juu*&dl>*hgu~ww_aNJ+sh`CL zk6-j`Slik=qh$iUd484)wL?#(PmlgeIyhiBUduk$n-I3#q_@D=q7Qbq4yoAdw{Ol~ z;-n4i{~&Gs&*3@z`#M;`zemdzeZPf&|6{YDf6+?uXmbJoo`&cn_jZh`gE#RN{QEo{ zXX0G~|L*7v&yMD{YzW8ydB$%~-~RgM`N^9Xrw7>u&&r1ed7VZEd7Y2ukmW$tpJQ2z zVu8HYU;KRX>iD!65epF)@uM=FMj}2|W(WFOnt?X`%k_NLbj`pA(ynh61~dHP;2Rd2 z;kPs!7gZSQK+Kb0v5*u{c?08gB&X2=_1UY0W1x7qVX&f1zUU<}3fA+~`=Pu&qcT}N z2T=zs+z)Uq9$+Kr^eHmo!a0af)MJZE7_%X=_sGw03TAEG**DU`gt|SDrH9+%%~8d| z?5InC<++HM3%|0ppgqqPMY`HbS4OTk%4CV`*o;?hDajliBau?IuU~i^4lpLFdR7w~ z^-=E|uJWrgTTmv*J}wQ%zC2VU=pREkL!qbROLnDlG&sPVQ4VOb_Rtl_$OWxvRxz#c zWJ0-Ffx{&G51MRQxoP48FJfTfR`KTIlD#)9j$cQ4-6Q%3*Ou&n1zgd>l2B2@kuJ1G ze&GA^%YL_^n@?$d?cHUmJ@A~}W8Sv)c!#-F5(2^q{E)@Mp;2&MGo2^^lPZ6=Q49;l 
zrzP3=+Jy)_1ko@ZbYLp_Tqh0dBwF<`ozI~Cq&IXu<;YVvn_lv-z4p_gyWjOnAa%RF z_V+`#^!3wTyZd=UnAU}?!E;vSF zvkTGr??`-8d#Y@te>4tsy6qj-L7#hlW*o<*nCc5#l+$#^k&xD}x|0pI#_P^h8E{X0b#X^qGFD zk?=XTOaz=SjA68}LNeKfVFZ5g)WrW%rR(q<7TAi^YpkC0gLY+0{XO|g)e=Sm^%DS$`u+a+)l2+*ee$30>G}I-XDD0z<;|IGVfmM5NAMdy zj&1(=n>{q|{kLz9N6+5A`t|i2CHuhidg`bS0VuN(2)o{OG+AFJ{lRy`y>8bdd%4u~ zh9mzx)q3ADHbG`_<&7lavcGEnJ=tq|$;W7kfk66$j@R`bdrwT-fmSxgxZHf0E}vX+ zP#=bqqy8{l?&?jyc|D|1-W|O^dVRX_qlc(Yg2(jN5fetg61j< zTxlX3Ed*$jwoaM9iZ4heO7O;4WqALuqn~gwJ6PtR+v#+C_dxlv-}SfPi2!U5|251V z8UsUHz5pxeCVOQ}fORHypzt`q*^<;S2joe9h}fv1!9k-K+XXpl_EtgTqgi~7 z#z?SoqKO*L+_%my;Wm?m;XFYB>{YP3Es7o{H(XK8wU7=!QEe2s!XEuM%lR*uN}v{- z*~~EzKr}KQ=4ZolUDsrNBJIsY^M;s0Igzw2i^9PxE0smKLda=vXu-%TfJ$!&mpNED zh!*wjak)KRal2rV!u36MU~ecsz9X$PO#_(5=<7*085F z?B$yEa?R42lxwt#7%4@O(mg5P;sQx>$W;+^z1*XYe59jlc=%X8M54#v&c|)K(Dt?N#UFjg$ zcf|ceyA!x&*P~WyP%#fa!ft?5d3h@j@M#pzCJDv=qrd@T7oB6=_+VmXNlnggt!J-} zPERfEZihyh@gmIp549fd53--RLtm??&anO>on3p06Ki|v=

Tg$Jzy)FuJ?$_$) z%2r8|P|2+A+@L%a${SVWb`RR<)wWiDd*D_-RW^ugy1GH_M}QHoN0h7W`;Ol(0gBh< z0?>o*UpxLD{9Zi4R-z4z{O~ezcV@<+~RTw6|uH{-z z*hve8eu~Ub$43$S70KoOMt8%bi+E7;NwjdpAl-N3u5zHqaiV}$HE%GO(Zxa#87r8? zOB5wZQc}wuAn<@9iNIyHxf5={Ahfz}emVwBWz>_j5(PiYTo+iEX~JWWt+1i0sgX}h z>uf6?O_kBxsSjzbT{@AiObsphxMC}`Fef!)i0hV@U9wrw%1gbk$rX#O8(kJT*aY{E0&hjIj!1@bk$myb-~4)dJc7QZX0lm7b0IGbCC)KRDblqo_TfzfxkPYx zOx6+Vd)UvHyn!kGT|RkTL_v2bf9E(tqkW{BXjuh>^E}Kz22klX!P0^g#CUMdQZ}%q>=ab-HvvF z%)R}^_DWQmY%Uq%u;L7m=C(}x=~%hmuE=ZY&5nUuXVgLv$zZwvX;r#is)3ws_s<9V(4aldGHl+ zubz?_KsJ(w(s9Mis=b~X#1(o?!4uHpa8-PtY0M@Mu2vr>2 zRXL0+6-LFiu;ySK4%>Gcrb&%)D#ETef|EK!SOmVQIWlzyw+P)l?q!U9CKNksltjwN z71Up99$^~m($K6!Qx*hC!HC*65Th?Z|3|BkC-BiP6_L9$}t$_O` z_Awvuy~YUs8Cv=Op!m-i06##$zpyQsJxB=X%J|RS$KO49T!{btwA1}F{`2qg*ZSsQ zldLFNTe!HgmbdH6c=5Q=vf6uX;Mp-sK+oE&r|rMulSZ@IY&-+5WzCUR&o-g;Ccd() z?|@J4J?-{7k1fpp)@c0CU;~g=J?mw-n5?3Y)`xh*x1e{M)iTB~5^$JYMUyZQGJ+Kp zh!#Ruaj{B0;e+)&PL{}-p`v>dq2eKvUo`N7!Pa&2Xq>uPk-(wR_%#VG1PvplsIs!H zEM~QQl$DdaLKc$0*@->}PexNnV`7SckbLU3tTBxj+<4U1Ip8IA5VMw9BJ4Dku~1gT zw*xLs5Bq9;;qafm<}`ZvEeY3&c&)Up_dqmHg~sp{1J>##QV1tmJnU%AO72t>gzZ`% zLW)C$LZFF|os;*xZym|}N%&BJyN+hF)b2vL6Q))daFvpxZ7B`0r&#fg=+L5?RYW>GD zO$$}Iz8yg!%z7t-oTBuw|2+`AZ{^_9)Uz!q#v2d=t5z?@VNq(S6M?Z|Ioj8jsipuD zGKQ%{O|+)O#cs)1fH;F54;(_5yN6)&7*aTB7-$v!tSPP_R#moO(b^5Ei3|(72-nY^ z|NMkv(2t)q3~}j8g3RB>8?w6v_I3@p(h^ld&o6>;JPRfP#h)Kfs>0Xb&}9yv-K>h? 
z=_zil_@3ORkHgh!kp{EB4k#l<`f?UuTo5H_v^WUanzFEp$Ntaq8BDEwb`Cef@w0d_ zjV>In)3+ZvK{BQU7fGGm7>^uEx>K_(MfTpM!aPinthxz0S~KlFkK>Pe3}2YqRF5X8 zkJv1NM!}|`HHF4B)_gNtM@x|cDe^GDAYTmH`WWAqEly=|y%TguY2kgR3-{lZclijp zc$UCUg&72J7nl>B&5$?(3#nqUZRjFuVLe_xoQZD{^UPicxQdW7AQNNQ9OFym7#2Rl zrf1Z^H9!q89wN{M3Ulx0Be-X1a!y=*G`aH6(BzwV{WCUrj8Gif?Edkax5sbJJj!?l z$3WqY>j6f&SYHvg1*6sODnn!pJiHQ9gZba^78bxMmKoaM+Q*>^EVktcEGkb&QYvqp z08l>{YYd5gfWf@Am2gc=RRV0-iieLBKgg+Bu%1JB6ECFY4{jYU56eBoQ#%*@R^12L zk7DextXz5cF%4MmNuT5r=_vZ}C=q$~$muyf&;k7Q3N2j_eL>x!S{t+G&ZSV0qhHUQ zy{;^GKx`KlzcVKF!<~xQ7SJt;9CnWJYC@iry}f^8K>>aM--lna z1te2NVP7*$Q$&T?c&n_(Onlov!j7IJ@k=n$0Woqo+8Fp%W|+ za*6?zLQuiV*}DXi8BR(ju$7M$P(G6_gI`_Ml(tHvH`bfmRuKx*LJWy2J6z~iu@b7Z zfX2f~hjYISCnIs*QXZdh%pB`xA>I}NKtE-}$^?~kQfAYFV#NYnIaZj}Voh#Je=T0| zOlGe#5mdfpIzA08guNi1ybQ$;WuLZ)k@f@`Bco}V1vZr#QXP-4o|v2L8Z(DbmCSH( zvqUa5JHV)puWVQV4fCIoa>_R8TzMZkGW|#IDh*68Yl}RiZrN!dz7@zR9VFF!fjDcj zt;evO9G-6dnksItJ_WSsY5eKT^YEWM7nE{`cv)3K6jO7-m@}7N;Iu^9N>u;acWm63TDV+-df^Z<2LefMH!sp4Imu%(t{P1d&GPJ3574&t&|=B1uc5& zuE)M>Xyi;>F|j4&i)TQ3lTVv4{2Wevb#OG>ddFer;hVm~O1NlT;pFGuRipRSUnJUe zr&(JKVUfjhpwE0eJ2ZjO&+-t=?z$3x(K7N+MxWkNmTJ23j13)RAchrjfkhv0RzBT=#AsaqBf&qI2?$_Gi)gDp=hPrj$pM zCn0+zNzgwts6BQhQv51<_D1p)f6_vSj7H+XqSa!?!ZjL=+C-!LMr01A97)30QB$L?){#1r{G)C0 z_~ZhsI|9TH`O7EUmf+?wq+yWoMeY5pQP+Buhtw!xbW1NR9>oG<}Jb7PH)YZ6OnK%QGEZ1ku8D z2-rB`w#=VmUZ;VHMsEg*@9$6#$p?}mH%c_=-^(z*jC&(#6P z3x|*W3I+`KMkY2VgUsxr-n94O$9~Pj$8KV6KE!NJ9xhL?rf*lALFvwkbfeb%odk{C zY7_yKw-)fy0j2?D=YYhmF;8-=6?&(FFfI5A>LKPBELchqX>n3AG|BVJHw8Y1Vq5_M z$OIR8R496hV}3>9Qp#$%2Zu%LxOs3>)fVO9bjshAwbnl5=1hy3GlrzMS+x1%9*gMR z*_$6EdTz;{Qt1!V+&q(!mM8+K%-yhcr7}lYoPOfCwa(=h4*IFNas?K9oG%^RWToAb zsA%Z{lNU|41mRs;Y3y{vlyL3+zSV8yGrN=}_tCi8cUV0w?OUhedad%f?QYJ~tF;Tl z)^xK%qw&mR|-Sbvf|mqx|oH*TzP80cy)aAx_6C92k{TOwk;1J)!_G6kNs{S67aoF zHmm7qZ$z(0^u}L|d;DB6HqP2SynmakI9{9D#OJ0qe|mr4Hh+3wY!9Fs+B*LEzHJ@< zTx<)VW^LVX_EN})*yv&DL>`(a@_y4;)_8tK4@+kBeiLeLU7y>-;@sYEX3dT2lYCg5 z)>M(d~Q#Hwwmd+_u7qi1I)Z{M79Q&*2^H8qu1Uc5Rw?S1ad 
zo3d;5TfwRaLOVYW7V+|S#5VaOOo~pU2!~tO(Awqi(wZ$t@H&BOc&%wGjv&?r;OfL2 zzdt&8bJjacn!EJ?>b0Tb@%xk42j{n=t6()+j1Q9atUG#)uAH;m5o=uN*`9oeUBd@W z6AE!O``2S}*xY$GOR+dKU~!no;?RJ_c>GXUtjFU#67WdF0e%}7+H`DNcU@@HvA)hS zsMpa9*6-b*UM~?}_)sCge*65%lb?FytMFo1e7Wku^a{aFuQb~H?D+lhDNICtGMR#< zs{HTh^lxvvlklS5eK|S{VffoepEf}o8a_E49Z#MdjZQb`!1?VV2IJWzScSoevs8C4 z)qVQ>^!10{c~k!0Wno~|gP)Jyj_}Lq`8#Y%fKxh5E%?v6Kq8tC)&heowsN=%rw`Z) zfh)AKeAv-i7F!4&wuQhjQZF8MK3?ea(R^|8`t@n=)zQf^x(R2^oz5_gTL3WpFbaodYGlYoqu}# z<~j7m&r^^`1(H%YYJ$ntdUW$p#|=Xpeaokd__-&4v5hky|q& zpB{8XKItR!Ngt6i3~nD3gIgVg+wB-^9&`*g`WS5LjKStX$6%w6!KTg_Ts`O*T>a`f!85&{7omr?x9cf=eZ3E`Ni%5h_wfqf!KK9~8k` z9l_f>At>=nmJfEJSf+Tv*DXuudfD8KX*IWbEnAV7hih#xuGQnR`TF$3@w?t~#r#VB z?ovJecXZYD4@YEkyc~7E|L%KYA`Tv3t`1hg^@xGNy*WG}j+qNa>}b$TQTejn=$KXL z!mf0)h(2x5bZHu{FuWfb0FXJq5JOt$Az6(jm|*)V6VS+IV@UzJ8?y@{8h}wb-WdreYJ%QVd?td;}^${=Mn1=D1H`Z8K6kU z){+hDCRc2CVk?#g(PAXls5GrtmN0Z|y3q8sqO4J6D~tqnEq+n9FT&=1VAO$J?N1l` zO*YM9%P|rAPPUa(gNSA_#W{8(m`s58tnLb#t&zGQ>$6ry!A@9wGj;|OJ>6QSsQV?~ z49!iUZ*$5M8JdzdNU0}=v6Z~y$Y>3un)jr8Js0@2q{H_#4{Au2?=(RB z*X1f6hm#G5ha)nAYdGD}sgIT7mnYgz=U;Wi8wt3=9ui%kMleXw#e}H}XW+%(lQ3E5 z={BlAF>g#(-o?e4tXKNjWbe@`A^SBnLc*5H;ow^d1-}n-G%2^dWuJ8Yoq~|j#z9AT zSTJ3oAXN*E5D#Qar7MJD*B3I z$z`w%UCL^9NEzD@e?hAVBiHR&t@U->PHi>d0Hov4s1?tKM<%*}C&dw_sVW^`F_9T8 z#ZF6(CA@FmS*G6993&!W2p2LVrKaIa*IW@txdbvK2MA|qin^Q#rL?^-TG_YJV~Kup zU3fqy@Akf7Rc7NRG1SiSQp+FxjQ9qqaMrwXPzYyc@|Utb4fH@%81CD?_4kGKHVYr^ z!(X04ns%9>8PrQT$ZlEJzUgOUTkH>jgs!3yuNWi1N*hBz9bjYjK(1c*AU2;L45UE4 zvmGvJc-9Yr(w7R8pCWWR`ckS}(=19&?@)O_)btM49kt{9CDM zB{? 
z9^Kk_*4fO&IjFPIFtISo>0UTa#04~4_r^2MlPgKAfBtaIJ12F>-U_Bu*uf;Z5O;v< z;FgnqF!LW+hZA`NC^&pFTu#(UY6;*Q$y5VASuBMQP;F!nOK76;zOenkr==s?$B<7# z-E143Xwsbs1<4@AHlNcoyd}^wus;CiKf(VmFNgf>Ub2#u!K3{p(o%2#!5tc zsX$wrFPm|NMS;2Lt#rE0v$x2~AtTt+Y?kb@T$4_m_{(5@=|>4l;G7hU@Mdr<-e(vN z*4?%o(wn=0F|;J(mOk=!X58-1z!e#f6t8|6#@hiAbD{uB>)s)AWe$nLtO?P~b3qiV z5}0m-q@oNUF)Tv@cao37$tYW)lJ*RhuZ5g~C7CvB_bW@}li$`K>>*oRbBTL35ihni zfhF$Igg&M?O-B?%6o!We*db{`-`Roz3mq_N@|Mv^3%dCk@zCnO?_?qo+r9s$DE4^g z?O`HCr9gsfvLYmDJJ8!+$M1ail&CFy?LI}?3ttWokr2a|$DJ<~u&}Lu{V9}0xD4J- zr(=o$8uFJNTw-GL$H1N(1#x^d9$#nHFj{K-j$PWRP(ZEENnk4 zzieQHKX>MKm8)O6{ji|idi*`z8&1ul2N*{jmRGcNfI@XW5@$(PQQMUc==)G%z^_cY1IdoI4 zG==PoVXd_>7aF?7&cpR(46N_7S+Has=gWAtI1g7B;mWhR-~aVH*X;Vm5f#&%lRC!K z<2TSJ3>=R@-@z?m9WN|Y8GrBkW{_{DtTuSOc>f1#G5sR9h}lsS(FdtR_O6%bOB*XkftV_ zZ~#QpZDzUvfJB%4Rk)l5d{@O+healr`XVWV_))^)AcV(e;5aALBitz5 zwcgfSDwW%}N8DW5Z5396dSQj1z`u}{uf1P&p)%fCGhoGQ>r>0!Ut{2<##>^L7y|EpyxMhu%k6o=RZ*c%hFTF+dP1jza$=RhhwFYo^ z5|QlI*K(1GzKR8o4kL*Ki%BT5SVZe-tE8!d-T%IHpmeC|;FtypazV;jPLbDS$Pi3x z8gXzU3eAel25}le^072=5FJ*GW^v(foyTo=d^!osplj=r%T&ajxJCZt#QKQ7>upKKG@~E>eO7xmJ z1A24JA7J~&kQY{Z$6vAD1feC3%wb<&jxXzU2Ba0!A-^0%jhd$4zIp@a7$&xipm%nK zCY?ndA#kDRI6)N)Mk=pV?brf2U6Y={tcyf&O&V4xU>Y^mOgP$2K&odPs&&Bz&&Pf_ zZlLNBsIz6Sa!Q{)fNQM&`6~Ri6aEDI6h9Yrs>peyNz9EU{h?76?J2`p2BSvRw~r}p zple5ba1@R^jn(YA`i&KS>YnI`$=y02+V&@@Fr{UAs_nv#(zyowodoiRD118EyI^vv zlfIv933NIQ`Xe!xSUvaTSph?t9w*ysGSR8{g+3hi-JT2%6|`hKQk!ma2y=KjSXY|Htj&XVUn4pA= zt-REqY(k73XGA*AzrJ|$`X%gkIq zFN;{$PWY6!-bmn7fT$KS|1IT?P6Guxns-dQQ!P~nr1TSsl!F!`rKa)tlg=)puM;qbnWM zQ%tCtHC^2|jTy~g6>B$O#3 z1)wqE&f5~`<66+?J!dF_&H)rb4QoLWfrn1uuCUa1VFI9CCqTjjBM&7nwUGx2`5M>6 z_p~xziNyM~)-$0k(-Hqt-eB2@UFa{B3veDZ`o`FStC!#~b{l%Lz+a0v?%Nt*1`at+ zAsDh1y(6R-dm&`KgA}9$1VSJL+nk&T&|}9qr$84$oIuAM z!*_38W955^2<^Ncngjn_P-v-UiikYGY;^i^k_~VX+|YfPj0t1$%r{H0E$?N_Jx9{Z zU>Ra6Yww+}1A`3qgu>vr?j~u!z7gRrBg_?C?s}1tXtdfNRJ1CzIfs0C9_DsfC)o#$ zPMox18(o90yGsN;0dEhyTuGkEijKnw8<=dCL1KS9kMHhj5qf7a^lyEFcSx3t@s=3Th` 
zu~YL>Xr(bP&5dP5fe1XcjUbdAW3h=ZM!{IIcR-?J@MNC)$-VC{^S3s{X1|`Gs$}r| zhKeD%vh5Uot50OR>D!+t>1O&r9T`hVdCFyWD4Y5G5%xmfcVN51^Oim$XW~C78cLpV zlS*;m36Ih~SyQ;^Jog{$1~)Ks9np9%{@i=u3&sa8>61BIMe7e38vdG@UWnM?z5-F* zFmjJQcX)b?mEigK_#}Hi5uVSo=ab%9`h0#mWY03_V@KMzo0+%L>|6ihG;M2e+Do5@ z$HMbj_I#Z7r{|uv>D%glMI(o2ee1atzEONU9;DAig3g}%!t;5Cx53GoFqV@t$n;s! zH*c=rw|>^Q-moV;3+)Q{?1}xkIB(v!^&*;;=JaCl>J)1a2!hyS-yNLeBR&sLvghML z_I%RIo==DTnb2NZ_n6=|1<4vx^euQd(8e1>5PcpDvu8S~^tpE`Jokm?L1VnVyYK1s z?fC41knR~iUmOp~^Ep1B4EyQx+37KPzMx1sV~V)?%HK9n#do8-LvKxe0#@y@d$hOvC!6;@GQO`3eN*!eserr zptq{Nz3a^bi5zmA2@S`59@!&%=4%$X!>RE8RFF5a=M(lvqrOdjN#D}i!sF0AN!Qx3 zd!9a@y8ZO|+&#_Krm!F{^6ON$Zy-+Jm>N9fqUUD)Imw=nh4(|@eP4JM>o({ci1Au| zI}!Pn(AGfFw=?1WNgKR%jn{WXDo;Rxvc!w1edRLMFX6 zO~3BHhse+gyIo3yFx|--i8m{LG4;nNSg`A{H|hOYxE`CoJe*#!&D0{Y8@#exB<3{D zh?Ak4{)-3^9*^DLNk*m@!ZZvoGBQ0$mvq1HrXoHslLMGO%aQG`*Aa$Fr8j&BfU_Te zKEJr+_~kcereuJdFbP&+K!a5JN2ju(V!9MlXQ)HMnFfB!0LTJ8?PY%wxwYRHzg}FN zh<~2;gu*G*CSy10;R=;|wXE~)1^LDPHj20IUR7`1y(-_jyE!m)6)p_jW=;&^HJ%@ zpabI(t?^GfmmTT4!l1VC0@E@~{TWy^W})vbeca=2Hn@{dH;EDYi5VLUZ#_5A8WJS9 zs}sqhyn#cre|_-_23R22&;{qd zCYRxm$g{c)ysfmSu|1CW5wc)>EkbL_h$`+==+98GV~wY&06_>&-Yub-7@+Af=yL-6fbiJAv9TZK^~hk{{sVxEU& zwjvnzSF>ofwPplhJYi`$V#0nTwILD8i0YM1+4npQcQTm=vt%-XsQc_X9qt&`+n=wQ z;(V-VCtUjXq(&7$S;f$sl8F$~tQ?w1-qHHbgXAt@6waLka?rg!un*0n@5N7~MgGZt zOyr@_acAryl+PV|G&RWqhEhLZ?=W}6yL%d+9^`2oT-wMh8hD+4Vf>xR()1lhrne!? zi8whldu&WrYXWPmn2Qba-a13b#g8}&C$4i%nD3k$C+izT z@4>hK{q5bib3zUWM?xt`<|`|MX@NLTO3CF=)1tH&GDzBy2lvlGk8Q!g* zq_oEyAY=@8W>ww4?v2U+W$jGB8L)X<-x(@Y<%tTiV8$buf*SQ*g09iHjFfIeL0p4pM1)|3P7V}El`L_pa2>z%jl$3R&L&rRL+gCo*yqg zObssWW+`zsgc4%&aG9H8Cktq2W7JkDnWj=+7bFCcy<$R&{j%3xxlFUv>eX!FjqlwW zF9<4)*Wj)uS?k4>P8!WG_3kQ6+>%h#m;rP@IR|*0b_%N))8#$NQohv#t+$}Xi7@5? 
zlkscoT1(jrDP(xhyE;yQ5YuFklF}IS)T@?BV^=Z&Qe?h5O9r`A%q>Q zeZz_g+QKkMeA~EP2V^@Uf8~0S&k^;F@e=w36r1+yei6(TMi7G(CE3BCP);aFXii-r z_=OB#K;8sjPC9o^hHJt>B#q>!{KiN_HqZt$#x#cIFF89NkYE0K6)~+TE^i#80hSg# zdmNO7<~uKbM+YOeR+L$3e<*oC4VdsW_i9ARaO%&aYOzp{ z{eLm?PHt>sYNKvWt5%uLC{NqP6h^YnWyy?s$c2z8o{2JXR%aG>Js9t#arAJeTk+ahyEjwXxnc9nsbn1xo>eVl=|MGmgoshwi zF+7ixus<2X$spX)Kg_DLm%H`7H93IsD%Go!dGOq=Ad_sG0-)yDc110WpYOS?r8Gs8 z6hEFwbz9wligVNuzpODJ6Y+E-7fc?JVh19{4tAngPIQCMTUYRv=Ey|r(OfO3W(AsS zce^67q^j2_oa?Dx(}rt?KkS{jbwwGMq{RoW&>d|}@LsjQyI{3|-8bIdbnZ39FfZOU zUPC&Qcys4xIZontC^u{|H2HGAA?LskL9#XCnMa6==5U!tI4qI z)j}05Z_x568uNTh@O2$+ZWl|R&`9VnZwZyuYtJChRe^EtNq@GdBUJCvPEeUP!H*L{ zM}pPu^jHe|4}>84xtt_6rnAZ&#-e+F4GEPJTzb8JUKzvN`c5b)*TR@&b|IBme##-$ z5&ae4d12@peSho1h|52ESyXtXY~_O`)DlA5y2npkvkA{b@EcQmLxMojKC;j&e`3dl{j)Fj)3`>kO+Nb1(M3lbACy7#>DD5^~b-L zgO|_a=q4e1uKyf`sOOx@pOgKC_JzIR?eV`S$Os%7%7N~$4*%vr2o~1YY>C`#mMGrE z9Mm(yR>W`+nF4_`gqOMH*x7n_A%BMUIu#T0_c&Ve=O9Y)yO}bV;k52H`AEAN z&w|y~B@~{d?cd>F9r~R}Kd?!YDRE~}7_HM%Z-ON`^}h9Q;fj?%{{cSUd8^J07%9vd z>gJ|ovmi2-C3WAjdM<`E!hd?M?I;QjPVhVY1HYB!pqP8sqnAFFrOr<9+93W?6zXHQ zNC5|a$qQv~&D*u_FRMlgH%M@lJ-1gk%99?w{QdAQlH%TS{+?I7D_cw}LF#R8rhaOW6M)JOl{-L*U=f2n7?mRnWF&<_emQkrF$I z1A;T>T{Pc>kX+N78^3I(TL?ytOv0E^7Q%T$y7;YV`gebpF!xi47e67z0wR*2_p}2n zZsI?z=tDG(9m;!vg^VBztF!iR{A9M6tbI5y#v{BKkiL!SSu-BN1s4Z-7@R*ar6iuH z9P=K34al$AjQomM@aKIHLLLUH3vfU$;h>1*FpG?X2R3Ic_{|<192m2;Pc{O=otBgl z!U6Y#pD{%|RCr4-+{Sp|;LMhc&|e3Db|G5S>mPsq^(yZO$%=n^k8?M$455 z>I#*}8-M+Qq9=ciB17>ddq$Q`PyGR0O+^*-0&c5D)J+mtpa`Of|4U;YiTTQ=n;@K1 z=gS~Yc$au-!@Ko@<$t6QfBN;RgbJEnl_%0&`^jd#%n*;rrnE(m^x|{?XBY5HCN6#M z2$6<-s^Yj^1F?2;eyACS2qk5I5SaOL>3ogSsgo3dxf!L?lrKPPOu)N1W({G}Ru1@~ z`TRfN$AvA^6x@xwVzTFb-hsIIt`K#+l_P37@rD=L8-dAUIeq5w&}4mkB;>?g~7 zd-db%mm>bL)f;dtj;Z}n_T`##S3I5xH==SS?xS@$2i7~5Uz?KGBC7)_rxJ9-Lf7WY zD=Z4Ux4tk%Nm@R`rDAv#865faww>^z4m(-(T-E&aI~q>)Du4FP%}wwyGRY2QI<29U z_NLOx5Q*+J*$o58+w7z)GB$ANJo7a5&0(p^17LFr2&~bK0EOeIBN~30UQU5b>BSHk 
z;iC4AGY-k5z-MtpG*@M8upJvY1d0p+x_Ly!aaOLHYms!IFzn4(~jiV?%Pl!9%SW(wtz9*1=mdadA)9L3>5BHYJbFUYNxm(I3sO3j-hT zWd=U!ngZ<#2waGpYjZ)+PIzGyzfb z4#<|NW|O{$zD~WkAZOBVBLlR_7XBfgI_M>mC}!S9b3#suvG6{CH6Ak$1n5Py^%Dm) zi+|t53A+Ax#(Q!QhHnC}J8%6iclHGjjpEr?Hdm74FZPt1;m!XrJN0Ub{N&dYNJ{~1 zvYI8zF!SV_)p)@vbtQ}vbVidqnP5-m=;885sAJUu%GPg@I0Ew8VFd0kH+P`;@U2=X zMV%qpqpnS{i}J_v5^5}^12%IDr_KIMe$Ur_DCHuk%!IENDc(%_6ZU^c2A4@&f#H?nHZT5qzZrClSNiy6=ny}LP+qhxC%zCn8p$nO(V+>unpZ3rZjhr*UV1ldw3xnw=CK-c%OxGrOvM-!?Jc= zWaKmo2Z(uMd~Y1laP~dh_Y6v0)V^k@=CTv9n939=e`0W}As(64z7e;?+ z;YXX02brXJbI1O+4^1O;&w&uc6v6)7v|H+i=8(9s@jB-#T-Y?3%Wk%)Q=?qo{fG*U zC@wA|t_MA^a|D6<4Tk}!u!NM=lP;K9*3rF`W~}CzaX~|hv6)!%sMMM$m%j+5;CCBm z4au-4p%{j&`8ryy2(AtsR;!w{ss>&el1d+a&?8IH7qvlC26Dm;UO9_j`qt9ulWoXO zaaTVXSFhe)U1m`?2^HN%A22H-yCQGK;11!1aRkQ`b_T(~-8HL3-untgQ{B)ToI%Vg zTb(GkgD3Z%p_&DVOTz5ct~vX+2e zZPQz0f|C@5N-Y9RLIB4k(oEV_^i4wgWll)ZJvo3({Wbn6Mx+FWfCYh4zzqTmP_Kxz zR_NR$SRJNZ8f6d8wGQmMlEErxMUEOAoWN|aQ1g)Xf1k5g7DD9tJseGa%qh!LJ zZhz!c*=;|W-JPbhD5aOlo)bMG-w!IH7wEXWSp&t8{vGvM1^k_DmW39fz5q@*jb&EG zB1#@nB5%({t!5FDCp}uoylrHnFYZH%qj)_Vr@=xB3ooc{y zgqAhV(!r!%xolwAEu!i$10+|t>rZbEmK%UQyA_G$o$Mk|NZkbMIAOB{lhlr+E0}EF z6((QYYBmZy?`IrWQJZ^|iWUVQZlNsOw180}f@tM0OJ4lx zy?ZJLAM{Qj)T_y(umE1*PXdv;`qOLS^@svGd|X6PUEOYolCPFs5Je!0h^oa?w&6T3 zwnQhR-EdarQdPhl*)uCJmjaSfRyU34$a@4@_d9wsehzL&%UQb3aIWPTag+uMDcqun z(^7B)-vG55%3dq(QCcAo-cA+3hYjPdby~w$aRQ1A& zB10pmH%d-on_k-xC!gIBs zZZq7bu>OVc`Q*EHw{R!2i|N|4n3n}5s*@gB=tb<69ou! 
zh?5nI#!s*ZK%q?QFS%t$y|pnFF+LG6GW&6z$6~9KxQ5Rn|H*kR3Ija232qP4@$s;) z%G|$LYVF^w{dh5fa9LUGHVdX;#MZ$igt)m>=U_T50^(#d-l}$nInl4Jm_GJP7=Pvs zurnf3U63~ogC)Jb;KX6H@*V0Q4_Z~ndkEru^tzwPILUf}I@FmS>!5dNT1||V)&&p{ ztSe01+nFaQA0NO!B^Hx!K~Z#Hc?g3$OUTH*G&GX^Iui$GvDEc(G=wH?EJ zcXVWmsx>H7P=2K`Ktpee;X}jhq)`a@c^}Mg{VX!*A2vxM@(UI(G%LT_t`K(sG*Op> z0(U~QrkRCOdzsy6WzIAnILmO0NqQ40yX_xK;bdb9ZgFm0@~~O^7WpR*KKdi8f9zZc z6W~3t=wvXT!ZoUCvgGflIR3a(JR!wd(^PC##_NKw_1VM2dd zfBUp1y39%4_h*2x!@h%k9O3Vxm*R_AvdIF5;rwQI7(7UHod#UQo_>?BFCPMcLFoiB z)G0K%ig5WFEV2+5rZ4eAmI}o#i=Zfm#;X8`k$B0Z_+cTijtG)|W*(IIJXEje73Ii8GA|>EDeoyH@DmB;=`g;;aD@6u58}@d_fb^xVOz zbI+f9=YqJ*2#?Vnj$!Jp%}zDR!CUK^fEt`Q;=RU3c_@ z8mIEQPH%y|Ha{ub%-IeC8O>C})HxA}uA0XNZGmG?B(L@yJGCa&{hbevW)!kaIfK zn}ZsdxFr}C2owY_G~ONOw6TqTzf~>C_ly*c=^3RZCXt071NS~gbe{ZsU{dy-rme{Z zkIj8XZBjl~Y-t)A@1Yd`#~lB?MmuO%a8ims`0H?sSy4sd+*<~B>=JSo4m)#PBK2wT z8Xo*w&|lmyqL6Aa05)QqK0pBM3KXQL#TF#k>-CWe;h_~J)<|&B_RL#;@M4#ocF1fk zm;NlV%vCUZH$i^|!nly_Mav1iv}ILiM1-Asvv=f)1GtM&leHxO+-%E`;F!Ju?1{V( zqO!B|ix4#^mIwV#@jOM}45s^OgyX?E@=6C&vkG(u$EmC;JyBW$JaUHpnY0;18yYPM z+LhLZq0T~rHGj^$JB6;=l4UbR=aGhE8SXfiA4!`H$02r8Iu1TiRDLy^l-^z@g*M!5 zR%o)xs%dF7oZ*hc;q%gDD7lVG<&3F#jjYkk^as1o3~#r=#IP#rd1*M7;f`a$X=yeN zv774ULbWB0rlfbg`;@R|8_Wo;p`MUt0~zi(5S))jgAh7tmt%Y_Q@I_KqUj&SV#Q-X z?(c$G3DtY#cm6fg=H=uw{J_+C@K63jGL0UJvLSxJt#ratW{eEXt5S^M50xP)Am&dr z`Ak4ED~lZ)h^%287zfTxv`kR{NG8P43g1O<4z9gr3|AILRLgv0*)G2OBWg8^7hP$8 zQvQZscS4Wo>F?r{cF7R?)IjO3-=~y@y?|hHbx9+-mS;>&$Y(t+x*~Tu=@IJ8ss5C0 zy>Hac`Dx>JqD^wSXw=9F3}$fVV2XM?QED~P9BbH6zi~tFn>W&H)X3Xce`q%QXSpVZ z?}Fd(K7c7NNocFGD@X$ zol(KdbsuxvplJ>xHtB-Hxrh;hqr@9Jb-Q5 zaMoWM{@p6sQrjNHpu?*-5ZR0Qv=Jzs;&3q@hycDgPTn!r&$Q1P=qRFB1c&2{egB;$ zHeeS@(;ixeVotKOL&5;;(o~7T7sm3BsqBXs?&ncd3 ziI$mcBbzBbs!NZ}g=TrWDejCx+i{Iwmt@0CHcPTIhg&fb7~vm&cDGA}2JSB0wUPyB zKn2x0PFw})KowFncg;sxiVPB-T##PCnwaM>I}vBuK~KOa@1$^1C9AC(Nsiw^sx!{^ z(jB0yv)Zz98AD@2>t&}blSwHYIlnGD%i!gz0n&Qh`A14>LT&L`XL$9zx*A<=M%aFI zU(AU5)gw|>m90s2vC^U}wWx%UEzR;|lq#)D^CMZ7En4c^ZKk&AK31Ib!WLFF+d9-z 
zwTER@_q=~8sBZA(QeD4Hb^XVox`F*AGL)e%i~Q_nc2J(#L9?0V0eVE?Oc2b#&L$>2 zY|yvi7fB1pUo0&Q%d{}mpanxD2JxOSI{2aD&wBgQuRp$g{o|kBPOe`3;mxbJS1+#K z3Qmp+Ut@Ile0k->i{PeooA~7w^~&VcPLHjZS2pW`%PHQ4AUs)h_Xh5Pva1iE6uuto&UIq)xE zzh!gbO=A`_dS4^XPlItprroynqD%%Xzbon2J!BWPEa)h%WAf@QSkk}k>s~HCFpn&2 zA=TlkXP)R(f_cQ@U007~YuBIGWxkX7%){F;nXLGTK{tx;`i-_ctnP1 z6g)4%so^&Cy*o@%kbvVumR^?Y5LTI0bX%kWolwyj_>m`GAGhiEz|f;-M$S(-IhnCT zlCfHqdv_JBEa-bNNKJb8Lme>Uvty)`rk2rt<_SZEpLuX|e1IU+c<5-x(5$-UZuD*A+sujkafrqK;;8nKF3@C6a``R|iSm`kAB)D1zS8*9 z)41s8R7K*N*CW$3NR|e|>>_vz|1I*qn)V7-KuC-rj;A)SM1Z+-N{jw8LdemRAf%Ja zHIc&zFRcpZq)f@d|Ikctshh@w@Tp`GVEU6y?9Y+=fdLt>sDXe8clnkH2Kgh;hLB_j zy?5PC<7=~zab=btQfLlK@VO^P@67G${>to#eMB2qUcFy_1Q!dv-U$U+@qZ%jMHvW6*oRWN@x*%vdG zr>KA=i?uKsC(``VzlTv{`g7;mGjDwxiz#=yEb-sEZ|3uTk%H`|DO6FfO1^HdNin?g zNJ?MR%0hGp=4zMfxUy*X)T1tSNE0sO(KK%&D=XaBc*J-1j0Zx!pd`Q50d(dx@<;uW4* zNaP+%c@Lgl$|CK~qUFp>tZOz5hxd6*$;xKCiW(4J{(@AKWGbvZuo|4p^w@cQd3n?) zl4<2c^FHp%UQu4~9!_H=;e_GM7QaJF8qNjsw>q-OrYOGZZ*Jy+WDX3gu+D(r7k z1#5T@qYTf*&(Ab{Z8wYN2^nOwpu&(;5y4&%sPNw~53vXT8`q3hubUN!NE_^F zHCR7@UE1n0nmvIt)!c`a;DLW*<@cq^GfGQ~txoB}Q6X--NBJj+3|8N3o^y zCfTMX+3s0x%NPlpUOG-E>j&TXjIOJ-=@iB8(EngkR}TVFAsDlsurerel4^us7G#z# zAxqjYq&RrxXDPvoXt>SGBb-3?5JlmM5(G}GsK7E)U?nPIGq#@eT)&RMt!wh z0^cn&y16k6s3-(I;n7ZJcQrv{vWpnvH!>@5GXjfTUpkj#2T!M~DD>#Yxg=79>98wX zvpe@g0IVrC6j@J8e_BM=t1glesV-0u{`_u{to;G`;f*eET83t;8@8%Q=2LHtD-4c|ta^^HZK zQReDi;c^-Q%-NlEUzUSf;l#jb)WGT_f2tEt#eU&sPX8*L3-THyC<%MzB--GaZx5l& z3JpYSALHoduWe@Ew$g8Dv^Ap?ZY|em`B6W`R3V3THD;vIRMY0H%%`px8ztK}N}ppQs}TfS4_uyV5%{zG zv)wDS+34eb;Q=Qu-*w*)@OU)X51B;(I-eWq1- zS5o72swY`HKa>7EH%?O2V5&8{+i14iPxqrt^q|LQnmp-G^|@U$keJI#);vl59<2Vw zIq2s4wj;hB^!n%cwr=l=QFwx9FwYzs#bXD|C_7uU6f#Tvu!|lfX+%4+;TowjyIA(& zQKXp`a*`mf%i(=OSqOgK6EY+u1qK`U{l-HV919}VUfA-o!LnGk*Xx(dGMl=$TG0S_ z{zJ0%z(*0H^kT?5c;nxtp5j0WpnyrBG##^44u`H<561T+-W}WcJ-qmi4@FvQ+q@Ry z7OaMY-0ZVspBC3?Xrkr1#|#T~0J@Lv-i=-mLx{ zSpBdOtJlfC*_6Yp^Q1Oq?5doVZBAZjEP}i3Bg1_4Ussogc?i48T zK80+qDw=3+ybB<$VWX2jCJ;okRp@{4!wKE`Etc#M@8DN+>r@VLApxs|4R;HCm@!%c 
zH48ftC*XaAusHi;luzoKV+qG@2!>`(<*Z#MHqg6RBWjh&sf~^lf3)JMM=nYei0-5fP?CW|F*SS*_e3fenSh zN8c*!X*Ig;qK8jmJ55_4<8Xgq58y#<6(LnfEJ~*zbrrfrA7oc4VYfUu)R4f{wnlnj z(XmFhKTZnsGlF003UE0P>f)t}Oqk0tvhnvRI5oRnxSR93MbO{EFCvyv$ZCBRNBK>467{+4tu(S`)fP@2dXos7H{b+`GtTU8tSuNyKu+P=g3;ZT-kFrFo@aC%1~^33Dyg{Da7A5?t>Hw7^kES28xOa zICA#VyNGyks43jB#{EXF*rW>EUd^ICQ&0+5tRnJ76BoX4%zn@9-I%s!1v~b zsGBV;3&$L~LsY-k!};mQ5*$Gy&2z)nRBy3Igpmd_V8LUzceIKQHeAR zYRDO?A?*r?LO>^i#G|Yg%1MR4`{Tk_EvTqPMWt=ub;NJB?ettl*YYrW?m+GwpmmK? zMgn=Wt^Uy>HP<$&QnJ(PSFSM78v0NwOtbnxZf3d5t5#tu^#Rq{VA~(`@@uXEpqy11 zgvuQKeP{IC6Lx}`hTiO*S+kxmR9uK0qgD~B`DVB}qnjJJj%(T1!E%0mKzhyVdq@dMZ+551`k^nvrXTS*Th_>K#92R@x1|%RNTe0Etd-?HZb{@SGm?~a5t^uk1*e0wbtyzpAlE^3Q?HO>``0Ek)`QrKa+{Pq ziLA;Tl@Bpd$cvk?rr)Y#@0UGpF_{SqvbyqwIj#umAoImJlsPS##?kk1cKWT(4M)|l zoEv599=7?Tdnly=oyHH1gI}+HIH(R*m2!=NglotHWJt-(4JADyMCpYFJYO1Vuh?YB zhb4m|#bv|ZX`stEG%CB+c)I1NZn_Q@c55h7s@(w|J#a^xt$XDz zD6+>Y31V0Z;#P0~`xwk0=(IqMEDKMAnSp&`>V+Xer~6SBdP?^5oiT8q0YT8zK0qsn z14Ylcn-+WHl!7NT?_W`7`#6h8Lo~`7Yo-|CL0~Fqs%qH-+)nutA#CFTJ{R)}$MxW8WE3aen?H7y;QZ>~%gffqeH_C(L#>a!(fp=+O>ib>;-PQbzd zIqhC*(=Ocb`TS^C;Skk7*(dcDH@450FYENQIc--MciWMDRW~1L73g4wEsFJ1Fm$Q# zwEOaqua48=r@b!^fz>=62ny6a>2g zUdm+79c>!=eq*JH!es$wM` z9cVSgRwGbY?P`3g6udqkCv@TCrNHaEajo%zRzOlPuK%+X0ja32egD*|+SV%3$53f1=KR96 zzp6AJQKgyRC5mK}=>Z=@3HX?bz(-I9eiDVC{4V4dJr9qsL{%6>2EDx=OVo*=ZJ?TdW{gU=+{~FSNHx20fuiDS{$Kgj;aeiqk%tuyKzW=JE{1T}6%hyC^d(N=p zOuMs=^8Hu+*;!tJPCyR(%c1%wJt z0LGR;O_PFrk&_=9tJ-8lWHiA7s1o*q1puUng|tawYszgskBEf|rAr$u$RSD5P|>Ks z(sD+LwsZf|u;YKWxDU$2 znWb{A;s14JiznX$-tWl&4?Y_{{(eUue(?G5;`h7q;nG-Y*r~d!qEHmC{8?}l%#6k6 zE|>?&RuGa@cE3R}?gG$#ebD!!Ne60`v(R9_=&xGef=3h>WqSRFM%j=)qA9@Eix+-^ku*jjYV-_BgT} zkJ}^M0&~-YEU>z&Vnx=c2R!8?<>KzlTYm6jcfR)Si;RZ;&!J&OFN=LW`17kb zZ+;SF@vf09f*7Nd#s;!P^1@5VFCUW?kSzd04TO=0cgro<64)L$!Rz!!f>k}p?tV-6 zh9tuB-`EBG(ZC0(R>v%hSYo;NiZa5i828coorh6pjHy5KHnDHKHf9(u!bsNM>>Z8+ zu}ft>FiyCn48&!HXso;z6r1`B??Vu6))4U;v2wFnVqhFd5xEKH1j`WdkRaN)Uj(xS z1$<8$AV7s)VHYq8gU|`uK0>CLrIA2m*LWKN42%!!!9)hj02{@5n1$Y&9B@cG1gk=; 
z0EHmSpCuwN!-64@dGHJjGiH-P!Ms52w1b$|Ac-c7=!AsrX&%jT8ck{FEr|UA&`O6H zv59MQZXVXv=P3>5G}K%QOxO z|DL@OC|eAcl1BFvUuFxuP3Z1YQ1gRTUs&fXIvN3qoFN<5I-!BH@wH3;ERUY_gGTnI z1!%eu6b$WhVb%c=(UMa95SE({+Rj+PwjBD|VW-TF79)Xs4{2 zh4?ALYbr(hF(ZC>bfI%%{M#-+&Qf#*OG*|jP${cDE|wRE%gRVfh~t?>%fvBgTqo$J zlkaT$@NgFq_T?B`{5PFO{EI$50DExC@3*-)?!j`NRwmCvParp1e%=ux*@VS+Tl$V+ zF^}0@d3Lwwq&9%pQkO%3sGKYQ3zZO!_5dGRJwCKjnLp$6M`7z zkrueDd&1_60jcKsI9S<7Iuugq`qJr*9sE~t%cW+tBS99&rA6eL)h1yPnxZLLa?ggR zJtD103*bSI6@b{pxzNNXj*+I_7Q)h&m+s&omyZBU+GJWFr(I8)RN!DWEGXD0?L1%* zu-ep4CNl!u{Jgj6jRRDc%*Oo}2bKGK8fyu7L_iNT)~ZDWusZXTKNAzF$MQjUo} zIi{_Ry0w3^_T$9_*vo`0TYn9~)G(gcNa$@o)smCp+7Z-C4i;bf_rL+Ap|lE%4bmRxvd9Vx8VA^#tgN<)Sokq?rJe=R9Bw*9uUKuam z{POC>%l|h1{Nk^#P*9#lMEIGd+16-`Pu`K|h{*ILz$A1W9Fe5I`9>BL3MF>1SkQw6 zkvmpoE+I8gKJ1x#ScULt#fuv0WL;mL2u#_OWw762GK!(YWiwGb3qU|pCml;G%jayd zS-!)jjb>CJhqG&USVFv=yTjnXgvkRGfS|-gO^!>a6an5bvrK&n9L1uwIlfefX;m#X zmiJwX0sL0mM_JX9c&v-8y6$u;1K-zCq&)gBkVjQzIXOvTk#$s@sR?RCz+b+EtgMmN z_)@n9`jII1ExWTPc$JxpqZRk9TzScY$RMjj^SK!8`}Irbpm#C{7n%u;e2RF4KM8af zOd*7(*Q!{yO2mTj^63yujWzp2EEByR39$sah}cGW&Af^~9d<=e=KbMVfZ7Dp0Vhlxl)xd=!TT=P;Z5>(}z&zOI8W^v)CSk-Er)V?wrvFo~+9h43m zM5b)W@E2>qxF;K7bPv)Dk(Q#Rk7`W?%zH1U=i|9C-69ksk*#gT*0e#Lg{)CC1w%TD?XZA&a!SFLVhgfs$#%hBZ06N@V8S8~t-PSo}0Fq5|7) zz_=_Ams7}-DttbQxdP9MMSDyTC&7}O9ohQji_?KaAh(n$0n?);uY+cxJF-DwcfgcH z`WkK(wfC_nbU*d3VAU-i!%=p%@w(m}RJCB%-DRH^dACYrcqF>hqqRGAJgS-DQUO3_$t+u5SRNIL0j4yRW5sq)DWR$x%`&&J_6S(hA-|YyRLN}g%{)cD zbz5tX8jiNnM{XXF+kT$XjG{LG=#Aj>v;uu1+mFOjR%M8(HtoobGUbsJ^yNwsxhv&c zdo}9_t}AxaKB$%)We%wYL8%pz8yN~FA@r&(gRIH?NCR-%xD|vpX+kh*cP1`8k zps#IB9aA{tqdA;apTh+7(6wJX051$vy2z@TW36Ujv@M#+AxW#j8f%i%AofAcfv_$$ zU6b>mt))XN&Dph8!Y`+z0tYTvEtKnz?Xio!QSn?#@@NQbMsD5mihL}R>wbEo%Qwk) zh=xs~ouj6S)|B1dD{Xi8P2JrmpzlsYEsn(G2@_m=YiT$~+G^BRpr~tH{bXFddV6)r zJ%6X5-iAZ;oE*T(xttuP6ElY6ju(;x`5YA6=nnw?w)YjFn~b<_7Hh{Zf~!A_)%98- zlr`yOg(?Oe>$2b!q^Z@G|7F>A>n)Y~I#r{Xt)nmm_h@dhzz`f&hMCmt>}0v|w5L!< zeP3s!&@*+&%o%ueuQz6JK~S##E;2q1-FHEoZPc}#cDNpNt>NX9Oj$u4Z{_sydI+jh 
z%bCq3emJ2qpSY@|E~Zi~7pTBq;!P*< z=H@1#YT$Y~`QHo)WVse-gKp>W~der_V3fs+^<1@!`BfVTKhO? zZ@2os4i$9JWZP}z?C@9)4_s=JJ0;Yf(6l@f5W8}(ikbsulLq!F_cix>(Dr?3=l`%b zQ7dQRIjI@gKAk4^?l{rfd7{%~eHH1)X|z7}DqMWNQUya?R-w7$RwG$tcd9LlHap8U zRo%s;<+|cQoxVfLf)QuKlWsk*d}{CEh=>E99Nj(PL;<6$7tiOgEJJ z9^Z!1)CQ!PW zTB24u2!T~$sG4%y1TfJ$dYzdqyVNDz*-x^baot0WgpM*w*-lWpEG6Ks zZsTZYzpK0{l?OZbL#qjBdp{`9y%VY!f2CDshrw6DZEzr}Gp~;7v*5L|Uw2xYx#|c? zQ{|?kWz^SM?r5mY@q%-c4;?sqkZk6@L$@`%9gt{w%b+0Z*V6#f-J#Txp$$4U`jK{u zylSAAD*;YBmxGKQ1<04WOkdwN+uT0e$VU6A2zVdnT1`EozAmwnzcex4RsxdQaP_5w zIwMaDZBkP|msYH}qq*>F_r2x9Uk6##kW{wj*LJA48+OZe-5TyF52VmgZct)Y7EqxO z{-G9dH9ES+ji1aG+zKbL2G-%A+rnN~x39vc4_0o$Bh6aP-IreC{Sg|nt~I?I#}pEUR;xXlDy@zB?2&9DPhtnDR-P0Troxhz=*{DQ>FI5)5q)GW=40J>86!qhjRvRvI;Zw7>j%u|R>@&h`Oizsw{xT{ zo-nJy*)_VC_P$6Bv?U#ht8W$QrtUkucQVnaneS)o*hDpl-n?c~Ts~^rKbJ|zkL71q zbr!BZMYnJQB@0IT0AyTtbj~wIr@hl-t)t^(J2$p*Xl#A@YP7DkQ(ZLidd$2K^p&7Z z{0*D=7%H|Xx-&6_6b)#NPK2>ccuN)QGF=txfR>JZr?gRR{Ru1f-+oWA0i%Tm(1OX4 zX%c3Ymu05;6a7adKzH!1?C$1Kx$B9#;xsKDr}IZ?+x1ou#of|82-@@X^IxTjiZp%J}*D218X~mBmt&UE8DHkbt(l`&dHhe zOEcuOw0Ek`?IX{p%85fR?(EHTL!I}tiwPRIT^Jf<3{PNk$uxQZPXjDNa0#=N5OT51 zH}oBmK25t=OMY#&P=H(GvoE3VudJTi7rRH~^7*!L=YD}&yY-9w{O;?|*PpLHUw^*- reEs?Q^Y!QJ&)1)?KVN^o{(SxU`t$YY>(AGpC;s{W_jD|%0PqC><$U(y diff --git a/dist/class4gl-0.1dev/PKG-INFO b/dist/class4gl-0.1dev/PKG-INFO deleted file mode 100644 index 1cecd19..0000000 --- a/dist/class4gl-0.1dev/PKG-INFO +++ /dev/null @@ -1,14 +0,0 @@ -Metadata-Version: 1.0 -Name: class4gl -Version: 0.1dev -Summary: UNKNOWN -Home-page: UNKNOWN -Author: UNKNOWN -Author-email: UNKNOWN -License: GPLv3 licence -Description: # class4gl - Chemistry Land-surface Atmosphere Soil Slab model (CLASS) | Python version - - This is the extension of class to be able to be used with global balloon soundings. 
- -Platform: UNKNOWN diff --git a/dist/class4gl-0.1dev/bin/__init__.py b/dist/class4gl-0.1dev/bin/__init__.py deleted file mode 100644 index a21583b..0000000 --- a/dist/class4gl-0.1dev/bin/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from . import model,class4gl,interface_multi,data_air,data_global - -__version__ = '0.1.0' - -__author__ = 'Hendrik Wouters ' - -__all__ = [] diff --git a/dist/class4gl-0.1dev/lib/__init__.py b/dist/class4gl-0.1dev/lib/__init__.py deleted file mode 100644 index a21583b..0000000 --- a/dist/class4gl-0.1dev/lib/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from . import model,class4gl,interface_multi,data_air,data_global - -__version__ = '0.1.0' - -__author__ = 'Hendrik Wouters ' - -__all__ = [] diff --git a/dist/class4gl-0.1dev/lib/class4gl.py b/dist/class4gl-0.1dev/lib/class4gl.py deleted file mode 100644 index 7baaa51..0000000 --- a/dist/class4gl-0.1dev/lib/class4gl.py +++ /dev/null @@ -1,1611 +0,0 @@ -# -*- coding: utf-8 -*- - -""" - -Created on Mon Jan 29 12:33:51 2018 - -Module file for class4gl, which extents the class-model to be able to take -global air profiles as input. It exists of: - -CLASSES: - - an input object, namely class4gl_input. It includes: - - a function to read Wyoming sounding data from a yyoming stream object - - a function to read global data from a globaldata library object - - the model object: class4gl - - .... 
- -DEPENDENCIES: - - xarray - - numpy - - data_global - - Pysolar - - yaml - -@author: Hendrik Wouters - -""" - - - -""" Setup of envirnoment """ - -# Standard modules of the stand class-boundary-layer model -from model import model -from model import model_output as class4gl_output -from model import model_input -from model import qsat -#from data_soundings import wyoming -import Pysolar -import yaml -import logging -import warnings -import pytz - -#formatter = logging.Formatter() -logging.basicConfig(format='%(asctime)s - \ - %(name)s - \ - %(levelname)s - \ - %(message)s') - - -# Generic Python Packages -import numpy as np -import datetime as dt -import pandas as pd -import xarray as xr -import io -#from skewt.thermodynamics import TempK,DewPoint,MixR2VaporPress,GammaW,degCtoK, Rs_da, Cp_da,VaporPressure,MixRatio -from data_global import data_global -grav = 9.81 - -# this is just a generic input object -class generic_input(object): - def __init__(self): - self.init = True - - -# all units from all variables in CLASS(4GL) should be defined here! -units = { - 'h':'m', - 'theta':'K', - 'q':'kg/kg', - 'cc': '-', - 'cveg': '-', - 'wg': 'm3 m-3', - 'w2': 'm3 m-3', - #'wg': 'kg/kg', - 'Tsoil': 'K', - 'T2': 'K', - 'z0m': 'm', - 'alpha': '-', - 'LAI': '-', - 'dhdt':'m/h', - 'dthetadt':'K/h', - 'dqdt':'kg/kg/h', - 'BR': '-', - 'EF': '-', -} - -class class4gl_input(object): -# this was the way it was defined previously. 
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c')) - - def __init__(self,set_pars_defaults=True,debug_level=None): - - - """ set up logger (see: https://docs.python.org/2/howto/logging.html) - """ - - print('hello') - self.logger = logging.getLogger('class4gl_input') - print(self.logger) - if debug_level is not None: - self.logger.setLevel(debug_level) - - # # create logger - # self.logger = logging.getLogger('class4gl_input') - # self.logger.setLevel(debug_level) - - # # create console handler and set level to debug - # ch = logging.StreamHandler() - # ch.setLevel(debug_level) - - # # create formatter - # formatter = logging.Formatter('%(asctime)s - \ - # %(name)s - \ - # %(levelname)s - \ - # %(message)s') - # add formatter to ch - # ch.setFormatter(formatter) - - # # add ch to logger - # self.logger.addHandler(ch) - - # """ end set up logger """ - - - - # these are the standard model input single-value parameters for class - self.pars = model_input() - - # diagnostic parameters of the initial profile - self.diag = dict() - - # In this variable, we keep track of the different parameters from where it originates from. - self.sources = {} - - if set_pars_defaults: - self.set_pars_defaults() - - def set_pars_defaults(self): - - """ - Create empty model_input and set up case - """ - defaults = dict( - dt = 60. , # time step [s] - runtime = 6*3600 , # total run time [s] - - # mixed-layer input - sw_ml = True , # mixed-layer model switch - sw_shearwe = False , # shear growth mixed-layer switch - sw_fixft = False , # Fix the free-troposphere switch - h = 200. , # initial ABL height [m] - Ps = 101300., # surface pressure [Pa] - divU = 0. , # horizontal large-scale divergence of wind [s-1] - #fc = 1.e-4 , # Coriolis parameter [m s-1] - - theta = 288. , # initial mixed-layer potential temperature [K] - dtheta = 1. 
, # initial temperature jump at h [K] - gammatheta = 0.006 , # free atmosphere potential temperature lapse rate [K m-1] - advtheta = 0. , # advection of heat [K s-1] - beta = 0.2 , # entrainment ratio for virtual heat [-] - wtheta = 0.1 , # surface kinematic heat flux [K m s-1] - - q = 0.008 , # initial mixed-layer specific humidity [kg kg-1] - dq = -0.001 , # initial specific humidity jump at h [kg kg-1] - gammaq = 0. , # free atmosphere specific humidity lapse rate [kg kg-1 m-1] - advq = 0. , # advection of moisture [kg kg-1 s-1] - wq = 0.1e-3 , # surface kinematic moisture flux [kg kg-1 m s-1] - - CO2 = 422. , # initial mixed-layer CO2 [ppm] - dCO2 = -44. , # initial CO2 jump at h [ppm] - gammaCO2 = 0. , # free atmosphere CO2 lapse rate [ppm m-1] - advCO2 = 0. , # advection of CO2 [ppm s-1] - wCO2 = 0. , # surface kinematic CO2 flux [ppm m s-1] - sw_wind = True , # prognostic wind switch - u = 0. , # initial mixed-layer u-wind speed [m s-1] - du = 0. , # initial u-wind jump at h [m s-1] - gammau = 0. , # free atmosphere u-wind speed lapse rate [s-1] - advu = 0. , # advection of u-wind [m s-2] - v = 0.0 , # initial mixed-layer u-wind speed [m s-1] - dv = 0.0 , # initial u-wind jump at h [m s-1] - gammav = 0. , # free atmosphere v-wind speed lapse rate [s-1] - advv = 0. , # advection of v-wind [m s-2] - sw_sl = True , # surface layer switch - ustar = 0.3 , # surface friction velocity [m s-1] - z0m = 0.02 , # roughness length for momentum [m] - z0h = 0.02* 0.1 , # roughness length for scalars [m] - sw_rad = True , # radiation switch - lat = 51.97 , # latitude [deg] - lon = -4.93 , # longitude [deg] - doy = 268. , # day of the year [-] - tstart = 6.8 , # time of the day [h UTC] - cc = 0.0 , # cloud cover fraction [-] - Q = 400. , # net radiation [W m-2] - dFz = 0. 
, # cloud top radiative divergence [W m-2] - ls_type = 'js' , # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs) - wg = 0.21 , # volumetric water content top soil layer [m3 m-3] - w2 = 0.21 , # volumetric water content deeper soil layer [m3 m-3] - cveg = 0.85 , # vegetation fraction [-] - Tsoil = 295. , # temperature top soil layer [K] - Ts = 295. , # initial surface temperature [K] - T2 = 296. , # temperature deeper soil layer [K] - a = 0.219 , # Clapp and Hornberger retention curve parameter a - b = 4.90 , # Clapp and Hornberger retention curve parameter b - p = 4. , # Clapp and Hornberger retention curve parameter c - CGsat = 3.56e-6, # saturated soil conductivity for heat - wsat = 0.472 , # saturated volumetric water content ECMWF config [-] - wfc = 0.323 , # volumetric water content field capacity [-] - wwilt = 0.171 , # volumetric water content wilting point [-] - C1sat = 0.132 , - C2ref = 1.8 , - LAI = 2. , # leaf area index [-] - gD = 0.0 , # correction factor transpiration for VPD [-] - rsmin = 110. , # minimum resistance transpiration [s m-1] - rssoilmin = 50. , # minimun resistance soil evaporation [s m-1] - alpha = 0.25 , # surface albedo [-] - Wmax = 0.0012 , # thickness of water layer on wet vegetation [m] - Wl = 0.0000 , # equivalent water layer depth for wet vegetation [m] - Lambda = 5.9 , # thermal diffusivity skin layer [-] - c3c4 = 'c3' , # Plant type ('c3' or 'c4') - sw_cu = False , # Cumulus parameterization switch - dz_h = 150. 
, # Transition layer thickness [m] - cala = None , # soil heat conductivity [W/(K*m)] - crhoc = None , # soil heat capacity [J/K*m**3] - sw_ls = True , - sw_ap = True , # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input - sw_ac = None , # switch that tells to use large-scale gridded Air Circulation (advection and subsindence) fields as input from eg., ERA-INTERIM - sw_lit = False, - ) - pars = model_input() - for key in defaults: - pars.__dict__[key] = defaults[key] - - self.update(source='defaults',pars=pars) - - def clear(self): - """ this procudure clears the class4gl_input """ - - for key in list(self.__dict__.keys()): - del(self.__dict__[key]) - self.__init__() - - def dump(self,file): - """ this procedure dumps the class4gl_input object into a yaml file - - Input: - - self.__dict__ (internal): the dictionary from which we read - Output: - - file: All the parameters in self.__init__() are written to - the yaml file, including pars, air_ap, sources etc. 
- """ - file.write('---\n') - index = file.tell() - file.write('# CLASS4GL input; format version: 0.1\n') - - # write out the position of the current record - yaml.dump({'index':index}, file, default_flow_style=False) - - # we do not include the none values - for key,data in self.__dict__.items(): - #if ((type(data) == model_input) or (type(class4gl_input): - if key == 'pars': - - pars = {'pars' : self.__dict__['pars'].__dict__} - parsout = {} - for key in pars.keys(): - if pars[key] is not None: - parsout[key] = pars[key] - - yaml.dump(parsout, file, default_flow_style=False) - elif type(data) == dict: - if key == 'sources': - # in case of sources, we want to have a - # condensed list format as well, so we leave out - # 'default_flow_style=False' - yaml.dump({key : data}, file) - else: - yaml.dump({key : data}, file, - default_flow_style=False) - elif type(data) == pd.DataFrame: - # in case of dataframes (for profiles), we want to have a - # condensed list format as well, so we leave out - # 'default_flow_style=False' - yaml.dump({key: data.to_dict(orient='list')},file) - - # # these are trials to get it into a more human-readable - # fixed-width format, but it is too complex - #stream = yaml.dump({key : False},width=100, default_flow_style=False) - #file.write(stream) - - # workaround. I don't know how to put a table in a readable format by using yaml. So I do it manually here - #file.write(key+': !!str |\n') - #file.write(str(data)+'\n') - - def load_yaml_dict(self,yaml_dict,reset=True): - """ this procedure loads class4gl_input data from a dictionary obtained from yaml - - Input: - - yaml_dict: the dictionary from which we read - - reset: reset data before reading - Output: - - All the parameters in self, eg., (pars, air_ap, sources etc.,). 
- """ - - if reset: - for key in list(self.__dict__.keys()): - del(self.__dict__[key]) - self.__init__() - - for key,data in yaml_dict.items(): - if key == 'pars': - self.__dict__[key] = model_input() - self.__dict__[key].__dict__ = data - elif key in ['air_ap','air_balloon','air_ac','air_ach']: - self.__dict__[key] = pd.DataFrame(data) - elif key == 'sources': - self.__dict__[key] = data - elif key == 'diag': - self.__dict__[key] = data - else: - warnings.warn("Key '"+key+"' may not be implemented.") - self.__dict__[key] = data - - def update(self,source,**kwargs): - """ this procedure is to make updates of input parameters and tracking - of their source more convenient. It implements the assignment of - parameter source/sensitivity experiment IDs ('eg., - 'defaults', 'sounding balloon', any satellite information, climate - models, sensitivity tests etc.). These are all stored in a convenient - way with as class4gl_input.sources. This way, the user can always consult with - from where parameters data originates from. - - Input: - - source: name of the underlying dataset - - **kwargs: a dictionary of data input, for which the key values - refer to the class4gl data type ('pars', 'air_ap', 'air_balloon', etc.) and - the values is a again a dictionary/dataframe of datakeys/columns - ('wg','PRES','datetime', ...) and datavalues (either single values, - profiles ...), eg., - - pars = {'wg': 0.007 , 'w2', 0.005} - pars = {pd.Dataframe('PRES': [1005.,9523,...] , 'THTA': [295., - 300.,...]} - - Output: - - self.__dict__[datatype] : object to which the parameters are - assigned. They can be consulted with - self.pars, self.profiles, etc. - - - self.sources[source] : It supplements the overview overview of - data sources can be consulted with - self.sources. The structure is as follows: - as: - self.sources = { - 'wyoming': ['pars:datetime','air_balloon:PRES','air_ap:QABS', ...], - 'GLEAM' : ['pars:wg','pars:w2', ...], - ... 
- } - - """ - - #print(source,kwargs) - - for key,data in kwargs.items(): - - #print(key) - # if the key is not in class4gl_input object, then just add it. In - # that case, the update procedures below will just overwrite it - if key not in self.__dict__: - self.__dict__[key] = data - - - - - #... we do an additional check to see whether there is a type - # match. I not then raise a key error - if (type(data) != type(self.__dict__[key]) \ - # we allow dict input for model_input pars - and not ((key == 'pars') and (type(data) == dict) and \ - (type(self.__dict__[key]) == model_input))): - - raise TypeError('input key '+key+' is not of the same type as the one in the class4gl_object') - - - # This variable keeps track of the added data that is supplemented - # by the current source. We add this to class4gl_input.sources - datakeys = [] - - #... and we update the class4gl_input data, and this depends on the - # data type - - if type(self.__dict__[key]) == pd.DataFrame: - # If the data type is a dataframe, then we update the columns - for column in list(data.columns): - #print(column) - self.__dict__[key][column] = data[column] - datakeys.append(column) - - - elif type(self.__dict__[key]) == model_input: - # if the data type is a model_input, then we update its internal - # dictionary of parameters - if type(data) == model_input: - self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \ - **data.__dict__} - datakeys = list(data.__dict__.keys()) - elif type(data) == dict: - self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \ - **data} - datakeys = list(data.keys()) - else: - raise TypeError('input key '+key+' is not of the same type\ - as the one in the class4gl_object') - - elif type(self.__dict__[key]) == dict: - # if the data type is a dictionary, we update the - # dictionary - self.__dict__[key] = {self.__dict__[key] , data} - datakeys = list(data.keys()) - - - # if source entry is not existing yet, we add it - if source not in 
self.sources.keys(): - self.sources[source] = [] - - - # self.logger.debug('updating section "'+\ - # key+' ('+' '.join(datakeys)+')'\ - # '" from source \ - # "'+source+'"') - - # Update the source dictionary: add the provided data keys to the - # specified source list - for datakey in datakeys: - # At first, remove the occurences of the keys in the other - # source lists - for sourcekey,sourcelist in self.sources.items(): - if key+':'+datakey in sourcelist: - self.sources[sourcekey].remove(key+':'+datakey) - # Afterwards, add it to the current source list - self.sources[source].append(key+':'+datakey) - - - # # in case the datatype is a class4gl_input_pars, we update its keys - # # according to **kwargs dictionary - # if type(self.__dict__[datatype]) == class4gl_input_pars: - # # add the data parameters to the datatype object dictionary of the - # # datatype - # self.__dict__[datatype].__dict__ = {**self.__dict__[datatype].__dict__ , - # **kwargs} - # # in case, the datatype reflects a dataframe, we update the columns according - # # to the *args list - # elif type(self.__dict__[datatype]) == pd.DataFrame: - # for dataframe in args: - # for column in list(dataframe.columns): - # self.__dict__[datatype][column] = dataframe[column] - - - def get_profile(self,IOBJ, *args, **argv): - # if type(IOBJ) == wyoming: - self.get_profile_wyoming(IOBJ,*args,**argv) - # else: - # raise TypeError('Type '+str(type(IOBJ))+' is not supported') - - def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): - """ - Purpose: - This procedure assigns wyoming air profiles and parameters to the class4gl_input object. - - Input: - 1. wy_strm = wyoming html (beautifulsoup) stream object. The - function will take the profile at the stream's current - position. - 2. air_ap_mode: which air profile do we take? - - b : best - - l : according to lower limit for the mixed-layer height - estimate - - u : according to upper limit for the mixed-layer height - estimate - - - Output: - 1. 
all single-value parameters are stored in the - class4gl_input.pars object - 2. the souding profiles are stored in the in the - class4gl_input.air_balloon dataframe - 3. modified sounding profiles for which the mixed layer height - is fitted - 4. ... - - """ - - - # Raise an error in case the input stream is not the correct object - # if type(wy_strm) is not wyoming: - # raise TypeError('Not a wyoming type input stream') - - # Let's tell the class_input object that it is a Wyoming fit type - self.air_ap_type = 'wyoming' - # ... and which mode of fitting we apply - self.air_ap_mode = air_ap_mode - - """ Temporary variables used for output """ - # single value parameters derived from the sounding profile - dpars = dict() - # profile values - air_balloon = pd.DataFrame() - # fitted profile values - air_ap = pd.DataFrame() - - string = wy_strm.current.find_next('pre').text - string = string.split('\n')[:-1] - string = '\n'.join(string) - - columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV'] - air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1] - #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4]) - - #string = soup.pre.next_sibling.next_sibling - - string = wy_strm.current.find_next('pre').find_next('pre').text - - # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)). 
- dpars = {**dpars, - **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict() - } - - # we get weird output when it's a numpy Timestamp, so we convert it to - # pd.datetime type - - dpars['datetime'] = pytz.utc.localize(dt.datetime.strptime(dpars['Observation time'], "%y%m%d/%H%M")) - dpars['STNID'] = dpars['Station number'] - - # altitude above ground level - air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation'] - # absolute humidity in g/kg - air_balloon['q']= (air_balloon.MIXR/1000.) \ - / \ - (air_balloon.MIXR/1000.+1.) - # convert wind speed from knots to m/s - air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT - angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees. - - air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x) - air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x) - - - - cp = 1005. # specific heat of dry air [J kg-1 K-1] - Rd = 287. # gas constant for dry air [J kg-1 K-1] - Rv = 461.5 # gas constant for moist air [J kg-1 K-1] - - air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q) - air_balloon['p'] = air_balloon.PRES*100. - - - # Therefore, determine the sounding that are valid for 'any' column - is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) - #is_valid = (air_balloon.z >= 0) - # # this is an alternative pipe/numpy method - # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0] - valid_indices = air_balloon.index[is_valid].values - print(valid_indices) - - dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]] - - air_balloon['t'] = air_balloon['TEMP']+273.15 - air_balloon['theta'] = (air_balloon.t) * \ - (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) - air_balloon['thetav'] = air_balloon['theta']*(1. 
+ 0.61 * air_balloon['q']) - - if len(valid_indices) > 0: - #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile - dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) - - dpars['h_b'] = np.max((dpars['h'],10.)) - dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height - dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height - dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height - - # the final mixed-layer height that will be used by class. We round it - # to 1 decimal so that we get a clean yaml output format - dpars['h'] = np.round(dpars['h_'+air_ap_mode],1) - else: - dpars['h_u'] =np.nan - dpars['h_l'] =np.nan - dpars['h_e'] =np.nan - dpars['h'] =np.nan - - - if np.isnan(dpars['h']): - dpars['Ps'] = np.nan - - - - - if ~np.isnan(dpars['h']): - # determine mixed-layer properties (moisture, potential temperature...) from profile - - # ... and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values - if len(valid_indices) > 1: - if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() - else: - ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() - elif len(valid_indices) == 1: - ml_mean = (air_balloon.iloc[0:1]).mean() - else: - temp = pd.DataFrame(air_balloon) - temp.iloc[0] = np.nan - ml_mean = temp - - dpars['theta']= ml_mean.theta - dpars['q'] = ml_mean.q - dpars['u'] = ml_mean.u - dpars['v'] = ml_mean.v - else: - dpars['theta'] = np.nan - dpars['q'] = np.nan - dpars['u'] = np.nan - dpars['v'] = np.nan - - - - - # First 3 data points of the mixed-layer fit. 
We create a empty head - # first - air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns) - # All other data points above the mixed-layer fit - air_ap_tail = air_balloon[air_balloon.z > dpars['h']] - - #calculate mixed-layer jump ( this should be larger than 0.1) - - air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']])) - air_ap_head['HGHT'] = air_ap_head['z'] \ - + \ - np.round(dpars[ 'Station elevation'],1) - - # make a row object for defining the jump - jump = air_ap_head.iloc[0] * np.nan - - if air_ap_tail.shape[0] > 1: - - # we originally used THTA, but that has another definition than the - # variable theta that we need which should be the temperature that - # one would have if brought to surface (NOT reference) pressure. - for column in ['theta','q','u','v']: - - # initialize the profile head with the mixed-layer values - air_ap_head[column] = ml_mean[column] - # calculate jump values at mixed-layer height, which will be - # added to the third datapoint of the profile head - jump[column] = (air_ap_tail[column].iloc[1]\ - -\ - air_ap_tail[column].iloc[0])\ - /\ - (air_ap_tail.z.iloc[1]\ - - air_ap_tail.z.iloc[0])\ - *\ - (dpars['h']- air_ap_tail.z.iloc[0])\ - +\ - air_ap_tail[column].iloc[0]\ - -\ - ml_mean[column] - if column == 'theta': - # for potential temperature, we need to set a lower limit to - # avoid the model to crash - jump.theta = np.max((0.1,jump.theta)) - - air_ap_head[column][2] += jump[column] - - air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) - - - - # make theta increase strong enough to avoid numerical - # instability - air_ap_tail_orig = pd.DataFrame(air_ap_tail) - air_ap_tail = pd.DataFrame() - #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) - #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) - theta_low = dpars['theta'] - z_low = dpars['h'] - ibottom = 0 - for itop in range(0,len(air_ap_tail_orig)): - theta_mean = 
air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean() - z_mean = air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean() - if ( - (z_mean > (z_low+10.)) and \ - (theta_mean > (theta_low+0.2) ) and \ - (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)): - - air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True) - ibottom = itop+1 - theta_low = air_ap_tail.theta.iloc[-1] - z_low = air_ap_tail.z.iloc[-1] - # elif (itop > len(air_ap_tail_orig)-10): - # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True) - - - - - - air_ap = \ - pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1) - - # we copy the pressure at ground level from balloon sounding. The - # pressure at mixed-layer height will be determined internally by class - #print(air_ap['PRES'].iloc[0]) - - rho = 1.2 # density of air [kg m-3] - g = 9.81 # gravity acceleration [m s-2] - - air_ap['p'].iloc[0] =dpars['Ps'] - air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h']) - air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1) - - - dpars['lat'] = dpars['Station latitude'] - dpars['latitude'] = dpars['lat'] - - # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich) - dpars['lon'] = 0. - # this is the real longitude that will be used to extract ground data - dpars['longitude'] = dpars['Station longitude'] - - dpars['ldatetime'] = dpars['datetime'] \ - + \ - dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.)) - dpars['doy'] = dpars['datetime'].timetuple().tm_yday - dpars['SolarAltitude'] = \ - Pysolar.GetAltitude(\ - dpars['latitude'],\ - dpars['longitude'],\ - dpars['datetime']\ - ) - dpars['SolarAzimuth'] = Pysolar.GetAzimuth(\ - dpars['latitude'],\ - dpars['longitude'],\ - dpars['datetime']\ - ) - dpars['lSunrise'], dpars['lSunset'] \ - = Pysolar.util.GetSunriseSunset(dpars['latitude'], - 0., - dpars['ldatetime'],0.) 
- dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise']) - dpars['lSunset'] = pytz.utc.localize(dpars['lSunset']) - # This is the nearest datetime when the sun is up (for class) - dpars['ldatetime_daylight'] = \ - np.min(\ - (np.max(\ - (dpars['ldatetime'],\ - dpars['lSunrise'])\ - ),\ - dpars['lSunset']\ - )\ - ) - # apply the same time shift for UTC datetime - dpars['datetime_daylight'] = dpars['datetime'] \ - +\ - (dpars['ldatetime_daylight']\ - -\ - dpars['ldatetime']) - - dpars['doy'] = dpars['datetime'].timetuple().tm_yday - - # We set the starting time to the local sun time, since the model - # thinks we are always at the meridian (lon=0). This way the solar - # radiation is calculated correctly. - dpars['tstart'] = dpars['ldatetime_daylight'].hour \ - + \ - dpars['ldatetime_daylight'].minute/60.\ - + \ - dpars['ldatetime_daylight'].second/3600. - - - # convert numpy types to native python data types. This provides - # cleaner data IO with yaml: - for key,value in dpars.items(): - if type(value).__module__ == 'numpy': - dpars[key] = dpars[key].item() - - # # we make a pars object that is similar to the destination object - # pars = model_input() - # for key,value in dpars.items(): - # pars.__dict__[key] = value - - - # we round the columns to a specified decimal, so that we get a clean - # output format for yaml - decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\ - 'DRCT':2 ,'SKNT':2, 'theta':4, 'THTE':2, 'THTV':2,\ - 'z':2, 'q':5, 'WSPD':2, 'u':4, 'v':4} -# - for column,decimal in decimals.items(): - air_balloon[column] = air_balloon[column].round(decimal) - air_ap[column] = air_ap[column].round(decimal) - - self.update(source='wyoming',\ - # pars=pars, - pars=dpars,\ - air_balloon=air_balloon,\ - air_ap=air_ap) - - - def get_global_input(self, globaldata,only_keys=None,exclude_keys=None): - - """ - Purpose: This sets copies the parameters from the global datasets into the self (or similar object) - according to the position (lat lon) and the 
class datetime and timespan - globaldata should be a globaldata multifile object - - Input: - - globaldata: this is the library object - - only_keys: only extract specified keys - - exclude_keys: do not inherit specified keys - """ - classdatetime = np.datetime64(self.pars.datetime_daylight) - classdatetime_stop = np.datetime64(self.pars.datetime_daylight \ - + \ - dt.timedelta(seconds=self.pars.runtime)\ - ) - - - # # list of variables that we get from global ground data - # self.ground_keys = ['fW', 'fB', 'fH', 'fTC', 'alpha', 'z0m', 'z0h', - # 'wsat', 'Tsoil', 'cc', 'T2', 'wg', 'w2', 'wfc', - # 'wwilt', 'DSMW', 'tex_coarse_values', 'tex_medium_values', 'tex_fine_values', 'code_values', - # 'texture', 'itex', 'isoil', 'BR', - # 'b', 'cveg', - # 'C1sat', - # 'C2ref', 'p', 'a', - # ] #globaldata.datasets.keys(): - - # # these are the required class4gl 3d atmospheric input which is not provided by the soundings - # self.atm_keys = ['advtheta_x','advtheta_y','advu_x','advu_y','advv_x','advv_y','advq_x','advq_y','w','p'] - - - if type(globaldata) is not data_global: - raise TypeError("Wrong type of input library") - - # by default, we get all dataset keys - keys = list(globaldata.datasets.keys()) - - # We add LAI manually, because it is not listed in the datasets and - #they its retreival is hard coded below based on LAIpixel and cveg - if ('LAIpixel' in keys) and ('cveg' in keys): - keys.append('LAI') - - # # In case there is surface pressure, we also calculate the half-level - # # and full-level pressure fields - # if ('sp' in keys): - # keys.append('pfull') - # keys.append('phalf') - - # If specified, we only take the keys that are in only_keys - if only_keys is not None: - for key in keys: - if key not in only_keys: - keys.remove(key) - - # If specified, we take out keys that are in exclude keys - if exclude_keys is not None: - for key in keys: - if key in exclude_keys: - keys.remove(key) - - # we set everything to nan first in the pars section (non-profile 
parameters - # without lev argument), so that we can check afterwards whether the - # data is well-fetched or not. - for key in keys: - if not ((key in globaldata.datasets) and \ - (globaldata.datasets[key].page is not None) and \ - ('lev' in globaldata.datasets[key].page[key].dims)): - self.update(source='globaldata',pars={key:np.nan}) - # # we do not check profile input for now. We assume it is - # # available - #else: - # self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])})) - - self.logger.debug('getting keys "'+', '.join(keys)+'\ - from global data') - - for key in keys: - # If we find it, then we obtain the variables - if ((key in globaldata.datasets) and \ - (globaldata.datasets[key].page is not None)): - - # check first whether the dataset has a height coordinate (3d space) - if 'lev' in globaldata.datasets[key].page[key].dims: - - # first, we browse to the correct file that has the current time - if 'time' in list(globaldata.datasets[key].page[key].dims): - globaldata.datasets[key].browse_page(time=classdatetime) - - - if (globaldata.datasets[key].page is not None): - # find longitude and latitude coordinates - ilats = (np.abs(globaldata.datasets[key].page.lat - - self.pars.latitude) < 0.5) - ilons = (np.abs(globaldata.datasets[key].page.lon - - self.pars.longitude) < 0.5) - - # if we have a time dimension, then we look up the required timesteps during the class simulation - if 'time' in list(globaldata.datasets[key].page[key].dims): - itimes = ((globaldata.datasets[key].page.time >= \ - classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop)) - - # In case we didn't find any correct time, we take the - # closest one. - if np.sum(itimes) == 0.: - - - classdatetimemean = \ - np.datetime64(self.pars.datetime_daylight + \ - dt.timedelta(seconds=int(self.pars.runtime/2.) 
- )) - - dstimes = globaldata.datasets[key].page.time - time = dstimes.sel(time=classdatetimemean,method='nearest') - itimes = (globaldata.datasets[key].page.time == - time) - - else: - # we don't have a time coordinate so it doesn't matter - # what itimes is - itimes = 0 - - #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value). - - # over which dimensions we take a mean: - dims = globaldata.datasets[key].page[key].dims - namesmean = list(dims) - namesmean.remove('lev') - idxmean = [dims.index(namemean) for namemean in namesmean] - - value = \ - globaldata.datasets[key].page[key].isel(time=itimes, - lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1. - - # Ideally, source should be equal to the datakey of globaldata.library - # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) - # but therefore the globaldata class requires a revision to make this work - self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) - - else: - # this procedure is for reading the ground fields (2d space). - # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again. 
- - - if 'time' in list(globaldata.datasets[key].page[key].dims): - - # first, we browse to the correct file - #print(key) - globaldata.datasets[key].browse_page(time=classdatetime) - - if globaldata.datasets[key].page is not None: - DIST = \ - np.abs((globaldata.datasets[key].page.variables['lat'].values\ - - self.pars.latitude)) - ilat = np.where((DIST) == np.min(DIST))[0][0] - DIST = \ - np.abs((globaldata.datasets[key].page.variables['lon'].values\ - - self.pars.longitude)) - ilon = np.where((DIST) == np.min(DIST))[0][0] - - DIST = \ - np.abs((globaldata.datasets[key].page.variables['lat'].values\ - - (self.pars.latitude + 0.5))) - ilatmax = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]: - ilatmax = ilat - - DIST = \ - np.abs((globaldata.datasets[key].page.variables['lon'].values\ - - (self.pars.longitude + 0.5))) - ilonmax = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]: - ilonmax = ilon - - DIST = \ - np.abs((globaldata.datasets[key].page.lat.values\ - - (self.pars.latitude - 0.5))) - ilatmin = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]: - ilatmin = ilat - DIST = \ - np.abs((globaldata.datasets[key].page.lon.values\ - - (self.pars.longitude - 0.5))) - ilonmin = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]: - ilonmin = ilon - - if ilatmin < ilatmax: - ilatrange = range(ilatmin,ilatmax+1) - else: - ilatrange = range(ilatmax,ilatmin+1) - - if ilonmin < ilonmax: - ilonrange = range(ilonmin,ilonmax+1) - else: - ilonrange = range(ilonmax,ilonmin+1) - - if 'time' in 
list(globaldata.datasets[key].page[key].dims): - DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime)) - - idatetime = np.where((DIST) == np.min(DIST))[0][0] - #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime) - if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ): - idatetime += 1 - - classdatetimeend = np.datetime64(\ - self.pars.datetime +\ - dt.timedelta(seconds=self.pars.runtime)\ - ) - DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend)) - idatetimeend = np.where((DIST) == np.min(DIST))[0][0] - #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend) - if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetimeend)): - idatetimeend -= 1 - idatetime = np.min((idatetime,idatetimeend)) - #for gleam, we take the previous day values - if key in ['wg', 'w2']: - idatetime = idatetime - 1 - idatetimeend = idatetimeend - 1 - - # in case of soil temperature, we take the exact - # timing (which is the morning) - if key in ['Tsoil','T2']: - idatetimeend = idatetime - - idts = range(idatetime,idatetimeend+1) - - count = 0 - self.__dict__[key] = 0. - value = 0. - for iilat in ilatrange: - for iilon in ilonrange: - for iidts in idts: - value += np.mean(globaldata.datasets[key].page[key].isel(time=iidts,lat=iilat,lon=iilon,drop=True).values) - count += 1 - value = value/count - self.update(source='globaldata',pars={key:value.item()}) - - else: - - count = 0 - value = 0. 
- for iilat in ilatrange: - for iilon in ilonrange: - value += np.mean(globaldata.datasets[key].page[key].isel(lat=iilat,lon=iilon,drop=True).values) - count += 1 - value = value/count - - self.update(source='globaldata',pars={key:value.item()}) - - if ('LAIpixel' in keys) and ('cveg' in keys): - self.logger.debug('also update LAI based on LAIpixel and cveg') - # I suppose LAI pixel is already determined in the previous - # procedure. Anyway... - key = 'LAIpixel' - - if globaldata.datasets[key].page is not None: - # first, we browse to the correct file that has the current time - if 'time' in list(globaldata.datasets[key].page[key].dims): - globaldata.datasets[key].browse_page(time=classdatetime) - - DIST = \ - np.abs((globaldata.datasets[key].page.lat.values\ - - self.pars.latitude)) - ilat = np.where((DIST) == np.min(DIST))[0][0] - DIST = \ - np.abs((globaldata.datasets[key].page.lon.values\ - - self.pars.longitude)) - ilon = np.where((DIST) == np.min(DIST))[0][0] - - - DIST = \ - np.abs((globaldata.datasets[key].page.lat.values\ - - (self.pars.latitude + 0.5))) - ilatmax = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lat'].values[ilatmax] < globaldata.datasets[key].page.variables['lat'].values[ilat]: - ilatmax = ilat - - DIST = \ - np.abs((globaldata.datasets[key].page.lon.values \ - - (self.pars.longitude + 0.5))) - ilonmax = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lon'].values[ilonmax] < globaldata.datasets[key].page.variables['lon'].values[ilon]: - ilonmax = ilon - - DIST = \ - np.abs((globaldata.datasets[key].page.lat.values\ - - (self.pars.latitude - 0.5))) - ilatmin = np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lat'].values[ilatmin] > globaldata.datasets[key].page.variables['lat'].values[ilat]: - ilatmin = ilat - DIST = \ - np.abs((globaldata.datasets[key].page.lon.values\ - - (self.pars.longitude - 0.5))) - ilonmin = 
np.where((DIST) == np.min(DIST))[0][0] - if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]: - ilonmin = ilon - DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime)) - idatetime = np.where((DIST) == np.min(DIST))[0][0] - - - if ilatmin < ilatmax: - ilatrange = range(ilatmin,ilatmax+1) - else: - ilatrange = range(ilatmax,ilatmin+1) - - if ilonmin < ilonmax: - ilonrange = range(ilonmin,ilonmax+1) - else: - ilonrange = range(ilonmax,ilonmin+1) - - #tarray_res = np.zeros(shape=globaldata.datasets[key]['time'].shape) - LAIpixel = 0. - count = 0 - for iilat in [ilat]: #ilatrange - for iilon in [ilon]: #ilonrange - LAIpixel += globaldata.datasets[key].page[key].isel(time = idatetime,lat=iilat,lon=iilon,drop=True).values - - - # if np.isnan(tarray[idatetime]): - # print("interpolating GIMMS LAIpixel nan value") - # - # mask = np.isnan(tarray) - # - # #replace each nan value with a interpolated value - # if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]: - # tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask]) - # - # else: - # print("Warning. Could not interpolate GIMMS LAIpixel nan value") - - # tarray *= np.nan - - count += 1 - #tarray_res += tarray - LAIpixel = LAIpixel/count - - count = 0 - #tarray = globaldata.keys[dataset][key].isel({'lat':[ilat],'lon':[ilon]}).mean(dim=['lat','lon']).values - - self.update(source='globaldata',pars={'LAIpixel':np.float(LAIpixel)}) - #print('LAIpixel:',self.__dict__['LAIpixel']) - #print('cveg:',self.__dict__['cveg']) - - # finally, we rescale the LAI according to the vegetation - # fraction - value = 0. - if ((self.pars.cveg is not None) and (self.pars.cveg > 0.1)): - value =self.pars.LAIpixel/self.pars.cveg - else: - # in case of small vegetation fraction, we take just a standard - # LAI value. It doesn't have a big influence anyway for - # small vegetation - value = 2. 
- #print('LAI:',self.__dict__['LAI']) - self.update(source='globaldata',pars={'LAI':value}) - - - # in case we have 'sp', we also calculate the 3d pressure fields at - # full level and half level - if ('sp' in keys) and ('sp' in self.pars.__dict__): - pdAB = pd.read_fwf('/user/data/gent/gvo000/gvo00090/EXT/scripts/ECMWF/ecmwf_coeffs_L60_wrf.txt',header=None,names=['A','B'],index_col=0) - - phalf,pfull =calc_air_ac_pres_L60(self.pars.sp,pdAB.A.values,pdAB.B.values) - - - # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE - # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR - # # # CALCULATING THE ADVECTION PROFILES - # # hydrostatic thickness of each model layer - delpdgrav = -(phalf[:-1] - phalf[1:])/grav - # # dz = rhodz/(R * T / pfull) - - - # # subsidence multiplied by density. We calculate the subsidence of - # # the in class itself - # wrho = np.zeros_like(phalf) - # wrho[-1] = 0. - - # for ihlev in range(0,wrho.shape[0]-1): - # # subsidence multiplied by density is the integral of - # # divergences multiplied by the layer thicknessies - # wrho[ihlev] = ((self.air_ac['divU_x'][ihlev:] + \ - # self.air_ac['divU_y'][ihlev:]) * \ - # delpdgrav[ihlev:]).sum() - - - - self.update(source='globaldata',\ - air_ac=pd.DataFrame({'p':list(pfull)})) - self.update(source='globaldata',\ - air_ach=pd.DataFrame({'p':list(phalf)})) - self.update(source='globaldata',\ - air_ac=pd.DataFrame({'delpdgrav':list(delpdgrav)})) - # self.update(source='globaldata',\ - # air_ach=pd.DataFrame({'wrho':list(wrho)})) - - def check_source(self,source,check_only_sections=None): - """ this procedure checks whether data of a specified source is valid. 
- - INPUT: - source: the data source we want to check - check_only_sections: a string or list with sections to be checked - OUTPUT: - returns True or False - """ - - # we set source ok to false as soon as we find a invalid input - source_ok = True - - # convert to a single-item list in case of a string - check_only_sections_def = (([check_only_sections]) if \ - type(check_only_sections) is str else \ - check_only_sections) - - if source not in self.sources.keys(): - self.logger.info('Source '+source+' does not exist') - source_ok = False - - for sectiondatakey in self.sources[source]: - section,datakey = sectiondatakey.split(':') - if ((check_only_sections_def is None) or \ - (section in check_only_sections_def)): - checkdatakeys = [] - if type(self.__dict__[section]) is pd.DataFrame: - checkdata = self.__dict__[section] - elif type(self.__dict__[section]) is model_input: - checkdata = self.__dict__[section].__dict__ - - if (datakey not in checkdata): - # self.logger.info('Expected key '+datakey+\ - # ' is not in parameter input') - source_ok = False - elif (checkdata[datakey] is None) or \ - (pd.isnull(checkdata[datakey]) is True): - - # self.logger.info('Key value of "'+datakey+\ - # '" is invalid: ('+ \ - # str(self.__dict__[section].__dict__[datakey])+')') - source_ok = False - - return source_ok - - def check_source_globaldata(self): - """ this procedure checks whether all global parameter data is - available, according to the keys in the self.sources""" - - source_globaldata_ok = True - - #self.get_values_air_input() - - # and now we can get the surface values - #class_settings = class4gl_input() - #class_settings.set_air_input(input_atm) - - # we only allow non-polar stations - if not (self.pars.lat <= 60.): - source_globaldata_ok = False - self.logger.info('cveg is invalid: ('+str(self.pars.cveg)+')') - - # check lat and lon - if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)): - source_globaldata_ok = False - self.logger.info('lat is invalid: 
('+str(self.pars.lat)+')') - self.logger.info('or lon is invalid: ('+str(self.pars.lon)+')') - else: - # we only check the ground parameter data (pars section). The - # profile data (air_ap section) are supposed to be valid in any - # case. - source_ok = self.check_source(source='globaldata',\ - check_only_sections=['air_ac',\ - 'air_ap',\ - 'pars']) - if not source_ok: - source_globaldata_ok = False - - # Additional check: we exclude desert-like - if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)): - source_globaldata_ok = False - self.logger.info('cveg is invalid: ('+str(self.pars.cveg)+')') - if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)): - source_globaldata_ok = False - self.logger.info('LAI is invalid: ('+str(self.pars.LAI)+')') - elif self.pars.cveg < 0.02: - self.logger.info('cveg is too low: ('+str(self.pars.cveg)+')') - source_globaldata_ok = False - - return source_globaldata_ok - - -class c4gli_iterator(): - """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially - - for information/documentation on creating such iterator classes, see: https://stackoverflow.com/questions/19151/build-a-basic-python-iterator - """ - def __init__(self,file): - # take file as IO stream - self.file = file - self.yaml_generator = yaml.load_all(file) - self.current_dict = {} - self.current_class4gl_input = class4gl_input() - separator = self.file.readline() # this is just dummy - self.header = file.readline() - if self.header != '# CLASS4GL record; format version: 0.1\n': - raise NotImplementedError("Wrong format version: '"+self.header+"'") - def __iter__(self): - return self - def __next__(self): - self.current_dict = self.yaml_generator.__next__() - self.current_class4gl_input.load_yaml_dict(self.current_dict) - return self.current_class4gl_input - - - -#get_cape and lift_parcel are adapted from the SkewT package - -class gl_dia(object): - def get_lifted_index(self,timestep=-1): - self.LI = 
get_lifted_index(self.input.Ps,self.out.T2m[timestep],self.out.q[timestep],self.p_pro,self.theta_pro,endp=50000.) - -#from SkewT -#def get_lcl(startp,startt,startdp,nsteps=101): -# from numpy import interp -# #-------------------------------------------------------------------- -# # Lift a parcel dry adiabatically from startp to LCL. -# # Init temp is startt in K, Init dew point is stwrtdp, -# # pressure levels are in Pa -# #-------------------------------------------------------------------- -# -# assert startdp<=startt -# -# if startdp==startt: -# return np.array([startp]),np.array([startt]),np.array([startdp]), -# -# # Pres=linspace(startp,60000.,nsteps) -# Pres=np.logspace(np.log10(startp),np.log10(60000.),nsteps) -# -# # Lift the dry parcel -# T_dry=(startt)*(Pres/startp)**(Rs_da/Cp_da) -# # Mixing ratio isopleth -# starte=VaporPressure(startdp) -# startw=MixRatio(starte,startp) -# e=Pres*startw/(.622+startw) -# T_iso=243.5/(17.67/np.log(e/6.112)-1.) + degCtoK -# -# # Solve for the intersection of these lines (LCL). -# # interp requires the x argument (argument 2) -# # to be ascending in order! -# P_lcl=interp(0.,T_iso-T_dry,Pres) -# T_lcl=interp(P_lcl,Pres[::-1],T_dry[::-1]) -# -# # # presdry=linspace(startp,P_lcl) -# # presdry=logspace(log10(startp),log10(P_lcl),nsteps) -# -# # tempdry=interp(presdry,Pres[::-1],T_dry[::-1]) -# # tempiso=interp(presdry,Pres[::-1],T_iso[::-1]) -# -# return P_lcl,T_lcl - - - -def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25): - """ Calculate mixed-layer height from temperature and wind speed profile - - Input: - HAGL: height coordinates [m] - THTV: virtual potential temperature profile [K] - WSPD: wind speed profile [m/s] - - Output: - BLH: best-guess mixed-layer height - BLHu: upper limit of mixed-layer height - BLHl: lower limit of mixed-layer height - - """ - - #initialize error BLH - BLHe = 0. 
- eps = 2.#security limit - iTHTV_0 = np.where(~np.isnan(THTV))[0] - if len(iTHTV_0) > 0: - iTHTV_0 = iTHTV_0[0] - THTV_0 = THTV[iTHTV_0] - else: - THTV_0 = np.nan - - RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2. - - - #RiB = 9.81/THTV_0 * ( THTV[i-1] + (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2 - #RiB - RiBc = 0 - - #best guess of BLH - - #print("RiB: ",RiB) - #print("RiBc: ",RiBc) - - - - BLHi = np.where(RiB > RiBc)[0] - if len(BLHi ) > 0: - BLHi = BLHi[0] - #print("BLHi: ",BLHi) - BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] - - # possible error is calculated as the difference height levels used for the interpolation - BLHu = np.max([BLH,HAGL[BLHi]-eps]) - BLHl = np.min([BLH,HAGL[BLHi-1]+eps]) - # calculate an alternative BLH based on another critical Richardson number (RiBce): - BLHi =np.where(RiB > RiBce)[0] - if len(BLHi ) > 0: - BLHi = BLHi[0] - - BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] - BLHu = np.max([BLHu,HAGL[BLHi]-eps]) - BLHl = np.min([BLHl,HAGL[BLHi-1]+eps]) - - BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)]) - BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)]) - - else: - BLH,BLHu,BLHl = np.nan, np.nan,np.nan - - else: - BLH,BLHu,BLHl = np.nan, np.nan,np.nan - - return BLH,BLHu,BLHl - - - -#from class -def get_lcl(startp,startt,startqv): - # Find lifting condensation level iteratively - lcl = 20. - RHlcl = 0.5 - - itmax = 30 - it = 0 - while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it 0: - iTHTV_0 = iTHTV_0[0] - THTV_0 = THTV[iTHTV_0] - else: - THTV_0 = np.nan - RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2. 
- - - #RiB = 9.81/THTV_0 * ( THTV[i-1] + (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2 - #RiB - RiBc = 0 - - #best guess of BLH - - #print("RiB: ",RiB) - #print("RiBc: ",RiBc) - - - - BLHi = np.where(RiB > RiBc)[0] - if len(BLHi ) > 0: - BLHi = BLHi[0] - #print("BLHi: ",BLHi) - BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] - - # possible error is calculated as the difference height levels used for the interpolation - BLHu = np.max([BLH,HAGL[BLHi]-eps]) - BLHd = np.min([BLH,HAGL[BLHi-1]+eps]) - # calculate an alternative BLH based on another critical Richardson number (RiBce): - BLHi =np.where(RiB > RiBce)[0] - if len(BLHi ) > 0: - BLHi = BLHi[0] - - BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1] - BLHu = np.max([BLHu,HAGL[BLHi]-eps]) - BLHd = np.min([BLHd,HAGL[BLHi-1]+eps]) - - BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)]) - BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)]) - - else: - BLH,BLHu,BLHd = np.nan, np.nan,np.nan - - else: - BLH,BLHu,BLHd = np.nan, np.nan,np.nan - - return BLH,BLHu,BLHd - -def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)): - STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds()) - return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)] - - -#from os import listdir -#from os.path import isfile #,join -import glob - - -class wyoming(object): - def __init__(self): - self.status = 'init' - self.found = False - self.DT = None - self.current = None - #self.mode = 'b' - self.profile_type = 'wyoming' - self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] - self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/" - - def set_STNM(self,STNM): - self.__init__() - self.STNM = STNM - self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html") - self.FILES = [os.path.realpath(FILE) for FILE in self.FILES] - self.current = None - self.found = False - 
self.FILES.sort() - - def find_first(self,year=None,get_atm=False): - self.found = False - - # check first file/year or specified year - if year == None: - self.iFN = 0 - self.FN = self.FILES[self.iFN] - else: - self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html") - self.iFN = self.FILES.index(self.FN) - self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") - self.current = self.sounding_series.find('h2') - keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)" - - # go through other files and find first sounding when year is not specified - self.iFN=self.iFN+1 - while keepsearching: - self.FN = self.FILES[self.iFN] - self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") - self.current = self.sounding_series.find('h2') - self.iFN=self.iFN+1 - keepsearching = (self.current is None) and (self.iFN < len(self.FILES)) - self.found = (self.current is not None) - - self.status = 'fetch' - if self.found: - self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) - - if self.found and get_atm: - self.get_values_air_input() - - - def find(self,DT,get_atm=False): - - self.found = False - keepsearching = True - #print(DT) - # we open a new file only when it's needed. Otherwise we just scroll to the right sounding. 
- if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)): - self.DT = DT - self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html") - self.iFN = self.FILES.index(self.FN) - self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") - self.current = self.sounding_series.find('h2') - - keepsearching = (self.current is not None) - while keepsearching: - DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) - if DTcurrent == DT: - self.found = True - keepsearching = False - if get_atm: - self.get_values_air_input() - self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) - elif DTcurrent > DT: - keepsearching = False - self.current = None - else: - self.current = self.current.find_next('h2') - if self.current is None: - keepsearching = False - self.found = (self.current is not None) - self.status = 'fetch' - - def find_next(self,get_atm=False): - self.found = False - self.DT = None - if self.current is None: - self.find_first() - else: - self.current = self.current.find_next('h2') - self.found = (self.current is not None) - keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES))) - while keepsearching: - self.iFN=self.iFN+1 - self.FN = self.FILES[self.iFN] - self.sounding_series = BeautifulSoup(open(self.FN), "html.parser") - self.current = self.sounding_series.find('h2') - - self.found = (self.current is not None) - keepsearching = ((self.current is None) and (self.iFN < len(self.FILES))) - if self.found: - self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13])) - if self.found and get_atm: - 
self.get_values_air_input() - - - - def get_values_air_input(self,latitude=None,longitude=None): - - # for iDT,DT in enumerate(DTS): - - #websource = urllib.request.urlopen(webpage) - #soup = BeautifulSoup(open(webpage), "html.parser") - - - #workaround for ...last line has
 which results in stringlike first column
-        string = self.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = self.current.find_next('pre').find_next('pre').text
-
-        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
-        #PARAMS.insert(0,'date',DT)
-
-        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
-        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
-        
-        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
-        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
-        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
-        HAGL = HGHT - np.float(PARAMS['Station elevation'])
-        ONE_COLUMN.insert(0,'HAGL',HAGL)
-
-        
-        
-        
-        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
-        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
-        ONE_COLUMN.insert(0,'QABS',QABS)
-        
-        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
-
-        #mixed layer potential temperature
-        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
-
-        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
-        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
-        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
-
-        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
-        BLHV = np.max((BLHV,10.))
-        BLHVu = np.max((BLHVu,10.))
-        BLHVd = np.max((BLHVd,10.))
-        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
-
-        #security values for mixed-layer jump values dthetav, dtheta and dq
-        
-        # fit new profiles taking the above-estimated mixed-layer height
-        ONE_COLUMNNEW = []
-        for BLH in [BLHV,BLHVu,BLHVd]:
-            ONE_COLUMNNEW.append(pd.DataFrame())
-            
-            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
-            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
-            
-            listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
-                
-                # get index of lowest valid observation. This seems to vary
-                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
-                if len(idxvalid) > 0:
-                    #print('idxvalid',idxvalid)
-                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
-                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
-                    else:
-                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
-                else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
-                    #print(col,meanabl)
-               
-                
-                # if col == 'PRES':
-                #     meanabl =  
-            
-                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
-                #THTVM = np.nanmean(THTV[HAGL <= BLH])
-                #print("new_pro_h",new_pro_h)
-                # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV',]:
-                    #for moisture
-                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
-                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
-                    if len(listHAGLNEW) > 4:
-                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
-                        dtheta = np.max((0.1,dtheta_pre))
-                        #meanabl = meanabl - (dtheta - dtheta_pre)
-                        #print('dtheta_pre',dtheta_pre)
-                        #print('dtheta',dtheta)
-                        #print('meanabl',meanabl)
-                        #stop
-                        
-                    else:
-                        dtheta = np.nan
-                else:
-                    if len(listHAGLNEW) > 4:
-                        #for moisture (it can have both negative and positive slope)
-                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
-                    else:
-                        dtheta = np.nan
-                #print('dtheta',dtheta)
-                
-                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
-            
-                
-                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
-                
-            #QABSM = np.nanmean(QABS[HAGL <= BLH])
-            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
-            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
-            
-        # we just make a copy of the fields, so that it can be read correctly by CLASS 
-        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
-            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
-            
-            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
-        
-            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
-            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
-
-
-        # assign fields adopted by CLASS
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'b':
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'u':
-            PARAMS.insert(0,'h',   BLHVu)
-        elif self.mode == 'd':
-            PARAMS.insert(0,'h',   BLHVd)
-        else:
-            PARAMS.insert(0,'h',   BLHV)
-            
-
-        try:
-            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
-        except:
-            print("could not convert latitude coordinate")
-            PARAMS.insert(0,'latitude', np.nan)
-            PARAMS.insert(0,'lat', np.nan)
-        try:
-            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
-            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwhich) 
-            PARAMS.insert(0,'lon', 0.)
-        except:
-            print("could not convert longitude coordinate")
-            PARAMS.insert(0,'longitude', np.nan)
-            PARAMS.insert(0,'lon', 0.)
-
-        if latitude is not None:
-            print('overwriting latitude with specified value')
-            PARAMS['latitude'] = np.float(latitude)
-            PARAMS['lat'] = np.float(latitude)
-        if longitude is not None:
-            print('overwriting longitude with specified value')
-            PARAMS['longitude'] = np.float(longitude)
-        try:
-            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwhich hence taking lon=0)
-            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
-            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            # This is the nearest datetime when sun is up (for class)
-            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
-            # apply the same time shift for UTC datetime
-            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
-            
-        except:
-            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
-            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
-            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
-            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
-
-        
-
-        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
-        # as we are forcing lon equal to zero this is is expressed in local suntime
-        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
-
-           
-        ONE_COLUMNb = ONE_COLUMNNEW[0]
-        ONE_COLUMNu = ONE_COLUMNNEW[1]
-        ONE_COLUMNd = ONE_COLUMNNEW[2]
-        
-
-        THTVM = np.nanmean(THTV[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
-        
-        QABSM = np.nanmean(QABS[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
-        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
-        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
-
-        BLHVe = abs(BLHV - BLHVu)
-        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-
-        #PARAMS.insert(0,'dq',0.)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
-        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
-        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
-        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
-        
-        if self.mode == 'o': #original 
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
-        elif self.mode == 'b': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNb
-            BLCOLUMN = ONE_COLUMNb
-        elif self.mode == 'u': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNu
-            BLCOLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNd
-            BLCOLUMN = ONE_COLUMNd
-        else:
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb
-
-        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
-        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
-        # print(BLCOLUMN['HAGL'][lt6000])
-        # print(BLCOLUMN['HAGL'][lt2500])
-        # 
-        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
-
-        #print(BLCOLUMN['HAGL'][lt2500])
-        PARAMS.insert(0,'OK',
-                      ((BLHVe < 200.) and 
-                       ( len(np.where(lt6000)[0]) > 5) and
-                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
-                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
-                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
-                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
-                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
-                      )
-                     )
-
-        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
-        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
-        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
-        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
-        
-        
-        PARAMS = PARAMS.T
-
-        
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = USE_ONECOLUMN
-        # if self.mode == 'o': #original 
-        #     self.ONE_COLUMN = ONE_COLUMN
-        # elif self.mode == 'b': # best BLH
-        #     self.ONE_COLUMN = ONE_COLUMNb
-        # elif self.mode == 'u':# upper BLH
-        #     self.ONE_COLUMN = ONE_COLUMNu
-        # elif self.mode == 'd': # lower BLH
-        #     self.ONE_COLUMN=ONE_COLUMNd
-        # else:
-        #     self.ONE_COLUMN = ONE_COLUMN
-
diff --git a/dist/class4gl-0.1dev/lib/data_global.py b/dist/class4gl-0.1dev/lib/data_global.py
deleted file mode 100644
index 9c3d9b5..0000000
--- a/dist/class4gl-0.1dev/lib/data_global.py
+++ /dev/null
@@ -1,936 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: Hendrik Wouters
-
-Purpose: provides class routines for ground and atmosphere conditions used for
-the CLASS miced-layer model
-
-Usage:
-    from data_global import data_global
-    from class4gl import class4gl_input
-    from data_soundings import wyoming
-
-    # create a data_global object and load initial data pages
-    globaldata = data_global()
-    globaldata.load_datasets()
-    # create a class4gl_input object
-    c4gli = class4gl_input()
-    # Initialize it with profile data. We need to do this first. Actually this
-    # will set the coordinate parameters (datetime, latitude, longitude) in
-    # class4gl_input.pars.__dict__, which is required to read point data from
-    # the data_global object.
-
-    # open a Wyoming stream for a specific station
-    wy_strm = wyoming(STNM=91376)
-    # load the first profile
-    wy_strm.find_first()
-    # load the profile data into the class4gl_input object
-    c4gli.get_profile_wyoming(wy_strm)
-    
-    # and finally, read the global input data for this profile
-    c4gli.get_global_input(globaldata)
-
-
-"""
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-#import pynacolada as pcd
-import pandas as pd
-import xarray as xr
-import os
-import glob
-import sys
-import errno
-import warnings
-import logging
-
-
-#formatter = logging.Formatter()
-logging.basicConfig(format='%(asctime)s - \
-                               %(name)s - \
-                               %(levelname)s - \
-                               %(message)s')
-
-class book(object):
-    """ this is a class for a dataset spread over multiple files. It has a
-    similar purpose  open_mfdataset, but only 1 file (called current 'page')
-    one is loaded at a time. This saves precious memory.  """
-    def __init__(self,fn,concat_dim = None,debug_level=None):
-        self.logger = logging.getLogger('book')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        # filenames are expanded as a list and sorted by filename
-        self.pages = glob.glob(fn); self.pages.sort()
-        # In case length of the resulting list is zero, this means no file was found that matches fn. In that case we raise an error.
-        if len(self.pages) == 0:
-            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
-        self.ipage = -1; self.page = None
-        self.renames = {} # each time when opening a file, a renaming should be done.
-        self.set_page(0)
-
-        # we consider that the outer dimension is the one we concatenate
-        self.concat_dim = concat_dim
-        if self.concat_dim is None:
-            self.concat_dim = self.concat_dim=list(self.page.dims.keys())[0]
-
-    # this wraps the xarray sel-commmand
-    def sel(*args, **kwargs):
-        for dim in kwargs.keys():
-            if dim == self.concat_dim:
-                self.browse_page(**{dim: kwargs[dim]})
-        return page.sel(*args,**kwargs)
-
-
-    ## this wraps the xarray class -> some issues with that, so I just copy the sel command (which I do not use yet)
-    #def __getattr__(self,attr):
-    #    orig_attr = self.page.__getattribute__(attr)
-    #    if callable(orig_attr):
-    #        def hooked(*args, **kwargs):
-    #            for dim in kwargs.keys():
-    #                if dim == self.concat_dim:
-    #                    self.browse_page(**{dim: kwargs[dim]})
-    #
-    #            result = orig_attr(*args, **kwargs)
-    #            # prevent wrapped_class from becoming unwrapped
-    #            if result == self.page:
-    #                return self
-    #            self.post()
-    #            return result
-    #        return hooked
-    #    else:
-    #        return orig_attr
-
-    def set_renames(self,renames):
-        #first, we convert back to original names, and afterwards, we apply the update of the renames.
-        reverse_renames = dict((v,k) for k,v in self.renames.items())
-        self.renames = renames
-        if self.page is not None:
-            self.page = self.page.rename(reverse_renames)
-            self.page = self.page.rename(self.renames)
-
-    def set_page(self,ipage,page=None):
-        """ this sets the right page according to ipage:
-                - We do not switch the page if we are already at the right one
-                - we set the correct renamings (level -> lev, latitude -> lat,
-                etc.)
-                - The dataset is also squeezed.
-        """
-
-        if ((ipage != self.ipage) or (page is not None)):
-
-            if self.page is not None:
-                self.page.close()
-
-            self.ipage = ipage
-            if page is not None:
-                self.page = page
-            else:
-                if self.ipage == -1:
-                   self.page = None
-                else:
-                    #try:
-
-                    self.logger.info("Switching to page "+str(self.ipage)+': '\
-                                     +self.pages[self.ipage])
-                    self.page = xr.open_dataset(self.pages[self.ipage])
-
-
-            # do some final corrections to the dataset to make them uniform
-            if self.page is not None:
-               if 'latitude' in self.page.dims:
-#    sel       f.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-                   self.page = self.page.rename({'latitude':'lat','longitude':'lon'})
-               if 'level' in self.page.dims:
-                   self.page = self.page.rename({'level':'lev'})
-
-               self.page = self.page.rename(self.renames)
-               self.page = self.page.squeeze(drop=True)
-
-    def browse_page(self,rewind=2,**args):
-
-        # at the moment, this is only tested with files that are stacked according to the time dimension.
-        dims = args.keys()
-
-
-        if self.ipage == -1:
-            self.set_page(0)
-
-        found = False
-        iipage = 0
-        startipage = self.ipage - rewind
-        while (iipage < len(self.pages)) and not found:
-            ipage = (iipage+startipage) % len(self.pages)
-            for dim in args.keys():
-                this_file = True
-
-                # here we store the datetimes in a directly-readable dictionary, so that we don't need to load it every time again
-                if 'dims' not in self.__dict__:
-                    self.dims = {}
-                if dim not in self.dims.keys():
-                    self.dims[dim] = [None]*len(self.pages)
-
-                if self.dims[dim][ipage] is None:
-                    self.logger.info('Loading coordinates of dimension "'+dim+\
-                                     '" of page "' +str(ipage)+'".')
-                    self.set_page(ipage)
-                    # print(ipage)
-                    # print(dim)
-                    # print(dim,self.page[dim].values)
-                    self.dims[dim][ipage] = self.page[dim].values
-
-                # determine current time range of the current page
-                mindim = self.dims[dim][ipage][0] -(self.dims[dim][ipage][1] - self.dims[dim][ipage][0])/2.
-                maxdim = self.dims[dim][ipage][-1] +(self.dims[dim][ipage][-1] - self.dims[dim][ipage][-2])/2.
-
-                if not ((args[dim] >= mindim) and (args[dim] < maxdim )):
-                    this_file = False
-
-            if this_file:
-                found = True
-                self.set_page(ipage)
-            else:
-
-                #if ((args[dim] >= self.page[dim].min().values) and (args[dim] < self.page[dim].max().values)):
-                #    iipage = len(self.pages) # we stop searching
-
-                iipage += 1
-
-        if not found:
-            self.logger.info("Page not found. Setting to page -1")
-            #iipage = len(self.pages) # we stop searching further
-            self.set_page(-1)
-
-        if self.ipage != -1:
-            self.logger.debug("I'm now at page "+ str(self.ipage)+': '+self.pages[self.ipage])
-        else:
-            self.logger.debug("I'm now at page "+ str(self.ipage))
-
-
-class data_global(object):
-    def __init__(self,sources= {
-        # # old gleam
-        # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
-        # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
-        # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
-        # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
-        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
-        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
-        #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
-        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
-        "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
-        "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
-        "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
-        'IGBPDIS:wsat'  : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc',
-        "ERAINT:Ts"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
-        "ERAINT:Tsoil"  : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc:stl1",
-        "ERAINT:T2"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc:stl2",
-        "ERAINT:cc"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc:tcc",
-        'IGBPDIS:wfc'   : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc',
-        'IGBPDIS:wwilt' : '/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc:wwp',
-        'MOD44B:cveg'   : '/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc:fv',
-        #'CERES:cc'      : '/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset*.nc:cldarea_total_1h',
-        "DSMW:b"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:b",
-        #"DSMW.C1sat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C1sat",
-        #"DSMW.C2ref"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:C2ref",
-        #"DSMW.p"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:p",
-        #"DSMW.a"        : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:a",
-        #"DSMW.CGsat"    : "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc:DSMW:CGsat",
-        "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI",
-        #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h',
-        #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h'
-        "ERAINT:advt_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x",
-        "ERAINT:advt_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y",
-        "ERAINT:advq_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc",
-        "ERAINT:advq_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc",
-        "ERAINT:advu_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc",
-        "ERAINT:advu_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc",
-        "ERAINT:advv_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc",
-        "ERAINT:advv_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc",
-        #"ERAINT:divU_x"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__",
-        #"ERAINT:divU_y"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__",
-        "ERAINT:sp"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc",
-        "ERAINT:wp"  : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w',
-        #"MSWEP:pr"    :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation"
-        },debug_level=None):
-        self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it).
-        self.sources = sources
-        self.datarefs = {}
-        self.datasets = {}
-        self.datetime = dt.datetime(1981,1,1)
-
-        self.logger = logging.getLogger('data_global')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-        self.debug_level = debug_level
-
-        warnings.warn('omitting pressure field p and advection')
-
-    def in_library(self,fn):
-        if fn not in self.library.keys():
-            return False
-        else:
-            print("Warning: "+fn+" is already in the library.")
-            return True
-
-    def add_to_library(self,fn):
-        if not self.in_library(fn):
-            print("opening: "+fn)
-            self.library[fn] = \
-                book(fn,concat_dim='time',debug_level=self.debug_level)
-
-            #self.library[fn] = xr.open_mfdataset(fn,concat_dim='time')
-            #if 'latitude' in self.library[fn].variables:
-            #    self.library[fn] = self.library[fn].rename({'latitude':'lat','longitude':'lon'})
-
-
-    # default procedure for loading datasets into the globaldata library
-    def load_dataset_default(self,input_fn,varssource=None,varsdest=None):
-        if type(varssource) is str:
-            varssource = [varssource]
-        if type(varsdest) is str:
-            varsdest = [varsdest]
-
-        self.add_to_library(input_fn)
-
-        if varssource is None:
-            varssource = []
-            for var in self.sources[input_fn].variables:
-                avoid = \
-                ['lat','lon','latitude','longitude','time','lev','level']
-                if ((len(list(var.shape)) >= 2) & (var not in avoid)): #two-dimensional array
-                    varssource.append(var)
-
-        if varsdest is None:
-            varsdest = varssource
-
-        #input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
-        for ivar,vardest in enumerate(varsdest):
-            varsource = varssource[ivar]
-            print('setting '+vardest+' as '+varsource+' from '+input_fn)
-
-            if vardest in self.datarefs.keys():
-                print("Warning! "+vardest+' is already provided by ',self.datarefs[vardest]+'. \n Overwriting....')
-            #self.add_to_library(fn,varsource,vardest)
-            if vardest != varsource:
-                libkey = input_fn+'.'+varsource+'.'+vardest
-                if libkey not in self.library.keys():
-                    #self.library[libkey] = self.library[input_fn].rename({varsource:vardest})
-                    self.library[libkey] = book(input_fn,\
-                                                debug_level=self.debug_level)
-                    self.library[libkey].set_renames({varsource: vardest})
-
-                self.datarefs[vardest] = libkey # this is to remember that it was originally varsource in input_fn
-                self.datasets[vardest] =self.library[self.datarefs[vardest]]
-            else:
-                self.datarefs[vardest] = input_fn
-                self.datasets[vardest] =self.library[self.datarefs[vardest]]
-
-            # if ((vardest is not None) & (vardest not in self.datasets[vardest].variables)):
-            #     print('Warning: '+ vardest "not in " + input_fn)
-
-
-
-    def load_datasets(self,sources = None,recalc=0):
-
-        if sources is None:
-            sources = self.sources
-        for key in sources.keys():
-            #datakey,vardest,*args = key.split(':')
-            datakey,vardest = key.split(':')
-            #print(datakey)
-
-            fnvarsource = sources[key].split(':')
-            if len(fnvarsource) > 2:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource,fnargs = fnvarsource
-                fnargs = [fnargs]
-            elif len(fnvarsource) > 1:
-                #fn,varsource,*fnargs = fnvarsource
-                fn,varsource = fnvarsource
-                fnargs = []
-            else:
-                fn = sources[key]
-                varsource = vardest
-            self.load_dataset(fn,varsource,vardest,datakey,recalc=recalc)
-
-    def load_dataset(self,fn,varsource,vardest,datakey,recalc=0):
-            # the default way of loading a 2d dataset
-            if datakey in ['CERES','GLEAM','ERAINT','GIMMS']:
-                self.load_dataset_default(fn,varsource,vardest)
-            elif datakey == 'IGBPDIS':
-                if vardest == 'alpha':
-                    ltypes = ['W','B','H','TC']
-                    for ltype in ltypes:
-                        self.load_dataset_default(fn,'f'+ltype,'f'+ltype)
-                        ##self.datasets['f'+ltype]['f'+ltype]=  self.datasets['f'+ltype]['f'+ltype].squeeze(drop=True)
-
-
-                    # landfr = {}
-                    # for ltype in ['W','B','H','TC']:
-                    #     landfr[ltype] = datasets['f'+ltype]['f'+ltype].values
-
-
-
-                    keytemp = 'alpha'
-                    fnkeytemp = fn+':IGBPDIS:alpha'
-                    if (os.path.isfile(fnkeytemp)) and ( recalc < 6):
-                        self.library[fnkeytemp]  = book(fnkeytemp,
-                                                        debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-                    else:
-                        self.library[fn+':IGBPDIS:alpha'] = xr.Dataset()
-                        #self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.zeros_like(self.datasets['IGBPDIS']['IGBPDIS'],dtype=np.float)*np.nan
-                        self.library[fn+':IGBPDIS:alpha']['lat'] = self.datasets['fW'].page['lat']
-                        self.library[fn+':IGBPDIS:alpha']['lon'] = self.datasets['fW'].page['lon']
-                        self.library[fn+':IGBPDIS:alpha'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['fW'].page['lon'].shape[0],self.datasets['fW'].page['lat'].shape[0]),dtype=np.float),dims=('lon','lat'))
-                        self.datasets[keytemp] = self.library[fn+':IGBPDIS:alpha']
-                        self.datarefs[keytemp] =fn+':IGBPDIS:alpha'
-
-                        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-
-                        alpha=self.library[fn+':IGBPDIS:alpha'][keytemp].values
-                        for ltype in ltypes:
-                            alpha += self.datasets['f'+ltype].page['f'+ltype].values*aweights[ltype]
-
-                        self.library[fn+':IGBPDIS:alpha'][keytemp].values = alpha
-                        print('writing file to: '+fnkeytemp)
-                        os.system('rm '+fnkeytemp)
-                        self.library[fnkeytemp].to_netcdf(fnkeytemp)
-                        self.library[fnkeytemp].close()
-
-
-                        self.library[fnkeytemp]  = \
-                            book(fnkeytemp,debug_level=self.debug_level)
-                        self.datasets[keytemp] = self.library[fnkeytemp]
-                        self.datarefs[keytemp] = fnkeytemp
-
-
-                else:
-                    self.load_dataset_default(fn,varsource,vardest)
-
-
-            elif datakey == 'GLAS':
-                self.load_dataset_default(fn,varsource,vardest)
-                if vardest == 'z0m':
-                    self.datasets['z0m'].page['z0m'].values = (self.datasets['z0m'].page['z0m'].values/10.).clip(0.01,None)
-                elif vardest == 'z0h':
-                    self.datasets['z0h'].page['z0h'].values = (self.datasets['z0h'].page['z0h'].values/100.).clip(0.001,None)
-            elif datakey == 'DSMW':
-
-
-                # Procedure of the thermal properties:
-                # 1. determine soil texture from DSMW/10.
-                # 2. soil type with look-up table (according to DWD/EXTPAR)
-                # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987)
-                #    with parameter look-up table from Noilhan and Planton (1989).
-                #    Note: The look-up table is inspired on DWD/COSMO
-
-                # to do: implement inheretance, so that the the preliminary output of DSMW or any other dataset can be calculated first
-
-
-
-                fnout = fn.replace('*','') # for storing computationally heavy soil properties, instead of calculating everytime
-                self.load_dataset_default(fn,'DSMW')
-                print('calculating texture')
-                SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code','undefined']
-                TEMP  = {}
-                TEMP2 = self.datasets['DSMW'].page['DSMW'].values
-                TEMP3 = {}
-                for SPKEY in SPKEYS:
-
-
-                    keytemp = SPKEY+'_values'
-                    fnoutkeytemp = fnout+':DSMW:'+keytemp
-                    if (os.path.isfile(fnoutkeytemp)) and ( recalc < 5 ):
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                    else:
-                        #DSMW = self.datasets['DSMW']['DSMW']#   self.input_nc.variables['DSMW'][ilat,ilon]
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lat'] = self.datasets['DSMW'].page['lat']
-                        self.library[fn+':DSMW:'+SPKEY+'_values']['lon'] = self.datasets['DSMW'].page['lon']
-                        self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.int),dims=('lat','lon'))
-                        #self.library[fn+':DSMW:'+SPKEY+'_values'][SPKEY+'_values'] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=(np.int if SPKEY == 'code' else np.float))
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-                        # for faster computation, we need to get it to memory out of Dask.
-                        TEMP[SPKEY] = self.datasets[SPKEY+'_values'][SPKEY+'_values'].values
-                        TEMP3[SPKEY] = self.datasets['DSMW'].page[SPKEY].values
-
-                # yes, I know I only check the last file.
-                if not ((os.path.isfile(fnoutkeytemp)) and ( recalc < 5)):
-                    for idx in range(len(self.datasets['DSMW'].page['tex_coarse'].values))[:]:
-                        print('idx',idx,SPKEY)
-                        SEL = (TEMP2 == idx)
-                    #     print(idx,len(TEMP3))
-                        for SPKEY in SPKEYS:
-                            TEMP[SPKEY][SEL] = TEMP3[SPKEY][idx]
-
-                    for SPKEY in SPKEYS:
-                        keytemp = SPKEY+'_values'
-                        fnoutkeytemp = fnout+':DSMW:'+keytemp
-                        self.datasets[SPKEY+'_values'][SPKEY+'_values'].values = TEMP[SPKEY][:]
-                        os.system('rm '+fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].to_netcdf(fnoutkeytemp)
-                        self.datasets[SPKEY+'_values'].close()
-
-
-                        self.library[fn+':DSMW:'+SPKEY+'_values'] = \
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY+'_values'] = self.library[fn+':DSMW:'+SPKEY+'_values']
-                        self.datarefs[SPKEY+'_values'] =fn+':DSMW:'+SPKEY+'_values'
-
-
-                keytemp = 'texture'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 3 ):
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-                else:
-                    self.library[fn+':DSMW:texture'] = xr.Dataset()
-                    #self.library[fn+':DSMW:texture'][keytemp] = xr.zeros_like(self.datasets['DSMW']['DSMW'],dtype=np.float)*np.nan
-                    self.library[fn+':DSMW:texture']['lat'] = self.datasets['DSMW'].page['lat']
-                    self.library[fn+':DSMW:texture']['lon'] = self.datasets['DSMW'].page['lon']
-                    self.library[fn+':DSMW:texture'][keytemp] = xr.DataArray(np.zeros(shape=(self.datasets['DSMW'].page['lat'].shape[0],self.datasets['DSMW'].page['lon'].shape[0]),dtype=np.float),dims=('lat','lon'))
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-
-                    self.datasets[keytemp][keytemp].values = (0.5*self.datasets['tex_medium_values'].page['tex_medium_values'].values+1.0*self.datasets['tex_coarse_values'].page['tex_coarse_values'].values)/(self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+self.datasets['tex_fine_values'].page['tex_fine_values'].values)
-
-                    zundef = np.array(self.datasets['undefined_values'].page['undefined_values'].values,dtype=np.float)
-                    zundef[zundef < 0] = np.nan
-                    zsum_tex = self.datasets['tex_coarse_values'].page['tex_coarse_values'].values+self.datasets['tex_medium_values'].page['tex_medium_values'].values+ self.datasets['tex_fine_values'].page['tex_fine_values'].values
-                    VALID  = (zsum_tex >= zundef) *( ~np.isnan(zundef))
-
-                    self.datasets[keytemp][keytemp].values[~VALID] = 9012.
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-
-
-                    self.library[fnoutkeytemp]  = \
-                        book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:texture']
-                    self.datarefs[keytemp] =fn+':DSMW:texture'
-
-
-                print('calculating texture type')
-
-
-
-                keytemp = 'itex'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 2 ):
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-                else:
-                    self.library[fnoutkeytemp] = xr.Dataset()
-                    self.library[fnoutkeytemp][keytemp] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-                    X = self.datasets['texture'].page['texture'].values*100
-                    X[pd.isnull(X)] = -9
-
-
-                    self.datasets[keytemp][keytemp].values = X
-
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets['itex'].to_netcdf(fnoutkeytemp)
-                    self.datasets['itex'].close()
-
-
-                    self.library[fnoutkeytemp] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets[keytemp] = self.library[fn+':DSMW:itex']
-                    self.datarefs[keytemp] =fn+':DSMW:itex'
-
-
-                keytemp = 'isoil'
-                fnoutkeytemp=fnout+':DSMW:'+keytemp
-                isoil_reprocessed = False
-                if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-                else:
-                    isoil_reprocessed = True
-                    print('calculating soil type')
-                    self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                    self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    self.datasets['isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                    ITEX = self.datasets['itex'].page['itex'].values
-                    ISOIL = 9 + 0.*self.datasets['isoil']['isoil'].values
-                    LOOKUP = [
-                              [-10 ,9],# ocean
-                              [0 ,7],# fine textured, clay (soil type 7)
-                              [20,6],# medium to fine textured, loamy clay (soil type 6)
-                              [40,5],# medium textured, loam (soil type 5)
-                              [60,4],# coarse to medium textured, sandy loam (soil type 4)
-                              [80,3],# coarse textured, sand (soil type 3)
-                              [100,9],# coarse textured, sand (soil type 3)
-                            ]
-                    for iitex,iisoil in LOOKUP:
-                        ISOIL[ITEX > iitex] = iisoil
-                        print('iitex,iisoil',iitex,iisoil)
-
-
-                    #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-                    LOOKUP = [
-                              [9001, 1 ], # ice, glacier (soil type 1)
-                              [9002, 2 ], # rock, lithosols (soil type 2)
-                              [9003, 3 ], # salt, set soiltype to sand (soil type 3)
-                              [9004, 8 ], # histosol, e.g. peat (soil type 8)
-                              [9,    9 ], # undefined (ocean)
-                              [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
-                              [9000, 9 ], # undefined (inland lake)
-                              [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
-                              [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
-                            ]
-                    # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
-                    CODE_VALUES = self.datasets['code_values'].page['code_values'].values
-
-                    CODE_VALUES[ITEX == 901200] = 9012
-                    for icode,iisoil in LOOKUP:
-                        ISOIL[CODE_VALUES == icode] = iisoil
-
-                    self.datasets['isoil']['isoil'].values = ISOIL
-                    os.system('rm '+fnoutkeytemp)
-                    self.datasets[keytemp].to_netcdf(fnoutkeytemp)
-                    self.datasets[keytemp].close()
-                    print('saved inbetween file to: '+fnoutkeytemp)
-
-                    self.library[fn+':DSMW:isoil'] = \
-                            book(fnoutkeytemp,debug_level=self.debug_level)
-                    self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                    self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-                #adopted from data_soil.f90 (COSMO5.0)
-                SP_LOOKUP = {
-                  # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea
-                  # (by index)                                           loam                    loam                                water      ice
-                  'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
-                  'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
-                  'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
-                  'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
-                  'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
-                  'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
-                  'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
-                  'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
-                  'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
-                  'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
-                  'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
-                  'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
-                  'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
-                  'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
-                  'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
-                  'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
-                  'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
-                  'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
-                  'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
-                  # Important note: For peat, the unknown values below are set equal to that of loam
-                  #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
-                  'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , 5.39    , np.nan   ,  np.nan  ],
-                  #error in table 2 of NP89: values need to be multiplied by e-6
-                  'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
-                  'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , 6.    , np.nan   ,  np.nan  ],
-
-                  'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , 0.148    , np.nan   ,  np.nan  ],
-                  'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , 0.191    , np.nan   ,  np.nan  ],
-                  'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , 0.8    , np.nan   ,  np.nan  ],
-                }
-
-
-                # isoil_reprocessed = False
-                # if (os.path.isfile(fnoutkeytemp)) and ( recalc < 1):
-
-                #     self.library[fn+':DSMW:isoil'] = \
-                #             book(fnoutkeytemp,debug_level=self.debug_level)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-                # else:
-                #     isoil_reprocessed = True
-                #     print('calculating soil type')
-                #     self.library[fn+':DSMW:isoil'] = xr.Dataset()
-                #     self.library[fn+':DSMW:isoil']['isoil'] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.int)
-                #     self.datasets['isoil'] = self.library[fn+':DSMW:isoil']
-                #     self.datarefs['isoil'] =fn+':DSMW:isoil'
-
-
-
-
-                # this should become cleaner in future but let's hard code it for now.
-                DSMWVARS = ["b", "C1sat","C2ref","p","a" ]
-                print('calculating soil parameter')
-                DATATEMPSPKEY = {}
-                if (recalc < 1) and (isoil_reprocessed == False): 
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        keytemp = SPKEY
-                        fnoutkeytemp=fnout+':DSMW:'+keytemp
-                        self.library[fn+':DSMW:'+SPKEY] =\
-                                book(fnoutkeytemp,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fnoutkeytemp]
-                        self.datarefs[SPKEY] =fnoutkeytemp
-                else:
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-
-                        self.library[fn+':DSMW:'+SPKEY] = xr.Dataset()
-                        self.library[fn+':DSMW:'+SPKEY][SPKEY] = xr.zeros_like(self.datasets['DSMW'].page['DSMW'],dtype=np.float)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-                        DATATEMPSPKEY[SPKEY] = self.datasets[SPKEY][SPKEY].values
-                    ISOIL = self.datasets['isoil'].page['isoil'].values
-                    print(np.where(ISOIL>0.))
-                    for i in range(11):
-                        SELECT = (ISOIL == i)
-                        for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                            DATATEMPSPKEY[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
-
-                    for SPKEY in DSMWVARS:#SP_LOOKUP.keys():
-                        self.datasets[SPKEY][SPKEY].values = DATATEMPSPKEY[SPKEY]
-
-                        os.system('rm '+fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].to_netcdf(fn+':DSMW:'+SPKEY)
-                        self.datasets[SPKEY].close()
-                        print('saved inbetween file to: '+fn+':DSMW:'+SPKEY)
-
-                        self.library[fn+':DSMW:'+SPKEY] = \
-                                book(fn+':DSMW:'+SPKEY,debug_level=self.debug_level)
-                        self.datasets[SPKEY] = self.library[fn+':DSMW:'+SPKEY]
-                        self.datarefs[SPKEY] =fn+':DSMW:'+SPKEY
-
-
-            else:
-                self.load_dataset_default(fn,varsource,vardest)
-
-
-
-
-
-
-#
-#                 # only print the last parameter value in the plot
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'cala'
-#                 #class_settings.__dict__[var] = np.float(SP['cala0'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#                 #inputs.append(cp.deepcopy(class_settings))
-#                 #var = 'crhoc'
-#                 #class_settings.__dict__[var] = np.float(SP['crhoc'])
-#                 #valnew = class_settings.__dict__[var]
-#                 #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#     key = "CERES"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         CERES_start_date = dt.datetime(2000,3,1)
-#         DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-#         DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-#         print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-#
-#         var = 'cc'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime_end = np.where(np.array(pcd.ncgetdatetime(input_nc))  < (class_settings.datetime+dt.timedelta(hours=int(class_settings.runtime/3600.))))[0][-1]
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#         print(class_settings.lat,class_settings.lon)
-#
-#         class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:idatetime_end,ilat,ilon])/100.
-#
-#         input_nc.close()
-#
-
-
-#     key = "GIMMS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-#         print("Reading Leag Area Index from "+input_fn)
-#         var = 'LAI'
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-#
-#         print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-#         tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-#
-#         if np.isnan(tarray[idatetime]):
-#             print("interpolating GIMMS cveg nan value")
-#
-#             mask = np.isnan(tarray)
-#             if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-#                 tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-#             else:
-#                 print("Warning. Could not interpolate GIMMS cveg nan value")
-#
-#         class_settings.__dict__[var] = tarray[idatetime]
-#
-#         input_nc.close()
-#
-#     key = "IGBPDIS_ALPHA"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         var = 'alpha'
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-#         print("Reading albedo from "+input_fn)
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#
-#         landfr = {}
-#         for ltype in ['W','B','H','TC']:
-#             landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-#
-#         aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-#
-#         alpha=0.
-#         for ltype in landfr.keys():
-#             alpha += landfr[ltype]*aweights[ltype]
-#
-#
-#         class_settings.__dict__[var] = alpha
-#         input_nc.close()
-#
-#
-#     key = "ERAINT_ST"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         print("Reading soil temperature from "+input_fn)
-#
-#         var = 'Tsoil'
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-#
-#         input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-#         var = 'T2'
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-#
-#         ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-#         ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-#
-#
-#         class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-#
-#
-#         input_nc.close()
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #var = 'T2'
-#     #valold = class_settings.__dict__[var]
-#     #
-#     #class_settings.__dict__[var] = 305.
-#     #class_settings.__dict__['Tsoil'] = 302.
-#     #valnew = class_settings.__dict__[var]
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     #inputs.append(cp.deepcopy(class_settings))
-#     #
-#     #var = 'Lambda'
-#     #valold = class_settings.__dict__[var]
-#
-#     ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book.
-#     ## I need to ask Chiel.
-#     ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-#     #
-#     #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg']
-#     #class_settings.__dict__[var] = valnew
-#     #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-#
-#
-#
-#     key = "GLAS"
-#     if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-#
-#         input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-#         print("Reading canopy height for determining roughness length from "+input_fn)
-#         var = 'z0m'
-#
-#
-#         #plt.plot
-#
-#         input_nc = nc4.Dataset(input_fn,'r')
-#
-#         ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-#         ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-#
-#         testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-#
-#         lowerlimit = 0.01
-#         if testval < lowerlimit:
-#             print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-#             class_settings.__dict__[var] = lowerlimit
-#         else:
-#             class_settings.__dict__[var] = testval
-#
-#         class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-#
-#
-#         input_nc.close()
-
-
-
-
-
diff --git a/dist/class4gl-0.1dev/lib/interface_functions.py b/dist/class4gl-0.1dev/lib/interface_functions.py
deleted file mode 100644
index 3e483f3..0000000
--- a/dist/class4gl-0.1dev/lib/interface_functions.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-#from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-#'_afternoon.yaml'
-def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
-    filename = yaml_file.name
-    #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-    #yaml_file = open(filename)
-
-    #print('going to next observation',filename)
-    yaml_file.seek(index_start)
-
-    buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-
-    filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-    filebuffer.write(buf)
-    filebuffer.close()
-    # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-    
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-
-    #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-    print(command)
-    os.system(command)
-    jsonstream = open(filename+'.buffer.json.'+str(index_start))
-    record_dict = json.load(jsonstream)
-    jsonstream.close()
-    os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-
-
-    if mode =='mod':
-        modelout = class4gl()
-        modelout.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        return modelout
-    elif mode == 'ini':
-
- 
-        # datetimes are incorrectly converted to strings. We need to convert them
-        # again to datetimes
-        for key,value in record_dict['pars'].items():
-            # we don't want the key with columns that have none values
-            if value is not None: 
-                if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str):
-               # elif (type(value) == str):
-                    record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-
-            if (value == 0.9e19) or (value == '.9e19'):
-                record_dict['pars'][key] = np.nan
-        for key in record_dict.keys():
-            #print(key)
-            if key in ['air_ap','air_balloon',]:
-                #NNprint('check')
-                for datakey,datavalue in record_dict[key].items():
-                    record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-
-        #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-
-        c4gli = class4gl_input()
-        print(c4gli.logger,'hello')
-        c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+filename+'.buffer.json.'+str(index_start))
-        return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-
-class stations(object):
-    def __init__(self,path,suffix='ini',refetch_stations=False):
-
-        self.path = path
-
-        self.file = self.path+'/stations_list.csv'
-        if (os.path.isfile(self.file)) and (not refetch_stations):
-            self.table = pd.read_csv(self.file)
-        else:
-            self.table = self.get_stations(suffix=suffix)
-            self.table.to_csv(self.file)
-        
-        self.table = self.table.set_index('STNID')
-        #print(self.table)
-
-    def get_stations(self,suffix):
-        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
-        if len(stations_list_files) == 0:
-            stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
-        stations_list_files.sort()
-        print(stations_list_files)
-        if len(stations_list_files) == 0:
-            raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
-        stations_list = []
-        for stations_list_file in stations_list_files:
-            thisfile = open(stations_list_file,'r')
-            yamlgen = yaml.load_all(thisfile)
-            try:
-                first_record  = yamlgen.__next__()
-            except:
-                first_record = None
-            if first_record is not None:
-                stations_list.append({})
-                for column in ['STNID','latitude','longitude']:
-                    #print(first_record['pars'].keys())
-                    stations_list[-1][column] = first_record['pars'][column]
-                stations_list[-1]['filename'] = os.path.split(stations_list_file)[1]
-            yamlgen.close()
-            thisfile.close()
-    
-        print(stations_list)
-        return pd.DataFrame(stations_list)
-
-class stations_iterator(object):
-    def __init__(self,stations):
-        self.stations = stations
-        self.ix = -1 
-    def __iter__(self):
-        return self
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.stations.table)) 
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_row(self,row):
-        self.ix = row
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-    def set_STNID(self,STNID):
-        self.ix = np.where((self.stations.table.index == STNID))[0][0]
-        print(self.ix)
-        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
-        return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
-
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-    def close():
-        del(self.ix)
-
-class records_iterator(object):
-    def __init__(self,records):
-            
-        self.records = records
-        self.ix = -1 
-        
-    def __iter__(self):
-        return self
-
-    def __next__(self,jump=1):
-        self.ix = (self.ix+jump) 
-        if self.ix >= len(self.records.index):
-            raise StopIteration
-        self.ix = np.mod(self.ix,len(self.records))
-        return self.records.index[self.ix], self.records.iloc[self.ix]
-    def __prev__(self):
-        return self.__next__(self,jump=-1)
-
-
-# #'_afternoon.yaml'
-# def get_record_yaml(yaml_file,index_start,index_end):
-#     filename = yaml_file.name
-#     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
-#     #yaml_file = open(filename)
-# 
-#     #print('going to next observation',filename)
-#     yaml_file.seek(index_start)
-# 
-#     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
-# 
-#     filebuffer = open(filename+'.buffer.yaml.'+str(index_start),'w')
-#     filebuffer.write(buf)
-#     filebuffer.close()
-#     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
-#     
-#     command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+filename+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+filename+'.buffer.json.'+str(index_start)+' '
-# 
-#     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
-#     print(command)
-#     os.system(command)
-#     jsonstream = open(filename+'.buffer.json.'+str(index_start))
-#     record_dict = json.load(jsonstream)
-#     jsonstream.close()
-#     os.system('rm '+filename+'.buffer.yaml.'+str(index_start))
-#  
-#     # datetimes are incorrectly converted to strings. We need to convert them
-#     # again to datetimes
-#     for key,value in record_dict['pars'].items():
-#         # we don't want the key with columns that have none values
-#         if value is not None: 
-#             if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','ldatetime_daylight','datetime_daylight','datetime_daylight']:#(type(value) == str):
-#            # elif (type(value) == str):
-#                 record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-#                 
-#                 # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-#                 record_dict['pars'][key] = record_dict['pars'][key].astimezone(pytz.UTC)
-# 
-#         if (value == 0.9e19) or (value == '.9e19'):
-#             record_dict['pars'][key] = np.nan
-#     for key in record_dict.keys():
-#         print(key)
-#         if key in ['air_ap','air_balloon',]:
-#             print('check')
-#             for datakey,datavalue in record_dict[key].items():
-#                 record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]]
-# 
-#     #os.system('rm '+filename+'.buffer.json.'+str(index_start))
-# 
-#     c4gli = class4gl_input()
-#     c4gli.load_yaml_dict(record_dict)
-#     return c4gli
-
-
-
-
-
-
-        # self.frames['stats']['records_current_station_index'] = \
-        #     (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-        #      == \
-        #      self.frames['stats']['current_station'].name)
-
-        # # create the value table of the records of the current station
-        # tab_suffixes = \
-        #         ['_mod','_obs','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        # for tab_suffix in tab_suffixes:
-        #     self.frames['stats']['records_current_station'+tab_suffix] = \
-        #         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-# class records_selection(object):
-#     def __init__
-
-# class records(object):
-#     def __init__(self,stations,path_obs,path_mod):
-#         self.stations = stations
-#         self.path_obs = path_obs
-#         self.path_mod = path_mod
-# 
-#         self.ini =       self.get_records(self.path_mod,'ini')
-#         self.mod =       self.get_records(self.path_mod,'mod')
-#         #self.morning =   self.get_records(self.path_obs,'morning')
-#         self.afternoon = self.get_records(self.path_obs,'afternoon')
-# 
-#         
-#         self.afternoon.index = self.afternoon.ldatetime.dt.date
-#         self.afternoon = self.afternoon.loc[records_ini.ldatetime.dt.date]
-# 
-#         self.index = self.ini.index
-#         self.mod.index = self.index
-#         self.afternoon.index = self.index
-# 
-# 
-#         #self.records_iterator = records_current_station_mod.iterrows()
-
-
-
-def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
-
-    records = pd.DataFrame()
-    for STNID,station in stations.iterrows():
-        dictfnchunks = []
-        if getchunk is 'all':
-
-            # we try the old single-chunk filename format first (usually for
-            # original profile pairs)
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.yaml'
-            if os.path.isfile(fn):
-                chunk = 0
-                dictfnchunks.append(dict(fn=fn,chunk=chunk))
-
-            # otherwise, we use the new multi-chunk filename format
-            else:
-                chunk = 0
-                end_of_chunks = False
-                while not end_of_chunks:
-                    fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
-                    if os.path.isfile(fn):
-                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
-                    else:
-                        end_of_chunks = True
-                    chunk += 1
-
-            # globyamlfilenames = path_yaml+'/'+format(STNID,'05d')+'*_'+subset+'.yaml'
-            # yamlfilenames = glob.glob(globyamlfilenames)
-            # yamlfilenames.sort()
-        else:
-            fn = path_yaml+'/'+format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
-            dictfnchunks.append(dict(fn=fn,chunk=getchunk))
-            
-        if len(dictfnchunks) > 0:
-            for dictfnchunk in dictfnchunks:
-                yamlfilename = dictfnchunk['fn']
-                chunk = dictfnchunk['chunk']
-                print(chunk)
-
-                #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
-                pklfilename = yamlfilename.replace('.yaml','.pkl')
-
-                #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
-                #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
-                generate_pkl = False
-                if not os.path.isfile(pklfilename): 
-                    print('pkl file does not exist. I generate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                elif not (os.path.getmtime(yamlfilename) <  \
-                    os.path.getmtime(pklfilename)):
-                    print('pkl file older than yaml file, so I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-
-                if refetch_records:
-                    print('refetch_records flag is True. I regenerate "'+\
-                          pklfilename+'" from "'+yamlfilename+'"...')
-                    generate_pkl = True
-                if not generate_pkl:
-                    records = pd.concat([records,pd.read_pickle(pklfilename)])
-                   # irecord = 0
-                else:
-                    with open(yamlfilename) as yaml_file:
-
-                        dictout = {}
-
-                        next_record_found = False
-                        end_of_file = False
-                        while (not next_record_found) and (not end_of_file):
-                            linebuffer = yaml_file.readline()
-                            next_record_found = (linebuffer == '---\n')
-                            end_of_file = (linebuffer == '')
-                        next_tell = yaml_file.tell()
-                        
-                        while not end_of_file:
-
-                            print(' next record:',next_tell)
-                            current_tell = next_tell
-                            next_record_found = False
-                            yaml_file.seek(current_tell)
-                            filebuffer = open(yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
-                            linebuffer = ''
-                            while ( (not next_record_found) and (not end_of_file)):
-                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
-                                linebuffer = yaml_file.readline()
-                                next_record_found = (linebuffer == '---\n')
-                                end_of_file = (linebuffer == '')
-                            filebuffer.close()
-                            
-                            next_tell = yaml_file.tell()
-                            index_start = current_tell
-                            index_end = next_tell
-
-                            
-                            #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
-                            print(command)
-                            
-                            os.system(command)
-                            #jsonoutput = subprocess.check_output(command,shell=True) 
-                            #print(jsonoutput)
-                            #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(yamlfilename+'.buffer.json.'+str(current_tell))
-                            record = json.load(jsonstream)
-                            dictouttemp = {}
-                            for key,value in record['pars'].items():
-                                # we don't want the key with columns that have none values
-                                if value is not None: 
-                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
-                                   if (type(value) in regular_numeric_types):
-                                        dictouttemp[key] = value
-                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
-                                       #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S")
-                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
-                            recordindex = record['index']
-                            dictouttemp['chunk'] = chunk
-                            dictouttemp['index_start'] = index_start
-                            dictouttemp['index_end'] = index_end
-                            os.system('rm '+yamlfilename+'.buffer.json.'+str(current_tell))
-                            for key,value in dictouttemp.items():
-                                if key not in dictout.keys():
-                                    dictout[key] = {}
-                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
-                            print(' obs record registered')
-                            jsonstream.close()
-                            os.system('rm '+yamlfilename+'.buffer.yaml.'+str(current_tell))
-                    records_station = pd.DataFrame.from_dict(dictout)
-                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+pklfilename+') for station '\
-                          +str(STNID))
-                    records_station.to_pickle(pklfilename)
-                    # else:
-                    #     os.system('rm '+pklfilename)
-                    records = pd.concat([records,records_station])
-    return records
-
-def stdrel(mod,obs,columns):
-    stdrel = pd.DataFrame(columns = columns)
-    for column in columns:
-        stdrel[column] = \
-                (mod.groupby('STNID')[column].transform('mean') -
-                 obs.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') + \
-                (mod[column] -
-                 mod.groupby('STNID')[column].transform('mean')) /\
-                obs.groupby('STNID')[column].transform('std') 
-    return stdrel
-
-def pct(obs,columns):
-    pct = pd.DataFrame(columns=columns)
-    for column in columns:
-        #print(column)
-        pct[column] = ""
-        pct[column] = obs[column].rank(pct=True)
-    return pct
-
-def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
-    stats = pd.DataFrame()
-    for key in keys: 
-        stats['d'+key+'dt'] = ""
-        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
-                              (obs_afternoon.ldatetime - \
-                               obs_morning.ldatetime).dt.seconds*3600.
-    return stats
-
diff --git a/dist/class4gl-0.1dev/lib/interface_multi.py b/dist/class4gl-0.1dev/lib/interface_multi.py
deleted file mode 100644
index 83148e5..0000000
--- a/dist/class4gl-0.1dev/lib/interface_multi.py
+++ /dev/null
@@ -1,2061 +0,0 @@
-import pandas as pd
-import numpy as np
-import datetime as dt
-import os
-import xarray as xr
-import sys
-from contextlib import suppress
-from time import sleep
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl,units
-from interface_functions import *
-# from data_soundings import wyoming
-import yaml
-import glob
-import pandas as pd
-import json
-import io
-import subprocess
-import pytz
-from scipy.stats import mstats
-
-from matplotlib.colors import LinearSegmentedColormap
-cdictpres = {'blue': (\
-                   (0.,    0.,  0.),
-                   (0.25,  0.25, 0.25),
-                   (0.5,  .70, 0.70),
-                   (0.75, 1.0, 1.0),
-                   (1,     1.,  1.),
-                   ),
-       'green': (\
-                   (0. ,   0., 0.0),
-                   (0.25,  0.50, 0.50),
-                   (0.5,  .70, 0.70),
-                   (0.75,  0.50, 0.50),
-                   (1  ,    0,  0.),
-                   ),
-       'red':  (\
-                  (0 ,  1.0, 1.0),
-                  (0.25 ,  1.0, 1.0),
-                   (0.5,  .70, 0.70),
-                  (0.75 , 0.25, 0.25),
-                  (1,    0., 0.),
-                  )}
-
-statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
-
-
-os.system('module load Ruby')
-
-class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
-        """ creates an interactive interface for analysing class4gl experiments
-
-        INPUT:
-            path_exp : path of the experiment output
-            path_obs : path of the observations 
-            globaldata: global data that is being shown on the map
-            refetch_stations: do we need to build the list of the stations again?
-        OUTPUT:
-            the procedure returns an interface object with interactive plots
-
-        """
-        
-        # set the ground
-        self.globaldata = globaldata
-
- 
-        self.path_exp = path_exp
-        self.path_obs = path_obs
-        self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
-
-        # # get the list of stations
-        # stationsfile = self.path_exp+'/stations_list.csv'
-        # if (os.path.isfile(stationsfile)) and (not refetch_stations):
-        #     stations = pd.read_csv(stationsfile)
-        # else:
-        #     stations = get_stations(self.path_exp)
-        #     stations.to_csv(stationsfile)
-
-        # stations = stations.set_index('STNID')
-
-        self.frames = {}
-
-        self.frames['stats'] = {}
-        self.frames['worldmap'] = {}
-                
-        self.frames['profiles'] = {}
-        self.frames['profiles'] = {}
-        self.frames['profiles']['DT'] = None
-        self.frames['profiles']['STNID'] = None
-
-        #self.frames['worldmap']['stationsfile'] = stationsfile
-        self.frames['worldmap']['stations'] = stations(self.path_exp, \
-                                                       suffix='ini',\
-                                                       refetch_stations=refetch_stations)
-
-        # Initially, the stats frame inherets the values/iterators of
-        # worldmap
-        for key in self.frames['worldmap'].keys():
-            self.frames['stats'][key] = self.frames['worldmap'][key]
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_ini'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='ini',\
-                                           refetch_records=refetch_records
-                                           )
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_mod'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_exp,\
-                                           subset='mod',\
-                                           refetch_records=refetch_records
-                                           )
-
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_obs_afternoon'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_obs,\
-                                           subset='afternoon',\
-                                           refetch_records=refetch_records
-                                           )
-
-        self.frames['stats']['records_all_stations_mod'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['records_all_stations_ini']['dates'] = \
-            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
-
-
-        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
-
-        self.frames['stats']['records_all_stations_obs_afternoon'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
-
-        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
-
-        self.frames['stats']['viewkeys'] = ['h','theta','q']
-        print('Calculating table statistics')
-        self.frames['stats']['records_all_stations_mod_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_mod'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-
-        self.frames['stats']['inputkeys'] = inputkeys
-        
-        # self.frames['stats']['inputkeys'] = \
-        #     [ key for key in \
-        #       self.globaldata.datasets.keys() \
-        #       if key in \
-        #       list(self.frames['stats']['records_all_stations_obs'].columns)]
-
-
-        # get units from the class4gl units database
-        self.units = dict(units)
-        # for those that don't have a definition yet, we just ask a question
-        # mark
-        for var in self.frames['stats']['inputkeys']:
-            self.units[var] = '?'
-
-        self.frames['worldmap']['inputkeys'] = self.frames['stats']['inputkeys'] 
-        self.frames['stats']['records_all_stations_ini_pct'] = \
-                  pct(self.frames['stats']['records_all_stations_ini'], \
-                      columns = self.frames['stats']['inputkeys'])
-
-        #     pd.DataFrame(columns = self.frames['stats']['viewkeys'])
-        # for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-        #     mod['
-
-        # 
-        # 
-        # \
-        #        self.frames['stats']['records_all_stations_mod'], \
-
-
-
-        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
-        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
-        #               columns = [ 'd'+key+'dt' for key in \
-        #                           self.frames['stats']['viewkeys']], \
-        #              )
-
-        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
-        #               obs = self.frames['stats']['records_all_stations_ini'], \
-        #               columns = self.frames['stats']['viewkeys'], \
-        #              )
-        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
-        
-        print('filtering pathological data')
-        # some observational sounding still seem problematic, which needs to be
-        # investigated. In the meantime, we filter them
-        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
-
-        # we filter ALL data frames!!!
-        for key in self.frames['stats'].keys():
-            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
-               (self.frames['stats'][key].index.names == indextype):
-                self.frames['stats'][key] = self.frames['stats'][key][valid]
-        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
-
-        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
-
-
-        print("filtering stations from interface that have no records")
-        for STNID,station in self.frames['worldmap']['stations'].table.iterrows():
-            if ((self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                    == STNID).sum() == 0):
-                print("dropping", STNID)
-                self.frames['worldmap']['stations'].table = \
-                        self.frames['worldmap']['stations'].table.drop(STNID)
-                    
-        self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-        
-        # TO TEST: should be removed, since it's is also done just below
-        self.frames['stats']['stations_iterator'] = \
-            self.frames['worldmap']['stations_iterator'] 
-
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkeys'][0]
-        self.frames['worldmap']['inputkey'] = self.frames['worldmap']['inputkey']
-        self.next_station()
-
-        # self.goto_datetime_worldmap(
-        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-        #     'after')
-    def sel_station(self,STNID=None,rownumber=None):
-
-        if (STNID is not None) and (rownumber is not None):
-            raise ValueError('Please provide either STNID or rownumber, not both.')
-
-        if (STNID is None) and (rownumber is None):
-            raise ValueError('Please provide either STNID or rownumber.')
-            
-        if STNID is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = self.frames['worldmap']['stations_iterator'].set_STNID(STNID)
-            print(
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-            )
-            self.update_station()
-        elif rownumber is not None:
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-             = STNID,station = self.frames['worldmap']['stations_iterator'].set_row(rownumber)
-            self.update_station()
-
-
-
-    def next_station(self,event=None,jump=1):
-        with suppress(StopIteration):
-            self.frames['worldmap']['STNID'],\
-            self.frames['worldmap']['current_station'] \
-                = self.frames['worldmap']['stations_iterator'].__next__(jump)
-            # self.frames['worldmap']['stations_iterator'].close()
-            # del(self.frames['worldmap']['stations_iterator'])
-            # self.frames['worldmap']['stations_iterator'] = \
-            #                 selfself.frames['worldmap']['stations'].iterrows()
-            # self.frames['worldmap']['STNID'],\
-            # self.frames['worldmap']['current_station'] \
-            #     = self.frames['worldmap']['stations_iterator'].__next__()
-
-        self.update_station()
-
-    def prev_station(self,event=None):
-        self.next_station(jump = -1,event=event)
-    def update_station(self):
-        for key in ['STNID','current_station','stations_iterator']: 
-            self.frames['stats'][key] = self.frames['worldmap'][key] 
-
-
-
-        # generate index of the current station
-        self.frames['stats']['records_current_station_index'] = \
-            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-             == \
-             self.frames['stats']['current_station'].name)
-
-        # create the value table of the records of the current station
-        tab_suffixes = \
-                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-        for tab_suffix in tab_suffixes:
-            self.frames['stats']['records_current_station'+tab_suffix] = \
-                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-        # go to first record of current station
-        self.frames['stats']['records_iterator'] = \
-                        records_iterator(self.frames['stats']['records_current_station_mod'])
-        (self.frames['stats']['STNID'] , \
-        self.frames['stats']['current_record_chunk'] , \
-        self.frames['stats']['current_record_index']) , \
-        self.frames['stats']['current_record_mod'] = \
-                        self.frames['stats']['records_iterator'].__next__()
-
-        for key in self.frames['stats'].keys():
-            self.frames['profiles'][key] = self.frames['stats'][key]
-
-        STNID = self.frames['profiles']['STNID']
-        chunk = self.frames['profiles']['current_record_chunk']
-        if 'current_station_file_ini' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_ini'].close()
-        self.frames['profiles']['current_station_file_ini'] = \
-            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-        if 'current_station_file_mod' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_mod'].close()
-        self.frames['profiles']['current_station_file_mod'] = \
-            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_afternoon'].close()
-        self.frames['profiles']['current_station_file_afternoon'] = \
-            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-        # for the profiles we make a distinct record iterator, so that the
-        # stats iterator can move independently
-        self.frames['profiles']['records_iterator'] = \
-                        records_iterator(self.frames['profiles']['records_current_station_mod'])
-        (self.frames['profiles']['STNID'] , \
-        self.frames['profiles']['current_record_chunk'] , \
-        self.frames['profiles']['current_record_index']) , \
-        self.frames['profiles']['current_record_mod'] = \
-                        self.frames['profiles']['records_iterator'].__next__()
-
-
-        # for the profiles we make a distinct record iterator, so that the
-        # stats iterator can move independently
-
-        self.update_record()
-
-    def next_record(self,event=None,jump=1):
-        with suppress(StopIteration):
-            (self.frames['profiles']['STNID'] , \
-            self.frames['profiles']['current_record_chunk'] , \
-            self.frames['profiles']['current_record_index']) , \
-            self.frames['profiles']['current_record_mod'] = \
-                      self.frames['profiles']['records_iterator'].__next__(jump)
-        # except (StopIteration):
-        #     self.frames['profiles']['records_iterator'].close()
-        #     del( self.frames['profiles']['records_iterator'])
-        #     self.frames['profiles']['records_iterator'] = \
-        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
-        #     (self.frames['profiles']['STNID'] , \
-        #     self.frames['profiles']['current_record_index']) , \
-        #     self.frames['profiles']['current_record_mod'] = \
-        #                     self.frames['profiles']['records_iterator'].__next__()
-
-        for key in self.frames['profiles'].keys():
-            self.frames['stats'][key] = self.frames['profiles'][key]
-        self.update_record()
-
-    def prev_record(self,event=None):
-        self.next_record(jump=-1,event=event)
-
-    def update_record(self):
-        self.frames['profiles']['current_record_ini'] =  \
-            self.frames['profiles']['records_current_station_ini'].loc[\
-                  (self.frames['profiles']['STNID'] , \
-                  self.frames['profiles']['current_record_chunk'],\
-                  self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon'] =  \
-            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
-                  (self.frames['profiles']['STNID'] , \
-                  self.frames['profiles']['current_record_chunk'] , \
-                  self.frames['profiles']['current_record_index'])]
-
-        self.frames['profiles']['current_record_mod_stats'] = \
-                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
-                    self.frames['profiles']['STNID'], \
-                    self.frames['profiles']['current_record_chunk'], \
-                    self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
-                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
-                    self.frames['profiles']['STNID'],\
-                    self.frames['profiles']['current_record_chunk'],\
-                    self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_ini_pct'] = \
-                self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
-                    self.frames['profiles']['STNID'],\
-                    self.frames['profiles']['current_record_chunk'],\
-                    self.frames['profiles']['current_record_index'])]
-
-        for key in self.frames['profiles'].keys():
-            self.frames['stats'][key] = self.frames['profiles'][key]
-        # frame
-        # note that the current station, record is the same as the stats frame for initialization
-
-        # select first 
-        #self.frames['profiles']['current_record_index'], \
-        #self.frames['profiles']['record_yaml_mod'] = \
-        #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
-        #                   self.frames['stats']['current_record_index'])
-        self.frames['profiles']['record_yaml_mod'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_mod'], \
-               self.frames['profiles']['current_record_mod'].index_start,
-               self.frames['profiles']['current_record_mod'].index_end,
-               mode='mod')
-                                
-        record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
-                       (self.frames['stats']['STNID'] , \
-                        self.frames['stats']['current_record_chunk'] , \
-                        self.frames['stats']['current_record_index'])]
-
-        self.frames['profiles']['record_yaml_ini'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_ini'], \
-               record_ini.index_start,
-               record_ini.index_end,
-                mode='ini')
-
-        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
-                       (self.frames['stats']['STNID'] , \
-                        self.frames['stats']['current_record_chunk'] , \
-                        self.frames['stats']['current_record_index'])]
-
-        self.frames['profiles']['record_yaml_obs_afternoon'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_afternoon'], \
-               record_afternoon.index_start,
-               record_afternoon.index_end,
-                mode='ini')
-
-
-        key = self.frames['worldmap']['inputkey']
-        # only redraw the map if the current world map has a time
-        # dimension
-        if 'time' in self.globaldata.datasets[key].page[key].dims:
-            self.goto_datetime_worldmap(
-                self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                'after')
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only=['stats_lightupdate',
-                                                  'worldmap',
-                                                  'profiles'])
-        else:
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only=['stats_lightupdate',
-                                                  'worldmap_stations',
-                                                  'profiles'])
-
-    def abline(self,slope, intercept,axis):
-        """Plot a line from slope and intercept"""
-        #axis = plt.gca()
-        x_vals = np.array(axis.get_xlim())
-        y_vals = intercept + slope * x_vals
-        axis.plot(x_vals, y_vals, 'k--')
-
-    def plot(self):
-        import pylab as pl
-        from matplotlib.widgets import Button
-        import matplotlib.pyplot as plt
-        import matplotlib as mpl
-        '''
-        Definition of the axes for the sounding table stats
-        '''
-        
-        fig = pl.figure(figsize=(14,9))
-        axes = {} #axes
-        btns = {} #buttons
-
-        # frames, which sets attributes for a group of axes, buttens, 
-        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
-            label = 'stats_'+str(key)
-            axes[label] = fig.add_subplot(\
-                            len(self.frames['stats']['viewkeys']),\
-                            5,\
-                            5*ikey+1,label=label)
-            # Actually, the axes should be a part of the frame!
-            #self.frames['stats']['axes'] = axes[
-
-            # pointer to the axes' point data
-            axes[label].data = {}
-
-            # pointer to the axes' color fields
-            axes[label].fields = {}
-
-
-        fig.tight_layout()
-        fig.subplots_adjust(top=0.95,bottom=0.15,left=0.05,right=0.99,hspace=0.26,wspace=0.08)
-
-        label ='stats_colorbar'
-        axes[label] = fig.add_axes([0.025,0.06,0.18,0.025])
-        axes[label].fields = {}
-
-        from matplotlib.colors import LinearSegmentedColormap
-        cdictpres = {'blue': (\
-                           (0.,    0.,  0.),
-                           (0.25,  0.25, 0.25),
-                           (0.5,  .70, 0.70),
-                           (0.75, 1.0, 1.0),
-                           (1,     1.,  1.),
-                           ),
-               'green': (\
-                           (0. ,   0., 0.0),
-                           (0.25,  0.50, 0.50),
-                           (0.5,  .70, 0.70),
-                           (0.75,  0.50, 0.50),
-                           (1  ,    0,  0.),
-                           ),
-               'red':  (\
-                          (0 ,  1.0, 1.0),
-                          (0.25 ,  1.0, 1.0),
-                           (0.5,  .70, 0.70),
-                          (0.75 , 0.25, 0.25),
-                          (1,    0., 0.),
-                          )}
-        
-        self.statsviewcmap = LinearSegmentedColormap('statsviewcmap', cdictpres)
-
-
-        label = 'times'
-               
-        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-
-
-        label = 'worldmap'
-               
-        axes[label] = fig.add_axes([0.25,0.48,0.40,0.35]) #[*left*, *bottom*, *width*,    *height*]
-        # add pointers to the data of the axes
-        axes[label].data = {}
-        # add pointers to color fields (for maps and colorbars) in the axes
-        axes[label].fields = {}
-        axes[label].lat = None
-        axes[label].lon = None
-
-        label = 'worldmap_colorbar'
-        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
-        axes[label].fields = {}
-
-        # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
-        label = 'worldmap_stations'
-        axes[label] = fig.add_axes([0.25,0.48,0.40001,0.350001]) #[*left*, *bottom*, *width*,    *height*]
-        axes[label].data = {}
-
-        fig.canvas.mpl_connect('pick_event', self.on_pick)
-        fig.canvas.callbacks.connect('motion_notify_event', self.on_plot_hover)
-
-
-        """ buttons definitions """
-        
-        label = 'bprev_dataset'
-        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous dataset')
-        btns[label].on_clicked(self.prev_dataset)
-
-        label = 'bnext_dataset'
-        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next dataset')
-        btns[label].on_clicked(self.next_dataset)
-
-        label = 'bprev_datetime'
-        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous datetime')
-        btns[label].on_clicked(self.prev_datetime)
-
-        label = 'bnext_datetime'
-        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next datetime')
-        btns[label].on_clicked(self.next_datetime)
-
-
-        label = 'bprev_station'
-        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous station')
-        btns[label].on_clicked(self.prev_station)
-
-        label = 'bnext_station'
-        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next station')
-        btns[label].on_clicked(self.next_station)
-
-        label = 'bprev_record'
-        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous record')
-        btns[label].on_clicked(self.prev_record)
-
-        label = 'bnext_record'
-        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next record')
-        btns[label].on_clicked(self.next_record)
-
-
-        # self.nstatsview = nstatsview
-        # self.statsviewcmap = statsviewcmap
-        self.fig = fig
-        self.axes = axes
-        self.btns = btns
-        self.tbox = {}
-        # self.hover_active = False
-
-        #self.tbox['loading'] = fig.text(0.30,0.01, " ",fontsize=10, 
-        #                                transform=plt.gcf().transFigure)
-
-        self.tbox['datetime'] =  fig.text(0.70, 0.96, " ", fontsize=10,
-                                          transform=plt.gcf().transFigure)
-
-        label = 'air_ap:theta'
-        self.axes[label] = fig.add_axes([0.70,0.44,0.12,0.50], label=label)
-
-        label = 'air_ap:q'
-        self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
-
-        label = 'out:h'
-        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
-
-        label = 'out:theta'
-        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
-
-        label = 'out:q'
-        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
-
-        label = 'SEB'
-        self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
-
-
-        self.hover_active = False
-        self.fig = fig
-        self.fig.show()
-        self.fig.canvas.draw()
-        self.refresh_plot_interface()
-
-
-    # def scan_stations(self):
-    #     blabla
-        
-
-
-    # def get_records(current_file):
-    #     records = pd.DataFrame()
-
-    #     # initial position
-    #     next_record_found = False
-    #     while(not next_record_found):
-    #         next_record_found = (current_file.readline() == '---\n')
-    #     next_tell = current_file.tell() 
-    #     end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #     while not end_of_file:
-    #         current_tell = next_tell
-    #         next_record_found = False
-    #         current_file.seek(current_tell)
-    #         while ( (not next_record_found) and (not end_of_file)):
-    #             current_line = current_file.readline()
-    #             next_record_found = (currentline == '---\n')
-    #             end_of_file = (currentline == '') # an empty line means we are at the end
-
-    #         # we store the position of the next record
-    #         next_tell = current_file.tell() 
-    #         
-    #         # we get the current record. Unfortunately we need to reset the
-    #         # yaml record generator first.
-    #         current_yamlgen.close()
-    #         current_yamlgen = yaml.load_all(current_file)
-    #         current_file.seek(current_tell)
-    #         current_record_mod = current_yamlgen.__next__()
-    #     current_yamlgen.close()
-
-    #     return records
-
-       #      next_record_found = False
-       #      while(not record):
-       #          next_record_found = (self.current_file.readline() == '---\n')
-       #      self.current_tell0 = self.current_file.tell() 
-
-       #  
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell0 = self.current_file.tell() 
-
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell1 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell0)
-       #  self.r0 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell1)
-       #  next_record_found = False
-       #  while ( (not next_record_found) and (not end_of_file):
-       #      current_line = self.current_file.readline()
-       #      next_record_found = (currentline == '---\n')
-       #      end_of_file = (currentline == '') # an empty line means we are at the end
-
-       #  self.current_tell2 = self.current_file.tell() 
-
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell1)
-       #  self.r1 = self.current_yamlgen.__next__()
-
-       #  self.current_file.seek(self.current_tell2)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell3 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell2)
-       #  self.r2 = self.current_yamlgen.__next__()
-
-       #  # go to position of next record in file
-       #  self.current_file.seek(self.current_tell3)
-       #  next_record_found = False
-       #  while(not next_record_found):
-       #      next_record_found = (self.current_file.readline() == '---\n')
-       #  self.current_tell4 = self.current_file.tell() 
-
-       #  self.current_yamlgen.close()
-       #  self.current_yamlgen = yaml.load_all(self.current_file)
-       #  self.current_file.seek(self.current_tell3)
-       #  self.r3 = self.current_yamlgen.__next__()
- 
-       #  #self.update_tablestats(SOUNDINGS_TABLESTATS)
-
-    def goto_datetime_worldmap(self,DT,shift=None):
-        DT = np.datetime64(DT) #self.globaldata.datasets[self.axes['worldmap'].focus['key']].variables['time'].values[self.axes['worldmap'].focus['iDT']]
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            self.globaldata.datasets[self.frames['worldmap']['inputkey']].browse_page(time=DT)
-            DIST = np.abs((self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values - DT))
-            self.frames['worldmap']['iDT'] = np.where((DIST) == np.min(DIST))[0][0]
-            if ((shift == 'after') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] < DT)):
-                self.frames['worldmap']['iDT'] += 1
-            elif ((shift == 'before') and (self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']] > DT)):
-                self.frames['worldmap']['iDT'] -= 1 
-            # for gleam, we take the values of the previous day
-            if self.frames['worldmap']['inputkey'] in ['wg','w2']:
-                self.frames['worldmap']['iDT'] -= 2 
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-        #else:
-        #    self.frames['worldmap'].pop('DT')
-
-    def next_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] + 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def prev_datetime(self,event=None):
-        if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims:
-            # for now we don't go to different files, so we cannot go to
-            # another file 
-            self.frames['worldmap']['iDT'] = (self.frames['worldmap']['iDT'] - 1) % len(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values)
-            self.frames['worldmap']['DT'] = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables['time'].values[self.frames['worldmap']['iDT']]
-            if "fig" in self.__dict__.keys():
-                self.refresh_plot_interface(only='worldmap') 
-
-    def next_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-    def prev_dataset(self,event=None):
-        ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey'])
-        ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
-        self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
-
-
-    def sel_dataset(self,inputkey):
-        self.frames['worldmap']['inputkey'] = inputkey
-        self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
-        self.goto_datetime_worldmap(
-            self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-            'after')# get nearest datetime of the current dataset to the profile
-        if "fig" in self.__dict__.keys():
-            self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
-       
-    # def prev_station(self,event=None):
-    #     self.istation = (self.istation - 1) % self.stations.shape[0]
-    #     self.update_station()
-
-
-
-
-    #def update_datetime(self):
-    #    if 'time' in self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims:
-    #    #if 'time' in list(dict(self.globaldata.datasets[self.worldmapfocus['key']].variables[self.worldmapfocus['key']].dims).keys()):
-    #        #self.worldmapfocus['DT'] = self.globaldata.datasets[self.worldmapfocus['key']].variables['time'].values[self.worldmapfocus['iDT']]
-    #        print(self.worldmapfocus['DT'])
-    #        self.refresh_plot_interface(only='worldmap')
-
-    def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
-
-        #print('r1')
-        for argkey in args.keys():
-            self.__dict__[arg] = args[argkey]
-
-        axes = self.axes
-        tbox = self.tbox
-        frames = self.frames
-        fig = self.fig
- 
-        if (only is None) or ('worldmap' in only):
-            globaldata = self.globaldata
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
-            else:
-                datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
-            keystotranspose = ['lat','lon']
-            for key in dict(datasetxr.dims).keys():
-                if key not in keystotranspose:
-                    keystotranspose.append(key)
-
-            datasetxr = datasetxr.transpose(*keystotranspose)
-            datasetxr = datasetxr.sortby('lat',ascending=False)
-
-            lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
-            lonleft = lonleft - 360.
-            lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
-            label = 'worldmap'
-            axes[label].clear()
-            axes[label].lon = xr.concat([lonleft,lonright],'lon').values
-            axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
-
-        if (only is None) or ('worldmap' in only):
-            #if 'axmap' not in self.__dict__ :
-            #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
-            #else:
-
-            #stations = self.stations
-
-
-            # self.gmap = Basemap(projection='kav7', lat_0 = 0, lon_0 =0,
-            #     resolution = 'l', 
-            # area_thresh = 0.1,
-            #     llcrnrlon=-180., llcrnrlat=-90.0,
-            #     urcrnrlon=180., urcrnrlat=90.0,ax=self.axmap)
-            # 
-            # self.gmap.drawcoastlines(color='white',linewidth=0.3)
-            # self.gmap.drawcountries(color='white',linewidth=0.3)
-            # #self.gmap.fillcontinents(color = 'gray')
-            # self.gmap.drawmapboundary(color='white',linewidth=0.3)
-            # # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),labels=[1,1,0,1])
-            # # self.gmap.drawparallels(np.arange(-90, 90, 30.),labels=[1,0,0,0])
-            # self.gmap.drawmeridians(np.arange(-180, 180+45, 60.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # self.gmap.drawparallels(np.arange(-90, 90, 30.),color='white',linewidth=0.3,labels=[0,0,0,0])
-            # #self.ax5.shadedrelief()
-
-           #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
-
-
-            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
-            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
-
-            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
-            if 'lev' in field.dims:
-                field = field.isel(lev=-1)
-
-            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
-            axes[label].axis('off')
-
-            from matplotlib import cm
-            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
-            
-            
-            title=frames['worldmap']['inputkey']
-            if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
-            axes[label].set_title(title)
-
-            label ='worldmap_colorbar'
-            axes[label].clear()
-            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
-
-
-            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
-            # x,y = self.gmap(lons,lats)
-            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
-            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
-
-        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
-
-            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
-            store_xlim = {}
-            store_ylim = {}
-            for ikey, key in enumerate(statskeys_out):
-                if (only is not None) and ('stats_lightupdate' in only):
-                    store_xlim[key] = axes['stats_'+key].get_xlim()
-                    store_ylim[key] = axes['stats_'+key].get_ylim()
-                self.axes['stats_'+key].clear()    
-
-            label = 'times'
-            self.axes[label].clear()
-
-            key = 'dthetadt'
-            x = self.frames['stats']['records_all_stations_ini']['datetime']
-            #print(x)
-            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-            #print(y)
-            z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            #print(z)
-
-            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
-            self.axes[label].data[label] = self.axes[label].scatter(x.values,
-                                                                    y.values,
-                                                                    c=z.values,
-                                                                    cmap=self.statsviewcmap,
-                                                                    s=2,
-                                                                    vmin=0.,
-                                                                    vmax=1.,
-                                                                    alpha=alpha_cloud_pixels)
-
-            
-            x = self.frames['stats']['records_current_station_ini']['datetime']
-            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-            self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-
-            x = self.frames['profiles']['records_current_station_ini']['datetime']
-            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-            z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-            self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-            self.axes[label].set_xlim((dt.datetime(1981,1,1),dt.datetime(2018,1,1)))
-            self.axes[label].set_ylabel(key+ ' ['+self.units[key]+']')
-
-            for ikey, key in enumerate(statskeys_out):
-
-                # show data of all stations
-                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_all_stations_mod_stats'][key]
-                z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                qvalmax = x.quantile(0.999)
-                qvalmin = x.quantile(0.001)
-                print('applying extra filter over extreme values for plotting stats')
-                selx = (x >= qvalmin) & (x < qvalmax)
-                sely = (x >= qvalmin) & (x < qvalmax)
-                x = x[selx & sely]
-                y = y[selx & sely]
-                z = z[selx & sely]
-                self.axes['stats_'+key].data['stats_'+key] = \
-                       self.axes['stats_'+key].scatter(x,y, c=z,\
-                                cmap=self.statsviewcmap,\
-                                s=3,picker=3,label=key,vmin=0.,vmax=1.,alpha=alpha_cloud_pixels)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
-
-                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_current_station_mod_stats'][key]
-                z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
-
-                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['profiles']['records_current_station_mod_stats'][key]
-                z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
-                self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
-                       self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
-                                cmap=self.statsviewcmap,\
-                                s=20,picker=20,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
-
-                if len(x) > 1:
-                    fit = np.polyfit(x, y, deg=1)
-                    self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
-                         self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
-
-                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
-                y = self.frames['stats']['current_record_mod_stats'][key]
-                z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
-
-                text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
-                self.axes['stats_'+key].data['stats_'+key+'_current_record'] = \
-                    axes['stats_'+key].annotate(text, \
-                                               xy=(x,y),\
-                                               xytext=(0.05,0.05),\
-                                               textcoords='axes fraction',\
-                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
-                                               color='white',\
-                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
-                # self.axes['stats_'+key].data[key+'_current_record'] = \
-                #        self.axes['stats_'+key].scatter(x,y, c=z,\
-                #                 cmap=self.statsviewcmap,\
-                #                 s=30,picker=15,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=1.1)
-
-                # axes['stats_'+key].set_title('relative deviation per station of '+ key)
-                self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
-                # # highlight data for curent station
-                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
-
-                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
-
-                if ikey == len(statskeys_out)-1:
-                    self.axes['stats_'+key].set_xlabel('external')
-                    #axes[label].set_xlabel('ext: '+ key+' ['+statsunits[ikey]+']')
-                axes['stats_'+key].set_ylabel('model')
-
-
-                if (only is not None) and ('stats_lightupdate' in only):
-                    self.axes['stats_'+key].set_xlim(*store_xlim[key])
-                    self.axes['stats_'+key].set_ylim(*store_ylim[key])
-                else:
-                    limlow = np.min((axes['stats_'+key].get_xlim()[0],axes['stats_'+key].get_ylim()[0]))
-                    limhigh = np.max((axes['stats_'+key].get_xlim()[1],axes['stats_'+key].get_ylim()[1]))
-                    self.axes['stats_'+key].set_xlim(limlow,limhigh)
-                    self.axes['stats_'+key].set_ylim(limlow,limhigh)
-                self.abline(1,0,axis=self.axes['stats_'+key])
-
-        if (only is None) or ('stats_colorbar' in only):
-            label ='stats_colorbar'
-            axes[label].clear()
-            import matplotlib as mpl
-            norm = mpl.colors.Normalize(vmin=0.,vmax=1.)
-            self.axes[label].fields[label] = \
-             mpl.colorbar.ColorbarBase(self.axes[label],\
-                        orientation='horizontal',\
-                        label="percentile of "+self.frames['worldmap']['inputkey'],
-                        alpha=1.,
-                                cmap=self.statsviewcmap,\
-                                       norm=norm
-                         )
-
-        #print('r1')
-        if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
-            #print('r2')
-            label = 'worldmap_stations'
-            axes[label].clear()
-            
-            stations = self.frames['worldmap']['stations'].table
-            globaldata = self.globaldata
-            
-            key = label
-
-            #print('r3')
-            if (stations is not None):
-                xlist = []
-                ylist = []
-                #print('r4')
-                for iSTN,STN in frames['worldmap']['stations'].table.iterrows():
-            #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-                    x,y = len(axes['worldmap'].lon)*(STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]) ,len(axes['worldmap'].lat)*(STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    xlist.append(x)
-                    ylist.append(y)
-                #picker is needed to make it clickable (pick_event)
-                axes[label].data[label] = axes[label].scatter(xlist,ylist,
-                                                              c='r', s=15,
-                                                              picker = 15,
-                                                              label=key,
-                                                              edgecolor='k',
-                                                              linewidth=0.8)
-
-            # cb.set_label('Wilting point [kg kg-3]')
-                #print('r5')
-
-                
-            #     xseries = []
-            #     yseries = []
-            #     for iSTN,STN in stations.iterrows():
-            # #        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            # #        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #         x,y = len(axes[label].lon)*(STN['longitude_ext']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(axes[label].lat)*(STN['latitude_ext']- axes[label].axes[label].lat[0])/(axes[label].lat[-1] - axes[label].axes[label].lat[0])
-            #         xseries.append(x)                    
-            #         yseries.append(y)
-            #         
-            #         
-            #     axes[label].data[label] = axes[label].scatter(xseries,yseries, c='r' , s=15, edgecolor='none',label=key)
-                    
-                if ('current_station' in frames['worldmap']):
-                    #print('r5')
-                    STN = frames['stats']['current_station']
-                    STNID = frames['stats']['STNID']
-                    #print('r5')
-
-                    x,y = len(axes['worldmap'].lon)* \
-                            (STN['longitude']- axes['worldmap'].lon[0])/(axes['worldmap'].lon[-1] - axes['worldmap'].lon[0]),\
-                          len(axes['worldmap'].lat)* \
-                            (STN['latitude']- axes['worldmap'].lat[0])/(axes['worldmap'].lat[-1] - axes['worldmap'].lat[0])
-                    #print('r6')
-                    #VAL = self.seltablestats[(self.seltablestats['STNID'] \
-                    #                          == \
-                    #                          self.frames['worldmap']['STNID'])\
-                    #                         & \
-                    #                         (self.seltablestats['DT'] \
-                    #                          == self.axes['statsview0].focus['DT']) \
-                    #                        ][self.axes['worldmap'].focus['key']+'_ext'].iloc[0]
-                    #print('r7')
-                    text = 'STNID: '+ format(STNID,'10.0f') + \
-                            ', LAT: '+format(STN['latitude'],'3.3f')+ \
-                            ', LON: '+format(STN['longitude'],'3.3f')+ \
-                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
-
-                            #+', VAL: '+format(VAL,'.3e')
-
-                    axes[label].scatter(x,y, c='r', s=30,\
-                                        edgecolor='k',picker=30,label=key,linewidth=1.1)
-                    #print('r8')
-            
-                    #colorrange = list(axes[label].fields['worldmap'].get_clim())
-                    #colorstation = (VAL-colorrange[0])/(colorrange[1]-colorrange[0])
-                    #colorstation = max((min((1.,colorstation)),0.))
-                    colorstation =0.2
-                    from matplotlib import cm
-                    axes[label].annotate(text,
-                                         xy=(x,y),
-                                         xytext=(0.05,0.05),
-                                         textcoords='axes fraction', 
-                                         bbox=dict(boxstyle="round",
-                                         fc = cm.viridis(colorstation)),
-                                         arrowprops=dict(arrowstyle="->",
-                                                         linewidth=1.1),
-                                         color='white' if colorstation < 0.5 else 'black')
-                    #print('r9')
-
-                    # #pos = sc.get_offsets()[ind["ind"][0]]
-                    # 
-                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
-                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
-                    # axes[label].data[label+'statannotate'].set_text(text)
-                    #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
-                    # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
-            #print('r9')
-            axes[label].axis('off')
-            axes[label].set_xlim(0,(len(axes['worldmap'].lon)))
-            axes[label].set_ylim((len(axes['worldmap'].lat),0))
-            #print('r10')
-
-        if (only is None) or ('profiles' in only): 
-            #print('r11')
-
-            # # self.istation = np.where(self.stations['ID'] == STNID)[0][0]
-            # # self.update_station(goto_first_sounding=False)
-            # isounding = np.where(pd.DatetimeIndex(self.df_soundings_eval_pairs.datetime) == self.profilefocus['DT'])[0][0]
-            # #self.isounding = (self.isounding - 1) % self.df_soundings_eval_pairs.shape[0]
-            # self.morning_sounding = self.df_soundings_eval_pairs.loc[isounding]
-            # self.evening_sounding = self.df_soundings.loc[self.morning_sounding['eval0']]
-
-            label = 'air_ap:theta'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-                # +\
-                # ' -> '+ \
-                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
-            
-            
-            
-            
-            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-            #print('r12')
-
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            #print(self.frames['profiles']['record_yaml_ini'].pars.h)
-            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
-            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
-            hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            #print('r13')
-            # 
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r14')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-
-            #print('r15')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-                          
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r16')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r17')
-            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
-            print(hmax)
-            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
-            if valid_mod:
-
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="mod "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                                 +'LT')
-
-            #print('r18')
-            axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('theta [K]')
-
-            label = 'air_ap:q'
-            axes[label].clear()
-
-            tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
-            # 
-
-            #print('r19')
-            # #axes[label].set_title(self.morning_sounding.ldatetime.strftime("local time:  %H:%M")+' -> '+self.evening_sounding.ldatetime.strftime("%H:%M"))
-            # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # 
-            if valid_mod:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            else:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            # 
-            #print('r20')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r21')
-
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_ap.z.values[zco],"b:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
-
-
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
-
-            #print('r23')
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-
-            #print('r24')
-            if valid_mod:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
-                zco = range(zidxmax)
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
-                                 label="fit ")#+\
-                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
-                             #+'LT')
-            #print('r25')
-            #axes[label].legend()
-
-            #axes[label].legend(prop={'family':'monospace'},loc='upper left')
-            #axes[label].set_ylabel('height [m]')
-            axes[label].set_xlabel('q [kg/kg]')
-
-            # #axes[label].set_title(self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.theta_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.theta_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.theta_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod "+self.evening_sounding.ldatetime.strftime("%H:%M")+'LT')
-
-            # #pl.subplots_adjust(right=0.6)
-
-            # label = 'q_pro'
-            # axes[label].clear()
-
-            # hmax = np.max([self.morning_sounding.c4gl.input.h,self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.h])
-            # 
-            # zco =  self.morning_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.obs.q_pro[zco], self.morning_sounding.obs.z_pro[zco],"b*",label="obs")
-            # 
-            # zco =  self.morning_sounding.c4gl.input.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.input.q_pro[zco], self.morning_sounding.c4gl.input.z_pro[zco ],"b:",label="fit")
-
-            # #self.ax5.set_title(self.evening_sounding.ldatetime.strftime("local time: %H:%M"))
-            # zco =  self.evening_sounding.obs.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.obs.q_pro[zco], self.evening_sounding.obs.z_pro[zco],"r*",label="obs")
-            # 
-            # zco =  self.evening_sounding.fit.z_pro < 2.*hmax
-            # axes[label].plot(self.evening_sounding.fit.q_pro[zco], self.evening_sounding.fit.z_pro[zco],"r:",label="fit")
-            # 
-            # zco = self.morning_sounding.c4gl.z_pro < 2.*hmax
-            # axes[label].plot(self.morning_sounding.c4gl.q_pro[zco], self.morning_sounding.c4gl.z_pro[zco],"r-",label="mod")
-            # #pl.subplots_adjust(right=0.6)
-            # axes[label].set_xlabel('specific humidity [kg/kg]')
- 
-
-            #print('r26')
-            time = self.frames['profiles']['record_yaml_mod'].out.time
-            for ilabel,label in enumerate(['h','theta','q']):
-                axes["out:"+label].clear()
-                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
-                axes["out:"+label].set_ylabel(label)
-                if ilabel == 2:
-                    axes["out:"+label].set_xlabel('local sun time [h]')
-                
-            #print('r27')
-            label = 'SEB'
-            axes[label].clear()
-            
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
-            axes[label].hlines(0.,*axes[label].get_xlim(),'k')
-            axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
-            axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
-                
-            #print('r28')
-            
-            axes[label].legend()
-            
-            #         for ax in self.fig_timeseries_axes:
-#             ax.clear()
-#         
-#         self.fig_timeseries_axes[0].plot(self.morning_sounding.c4gl.out.h,label='h')
-#         self.fig_timeseries_axes[1].plot(self.morning_sounding.c4gl.out.theta,label='theta')
-#         self.fig_timeseries_axes[2].plot(self.morning_sounding.c4gl.out.q,label='q')
-#         #print(self.morning_sounding.c4gl.out.Swin)
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Swin - self.morning_sounding.c4gl.out.Swout,label='Sw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.H,label='H')
-#         self.fig_timeseries_axes[3].plot(self.morning_sounding.c4gl.out.Lwin - self.morning_sounding.c4gl.out.Lwout,label='Lw')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.G,label='G')
-#         self.fig_timeseries_axes[3].plot(-self.morning_sounding.c4gl.out.LE,label='LE')
-#         self.fig_timeseries_axes[3].hlines(0.,*self.fig_timeseries_axes[3].get_xlim(),'k')
-#         self.fig_timeseries_axes[3].legend()
-#         self.fig.canvas.draw()
-            
-
-
-
-
-
-
-        #self.ready()
-        #print('r29')
-        fig.canvas.draw()
-        #fig.show()
-
-        self.axes = axes
-        self.tbox = tbox
-        self.fig = fig
-
-    def on_pick(self,event):
-        #print("HELLO")
-        # this makes clear that the dataset is loading (set_profile_focus takes a long time to load!)
-        #self.axes['theta_pro'].clear()
-        #self.axes['q_pro'].clear()
-        
-
-        # workaround because I cannot track the axes label here. I need it because the behaviour of this function should depend on which axes we are.
-        # I can only track the label of the data points. So we make a definition that clarifies to which axes the select data points (having a 'key') belongs to
-        keys_to_axes = {}
-        for ikey,key in enumerate(self.frames['stats']['viewkeys']):
-            keys_to_axes['d'+self.frames['stats']['viewkeys'][ikey]+'dt'] = 'stats_d'+key+'dt'
-
-        keys_to_axes['worldmap_stations'] = 'worldmap_stations'
-        keys_to_axes['worldmap'] = 'worldmap'
-        
-        axes = self.axes
-        #nstatsview = self.nstatsview
-        #statsviewcmap = self.statsviewcmap
-        stations = self.frames['worldmap']['stations'].table
-
-
-        #print("p1")
-        current = event
-        artist = event.artist
-        
-        selkey = artist.get_label()
-        
-        #print(keys_to_axes)
-        
-        label = keys_to_axes[selkey]
-        #print("HELLO",selkey,label)
-
-        # # Get to know in which axes we are
-        # label = None
-        # for axeskey in axes.keys():
-        #     if event.inaxes == axes[axeskey]:
-        #         label = axeskey
-        #         
-
-        # cont, pos = None, None
-        
-        xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata
-        ind = event.ind
-        # x, y = artist.get_xdata(), artist.get_ydata() # for some reason this doesnt work yet :/
-        d = axes[label].collections[0]
-        #d.set_offset_position('data')
-        xy = d.get_offsets()
-        x, y =  xy[:,0],xy[:,1]
-        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
-
-        #print("p2")
-        if len(ind) > 0:
-            #print("p3")
-            pos = x[ind[0]], y[ind[0]]
-
-            #if label[:-1] == 'statsview':
-            #    #seltablestatsstdrel = self.seltablestatsstdrel
-            #    #seltablestatspct = self.seltablestatspct
-
-            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-            #    
-            #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-            #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
-            #    self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-            #    
-            #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap','profiles'],statsnewdata=False)
-            #el
-            if (label == 'worldmap') or (label == 'worldmap_stations'):
-                self.hover_active = False
-                if (self.frames['worldmap']['STNID'] !=
-                    self.frames['profiles']['STNID']):
-                # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
-                # so we just need to perform update_station
-                    self.update_station()
-            elif (label[:5] == 'stats'):
-
-                self.hover_active = False
-                if (self.frames['stats']['STNID'] !=
-                self.frames['profiles']['STNID']) or \
-                   (self.frames['stats']['current_record_chunk'] != 
-                    self.frames['profiles']['current_record_chunk']) or \
-                   (self.frames['stats']['current_record_index'] != 
-                    self.frames['profiles']['current_record_index']):
-
-
-
-                    for key in ['STNID','current_station','stations_iterator']: 
-                        self.frames['worldmap'][key] = self.frames['stats'][key] 
-
-                    for key in self.frames['stats'].keys():
-                        self.frames['profiles'][key] = self.frames['stats'][key]
-
-                    STNID = self.frames['profiles']['STNID']
-                    chunk = self.frames['profiles']['current_record_chunk']
-                    if 'current_station_file_ini' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_ini'].close()
-                    self.frames['profiles']['current_station_file_ini'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
-
-                    if 'current_station_file_mod' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_mod'].close()
-                    self.frames['profiles']['current_station_file_mod'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_afternoon'].close()
-                    self.frames['profiles']['current_station_file_afternoon'] = \
-                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
-
-                    # go to hovered record of current station
-                    self.frames['profiles']['records_iterator'] = \
-                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # ... and go to the record of the profile window (last one that
-                    # was picked by the user)
-                    found = False
-                    EOF = False
-                    while (not found) and (not EOF):
-                        try:
-                            (STNID,chunk,index),record = self.frames['profiles']['records_iterator'].__next__()
-                            #print("hello*")
-                            #print(self.frames['profiles']['current_record_index'])
-                            if (chunk == self.frames['profiles']['current_record_chunk']) and \
-                               (index == self.frames['profiles']['current_record_index']) and \
-                               (STNID == self.frames['profiles']['STNID']):
-                                #print('found!')
-                                found = True
-                        except StopIteration:
-                            EOF = True
-                    if found:
-                        self.frames['stats']['current_record_mod'] = record
-                        self.frames['stats']['current_record_chunk'] = chunk
-                        self.frames['stats']['current_record_index'] = index
-                    # # for the profiles we make a distinct record iterator, so that the
-                    # # stats iterator can move independently
-                    # self.frames['profiles']['records_iterator'] = \
-                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
-                    # (self.frames['profiles']['STNID'] , \
-                    # self.frames['profiles']['current_record_index']) , \
-                    # self.frames['profiles']['current_record_mod'] = \
-                    #                 self.frames['profiles']['records_iterator'].__next__()
-
-
-                    # for the profiles we make a distinct record iterator, so that the
-                    # stats iterator can move independently
-
-                    self.update_record()
-
-
-
-    def on_plot_hover(self,event):
-        axes = self.axes
-        #print('h1')
-
-        # Get to know in which axes we are
-        label = None
-        for axeskey in axes.keys():
-            if event.inaxes == axes[axeskey]:
-                label = axeskey
-                
-        #print('h2')
-
-        cont, pos = None, None
-        #print (label)
-        
-        if label is not None:
-            if  ('data' in axes[label].__dict__.keys()) and \
-                (label in axes[label].data.keys()) and \
-                (axes[label].data[label] is not None):
-                
-                #print('h3')
-                cont, ind =  axes[label].data[label].contains(event)
-                selkey = axes[label].data[label].get_label()
-                if len(ind["ind"]) > 0:
-                    #print('h4')
-                    pos = axes[label].data[label].get_offsets()[ind["ind"][0]]
-                    #print('pos',pos,selkey)
-
-
-                    #if label[:-1] == 'statsview':
-                    #    seltablestatsstdrel = self.seltablestatsstdrel
-                    #    seltablestatspct = self.seltablestatspct
-
-                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
-                    #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
-                    #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
-                    #    self.hover_active = True
-                    #    
-                    #    self.refresh_plot_interface(only=['statsviews_lightupdate','worldmap_stations'])
-                    #    
-                    #el
-                    #print(label[:5])
-                    if (label[:5] == 'stats') or (label == 'times'):
-                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
-                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
-                        
-
-                        if label[:5] == 'stats':
-                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                            (self.frames['stats']['STNID'] ,
-                             self.frames['stats']['current_record_chunk'], 
-                             self.frames['stats']['current_record_index']) = \
-                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-                        # elif label[:5] == 'stats':
-                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
-                        #     (self.frames['stats']['STNID'] ,
-                        #      self.frames['stats']['current_record_chunk'], 
-                        #      self.frames['stats']['current_record_index']) = \
-                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-
-
-                        self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-                        
-                        # # TO TEST: should be removed, since it's is also done just below
-                        # self.frames['stats']['stations_iterator'] = \
-                        #     self.frames['worldmap']['stations_iterator'] 
-                
-                
-                        # self.goto_datetime_worldmap(
-                        #     self.frames['profiles']['current_record_obs'].datetime.to_pydatetime(),
-                        #     'after')
-
-
-                        # scrolling to the right station
-                        STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                        EOF = False
-                        found = False
-                        while (not found and not EOF):
-                            if (STNID == self.frames['stats']['STNID']):
-                                   found = True 
-                            if not found:
-                                try:
-                                    STNID,station = self.frames['stats']['stations_iterator'].__next__()
-                                except (StopIteration):
-                                    EOF = True
-                        if found:
-                        #    self.frames['stats']['STNID'] = STNID
-                            self.frames['stats']['current_station'] =  station
-
-                        #STNID = self.frames['profiles']['current_record_index'].iloc[0].name[0]
-                        #index = self.frames['profiles']['current_record_index'].iloc[0].name[1]
-
-
-                        # generate index of the current station
-                        self.frames['stats']['records_current_station_index'] = \
-                            (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                             == self.frames['stats']['STNID'])
-
-
-                        tab_suffixes = \
-                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            self.frames['stats']['records_current_station'+tab_suffix] = \
-                                self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-
-                        # go to hovered record of current station
-                        self.frames['stats']['records_iterator'] = \
-                                        records_iterator(self.frames['stats']['records_current_station_mod'])
-
-
-                        # ... and go to the record of the profile window (last one that
-                        # was picked by the user)
-                        found = False
-                        EOF = False
-                        while (not found) and (not EOF):
-                            try:
-                                (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                                #print("hello*")
-                                #print(self.frames['profiles']['current_record_index'])
-                                if (index == self.frames['stats']['current_record_index']) and \
-                                   (chunk == self.frames['stats']['current_record_chunk']) and \
-                                   (STNID == self.frames['stats']['STNID']):
-                                    #print('found!')
-                                    found = True
-                            except StopIteration:
-                                EOF = True
-                        if found:
-                            #print('h5')
-                            self.frames['stats']['current_record_mod'] = record
-                            self.frames['stats']['current_record_chunk'] = chunk
-                            self.frames['stats']['current_record_index'] = index
-
-                        #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
-                        tab_suffixes = \
-                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                        for tab_suffix in tab_suffixes:
-                            #print(tab_suffix)
-                            #print(self.frames['stats']['records_current_station'+tab_suffix])
-                            self.frames['stats']['current_record'+tab_suffix] =  \
-                                self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                      (self.frames['stats']['STNID'] , \
-                                       self.frames['stats']['current_record_chunk'] , \
-                                       self.frames['stats']['current_record_index'])]
-
-
-                        self.hover_active = True
-                        self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                        # print('h13')
-                        # if 'time' in self.globaldata.datasets[key].page[key].dims:
-                        #     self.goto_datetime_worldmap(
-                        #         self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
-                        #         'after')
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap',
-                        #                                           'profiles'])
-                        # else:
-                        #     if "fig" in self.__dict__.keys():
-                        #         self.refresh_plot_interface(only=['stats_lightupdate',
-                        #                                           'worldmap_stations',
-                        #                                           'profiles'])
-
-
-
-                    elif label in ['worldmap_stations','worldmap']:
-                        #print('h5')
-
-                        if (self.axes['worldmap'].lat is not None) and \
-                           (self.axes['worldmap'].lon is not None):
-
-
-                            #self.loading()
-                            self.fig.canvas.draw()
-                            self.fig.show()
-
-
-                            # get position of 
-                            latmap = round(pos[1]/len(self.axes['worldmap'].lat)*(self.axes['worldmap'].lat[-1] - \
-                                                                 self.axes['worldmap'].lat[0]) + \
-                                           self.axes['worldmap'].lat[0],4)
-                            lonmap = round(pos[0]/len(self.axes['worldmap'].lon)*(self.axes['worldmap'].lon[-1] - \
-                                                                 self.axes['worldmap'].lon[0]) + \
-                                           self.axes['worldmap'].lon[0],4)
-                        
-                            stations = self.frames['worldmap']['stations'].table
-                            #print('h7')
-                        
-                            #reset stations iterator:
-                            # if 'stations_iterator' in self.frames['worldmap'].keys():
-                            #     self.frames['worldmap']['stations_iterator'].close()
-                            #     del(self.frames['worldmap']['stations_iterator'])
-                            # if 'stations_iterator' in self.frames['stats'].keys():
-                            #     self.frames['stats']['stations_iterator'].close()
-                            #     del(self.frames['stats']['stations_iterator'])
-                            self.frames['worldmap']['stations_iterator'] =\
-                               stations_iterator(self.frames['worldmap']['stations'])
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                            EOF = False
-                            found = False
-                            while (not found and not EOF):
-                                #print('h8',station.latitude,latmap)
-                                #print('h8',station.longitude,lonmap)
-                                if (round(station.latitude,3) == round(latmap,3)) and \
-                                    (round(station.longitude,3) == round(lonmap,3)):
-                                       found = True 
-                                if not found:
-                                    try:
-                                        STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                                    except (StopIteration):
-                                        EOF = True
-                            if found:
-                                self.frames['worldmap']['STNID'] = STNID
-                                self.frames['worldmap']['current_station'] = \
-                                        station
-                        
-                            self.frames['stats']['stations_iterator'] = \
-                                self.frames['worldmap']['stations_iterator'] 
-                            #print('h8')
-                            # inherit station position for the stats frame...
-                            for key in self.frames['worldmap'].keys():
-                                self.frames['stats'][key] = self.frames['worldmap'][key]
-                                
-                            ## fetch records of current station...
-                            #self.frames['stats']['records_current_station_mod'] =\
-                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                            # ... and their indices
-                            self.frames['stats']['records_current_station_index'] = \
-                                    (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                                     == \
-                                     self.frames['stats']['current_station'].name)
-
-
-                            tab_suffixes = \
-                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['records_current_station'+tab_suffix] = \
-                                    self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-
-                            # ... create a record iterator ...
-                            #self.frames['stats']['records_iterator'].close()
-                            del(self.frames['stats']['records_iterator'])
-                            self.frames['stats']['records_iterator'] = \
-                                self.frames['stats']['records_current_station_mod'].iterrows()
-
-
-
-                        
-                            #print('h9')
-                            # ... and go to to the first record of the current station
-                            (self.frames['stats']['STNID'] , \
-                             self.frames['stats']['current_record_chunk'] , \
-                             self.frames['stats']['current_record_index']) , \
-                            self.frames['stats']['current_record_mod'] = \
-                                self.frames['stats']['records_iterator'].__next__()
-                        
-
-
-
-                            #print('h10')
-                            # cash the current record
-                            tab_suffixes = \
-                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                            for tab_suffix in tab_suffixes:
-                                self.frames['stats']['current_record'+tab_suffix] =  \
-                                    self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                                          (self.frames['stats']['STNID'] , \
-                                           self.frames['stats']['current_record_chunk'] , \
-                                           self.frames['stats']['current_record_index'])]
-
-                            #print('h11')
-                            
-                            self.hover_active = True
-                            self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations','profiles'])
-                            #print('h13')
-
-                        
-
-            #if (stations is not None):
-            #    for iSTN,STN in stations.iterrows():
-            ##        x,y =self.gmap(STN['longitude'],STN['latitude'])
-            ##        self.gmap.plot(x,y, 'mo' if (self.STNID == STN['ID']) else 'ro' , markersize=1)
-            #        x,y = len(axes[label].lon)*(STN['longitude']- axes[label].lon[0])/(axes[label].lon[-1] - axes[label].lon[0])  ,len(lat)*(STN['latitude']- axes[label].lat[0])/(lat[-1] - axes[label].lat[0])
-            #        axes['worldmap'].plot(x,y, 'mo' if (axes['worldmap'].focus['STNID'] == STN['ID']) else 'ro' , markersize=2)
-
-        # self.fig.show()
- 
-        # we are hovering on nothing, so we are going back to the position of
-        # the profile sounding
-        if pos is None:
-            if self.hover_active == True:
-                #print('h1*')
-                
-                #self.loading()
-                # to do: reset stations iterators
-
-                # get station and record index from the current profile
-                for key in ['STNID', 'current_station']:
-                    self.frames['stats'][key] = self.frames['profiles'][key]
-
-                self.frames['stats']['STNID'] = self.frames['profiles']['STNID']
-                self.frames['stats']['current_station'] = \
-                        self.frames['profiles']['current_station']
-                #print('h3a*')
-                self.frames['stats']['records_current_station_mod'] = \
-                        self.frames['profiles']['records_current_station_mod']
-                #print('h3b*')
-
-                # the next lines recreate the records iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-
-                # reset stations iterator...
-                #self.frames['stats']['records_iterator'].close()
-                del(self.frames['stats']['records_iterator'])
-                self.frames['stats']['records_iterator'] = \
-                    self.frames['stats']['records_current_station_mod'].iterrows()
-                #print('h4*')
-
-                # ... and go to the record of the profile window (last one that
-                # was picked by the user)
-                found = False
-                EOF = False
-                while (not found) and (not EOF):
-                    try:
-                        (STNID,chunk,index),record = self.frames['stats']['records_iterator'].__next__()
-                        #print("hello*")
-                        #print(self.frames['profiles']['current_record_index'])
-                        #print(self.frames['profiles']['STNID'])
-                        #print(STNID,index)
-                        if (index == self.frames['profiles']['current_record_index']) and \
-                            (chunk == self.frames['profiles']['current_record_chunk']) and \
-                            (STNID == self.frames['profiles']['STNID']):
-                            #print('found!')
-                            found = True
-                    except StopIteration:
-                        EOF = True
-                if found:
-                    #print('h5*')
-                    self.frames['stats']['current_record_mod'] = record
-                    self.frames['stats']['current_record_chunk'] = chunk
-                    self.frames['stats']['current_record_index'] = index
-
-                #print('h6*')
-
-
-
-                # # fetch records of current station...
-                # self.frames['stats']['records_current_station_mod'] =\
-                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
-
-                # ... and their indices
-                self.frames['stats']['records_current_station_index'] = \
-                        (self.frames['stats']['records_all_stations_index'].get_level_values('STNID')\
-                         == \
-                         self.frames['stats']['current_station'].name)
-
-
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['records_current_station'+tab_suffix] = \
-                        self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
-
-                
-
-                # cash the records of the current stations
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
-                for tab_suffix in tab_suffixes:
-                    self.frames['stats']['current_record'+tab_suffix] =  \
-                        self.frames['stats']['records_current_station'+tab_suffix].loc[\
-                              (self.frames['stats']['STNID'] , \
-                               self.frames['stats']['current_record_chunk'] , \
-                               self.frames['stats']['current_record_index'])]
-
-
-                # the next lines recreate the stations iterator. Probably it's
-                # better to just copy the profile iterator and its position to
-                # the worldmap/stats 
-                #print('h7*')
-
-                # reset the stations iterators
-                for framekey in ['stats','worldmap']:
-                    ##print(framekey)
-                    if 'stations_iterator' in self.frames[framekey]:
-                        #self.frames[framekey]['stations_iterator'].close()
-                        del(self.frames[framekey]['stations_iterator'])
-
-                self.frames['worldmap']['current_station'] = \
-                        self.frames['profiles']['current_station']
-
-                #recreate the stations iterator for the worldmap...
-                self.frames['worldmap']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
-
-                # ... and go the position of the profile
-                #print('h8*')
-                STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                EOF = False
-                found = False
-                while (not found and not EOF):
-                    if STNID == self.frames['profiles']['STNID'] :
-                        found = True 
-                    if not found:
-                        try:
-                            STNID,station = self.frames['worldmap']['stations_iterator'].__next__()
-                        except (StopIteration):
-                            EOF = True
-                if found:
-                    self.frames['worldmap']['current_station'] = station
-                    self.frames['worldmap']['STNID'] = STNID
-                #print('h9*')
-                self.frames['stats']['stations_iterator'] = \
-                    self.frames['worldmap']['stations_iterator'] 
-
-                # the stats window now inherits the current station from the
-                # worldmap
-                for key in ['STNID','current_station','stations_iterator']: 
-                    self.frames['stats'][key] = self.frames['worldmap'][key] 
-                #print('h10*')
-
-                # # we now only need inherit station position and go to first record
-                # for key in self.frames['worldmap'].keys():
-                #     self.frames['stats'][key] = self.frames['worldmap'][key]
-
-                # self.frames['stats']['records_current_station'] =\
-                #     get_records(pd.DataFrame().append(self.frames['stats']['current_station']))
-
-                # #print(self.frames['stats']['records_current_station'])
-                # self.frames['stats']['records_iterator'] = \
-                #                 self.frames['stats']['records_current_station'].iterrows()
-                # (self.frames['stats']['STNID'] , \
-                # self.frames['stats']['current_record_index']) , \
-                # self.frames['stats']['current_record_mod'] = \
-                #                 self.frames['stats']['records_iterator'].__next__()
-                
-
-
-
-
-
-
-                #self.set_statsviewfocus('STNID', self.profilefocus['STNID'])
-                ##self.set_statsviewfocus('DT'], self.profilefocus['DT'])
-                #self.axes['worldmap'].focus['STNID'] = self.profilefocus['STNID']
-                ##self.goto_datetime_worldmap(self.profilefocus['DT'],'after')
-                self.hover_active = False
-                self.refresh_plot_interface(only=['stats_lightupdate','worldmap_stations'],statsnewdata=False)
-    # def loading(self):
-    #     self.tbox['loading'].set_text('Loading...')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-    #     sleep(0.1)
-    # def ready(self):
-    #     self.tbox['loading'].set_text('Ready')
-    #     self.fig.canvas.draw()
-    #     self.fig.show()
-
-
-
diff --git a/dist/class4gl-0.1dev/lib/model.py b/dist/class4gl-0.1dev/lib/model.py
deleted file mode 100644
index 8760411..0000000
--- a/dist/class4gl-0.1dev/lib/model.py
+++ /dev/null
@@ -1,2214 +0,0 @@
-# 
-# CLASS
-# Copyright (c) 2010-2015 Meteorology and Air Quality section, Wageningen University and Research centre
-# Copyright (c) 2011-2015 Jordi Vila-Guerau de Arellano
-# Copyright (c) 2011-2015 Chiel van Heerwaarden
-# Copyright (c) 2011-2015 Bart van Stratum
-# Copyright (c) 2011-2015 Kees van den Dries
-# 
-# This file is part of CLASS
-# 
-# CLASS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published bygamma
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-# 
-# CLASS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with CLASS.  If not, see .
-#
-
-import copy as cp
-import numpy as np
-import sys
-import warnings
-import pandas as pd
-from ribtol_hw import zeta_hs2 , funcsche
-import logging
-#from SkewT.thermodynamics import Density
-#import ribtol
-
-grav = 9.81
-def esat(T):
-    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
-
-def qsat(T,p):
-    return 0.622 * esat(T) / p
-
-
-def ribtol(Rib, zsl, z0m, z0h): 
-    Rib = np.float64(Rib)
-    zsl = np.float64(zsl)
-    z0m = np.float64(z0m)
-    z0h = np.float64(z0h)
-    #print(Rib,zsl,z0m,z0h)
-    if(Rib > 0.):
-        L    = 1.
-        L0   = 2.
-    else:
-        L  = -1.
-        L0 = -2.
-    #print(Rib,zsl,z0m,z0h)
-    while (abs(L - L0) > 0.001):
-        L0      = L
-        fx      = Rib - zsl / L * (np.log(zsl / z0h) - psih(zsl / L) + psih(z0h / L)) / (np.log(zsl / z0m) - psim(zsl / L) + psim(z0m / L))**2.
-        Lstart  = L - 0.001*L
-        Lend    = L + 0.001*L
-        fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - psih(zsl / Lstart) + psih(z0h / Lstart)) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lstart) + psim(z0m / Lstart))**2.) \
-                  - (-zsl /  Lend   * (np.log(zsl / z0h) - psih(zsl / Lend  ) + psih(z0h / Lend  )) / \
-                                      (np.log(zsl / z0m) - psim(zsl / Lend  ) + psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
-        L       = L - fx / fxdif
-        #print(L,fx/fxdif)
-        if(abs(L) > 1e12):
-            break
-
-    return L
-  
-def psim(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-        #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psim = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-    return psim
-  
-def psih(zeta):
-    if(zeta <= 0):
-        x     = (1. - 16. * zeta)**(0.25)
-        psih  = 2. * np.log( (1. + x*x) / 2.)
-        #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-        #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-    else:
-        psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-    return psih
- 
-class model:
-    def __init__(self, model_input = None,debug_level=None):
-
-        """ set up logger (see: https://docs.python.org/2/howto/logging.html)
-        """
-
-        self.logger = logging.getLogger('model')
-        if debug_level is not None:
-            self.logger.setLevel(debug_level)
-
-        """ initialize the different components of the model """ 
-
-        if model_input is not None:
-            # class4gl style input
-            if 'pars' in model_input.__dict__.keys():
-
-                # we make a reference to the full input first, so we can dump it
-                # afterwards
-                self.input_c4gl = model_input
-
-                # we copy the regular parameters first. We keep the classical input
-                # format as self.input so that we don't have to change the entire
-                # model code.
-                self.input = cp.deepcopy(model_input.pars)
-
-                # we copy other sections we are interested in, such as profile
-                # data, and store it also under input
-
-                # I know we mess up a bit the structure of the class4gl_input, but
-                # we will make it clean again at the time of dumping data
-
-                # So here, we copy the profile data into self.input
-                # 1. Air circulation data 
-                if 'sw_ac' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ac']:
-                    self.input.__dict__['air_ac'] = model_input.__dict__['air_ac']
-                    #self.input.__dict__['air_ach'] = model_input.__dict__['air_ach']
-
-                    # correct pressure of levels according to surface pressure
-                    # error (so that interpolation is done in a consistent way)
-
-                    p_e = self.input.Ps - self.input.sp
-                    for irow in self.input.air_ac.index[::-1]:
-                       self.input.air_ac.p.iloc[irow] =\
-                        self.input.air_ac.p.iloc[irow] + p_e
-                       p_e = p_e -\
-                       (self.input.air_ac.p.iloc[irow]+p_e)/\
-                        self.input.air_ac.p.iloc[irow] *\
-                        self.input.air_ac.delpdgrav.iloc[irow]*grav
-
-
-
-                # 2. Air circulation data 
-                if 'sw_ap' in self.input.__dict__.keys() \
-                   and self.input.__dict__['sw_ap']:
-                    self.input.__dict__['air_ap'] = model_input.__dict__['air_ap']
-
-            # standard class input
-            else:
-                self.input = cp.deepcopy(model_input)
-
-    def load_yaml_dict(self,yaml_dict):
-        for key,data in yaml_dict.items():
-            if key == 'pars':
-                for keydata,value in data.items():
-                    self.__dict__[keydata] = value
-            elif key in ['air_ap','air_balloon','air_ac','air_ach']:
-                self.__dict__[key] = pd.DataFrame(data)
-            #elif key == 'sources':
-            #    self.__dict__[key] = data
-            elif key == 'out':
-                # lets convert it to a list of dictionaries
-                dictouttemp = pd.DataFrame(data).to_dict('list')
-            else: 
-                 warnings.warn("Key '"+key+"' is be implemented.")
-            #     self.__dict__[key] = data
-
-
-        self.tsteps = len(dictouttemp['h'])
-        self.out = model_output(self.tsteps)
-        for keydictouttemp in dictouttemp.keys():
-            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
-
-
-  
-    def run(self):
-        # initialize model variables
-        self.init()
-  
-        # time integrate model 
-        #for self.t in range(self.tsteps):
-        while self.t < self.tsteps:
-          
-            # time integrate components
-            self.timestep()
-  
-        # delete unnecessary variables from memory
-        self.exitmodel()
-    
-    def init(self):
-        # assign variables from input data
-        # initialize constants
-        self.Lv         = 2.5e6                 # heat of vaporization [J kg-1]
-        self.cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
-        self.rho        = 1.2                   # density of air [kg m-3]
-        self.k          = 0.4                   # Von Karman constant [-]
-        self.g          = 9.81                  # gravity acceleration [m s-2]
-        self.Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
-        self.Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
-        self.bolz       = 5.67e-8               # Bolzman constant [-]
-        self.rhow       = 1000.                 # density of water [kg m-3]
-        self.S0         = 1368.                 # solar constant [W m-2]
-
-        # A-Gs constants and settings
-        # Plant type:       -C3-     -C4-
-        self.CO2comp298 =  [68.5,    4.3    ]   # CO2 compensation concentration [mg m-3]
-        self.Q10CO2     =  [1.5,     1.5    ]   # function parameter to calculate CO2 compensation concentration [-]
-        self.gm298      =  [7.0,     17.5   ]   # mesophyill conductance at 298 K [mm s-1]
-        self.Ammax298   =  [2.2,     1.7    ]   # CO2 maximal primary productivity [mg m-2 s-1]
-        self.Q10gm      =  [2.0,     2.0    ]   # function parameter to calculate mesophyll conductance [-]
-        self.T1gm       =  [278.,    286.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.T2gm       =  [301.,    309.   ]   # reference temperature to calculate mesophyll conductance gm [K]
-        self.Q10Am      =  [2.0,     2.0    ]   # function parameter to calculate maximal primary profuctivity Ammax
-        self.T1Am       =  [281.,    286.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.T2Am       =  [311.,    311.   ]   # reference temperature to calculate maximal primary profuctivity Ammax [K]
-        self.f0         =  [0.89,    0.85   ]   # maximum value Cfrac [-]
-        self.ad         =  [0.07,    0.15   ]   # regression coefficient to calculate Cfrac [kPa-1]
-        self.alpha0     =  [0.017,   0.014  ]   # initial low light conditions [mg J-1]
-        self.Kx         =  [0.7,     0.7    ]   # extinction coefficient PAR [-]
-        self.gmin       =  [0.25e-3, 0.25e-3]   # cuticular (minimum) conductance [mm s-1]
-
-        self.mco2       =  44.;                 # molecular weight CO2 [g mol -1]
-        self.mair       =  28.9;                # molecular weight air [g mol -1]
-        self.nuco2q     =  1.6;                 # ratio molecular viscosity water to carbon dioxide
-
-        self.Cw         =  0.0016;              # constant water stress correction (eq. 13 Jacobs et al. 2007) [-]
-        self.wmax       =  0.55;                # upper reference value soil water [-]
-        self.wmin       =  0.005;               # lower reference value soil water [-]
-        self.R10        =  0.23;                # respiration at 10 C [mg CO2 m-2 s-1]
-        self.E0         =  53.3e3;              # activation energy [53.3 kJ kmol-1]
-
-        # Read switches
-        self.sw_ml      = self.input.sw_ml      # mixed-layer model switch
-        self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch
-        self.sw_fixft   = self.input.sw_fixft   # Fix the free-troposphere switch
-        self.sw_wind    = self.input.sw_wind    # prognostic wind switch
-        self.sw_sl      = self.input.sw_sl      # surface layer switch
-        self.sw_rad     = self.input.sw_rad     # radiation switch
-        self.sw_ls      = self.input.sw_ls      # land surface switch
-        self.ls_type    = self.input.ls_type    # land surface paramaterization (js or ags)
-        self.sw_cu      = self.input.sw_cu      # cumulus parameterization switch
-
-        self.sw_lit   = self.input.sw_lit       # switch for iterative L calculation
-        self.sw_ac    = self.input.sw_ac        # switch to take account of large-scale gridded Air Circulation (advection and subsidence) fields as input., eg., from ERA-INTERIM 
-        self.sw_ap    = self.input.sw_ap        # switch that tells to initialize with fitted Air Profiles (eg., from balloon soundings) as input
-  
-        # initialize mixed-layer
-        self.h          = self.input.h          # initial ABL height [m]
-        self.Ps         = self.input.Ps         # surface pressure [Pa]
-        self.sp         = self.input.sp         # This is also surface pressure
-                                                #but derived from the global data [Pa]
-        self.divU       = self.input.divU       # horizontal large-scale divergence of wind [s-1]
-        self.ws         = None                  # large-scale vertical velocity [m s-1]
-        self.wf         = None                  # mixed-layer growth due to radiative divergence [m s-1]
-        self.we         = -1.                   # entrainment velocity [m s-1]
-       
-         # Temperature 
-        self.theta      = self.input.theta      # initial mixed-layer potential temperature [K]
-        
-        
-        self.substep    = False
-        self.substeps   = 0
-
-
-
-        self.dtheta     = self.input.dtheta     # initial temperature jump at h [K]
-        self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = self.input.advtheta   # advection of heat [K s-1]
-        self.beta       = self.input.beta       # entrainment ratio for virtual heat [-]
-        self.wtheta     = self.input.wtheta     # surface kinematic heat flux [K m s-1]
-        self.wthetae    = None                  # entrainment kinematic heat flux [K m s-1]
- 
-        self.wstar      = 0.                    # convective velocity scale [m s-1]
- 
-        # 2m diagnostic variables 
-        self.T2m        = None                  # 2m temperature [K]
-        self.q2m        = None                  # 2m specific humidity [kg kg-1]
-        self.e2m        = None                  # 2m vapor pressure [Pa]
-        self.esat2m     = None                  # 2m saturated vapor pressure [Pa]
-        self.u2m        = None                  # 2m u-wind [m s-1]
-        self.v2m        = None                  # 2m v-wind [m s-1]
- 
-        # Surface variables 
-        self.thetasurf  = self.input.theta      # surface potential temperature [K]
-        self.thetavsurf = None                  # surface virtual potential temperature [K]
-        self.qsurf      = None                  # surface specific humidity [g kg-1]
-
-        # Mixed-layer top variables
-        self.P_h        = None                  # Mixed-layer top pressure [pa]
-        self.T_h        = None                  # Mixed-layer top absolute temperature [K]
-        self.q2_h       = None                  # Mixed-layer top specific humidity variance [kg2 kg-2]
-        self.CO22_h     = None                  # Mixed-layer top CO2 variance [ppm2]
-        self.RH_h       = None                  # Mixed-layer top relavtive humidity [-]
-        self.dz_h       = None                  # Transition layer thickness [-]
-        self.lcl        = None                  # Lifting condensation level [m]
-
-        # Virtual temperatures and fluxes
-        self.thetav     = None                  # initial mixed-layer potential temperature [K]
-        self.dthetav    = None                  # initial virtual temperature jump at h [K]
-        self.wthetav    = None                  # surface kinematic virtual heat flux [K m s-1]
-        self.wthetave   = None                  # entrainment kinematic virtual heat flux [K m s-1]
-       
-        
-        
-        
-        
-        
-        # Moisture 
-        self.q          = self.input.q          # initial mixed-layer specific humidity [kg kg-1]
-
-        self.dq         = self.input.dq         # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = self.input.gammaq     # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = self.input.advq       # advection of moisture [kg kg-1 s-1]
-        self.wq         = self.input.wq         # surface kinematic moisture flux [kg kg-1 m s-1]
-        self.wqe        = None                  # entrainment moisture flux [kg kg-1 m s-1]
-        self.wqM        = None                  # moisture cumulus mass flux [kg kg-1 m s-1]
-  
-        self.qsat       = None                  # mixed-layer saturated specific humidity [kg kg-1]
-        self.esat       = None                  # mixed-layer saturated vapor pressure [Pa]
-        self.e          = None                  # mixed-layer vapor pressure [Pa]
-        self.qsatsurf   = None                  # surface saturated specific humidity [g kg-1]
-        self.dqsatdT    = None                  # slope saturated specific humidity curve [g kg-1 K-1]
-      
-        
-        
-        # CO2
-        fac = self.mair / (self.rho*self.mco2)  # Conversion factor mgC m-2 s-1 to ppm m s-1
-        self.CO2        = self.input.CO2        # initial mixed-layer CO2 [ppm]
-        self.dCO2       = self.input.dCO2       # initial CO2 jump at h [ppm]
-        self.gammaCO2   = self.input.gammaCO2   # free atmosphere CO2 lapse rate [ppm m-1]
-        self.advCO2     = self.input.advCO2     # advection of CO2 [ppm s-1]
-        self.wCO2       = self.input.wCO2 * fac # surface kinematic CO2 flux [ppm m s-1]
-        self.wCO2A      = 0                     # surface assimulation CO2 flux [ppm m s-1]
-        self.wCO2R      = 0                     # surface respiration CO2 flux [ppm m s-1]
-        self.wCO2e      = None                  # entrainment CO2 flux [ppm m s-1]
-        self.wCO2M      = 0                     # CO2 mass flux [ppm m s-1]
-       
-        # Wind 
-        self.u          = self.input.u          # initial mixed-layer u-wind speed [m s-1]
-        self.du         = self.input.du         # initial u-wind jump at h [m s-1]
-        self.gammau     = self.input.gammau     # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = self.input.advu       # advection of u-wind [m s-2]
-        
-        self.v          = self.input.v          # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = self.input.dv         # initial u-wind jump at h [m s-1]
-        self.gammav     = self.input.gammav     # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = self.input.advv       # advection of v-wind [m s-2]
-         
-  # BEGIN -- HW 20170606
-        # z-coordinate for vertical profiles of stratification above the mixed-layer height
-
-        if self.sw_ac:
-        # this is the data frame with the grided profile on the L60 grid
-        # (subsidence, and advection) 
-            self.air_ac      = self.input.air_ac  # full level air circulation
-                                                  # forcing
-            # self.air_ach     = self.input.air_ach # half level air circulation
-            #                                       # forcing
-            
-
-        if self.sw_ap:
-        # this is the data frame with the fitted profile (including HAGL,
-        # THTA,WSPD, SNDU,WNDV PRES ...)
-            self.air_ap      = self.input.air_ap  # initial profile of potential temperature [K]
-
-            # just for legacy reasons...
-            if 'z' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(z= lambda x: x.HAGL)
-            if 'p' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(p= lambda x: x.PRES*100.)
-
-            indexh = np.where(self.air_ap.z.values == self.h)
-            if (len(indexh) == 0) or (indexh[0][0] !=1) or (indexh[0][1] !=2):
-                raise ValueError("Error input profile consistency: mixed- \
-                                 layer height needs to be equal to the second \
-                                 and third \
-                                 level of the vertical profile input!")
-            # initialize q from its profile when available
-            p_old = self.Ps
-            p_new = self.air_ap.p[indexh[0][0]]
-            
-            if ((p_old is not None) & (p_old != p_new)):
-                print("Warning: Ps input was provided ("+str(p_old)+\
-                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
-                    +str(p_new)+"Pa).")
-                                    
-            self.Ps = p_new
-            # these variables/namings are more convenient to work with in the code
-            # we will update the original variables afterwards
-            #self.air_ap['q'] = self.air_ap.QABS/1000.
-
-            self.air_ap = \
-                    self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q)
-            # we require the temperature fields, since we need to consider
-            # advection
-            # if self.sw_ac:
-            #     #self.air_ap['theta'] = self.air_ap['t'] *
-
-            #     # we consider self.sp in case of air-circulation input (for
-            #     # consistence)
-            #     self.air_ap['t'] = \
-            #                 self.air_ap.theta *  \
-            #                 (self.air_ap.p/self.sp)**(self.air_ap['R']/self.cp)
-            # else:
-            # we consider self.Ps in case of balloon input only 
-            self.air_ap = self.air_ap.assign(t = lambda x: \
-                               x.theta * (x.p/self.Ps)**(x.R/self.cp))
-
-            #self.air_ap['theta'] = self.air_ap.THTA
-            if 'u' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(u = lambda x: x.WNDU)
-            if 'v' not in list(self.air_ap.columns):
-                self.air_ap = self.air_ap.assign(v = lambda x: x.WNDV)
-
-            for var in ['theta','q','u','v']:
-
-                
-                if self.air_ap[var][1] != self.air_ap[var][0]:
-                    raise ValueError("Error input profile consistency: two \
-                                     lowest profile levels for "+var+" should \
-                                     be equal.")
-                
-                # initialize the value from its profile when available
-                value_old = self.__dict__[var]
-                value_new = self.air_ap[var][indexh[0][0]]
-                
-                if ((value_old is not None) & (value_old != value_new)):
-                    warnings.warn("Warning:  input was provided \
-                                     ("+str(value_old)+ "kg kg-1), \
-                                     but it is now overwritten by the first \
-                                     level (index 0) of air_ap]var\ which is \
-                                     different (" +str(value_new)+"K).")
-                                        
-                self.__dict__[var] = value_new
-
-                # make a profile of the stratification 
-                # please note that the stratification between z_pro[i] and
-                # z_pro[i+1] is given by air_ap.GTHT[i]
-
-                # self.air_ap.GTHT = np.gradient(self.air_ap.THTA) /
-                # np.gradient(self.z_pro)
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar})
-
-
-                self.__dict__['gamma'+var] = \
-                    self.air_ap['gamma'+var][np.where(self.h >= \
-                                                     self.air_ap.z)[0][-1]]
-
-
-
-        # the variable p_pro is just for diagnosis of lifted index
-            
-            
-
-            # input Ph is wrong, so we correct it according to hydrostatic equation
-            #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-
-            #if self.sw_ac:
-                # note that we use sp as surface pressure, which is determined
-                # from era-interim instead of the observations. This is to
-                # avoid possible failure of the interpolation routine
-                # self.air_ap.p = np.array([self.Ps, self.P_h, self.P_h-0.1]\
-                #                          + \
-                #                          list(self.air_ap.p[3:]))
-
-            # else:
-                # in the other case, it is updated at the time of calculting
-                # the statistics 
-
-# END -- HW 20170606      
-        #print(self.air_ap)
-
-        if self.sw_ac and not self.sw_ap:
-            raise ValueError("air circulation switch only possible when air \
-                             profiles are given")
-        
-        if self.sw_ac:
-
-            # # # we comment this out, because subsidence is calculated
-            # according to advection
-            # #interpolate subsidence towards the air_ap height coordinate
-            # self.air_ap['w'] = np.interp(self.air_ap.p,\
-            #                               self.air_ac.p,\
-            #                               self.air_ac.w) 
-            # #subsidence at the mixed-layer top
-            # self.w = self.air_ap.w[1]
-        
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-                # in case we didn't find any points, we just take the lowest one.
-                # actually, this can happen if ERA-INTERIM pressure levels are
-                # inconsistent with 
-                if in_ml.sum() == 0:
-                    warnings.warn(" no circulation points in the mixed layer \
-                                  found. We just take the bottom one.")
-                    in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-
-                for var in ['t','q','u','v']:
-    
-                   # calculation of the advection variables for the mixed layer
-                   # we weight by the hydrostatic thickness of each layer and
-                   # divide by the total thickness
-                   self.__dict__['adv'+var] = \
-                            ((self.air_ac['adv'+var+'_x'][in_ml] \
-                             + \
-                             self.air_ac['adv'+var+'_y'][in_ml])* \
-                            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                            self.air_ac['delpdgrav'][in_ml].sum()
-
-                   # calculation of the advection variables for the profile above
-                   # (lowest 3 values are not used by class)
-                   self.air_ap = self.air_ap.assign(**{'adv'+var : 0.})
-                   self.air_ap['adv'+var] = \
-                           np.interp(self.air_ap.p,\
-                                     self.air_ac.p,\
-                                     self.air_ac['adv'+var+'_x']) \
-                           + \
-                           np.interp(self.air_ap.p, \
-                                       self.air_ac.p, \
-                                       self.air_ac['adv'+var+'_y'])
-
-                # as an approximation, we consider that advection of theta in the
-                # mixed layer is equal to advection of t. This is a sufficient
-                # approximation since theta and t are very similar at the surface
-                # pressure.
-                self.__dict__['advtheta'] = self.__dict__['advt']
-
-
-            # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE
-            # # # CHECKED AGAIN SINCE THERE IS SIMILAR STRATEGY USED FOR 
-            # # # CALCULATING THE ADVECTION PROFILES
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # self.wrho = np.interp(self.P_h,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) 
-            # self.ws   = self.air_ap.w.iloc[1]
-
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                self.air_ap = self.air_ap.assign(wp = 0.)
-                self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                              self.air_ac.p, \
-                                              self.air_ac['wp'])
-                self.air_ap = self.air_ap.assign(R = 0.)
-                self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                     self.Rv*self.air_ap.q)
-                self.air_ap = self.air_ap.assign(rho = 0.)
-                self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-                
-                self.air_ap = self.air_ap.assign(w = 0.)
-                self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-                #print('hello w ini')
-
-                # Note: in case of sw_ac is False, we update it from prescribed
-                # divergence
-                self.ws   = self.air_ap.w[1]
-
-                # self.ws   = self.wrho/self.rho
-                # self.ws   = self.wrho/(self.P_h/ \
-                #                        (self.Rd*(1.-self.q) + self.Rv*self.q) * \
-                #                         self.theta) # this should be T!!!
-
-                # self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-                #                         + \
-                #                         self.air_ac['divU_y'][in_ml])* \
-                #             self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                #             self.air_ac['delpdgrav'][in_ml].sum() \
-        
-
-        # Tendencies 
-        self.htend      = None                  # tendency of CBL [m s-1]
-        self.thetatend  = None                  # tendency of mixed-layer potential temperature [K s-1]
-        self.dthetatend = None                  # tendency of potential temperature jump at h [K s-1]
-        self.qtend      = None                  # tendency of mixed-layer specific humidity [kg kg-1 s-1]
-        self.dqtend     = None                  # tendency of specific humidity jump at h [kg kg-1 s-1]
-        self.CO2tend    = None                  # tendency of CO2 humidity [ppm]
-        self.dCO2tend   = None                  # tendency of CO2 jump at h [ppm s-1]
-        self.utend      = None                  # tendency of u-wind [m s-1 s-1]
-        self.dutend     = None                  # tendency of u-wind jump at h [m s-1 s-1]
-        self.vtend      = None                  # tendency of v-wind [m s-1 s-1]
-        self.dvtend     = None                  # tendency of v-wind jump at h [m s-1 s-1]
-        self.dztend     = None                  # tendency of transition layer thickness [m s-1]
-  
-        # initialize surface layer
-        self.ustar      = self.input.ustar      # surface friction velocity [m s-1]
-        self.uw         = None                  # surface momentum flux in u-direction [m2 s-2]
-        self.vw         = None                  # surface momentum flux in v-direction [m2 s-2]
-        self.z0m        = self.input.z0m        # roughness length for momentum [m]
-        self.z0h        = self.input.z0h        # roughness length for scalars [m]
-        self.Cm         = 1e12                  # drag coefficient for momentum [-]
-        self.Cs         = 1e12                  # drag coefficient for scalars [-]
-        self.L          = None                  # Obukhov length [m]
-        self.Rib        = None                  # bulk Richardson number [-]
-        self.ra         = None                  # aerodynamic resistance [s m-1]
-  
-        # initialize radiation
-        self.lat        = self.input.lat        # latitude [deg]
-        #self.fc         = self.input.fc         # coriolis parameter [s-1]
-        self.fc         = 4. * np.pi/(24.*3600.) * np.sin(self.lat/180.*np.pi)
-        self.lon        = self.input.lon        # longitude [deg]
-        self.doy        = self.input.doy        # day of the year [-]
-        self.tstart     = self.input.tstart     # time of the day [-]
-        self.cc         = self.input.cc         # cloud cover fraction [-]
-        self.Swin       = None                  # incoming short wave radiation [W m-2]
-        self.Swout      = None                  # outgoing short wave radiation [W m-2]
-        self.Lwin       = None                  # incoming long wave radiation [W m-2]
-        self.Lwout      = None                  # outgoing long wave radiation [W m-2]
-        self.Q          = self.input.Q          # net radiation [W m-2]
-        self.dFz        = self.input.dFz        # cloud top radiative divergence [W m-2] 
-  
-        # initialize land surface
-        self.wg         = self.input.wg         # volumetric water content top soil layer [m3 m-3]
-        self.w2         = self.input.w2         # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = self.input.Tsoil      # temperature top soil layer [K]
-        self.T2         = self.input.T2         # temperature deeper soil layer [K]
-                           
-        self.a          = self.input.a          # Clapp and Hornberger retention curve parameter a [-]
-        self.b          = self.input.b          # Clapp and Hornberger retention curve parameter b [-]
-        self.p          = self.input.p          # Clapp and Hornberger retention curve parameter p [-]
-        self.CGsat      = self.input.CGsat      # saturated soil conductivity for heat
-                           
-        self.wsat       = self.input.wsat       # saturated volumetric water content ECMWF config [-]
-        self.wfc        = self.input.wfc        # volumetric water content field capacity [-]
-        self.wwilt      = self.input.wwilt      # volumetric water content wilting point [-]
-                           
-        self.C1sat      = self.input.C1sat      
-        self.C2ref      = self.input.C2ref      
-
-        self.c_beta     = self.input.c_beta     # Curvature plant water-stress factor (0..1) [-]
-        
-        self.LAI        = self.input.LAI        # leaf area index [-]
-        self.gD         = self.input.gD         # correction factor transpiration for VPD [-]
-        self.rsmin      = self.input.rsmin      # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = self.input.rssoilmin  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = self.input.alpha      # surface albedo [-]
-  
-        self.rs         = 1.e6                  # resistance transpiration [s m-1]
-        self.rssoil     = 1.e6                  # resistance soil [s m-1]
-                           
-        self.Ts         = self.input.Ts         # surface temperature [K]
-                           
-        self.cveg       = self.input.cveg       # vegetation fraction [-]
-        self.Wmax       = self.input.Wmax       # thickness of water layer on wet vegetation [m]
-        self.Wl         = self.input.Wl         # equivalent water layer depth for wet vegetation [m]
-        self.cliq       = None                  # wet fraction [-]
-                          
-        self.Lambda     = self.input.Lambda     # thermal diffusivity skin layer [-]
-  
-        self.Tsoiltend  = None                  # soil temperature tendency [K s-1]
-        self.wgtend     = None                  # soil moisture tendency [m3 m-3 s-1]
-        self.Wltend     = None                  # equivalent liquid water tendency [m s-1]
-  
-        self.H          = None                  # sensible heat flux [W m-2]
-        self.LE         = None                  # evapotranspiration [W m-2]
-        self.LEliq      = None                  # open water evaporation [W m-2]
-        self.LEveg      = None                  # transpiration [W m-2]
-        self.LEsoil     = None                  # soil evaporation [W m-2]
-        self.LEpot      = None                  # potential evaporation [W m-2]
-        self.LEref      = None                  # reference evaporation using rs = rsmin / LAI [W m-2]
-        self.G          = None                  # ground heat flux [W m-2]
-
-        # initialize A-Gs surface scheme
-        self.c3c4       = self.input.c3c4       # plant type ('c3' or 'c4')
-
-        # initialize cumulus parameterization
-        self.sw_cu      = self.input.sw_cu      # Cumulus parameterization switch
-        self.dz_h       = self.input.dz_h       # Transition layer thickness [m]
-        self.ac         = 0.                    # Cloud core fraction [-]
-        self.M          = 0.                    # Cloud core mass flux [m s-1] 
-        self.wqM        = 0.                    # Cloud core moisture flux [kg kg-1 m s-1] 
-  
-        # initialize time variables
-        self.tsteps = int(np.floor(self.input.runtime / self.input.dt))
-        self.dt     = self.input.dt
-        self.dtcur      = self.dt
-        self.firsttime = True
-        self.t      = 0
- 
-        # Some sanity checks for valid input
-        if (self.c_beta is None): 
-            self.c_beta = 0                     # Zero curvature; linear response
-        assert(self.c_beta >= 0 or self.c_beta <= 1)
-
-        # initialize output
-        self.out = model_output(self.tsteps)
- 
-        self.statistics()
-  
-        # calculate initial diagnostic variables
-        if(self.sw_rad):
-            self.run_radiation()
- 
-        if(self.sw_sl):
-            for i in range(10): 
-                self.run_surface_layer()
-  
-        if(self.sw_ls):
-            self.run_land_surface()
-
-        if(self.sw_cu):
-            self.run_mixed_layer()
-            self.run_cumulus()
-        
-        if(self.sw_ml):
-            self.run_mixed_layer()
-
-    def timestep(self):
-
-        self.dtmax = +np.inf
-        self.logger.debug('before stats') 
-        self.statistics()
-
-        # run radiation model
-        self.logger.debug('before rad') 
-        if(self.sw_rad):
-            self.run_radiation()
-  
-        # run surface layer model
-        if(self.sw_sl):
-            self.logger.debug('before surface layer') 
-            self.run_surface_layer()
-        
-        # run land surface model
-        if(self.sw_ls):
-            self.logger.debug('before land surface') 
-            self.run_land_surface()
- 
-        # run cumulus parameterization
-        if(self.sw_cu):
-            self.logger.debug('before cumulus') 
-            self.run_cumulus()
-   
-        self.logger.debug('before mixed layer') 
-        # run mixed-layer model
-        if(self.sw_ml):
-            self.run_mixed_layer()
-        self.logger.debug('after mixed layer') 
- 
-        #get first profile data point above mixed layer
-        if self.sw_ap:
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            
-            if (self.sw_ac is not None) and ('w' in self.sw_ac):
-                # here we correct for the fact that the upper profile also
-                # shifts in the vertical.
-
-                diffhtend = self.htend - self.air_ap.w[zidx_first]
-                if diffhtend > 0:
-                    dtmax_new = (self.air_ap.z[zidx_first] - self.h)/ diffhtend
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            else:
-                if self.htend > 0:
-                    dtmax_new = ( self.air_ap.z[zidx_first] - self.h)/self.htend 
-                    self.dtmax= min(dtmax_new,self.dtmax)
-            #print(self.h,zidx_first,self.ws,self.air_ap.z)
-
-        
-        #print(self.t,self.dtcur,self.dt,dtmax,self.air_ap.z[zidx_first],self.h)
-        self.logger.debug('before store') 
-        self.substep =  (self.dtcur > self.dtmax)
-        if self.substep:
-            dtnext = self.dtcur - self.dtmax
-            self.dtcur = self.dtmax
-
-        #print(self.t,self.dtcur,self.dt,dtmax,self.tstart + self.t*self.dt/3600.)
-
-        # HW: this will be done multiple times in case of a substep is needed
-        # store output before time integration
-        if self.firsttime:
-            self.store()
-  
-        self.logger.debug('before integrate land surface ('+str(self.t)+', '+str(self.dtcur)+')')
-        # time integrate land surface model
-        if(self.sw_ls):
-            self.integrate_land_surface()
-        self.logger.debug('before integrate mixed layer') 
-        # time integrate mixed-layer model
-        if(self.sw_ml):
-            self.integrate_mixed_layer() 
-        self.logger.debug('after integrate mixed layer') 
-        if self.substep:
-            self.dtcur = dtnext
-            self.firsttime = False
-            self.substeps += 1
-        else:
-            self.dtcur = self.dt
-            self.t += 1 
-            self.firsttime = True
-            self.substeps = 0
-        self.logger.debug('going to next step')
-        
-        
-  
-    def statistics(self):
-        # Calculate virtual temperatures 
-        self.thetav   = self.theta  + 0.61 * self.theta * self.q
-        self.wthetav  = self.wtheta + 0.61 * self.theta * self.wq
-        self.dthetav  = (self.theta + self.dtheta) * (1. + 0.61 * (self.q + self.dq)) - self.theta * (1. + 0.61 * self.q)
-        # Mixed-layer top properties
-        self.P_h    = self.Ps - self.rho * self.g * self.h
-        # else:
-            # in the other case, it is updated at the time that the profile is
-            # updated (and at the initialization
-
-        self.T_h    = self.theta - self.g/self.cp * self.h
-
-        #self.P_h    = self.Ps / np.exp((self.g * self.h)/(self.Rd * self.theta))
-        #self.T_h    = self.theta / (self.Ps / self.P_h)**(self.Rd/self.cp)
-
-        self.RH_h   = self.q / qsat(self.T_h, self.P_h)
-
-        # Find lifting condensation level iteratively
-        if(self.t == 0):
-            self.lcl = self.h
-            RHlcl = 0.5
-        else:
-            RHlcl = 0.9998 
-
-        itmax = 30
-        it = 0
-        while(((RHlcl <= 0.9999) or (RHlcl >= 1.0001)) and it<itmax):
-            self.lcl    += (1.-RHlcl)*1000.
-            p_lcl        = self.Ps - self.rho * self.g * self.lcl
-            T_lcl        = self.theta - self.g/self.cp * self.lcl
-            RHlcl        = self.q / qsat(T_lcl, p_lcl)
-            it          += 1
-
-    def run_cumulus(self):
-        # Calculate the mixed-layer top variance of q and CO2
-        if(self.wstar > 0):
-            self.q2_h   = -(self.wqe  + self.wqM  ) * self.dq   * self.h / (self.dz_h * self.wstar)
-            self.CO22_h = -(self.wCO2e+ self.wCO2M) * self.dCO2 * self.h / (self.dz_h * self.wstar)
-        else:
-            self.q2_h   = 0.
-            self.CO22_h = 0.
-
-        # calculate cloud core fraction (ac), mass flux (M) and moisture flux (wqM)
-        self.ac     = max(0., 0.5 + (0.36 * np.arctan(1.55 * ((self.q - qsat(self.T_h, self.P_h)) / self.q2_h**0.5))))
-        self.M      = self.ac * self.wstar
-        self.wqM    = self.M * self.q2_h**0.5
-
-        # Only calculate CO2 mass-flux if mixed-layer top jump is negative
-        if(self.dCO2 < 0):
-            self.wCO2M  = self.M * self.CO22_h**0.5
-        else:
-            self.wCO2M  = 0.
-
-    def run_mixed_layer(self):
-        if(not self.sw_sl):
-            # decompose ustar along the wind components
-            self.uw = - np.sign(self.u) * (self.ustar ** 4. / (self.v ** 2. / self.u ** 2. + 1.)) ** (0.5)
-            self.vw = - np.sign(self.v) * (self.ustar ** 4. / (self.u ** 2. / self.v ** 2. + 1.)) ** (0.5)
-
-
-
-        # calculate large-scale vertical velocity (subsidence)
-        if not ((self.sw_ac is not None) and ('w' in self.sw_ac)):
-            self.ws = -self.divU * self.h
-        # else:
-        #     in case the air circulation switch is turned on, subsidence is
-        #     calculated from the circulate profile at the initialization and
-        #     in the integrate_mixed_layer routine
-              
-        # calculate compensation to fix the free troposphere in case of subsidence 
-        if(self.sw_fixft):
-            w_th_ft  = self.gammatheta * self.ws
-            w_q_ft   = self.gammaq     * self.ws
-            w_CO2_ft = self.gammaCO2   * self.ws 
-        else:
-            w_th_ft  = 0.
-            w_q_ft   = 0.
-            w_CO2_ft = 0. 
-      
-        # calculate mixed-layer growth due to cloud top radiative divergence
-        self.wf = self.dFz / (self.rho * self.cp * self.dtheta)
-       
-        # calculate convective velocity scale w* 
-        if(self.wthetav > 0.):
-            self.wstar = ((self.g * self.h * self.wthetav) / self.thetav)**(1./3.)
-        else:
-            self.wstar  = 1e-6;
-      
-        # Virtual heat entrainment flux 
-        self.wthetave    = -self.beta * self.wthetav 
-        
-        # compute mixed-layer tendencies
-        if(self.sw_shearwe):
-            self.we    = (-self.wthetave + 5. * self.ustar ** 3. * self.thetav / (self.g * self.h)) / self.dthetav
-        else:
-            self.we    = -self.wthetave / self.dthetav
-        # Don't allow boundary layer shrinking if wtheta < 0 
-        if(self.we < 0):
-            self.we = 0.
-
-        # Calculate entrainment fluxes
-        self.wthetae     = -self.we * self.dtheta
-        self.wqe         = -self.we * self.dq
-        self.wCO2e       = -self.we * self.dCO2
-        
-        htend_pre       = self.we + self.ws + self.wf - self.M
-        
-        #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-        
- 
-        #print('thetatend_pre',thetatend_pre)
-        
-        #preliminary boundary-layer top change
-        #htend_pre = self.we + self.ws + self.wf - self.M
-        #preliminary change in temperature jump
-        dthetatend_pre  = self.gammatheta * (self.we + self.wf - self.M) - \
-                          thetatend_pre + w_th_ft
-        
-        dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt)
-        l_entrainment = True
-
-        if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
-            l_entrainment = False
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! temperature jump is at the lower limit \
-                          and is not growing: entrainment is disabled for this (sub)timestep.") 
-        elif dtheta_pre < 0.1:
-            dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
-            l_entrainment = True
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          " Warning! Potential temperature jump at mixed- \
-                          layer height would become too low limiting timestep \
-                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
-            self.dtmax = min(self.dtmax,dtmax_new)
-            warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "next subtimestep, entrainment will be disabled")
-            #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
-
-
-
-        # when entrainment is disabled, we just use the simplified formulation
-        # as in Wouters et al., 2013 (section 2.2.1)
-
-        self.dthetatend = l_entrainment*dthetatend_pre + \
-                        (1.-l_entrainment)*0.
-        self.thetatend = l_entrainment*thetatend_pre + \
-                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
-        self.htend = l_entrainment*htend_pre + \
-                     (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
-        #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
-        #stop
-
-
-        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
-        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
-
-
-        # self.qtend = l_entrainment*qtend_pre + \
-        #              (1.-l_entrainment)*( (self.wq  - self.wqM)/self.h + self.advq)
-        # self.CO2tend = l_entrainment*CO2tend_pre + \
-        #              (1.-l_entrainment)*( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-
-
-        #     # part of the timestep for which the temperature mixed-layer jump
-        #     # was changing, and for which entrainment took place. For the other
-        #     # part, we don't assume entrainment anymore, and we use the
-        #     # simplified formulation  of Wouters et al., 2013
-
-        #     #self.htend =(self.dthetatend + self.thetatend - w_th_ft)/self.gammatheta +self.ws
-        #   
-        #     self.thetatend = l_entrainment*(self.gammatheta * (self.we + self.wf - self.M) - \
-        #                      self.dthetatend + w_th_ft) + \
-        #                      l_entrainment*((self.wtheta  ) / self.h + self.advtheta)
-        #     self.htend = fac*self.htend + \
-        #                  (1.-fac)* (( self.ws  - self.M)+((self.wtheta) / self.h + self.advtheta)/self.gammatheta)
-        #     self.qtend = fac*self.qtend + (1.-fac)* ( (self.wq  - self.wqM)/self.h + self.advq)
-        #     self.CO2tend = fac*self.qtend + (1.-fac)* ( (self.wCO2  - self.wCO2M)/self.h + self.advCO2)
-
-        #     #self.thetatend += (self.wtheta - self.wthetae             ) / self.h + self.advtheta
-
-        # else:
-        #     #self.htend = htend_pre
-        #     self.dthetatend = dthetatend_pre
-        #     self.thetatend = thetatend_pre
-        
-        self.dqtend      = self.gammaq     * (self.we*l_entrainment + self.wf - self.M) - self.qtend     + w_q_ft
-        self.dCO2tend    = self.gammaCO2   * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend   + w_CO2_ft
-     
-        # assume u + du = ug, so ug - u = du
-        if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
-  
-            self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
-            self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
-        
-        # tendency of the transition layer thickness
-        if(self.ac > 0 or self.lcl - self.h < 300):
-            self.dztend = ((self.lcl - self.h)-self.dz_h) / 7200.
-        else:
-            self.dztend = 0.
-
-   
-    def integrate_mixed_layer(self):
-        # set values previous time step
-        h0      = self.h
-        
-        theta0  = self.theta
-        dtheta0 = self.dtheta
-        q0      = self.q
-        dq0     = self.dq
-        CO20    = self.CO2
-        dCO20   = self.dCO2
-        
-        u0      = self.u
-        du0     = self.du
-        v0      = self.v
-        dv0     = self.dv
-
-        dz0     = self.dz_h
-  
-        # integrate mixed-layer equations
-        
-            
-
-# END -- HW 20170606        
-        self.h        = h0      + self.dtcur * self.htend
-        # print(self.h,self.htend)
-        # stop
-        self.theta    = theta0  + self.dtcur * self.thetatend
-        #print(dtheta0,self.dtcur,self.dthetatend)
-        self.dtheta   = dtheta0 + self.dtcur * self.dthetatend
-        self.q        = q0      + self.dtcur * self.qtend
-        self.dq       = dq0     + self.dtcur * self.dqtend
-        self.CO2      = CO20    + self.dtcur * self.CO2tend
-        self.dCO2     = dCO20   + self.dtcur * self.dCO2tend
-        self.dz_h     = dz0     + self.dtcur * self.dztend
-            
-        # Limit dz to minimal value
-        dz0 = 50
-        if(self.dz_h < dz0):
-            self.dz_h = dz0 
-  
-        if(self.sw_wind):
-            self.u        = u0      + self.dtcur * self.utend
-            self.du       = du0     + self.dtcur * self.dutend
-            self.v        = v0      + self.dtcur * self.vtend
-            self.dv       = dv0     + self.dtcur * self.dvtend
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-            for var in ['t','q','u','v']:
-                #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)):
-
-            # take into account advection for the whole profile
-                
-                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
-
-            var = 'z'
-            #print(self.air_ap[var])
-                #     print(self.air_ap['adv'+var])
-
-
-
-
-            #moving the profile vertically according to the vertical wind
-                #if ((self.air_ap.z is not None) and (self.air_ap.w is not None)):
-
-
-            # air_apvarold = pd.Series(np.array(self.air_ap.z))
-            # print(self.h,self.ws,self.htend,self.dtcur,air_apvarold )
-            # stop
-
-
-                # # recalculate subsidence at the mixed-layer top from the profile. Yet, this would be overwritten from the external forcing.
-                # self.ws = np.interp(self.h , self.z_pro,self.w_pro)
-
-            #As t is updated, we also need to recalculate theta (and R)
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-
-            # air_aptheta_old = pd.Series(self.air_ap['theta'])
-            self.air_ap['theta'] = \
-                        self.air_ap.t * \
-                        (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp)
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            zidx_first = np.where(self.air_ap.z > self.h)[0][0]
-            self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \
-                                         self.dtcur * self.air_ap.w[zidx_first:]
-
-#            print(self.t, self.dtcur,self.dt,self.air_ap.w[zidx_first])
-#            print(self.t, self.dtcur,self.dt,self.htend)
-
-            # # the pressure levels of the profiles are recalculated according to
-            # # there new height (after subsidence)
-            # self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] - \
-            #         self.air_ap.p[zidx_first:]/self.air_ap['R'][zidx_first:]/self.air_ap['t'][zidx_first:] \
-            #         * self.dtcur *  self.air_ap.w[zidx_first:]
-
-            self.air_ap.p[zidx_first:] = self.air_ap.p[zidx_first:] + \
-                    self.dtcur * self.air_ap.wp[zidx_first:]
-
-            #print(pd.DataFrame([self.air_ap.z,air_apvarold]))
-        # note that theta and q itself are updated by class itself
-
-    
-        if self.sw_ap:
-            # Just for model consistency preservation purposes, we set the
-            # theta variables of the mixed-layer to nan values, since the
-            # mixed-layer values should overwritte by the mixed-layer
-            # calculations of class.
-            self.air_ap['theta'][0:3] = np.nan 
-            self.air_ap['p'][0:3] = np.nan 
-            self.air_ap['q'][0:3] = np.nan 
-            self.air_ap['u'][0:3] = np.nan 
-            self.air_ap['v'][0:3] = np.nan 
-            self.air_ap['t'][0:3] = np.nan 
-            self.air_ap['z'][0:3] = np.nan 
-
-            # Update the vertical profiles: 
-            #   - new mixed layer properties( h, theta, q ...)
-            #   - any data points below the new mixed-layer height are removed
-
-            # Three data points at the bottom that describe the mixed-layer
-            # properties
-            air_ap_head = self.air_ap.iloc[0:3] # make an empty table with similar
-                                           # columns as air_ap
-            # air_ap_head['z'].iloc[0] = 2.
-            # air_ap_head['z'].iloc[1] = self.__dict__['h']
-            # air_ap_head['z'].iloc[2] = self.__dict__['h']
-            air_ap_head.values[:,list(air_ap_head.columns).index('z')] = \
-                        [2.,self.__dict__['h'],self.__dict__['h']]
-            for var in ['theta','q','u','v']:
-
-                air_ap_head.values[:,list(air_ap_head.columns).index(var)] = \
-                        [self.__dict__[var], \
-                         self.__dict__[var], \
-                         self.__dict__[var] + self.__dict__['d'+var]]
-                
-            #print(self.air_ap)
-
-            # This is the remaining profile considering the remaining
-            # datapoints above the mixed layer height
-            air_ap_tail = self.air_ap.iloc[3:]
-            air_ap_tail = air_ap_tail[air_ap_tail.z > self.h]
-
-            # print('h',self.h)
-            # # only select samples monotonically increasing with height
-            # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            # air_ap_tail = pd.DataFrame()
-            # theta_low = self.theta
-            # z_low =     self.h
-            # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            # for ibottom in range(1,len(air_ap_tail_orig)):
-            #     if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +2.:
-            #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
-
-
-
-
-            # make theta increase strong enough to avoid numerical
-            # instability
-            air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-            air_ap_tail = pd.DataFrame()
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-            theta_low = self.theta
-            z_low =     self.h
-            ibottom = 0
-            itop = 0
-            # print(air_ap_tail_orig)
-            # stop
-
-            # HW: this is the lower limit that we use for gammatheta, which is
-            # there to avoid model crashes. Besides on this limit, the upper
-            # air profile is modified in a way that is still conserves total
-            # quantities of moisture and temperature. The limit is set by trial
-            # and error. The numerics behind the crash should be investigated
-            # so that a cleaner solution can be provided.
-            gammatheta_lower_limit = 0.002
-            while ((itop in range(0,1)) or (itop != ibottom)):
-                theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-                z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-                if (
-                    #(z_mean > (z_low+0.2)) and \
-                    #(theta_mean > (theta_low+0.02) ) and \
-                    (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \
-                  (itop >= (len(air_ap_tail_orig)-1)) \
-                   :
-
-                    air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                    ibottom = itop+1
-                    theta_low = air_ap_tail.theta.iloc[-1]
-                    z_low =     air_ap_tail.z.iloc[-1]
-    
-
-                itop +=1
-                # elif  (itop > len(air_ap_tail_orig)-10):
-                #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-                #print(itop,ibottom)
-
-            if itop > 1:
-                    warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! Temperature profile was too steep. \
-                                  Modifying profile: "+ \
-                                  str(itop - 1)+ " measurements were dropped \
-                                  and replaced with its average \
-                                  Modifying profile. \
-                                  mean with next profile point(s).") 
-
-
-            self.air_ap = pd.concat((air_ap_head,\
-                                     air_ap_tail,\
-                                     air_ap_tail_orig[itop:])).reset_index().drop('index',\
-                                                                      axis=1)
-
-            if  self.sw_ac:
-                qvalues = \
-                        self.air_ap.values[:,list(self.air_ap.columns).index('q')]
-
-                self.air_ap.values[:,list(self.air_ap.columns).index('R')] = \
-                        (self.Rd*(1.-qvalues) + self.Rv*qvalues)
-                #self.Ph = self.Ps - self.h * self.g * Density(self.T2m,self.Ps,self.q)
-                self.P_h    = self.Ps - self.rho * self.g * self.h
-                self.air_ap.values[:3,list(self.air_ap.columns).index('p')] = \
-                        [self.Ps,  self.P_h, self.P_h-0.1]
-
-                self.air_ap.t = \
-                            self.air_ap.theta * \
-                            (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp)
-
-
-        # WARNING: self.sw_ac always requires self.sw_ap for now!!!
-
-
-
-
-        # else:
-            # in the other case, it is updated at the time the statistics are
-            # calculated 
-
-        if (self.sw_ac is not None) and ('adv' in self.sw_ac):
-
-
-            self.P_h    = self.Ps - self.rho * self.g * self.h
-            in_ml = (self.air_ac.p >= self.P_h)
-
-            if in_ml.sum() == 0:
-                warnings.warn(" no circulation points in the mixed layer \
-                              found. We just take the bottom one.")
-                in_ml = self.air_ac.index == (len(self.air_ac) - 1)
-            for var in ['t','q','u','v']:
-
-                # calculation of the advection variables for the mixed-layer
-                # these will be used for the next timestep
-                # Warning: w is excluded for now.
-
-                self.__dict__['adv'+var] = \
-                        ((self.air_ac['adv'+var+'_x'][in_ml] \
-                         + \
-                         self.air_ac['adv'+var+'_y'][in_ml])* \
-                        self.air_ac['delpdgrav'][in_ml]).sum()/ \
-                        self.air_ac['delpdgrav'][in_ml].sum()
-
-                # calculation of the advection variables for the profile above
-                # the mixed layer (also for the next timestep)
-                self.air_ap['adv'+var] = \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p,\
-                                              self.air_ac['adv'+var+'_x']) \
-                                    + \
-                                    np.interp(self.air_ap.p,\
-                                              self.air_ac.p, \
-                                              self.air_ac['adv'+var+'_y'])
-                # if var == 't':
-                #     print(self.air_ap['adv'+var])
-                #     stop
-
-            # as an approximation, we consider that advection of theta in the
-            # mixed layer is equal to advection of t. This is a sufficient
-            # approximation since theta and t are very similar at the surface
-            # pressure.
-
-            self.__dict__['advtheta'] = self.__dict__['advt']
-
-        if (self.sw_ac is not None) and ('w' in self.sw_ac):
-            # update the vertical wind profile
-            self.air_ap['wp'] = np.interp(self.air_ap.p, \
-                                          self.air_ac.p, \
-                                          self.air_ac['wp'])
-            self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \
-                                                 self.Rv*self.air_ap.q)
-            self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/  self.air_ap.t
-            
-            air_apwold = self.air_ap['w']
-            self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g
-            #print('hello w upd')
-
-            # # # WARNING, THIS DOESN't GIVE THE EXPECTED VALUE!!!
-            # # interpolate subsidence x density
-            # self.air_ap['wrho'] = \
-            #            np.interp(self.air_ap.p,\
-            #                      self.air_ach.p,\
-            #                      self.air_ach['wrho']) \
-            #     
-            # self.air_ap['w'] = \
-            #     self.air_ap['wrho']/(self.air_ap.p/ \
-            #                          (self.Rd*(1.-self.air_ap.q) + \
-            #                           self.Rv*self.air_ap.q)* \
-            #                          self.air_ap.TEMP)
-            # # self.wrho = np.interp(self.P_h,\
-            # #                      self.air_ach.p,\
-            # #                      self.air_ach['wrho']) \
-
-
-
-            # Also update the vertical wind at the mixed-layer height
-            # (subsidence)
-            self.ws   = self.air_ap.w[1]
-        #    print('ws',self.ws,self.air_ap.wp[1],self.air_ap.R[1],self.air_ap.t[1],self.air_ap.q[1])
-
-            ## Finally, we update he 
-            #self.__dict__['divU'] = ((self.air_ac['divU_x'][in_ml] \
-            #                        + \
-            #                        self.air_ac['divU_y'][in_ml])* \
-            #            self.air_ac['delpdgrav'][in_ml]).sum()/ \
-            #            self.air_ac['delpdgrav'][in_ml].sum() 
-            
-
-        if self.sw_ap:
-            for var in ['theta','q','u','v']:
-
-                # update of the slope (gamma) for the different variables, for
-                # the next timestep!
-
-                # there is an warning message that tells about dividing through
-                # zero, which we ignore
-
-                with np.errstate(divide='ignore'):
-                    gammavar = list(np.array(self.air_ap[var][1:].values - \
-                                             self.air_ap[var][:-1].values) \
-                                    / np.array(self.air_ap['z'][1:].values - \
-                                               self.air_ap['z'][:-1].values))
-
-                    # add last element twice (since we have one element less)
-                gammavar.append(gammavar[-1])
-                gammavar = np.array(gammavar)
-                self.air_ap['gamma'+var] = gammavar
-
-                # Based on the above, update the gamma value at the mixed-layer
-                # top
-                self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >=
-                                                                     self.air_ap.z)[0][-1]]
-
-            
-    def run_radiation(self):
-        sda    = 0.409 * np.cos(2. * np.pi * (self.doy - 173.) / 365.)
-        sinlea = np.sin(2. * np.pi * self.lat / 360.) * np.sin(sda) - np.cos(2. * np.pi * self.lat / 360.) * np.cos(sda) * np.cos(2. * np.pi * (self.t * self.dt + self.tstart * 3600.) / 86400. - 2. * np.pi * self.lon / 360.)
-        sinlea = max(sinlea, 0.0001)
-        
-        Ta  = self.theta * ((self.Ps - 0.1 * self.h * self.rho * self.g) / self.Ps ) ** (self.Rd / self.cp)
-  
-        Tr  = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc)
-  
-        self.Swin  = self.S0 * Tr * sinlea
-        self.Swout = self.alpha * self.S0 * Tr * sinlea
-        
-        
-        self.Lwin  = 0.8 * self.bolz * Ta ** 4.
-        self.Lwout = self.bolz * self.Ts ** 4.
-          
-        self.Q     = self.Swin - self.Swout + self.Lwin - self.Lwout
-        #print('Q',self.Q,self.Swin,self.Swout,self.Lwin,self.Lwout)
-  
-    def run_surface_layer(self):
-        # HW: I had to raise the minimum wind speed to make the simulation with
-        # the non-iterative solution stable (this solution was a wild guess, so I don't
-        # know the exact problem of the instability in case of very low wind
-        # speeds yet)
-        #ueff           = max(0.01, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        # version of 20180730 where there are still some runs crashing. Maybe
-        # an upper limit should be set on the monin-obukhov length instead of
-        # a lower limmit on the wind speed?
-        #ueff           = max(0.1, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        ueff           = max(0.5, np.sqrt(self.u**2. + self.v**2. + self.wstar**2.))
-
-        
-        self.thetasurf = self.theta + self.wtheta / (self.Cs * ueff)
-        qsatsurf       = qsat(self.thetasurf, self.Ps)
-        cq             = (1. + self.Cs * ueff * self.rs) ** -1.
-        self.qsurf     = (1. - cq) * self.q + cq * qsatsurf
-
-        self.thetavsurf = self.thetasurf * (1. + 0.61 * self.qsurf)
-  
-        zsl       = 0.1 * self.h
-        self.Rib  = self.g / self.thetav * zsl * (self.thetav - self.thetavsurf) / ueff**2.
-        
-
-
-        if self.sw_lit:
-            self.Rib  = min(self.Rib, 0.2)
-            self.L     = ribtol(self.Rib, zsl, self.z0m, self.z0h)  # Slow python iteration
-            self.zeta  = zsl/self.L
-            #self.L    = ribtol.ribtol(self.Rib, zsl, self.z0m, self.z0h) # Fast C++ iteration
-            
-        
-            self.Cm   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) ** 2.
-            self.Cs   = self.k**2. / (np.log(zsl / self.z0m) - psim(self.zeta) + psim(self.z0m / zsl* self.zeta)) / (np.log(zsl / self.z0h) - self.psih(self.zeta) + self.psih(self.z0h / zsl* self.zeta))
-            
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-        
-     
-            # diagnostic meteorological variables
-            self.T2m    = self.thetasurf - self.wtheta / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.q2m    = self.qsurf     - self.wq     / self.ustar / self.k * (np.log(2. / self.z0h) - psih(2. / zsl* self.zeta) + psih(self.z0h / zsl* self.zeta))
-            self.u2m    =                - self.uw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + psim(self.z0m / zsl* self.zeta))
-            self.v2m    =                - self.vw     / self.ustar / self.k * (np.log(2. / self.z0m) - psim(2. / zsl* self.zeta) + self.psim(self.z0m / zsl* self.zeta))
-            
-            # diagnostic meteorological variables
-        else:
-            
-            ## circumventing any iteration with Wouters et al., 2012
-            self.zslz0m = np.max((zsl/self.z0m,10.))
-            #self.Rib  = self.Rib / zsl*self.z0m *self.zslz0m
-            self.zeta = zeta_hs2(self.Rib, self.zslz0m, np.log(self.z0m/self.z0h))
-            #print(str(self.t)+'/'+str(self.tsteps)+' zeta: ',self.zeta,self.Rib, zsl,self.z0m,self.z0h)
-            self.L = zsl/self.zeta
-            funm,funh = funcsche(self.zeta,self.zslz0m, np.log(self.z0m/self.z0h))
-        
-            self.Cm = self.k**2.0/funm/funm
-            self.Cs = self.k**2.0/funm/funh
-            
-            self.ustar = np.sqrt(self.Cm) * ueff
-            self.uw    = - self.Cm * ueff * self.u
-            self.vw    = - self.Cm * ueff * self.v
-            
-            # extrapolation from mixed layer (instead of from surface) to 2meter
-            self.T2m    = self.theta - self.wtheta / self.ustar / self.k * funh
-            self.q2m    = self.q     - self.wq     / self.ustar / self.k * funh
-            self.u2m    =                - self.uw     / self.ustar / self.k * funm
-            self.v2m    =                - self.vw     / self.ustar / self.k * funm
-        
-        
-        self.esat2m = 0.611e3 * np.exp(17.2694 * (self.T2m - 273.16) / (self.T2m - 35.86))
-        self.e2m    = self.q2m * self.Ps / 0.622
-     
    def ribtol(self, Rib, zsl, z0m, z0h): 
        """Iteratively invert the bulk Richardson number for the Obukhov length.

        Solves Rib = zsl/L * (ln(zsl/z0h) - psih + psih0) /
        (ln(zsl/z0m) - psim + psim0)**2 for L with a Newton iteration whose
        derivative is approximated by a central finite difference (+/- 0.1% of L).

        Rib : bulk Richardson number [-]
        zsl : surface-layer height [m]
        z0m, z0h : roughness lengths for momentum and heat [m]
        Returns the Obukhov length L [m] (positive for stable Rib > 0,
        negative for unstable Rib < 0).
        """
        # start on the correct branch: L and Rib share the same sign
        if(Rib > 0.):
            L    = 1.
            L0   = 2.
        else:
            L  = -1.
            L0 = -2.
        #print(Rib,zsl,z0m,z0h)
        
        # Newton iteration until L changes by less than 0.001 m
        while (abs(L - L0) > 0.001):
            L0      = L
            # residual of the Rib(L) relation at the current L
            fx      = Rib - zsl / L * (np.log(zsl / z0h) - self.psih(zsl / L) + self.psih(z0h / L)) / (np.log(zsl / z0m) - self.psim(zsl / L) + self.psim(z0m / L))**2.
            # finite-difference derivative over [L - 0.1%, L + 0.1%]
            Lstart  = L - 0.001*L
            Lend    = L + 0.001*L
            fxdif   = ( (- zsl / Lstart * (np.log(zsl / z0h) - self.psih(zsl / Lstart) + self.psih(z0h / Lstart)) / \
                                          (np.log(zsl / z0m) - self.psim(zsl / Lstart) + self.psim(z0m / Lstart))**2.) \
                      - (-zsl /  Lend   * (np.log(zsl / z0h) - self.psih(zsl / Lend  ) + self.psih(z0h / Lend  )) / \
                                          (np.log(zsl / z0m) - self.psim(zsl / Lend  ) + self.psim(z0m / Lend  ))**2.) ) / (Lstart - Lend)
            L       = L - fx / fxdif
            #print(L)
            # guard against divergence towards the neutral limit |L| -> inf
            if(abs(L) > 1e12):
                break

        return L
-      
-    def psim(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psim  = 3.14159265 / 2. - 2. * np.arctan(x) + np.log((1. + x)**2. * (1. + x**2.) / 8.)
-            #x     = (1. + 3.6 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psim = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psim  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - zeta - (10./3.) / 0.35
-        return psim
-      
-    def psih(self, zeta):
-        if(zeta <= 0):
-            x     = (1. - 16. * zeta)**(0.25)
-            psih  = 2. * np.log( (1. + x*x) / 2.)
-            #x     = (1. + 7.9 * abs(zeta) ** (2./3.)) ** (-0.5)
-            #psih  = 3. * np.log( (1. + 1. / x) / 2.)
-        else:
-            psih  = -2./3. * (zeta - 5./0.35) * np.exp(-0.35 * zeta) - (1. + (2./3.) * zeta) ** (1.5) - (10./3.) / 0.35 + 1.
-        return psih
- 
-    def jarvis_stewart(self):
-        # calculate surface resistances using Jarvis-Stewart model
-        if(self.sw_rad):
-            f1 = 1. / min(1.,((0.004 * self.Swin + 0.05) / (0.81 * (0.004 * self.Swin + 1.))))
-        else:
-            f1 = 1.
-  
-        if(self.w2 > self.wwilt):# and self.w2 <= self.wfc):
-            f2 = (self.wfc - self.wwilt) / (self.w2 - self.wwilt)
-        else:
-            f2 = 1.e8
- 
-        # Limit f2 in case w2 > wfc, where f2 < 1
-        f2 = max(f2, 1.);
- 
-        f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
-        f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
-  
-        self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
-
-    def factorial(self,k):
-        factorial = 1
-        for n in range(2,k+1):
-            factorial = factorial * float(n)
-        return factorial;
-
-    def E1(self,x):
-        E1sum = 0
-        for k in range(1,100):
-            E1sum += pow((-1.),(k + 0.0)) * pow(x,(k + 0.0)) / ((k + 0.0) * self.factorial(k))
-        return -0.57721566490153286060 - np.log(x) - E1sum
- 
    def ags(self):
        """A-gs photosynthesis / stomatal conductance scheme.

        Computes the canopy surface resistance self.rs and the surface CO2
        fluxes self.wCO2A (assimilation), self.wCO2R (soil respiration) and
        self.wCO2 (net) from the mixed-layer state and per-plant-type
        (c3/c4) parameter tables indexed by c.
        """
        # Select index for plant type
        if(self.c3c4 == 'c3'):
            c = 0
        elif(self.c3c4 == 'c4'):
            c = 1
        else:
            sys.exit('option \"%s\" for \"c3c4\" invalid'%self.c3c4)

        # calculate CO2 compensation concentration (Q10 temperature response)
        CO2comp       = self.CO2comp298[c] * self.rho * pow(self.Q10CO2[c],(0.1 * (self.thetasurf - 298.)))  

        # calculate mesophyll conductance (Q10 response with high/low
        # temperature inhibition terms)
        gm            = self.gm298[c] *  pow(self.Q10gm[c],(0.1 * (self.thetasurf-298.))) \
                          / ( (1. + np.exp(0.3 * (self.T1gm[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2gm[c]))))
        gm            = gm / 1000. # conversion from mm s-1 to m s-1
  
        # calculate CO2 concentration inside the leaf (ci)
        fmin0         = self.gmin[c] / self.nuco2q - 1. / 9. * gm
        # NOTE(review): here the division by (2.*gm) applies only to the sqrt
        # term, not to (-fmin0 + sqrt(...)) as a whole — confirm against the
        # reference A-gs formulation.
        fmin          = -fmin0 + pow((pow(fmin0,2.) + 4 * self.gmin[c]/self.nuco2q * gm),0.5) / (2. * gm)
  
        Ds            = (esat(self.Ts) - self.e) / 1000. # kPa
        D0            = (self.f0[c] - fmin) / self.ad[c]
  
        cfrac         = self.f0[c] * (1. - (Ds / D0)) + fmin * (Ds / D0)
        co2abs        = self.CO2 * (self.mco2 / self.mair) * self.rho # conversion mumol mol-1 (ppm) to mgCO2 m3
        ci            = cfrac * (co2abs - CO2comp) + CO2comp
  
        # calculate maximal gross primary production in high light conditions (Ag)
        Ammax         = self.Ammax298[c] *  pow(self.Q10Am[c],(0.1 * (self.thetasurf - 298.))) / ( (1. + np.exp(0.3 * (self.T1Am[c] - self.thetasurf))) * (1. + np.exp(0.3 * (self.thetasurf - self.T2Am[c]))))
  
        # calculate effect of soil moisture stress on gross assimilation rate
        betaw         = max(1e-3, min(1.,(self.w2 - self.wwilt)/(self.wfc - self.wwilt)))
  
        # calculate stress function
        if (self.c_beta == 0):
            fstr = betaw;
        else:
            # Following Combe et al (2016)
            if (self.c_beta < 0.25):
                P = 6.4 * self.c_beta
            elif (self.c_beta < 0.50):
                P = 7.6 * self.c_beta - 0.3
            else:
                P = 2**(3.66 * self.c_beta + 0.34) - 1
            fstr = (1. - np.exp(-P * betaw)) / (1 - np.exp(-P))
  
        # calculate gross assimilation rate (Am)
        Am           = Ammax * (1. - np.exp(-(gm * (ci - CO2comp) / Ammax)))
        Rdark        = (1. / 9.) * Am
        # photosynthetically active radiation: half of vegetated shortwave,
        # with a small floor to avoid zero light
        PAR          = 0.5 * max(1e-1,self.Swin * self.cveg)
  
        # calculate  light use efficiency
        alphac       = self.alpha0[c] * (co2abs - CO2comp) / (co2abs + 2. * CO2comp)
  
        # calculate gross primary productivity
        Ag           = (Am + Rdark) * (1 - np.exp(alphac * PAR / (Am + Rdark)))
  
        # 1.- calculate upscaling from leaf to canopy: net flow CO2 into the plant (An)
        # uses the exponential integral self.E1 for the canopy light profile
        y            =  alphac * self.Kx[c] * PAR / (Am + Rdark)
        An           = (Am + Rdark) * (1. - 1. / (self.Kx[c] * self.LAI) * (self.E1(y * np.exp(-self.Kx[c] * self.LAI)) - self.E1(y)))
  
        # 2.- calculate upscaling from leaf to canopy: CO2 conductance at canopy level
        a1           = 1. / (1. - self.f0[c])
        Dstar        = D0 / (a1 * (self.f0[c] - fmin))
  
        gcco2        = self.LAI * (self.gmin[c] / self.nuco2q + a1 * fstr * An / ((co2abs - CO2comp) * (1. + Ds / Dstar)))
  
        # calculate surface resistance for moisture and carbon dioxide
        self.rs      = 1. / (1.6 * gcco2)
        rsCO2        = 1. / gcco2
  
        # calculate net flux of CO2 into the plant (An)
        An           = -(co2abs - ci) / (self.ra + rsCO2)
  
        # CO2 soil surface flux (respiration reduced by soil wetness fw)
        fw           = self.Cw * self.wmax / (self.wg + self.wmin)
        Resp         = self.R10 * (1. - fw) * np.exp(self.E0 / (283.15 * 8.314) * (1. - 283.15 / (self.Tsoil)))
  
        # CO2 flux: convert from mgCO2-based to mixing-ratio-based units
        self.wCO2A   = An   * (self.mair / (self.rho * self.mco2))
        self.wCO2R   = Resp * (self.mair / (self.rho * self.mco2))
        self.wCO2    = self.wCO2A + self.wCO2R
- 
    def run_land_surface(self):
        """Solve the land-surface energy and water balance for one step.

        Computes the aerodynamic resistance, surface resistances (via
        jarvis_stewart or ags depending on ls_type), the skin temperature
        self.Ts from a linearised surface energy balance, the turbulent
        fluxes (H, LE and its components, G) and the soil/vegetation-water
        tendencies (Tsoiltend, wgtend, Wltend), ending with the kinematic
        surface fluxes wtheta and wq used by the mixed-layer equations.
        """
        # compute ra
        ueff = np.sqrt(self.u ** 2. + self.v ** 2. + self.wstar**2.)
        #print('ueff',self.u,self.v,self.wstar)

        # aerodynamic resistance: from the drag coefficient when the
        # surface-layer scheme is on, otherwise from ustar
        if(self.sw_sl):
          self.ra = (self.Cs * ueff)**-1.
        else:
          self.ra = ueff / max(1.e-3, self.ustar)**2.

        #print('ra',self.ra,self.ustar,ueff)

        # first calculate essential thermodynamic variables
        self.esat    = esat(self.theta)
        self.qsat    = qsat(self.theta, self.Ps)
        # Clausius-Clapeyron slope d(esat)/dT and d(qsat)/dT at theta
        desatdT      = self.esat * (17.2694 / (self.theta - 35.86) - 17.2694 * (self.theta - 273.16) / (self.theta - 35.86)**2.)
        self.dqsatdT = 0.622 * desatdT / self.Ps
        self.e       = self.q * self.Ps / 0.622

        # surface resistance from the selected vegetation scheme
        if(self.ls_type == 'js'): 
            self.jarvis_stewart() 
        elif(self.ls_type == 'ags'):
            self.ags()
        else:
            sys.exit('option \"%s\" for \"ls_type\" invalid'%self.ls_type)

        # recompute f2 using wg instead of w2
        if(self.wg > self.wwilt):# and self.w2 <= self.wfc):
          f2          = (self.wfc - self.wwilt) / (self.wg - self.wwilt)
        else:
          f2        = 1.e8
        self.rssoil = self.rssoilmin * f2 
 
        # wet (liquid-water covered) vegetation fraction, capped at 1
        Wlmx = self.LAI * self.Wmax
        #print('Wlmx',Wlmx,self.LAI,self.Wmax,self.Wl)
        self.cliq = min(1., self.Wl / Wlmx) 
     
        # calculate skin temperature implicitly: the surface energy balance
        # is linearised around theta and solved for Ts in closed form
        self.Ts   = (self.Q  + self.rho * self.cp / self.ra * self.theta \
            + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs    ) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + (1. - self.cveg)             * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * self.theta - self.qsat + self.q) \
            + self.cveg * self.cliq        * self.rho * self.Lv /  self.ra                * (self.dqsatdT * self.theta - self.qsat + self.q) + self.Lambda * self.Tsoil) \
            / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
            + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)

        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
        #print('Ts',self.rs)

        # NOTE(review): esatsurf is computed but never used below
        esatsurf      = esat(self.Ts)
        self.qsatsurf = qsat(self.Ts, self.Ps)

        # evapotranspiration components: transpiration, interception
        # (liquid water) evaporation and bare-soil evaporation
        self.LEveg  = (1. - self.cliq) * self.cveg * self.rho * self.Lv / (self.ra + self.rs) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEliq  = self.cliq * self.cveg * self.rho * self.Lv / self.ra * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
        self.LEsoil = (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * (self.dqsatdT * (self.Ts - self.theta) + self.qsat - self.q)
  
        # depletion of the vegetation water reservoir by wet evaporation
        self.Wltend      = - self.LEliq / (self.rhow * self.Lv)
  
        self.LE     = self.LEsoil + self.LEveg + self.LEliq
        self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
        #print('H',self.ra,self.Ts,self.theta)
        self.G      = self.Lambda * (self.Ts - self.Tsoil)
        # potential and reference (rs = rsmin/LAI) evaporation
        self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
        self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
        
        # force-restore soil temperature tendency
        CG          = self.CGsat * (self.wsat / self.w2)**(self.b / (2. * np.log(10.)))
  
        self.Tsoiltend   = CG * self.G - 2. * np.pi / 86400. * (self.Tsoil - self.T2)
   
        # force-restore soil moisture tendency for the d1 = 0.1 m top layer
        d1          = 0.1
        C1          = self.C1sat * (self.wsat / self.wg) ** (self.b / 2. + 1.)
        C2          = self.C2ref * (self.w2 / (self.wsat - self.w2) )
        wgeq        = self.w2 - self.wsat * self.a * ( (self.w2 / self.wsat) ** self.p * (1. - (self.w2 / self.wsat) ** (8. * self.p)) )
        self.wgtend = - C1 / (self.rhow * d1) * self.LEsoil / self.Lv - C2 / 86400. * (self.wg - wgeq)
  
        # calculate kinematic heat fluxes
        self.wtheta   = self.H  / (self.rho * self.cp)
        #print('wtheta',self.wtheta,self.H,self.rho,self.cp)
        self.wq       = self.LE / (self.rho * self.Lv)
- 
-    def integrate_land_surface(self):
-        # integrate soil equations
-        Tsoil0        = self.Tsoil
-        wg0           = self.wg
-        Wl0           = self.Wl
-  
-        self.Tsoil    = Tsoil0  + self.dtcur * self.Tsoiltend
-        self.wg       = wg0     + self.dtcur * self.wgtend
-        self.Wl       = Wl0     + self.dtcur * self.Wltend
-  
-    # store model output
-    def store(self):
-        t                      = self.t
-        
-        self.out.time[t]          = t * self.dt / 3600. + self.tstart
-
-        # in case we are at the end of the simulation, we store the vertical
-        # profiles to the output
-        
-        # if t == (len(self.out.time) - 1):
-        #     self.out.air_ac = self.air_ac
-        #     self.out.air_ap = self.air_ap
-
-        
-        # this way, we only need to define the output variables in the output class, so we don't need to specify het again here.
-        #  for key in self.out.__dict__.keys():
-        #      if key in self.__dict__:
-        #          self.out.__dict__[key][t]  = self.__dict__[key]
-        
-        self.out.h[t]          = self.h
-        
-        # HW20171003 note: most of these updates could also be done with the self.out.__dict__ and self.__dict__ , namely with the key-loop above:
-        
-        self.out.gammatheta[t] = self.gammatheta
-        self.out.gammau[t]     = self.gammau
-        self.out.gammav[t]     = self.gammav
-        self.out.gammaq[t]     = self.gammaq
-        self.out.theta[t]      = self.theta
-        self.out.thetav[t]     = self.thetav
-        self.out.dtheta[t]     = self.dtheta
-        self.out.dthetav[t]    = self.dthetav
-        self.out.wtheta[t]     = self.wtheta
-        self.out.wthetav[t]    = self.wthetav
-        self.out.wthetae[t]    = self.wthetae
-        self.out.wthetave[t]   = self.wthetave
-        
-        self.out.q[t]          = self.q
-        self.out.dq[t]         = self.dq
-        self.out.wq[t]         = self.wq
-        self.out.wqe[t]        = self.wqe
-        self.out.wqM[t]        = self.wqM
-      
-        self.out.qsat[t]       = self.qsat
-        self.out.e[t]          = self.e
-        self.out.esat[t]       = self.esat
-      
-        fac = (self.rho*self.mco2)/self.mair
-        self.out.CO2[t]        = self.CO2
-        self.out.dCO2[t]       = self.dCO2
-        self.out.wCO2[t]       = self.wCO2  * fac
-        self.out.wCO2e[t]      = self.wCO2e * fac
-        self.out.wCO2R[t]      = self.wCO2R * fac
-        self.out.wCO2A[t]      = self.wCO2A * fac
-
-        self.out.u[t]          = self.u
-        self.out.du[t]         = self.du
-        self.out.uw[t]         = self.uw
-        
-        self.out.v[t]          = self.v
-        self.out.dv[t]         = self.dv
-        self.out.vw[t]         = self.vw
-        
-        self.out.T2m[t]        = self.T2m
-        self.out.q2m[t]        = self.q2m
-        self.out.u2m[t]        = self.u2m
-        self.out.v2m[t]        = self.v2m
-        self.out.e2m[t]        = self.e2m
-        self.out.esat2m[t]     = self.esat2m
-
-
-        self.out.Tsoil[t]      = self.Tsoil
-        self.out.T2[t]         = self.T2
-        self.out.Ts[t]         = self.Ts
-        self.out.wg[t]         = self.wg
-        
-        self.out.thetasurf[t]  = self.thetasurf
-        self.out.thetavsurf[t] = self.thetavsurf
-        self.out.qsurf[t]      = self.qsurf
-        self.out.ustar[t]      = self.ustar
-        self.out.Cm[t]         = self.Cm
-        self.out.Cs[t]         = self.Cs
-        self.out.L[t]          = self.L
-        self.out.Rib[t]        = self.Rib
-  
-        self.out.Swin[t]       = self.Swin
-        self.out.Swout[t]      = self.Swout
-        self.out.Lwin[t]       = self.Lwin
-        self.out.Lwout[t]      = self.Lwout
-        self.out.Q[t]          = self.Q
-  
-        self.out.ra[t]         = self.ra
-        self.out.rs[t]         = self.rs
-        self.out.H[t]          = self.H
-        self.out.LE[t]         = self.LE
-        self.out.LEliq[t]      = self.LEliq
-        self.out.LEveg[t]      = self.LEveg
-        self.out.LEsoil[t]     = self.LEsoil
-        self.out.LEpot[t]      = self.LEpot
-        self.out.LEref[t]      = self.LEref
-        self.out.G[t]          = self.G
-
-        self.out.zlcl[t]       = self.lcl
-        self.out.RH_h[t]       = self.RH_h
-
-        self.out.ac[t]         = self.ac
-        self.out.M[t]          = self.M
-        self.out.dz[t]         = self.dz_h
-        self.out.substeps[t]   = self.substeps
-  
-    # delete class variables to facilitate analysis in ipython
-    def exitmodel(self):
-        del(self.Lv)
-        del(self.cp)
-        del(self.rho)
-        del(self.k)
-        del(self.g)
-        del(self.Rd)
-        del(self.Rv)
-        del(self.bolz)
-        del(self.S0)
-        del(self.rhow)
-  
-        del(self.t)
-        del(self.dt)
-        del(self.tsteps)
-         
-        del(self.h)          
-        del(self.Ps)        
-        del(self.fc)        
-        del(self.ws)
-        del(self.we)
-        
-        del(self.theta)
-        del(self.dtheta)
-        del(self.gammatheta)
-        del(self.advtheta)
-        del(self.beta)
-        del(self.wtheta)
-    
-        del(self.T2m)
-        del(self.q2m)
-        del(self.e2m)
-        del(self.esat2m)
-        del(self.u2m)
-        del(self.v2m)
-        
-        del(self.thetasurf)
-        del(self.qsatsurf)
-        del(self.thetav)
-        del(self.dthetav)
-        del(self.thetavsurf)
-        del(self.qsurf)
-        del(self.wthetav)
-        
-        del(self.q)
-        del(self.qsat)
-        del(self.dqsatdT)
-        del(self.e)
-        del(self.esat)
-        del(self.dq)
-        del(self.gammaq)
-        del(self.advq)
-        del(self.wq)
-        
-        del(self.u)
-        del(self.du)
-        del(self.gammau)
-        del(self.advu)
-        
-        del(self.v)
-        del(self.dv)
-        del(self.gammav)
-        del(self.advv)
-  
-        del(self.htend)
-        del(self.thetatend)
-        del(self.dthetatend)
-        del(self.qtend)
-        del(self.dqtend)
-        del(self.utend)
-        del(self.dutend)
-        del(self.vtend)
-        del(self.dvtend)
-     
-        del(self.Tsoiltend) 
-        del(self.wgtend)  
-        del(self.Wltend) 
-  
-        del(self.ustar)
-        del(self.uw)
-        del(self.vw)
-        del(self.z0m)
-        del(self.z0h)        
-        del(self.Cm)         
-        del(self.Cs)
-        del(self.L)
-        del(self.Rib)
-        del(self.ra)
-  
-        del(self.lat)
-        del(self.lon)
-        del(self.doy)
-        del(self.tstart)
-   
-        del(self.Swin)
-        del(self.Swout)
-        del(self.Lwin)
-        del(self.Lwout)
-        del(self.cc)
-  
-        del(self.wg)
-        del(self.w2)
-        del(self.cveg)
-        del(self.cliq)
-        del(self.Tsoil)
-        del(self.T2)
-        del(self.a)
-        del(self.b)
-        del(self.p)
-        del(self.CGsat)
-  
-        del(self.wsat)
-        del(self.wfc)
-        del(self.wwilt)
-  
-        del(self.C1sat)
-        del(self.C2ref)
-  
-        del(self.LAI)
-        del(self.rs)
-        del(self.rssoil)
-        del(self.rsmin)
-        del(self.rssoilmin)
-        del(self.alpha)
-        del(self.gD)
-  
-        del(self.Ts)
-  
-        del(self.Wmax)
-        del(self.Wl)
-  
-        del(self.Lambda)
-        
-        del(self.Q)
-        del(self.H)
-        del(self.LE)
-        del(self.LEliq)
-        del(self.LEveg)
-        del(self.LEsoil)
-        del(self.LEpot)
-        del(self.LEref)
-        del(self.G)
-  
-        del(self.sw_ls)
-        del(self.sw_rad)
-        del(self.sw_sl)
-        del(self.sw_wind)
-        del(self.sw_shearwe)
-
-# class for storing mixed-layer model output data
class model_output:
    """Container for the mixed-layer model output time series.

    Every attribute is a float array of length ``tsteps`` (one value per
    model time step), zero-initialised and filled by the model's store().
    """
    def __init__(self, tsteps):
        # one entry per stored diagnostic, grouped by theme
        fields = (
            'time',                                   # time [h] (see store())
            # mixed-layer state
            'h',                                      # ABL height [m]
            'theta', 'gammatheta', 'gammaq', 'gammau', 'gammav',
            'thetav', 'dtheta', 'dthetav',
            'wtheta', 'wthetav', 'wthetae', 'wthetave',
            # moisture
            'q', 'dq', 'wq', 'wqe', 'wqM',
            'qsat', 'e', 'esat',
            # CO2 state and fluxes [mgC m-2 s-1 for the w* fields]
            'CO2', 'dCO2', 'wCO2', 'wCO2A', 'wCO2R', 'wCO2e', 'wCO2M',
            # wind
            'u', 'du', 'uw',
            'v', 'dv', 'vw',
            # 2 m diagnostics
            'T2m', 'q2m', 'u2m', 'v2m', 'e2m', 'esat2m',
            # ground
            'Tsoil', 'T2', 'Ts', 'wg',
            # surface layer
            'thetasurf', 'thetavsurf', 'qsurf', 'ustar',
            'z0m', 'z0h', 'Cm', 'Cs', 'L', 'Rib',
            # radiation [W m-2]
            'Swin', 'Swout', 'Lwin', 'Lwout', 'Q',
            # land surface [W m-2 for the flux fields]
            'ra', 'rs', 'H', 'LE', 'LEliq', 'LEveg', 'LEsoil',
            'LEpot', 'LEref', 'G',
            # mixed-layer top and cumulus
            'zlcl', 'RH_h',
            'ac', 'M', 'dz',
            'substeps',                               # extra substeps used [-]
        )
        for name in fields:
            setattr(self, name, np.zeros(tsteps))
-
-# class for storing mixed-layer model input data
-class model_input:
-    def __init__(self):
-
-        # # comment not valid
-        # we comment out the initialization, because there is a problem when
-        # inheriting values from one the another class4gl_iput. We also expect
-        # that the user specifies all the required parmameters (if not, an error
-        # is raised). 
-
-        # general model variables
-        self.runtime    = None  # duration of model run [s]
-        self.dt         = None  # time step [s]
-
-        # mixed-layer variables
-        self.sw_ml      = None  # mixed-layer model switch
-        self.sw_shearwe = None  # Shear growth ABL switch
-        self.sw_fixft   = None  # Fix the free-troposphere switch
-        self.h          = None  # initial ABL height [m]
-        self.Ps         = None  # surface pressure [Pa]
-        self.divU       = None  # horizontal large-scale divergence of wind [s-1]
-        self.fc         = None  # Coriolis parameter [s-1]
-        
-        self.theta      = None  # initial mixed-layer potential temperature [K]
-        #self.air_ap.THTA  = None  # optional/initial profile of potential temperature [K]
-
-        #self.z_pro      = None  # height coordinate of the optional input profiles [m]
-
-        self.dtheta     = None  # initial temperature jump at h [K]
-        self.gammatheta = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advtheta   = None  # advection of heat [K s-1]
-        self.beta       = None  # entrainment ratio for virtual heat [-]
-        self.wtheta     = None  # surface kinematic heat flux [K m s-1]
-        
-        self.q          = None  # initial mixed-layer specific humidity [kg kg-1]
-        #self.q_pro      = None  # optional/initial profile of specific humidity [kg kg-1]
-        #self.p_pro      = None  # optional/initial profile of pressure, just for diagnosis purposes [Pa]
-
-        self.dq         = None  # initial specific humidity jump at h [kg kg-1]
-        self.gammaq     = None  # free atmosphere specific humidity lapse rate [kg kg-1 m-1]
-        self.advq       = None  # advection of moisture [kg kg-1 s-1]
-        self.wq         = None  # surface kinematic moisture flux [kg kg-1 m s-1]
-
-        self.CO2        = None  # initial mixed-layer potential temperature [K]
-        self.dCO2       = None  # initial temperature jump at h [K]
-        self.gammaCO2   = None  # free atmosphere potential temperature lapse rate [K m-1]
-        self.advCO2     = None  # advection of heat [K s-1]
-        self.wCO2       = None  # surface kinematic heat flux [K m s-1]
-        
-        self.sw_wind    = None  # prognostic wind switch
-        self.u          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.du         = None  # initial u-wind jump at h [m s-1]
-        self.gammau     = None  # free atmosphere u-wind speed lapse rate [s-1]
-        self.advu       = None  # advection of u-wind [m s-2]
-
-        self.v          = None  # initial mixed-layer u-wind speed [m s-1]
-        self.dv         = None  # initial u-wind jump at h [m s-1]
-        self.gammav     = None  # free atmosphere v-wind speed lapse rate [s-1]
-        self.advv       = None  # advection of v-wind [m s-2]
-
-        # surface layer variables
-        self.sw_sl      = None  # surface layer switch
-        self.ustar      = None  # surface friction velocity [m s-1]
-        self.z0m        = None  # roughness length for momentum [m]
-        self.z0h        = None  # roughness length for scalars [m]
-        self.Cm         = None  # drag coefficient for momentum [-]
-        self.Cs         = None  # drag coefficient for scalars [-]
-        self.L          = None  # Obukhov length [-]
-        self.Rib        = None  # bulk Richardson number [-]
-
-        # radiation parameters
-        self.sw_rad     = None  # radiation switch
-        self.lat        = None  # latitude [deg]
-        self.lon        = None  # longitude [deg]
-        self.doy        = None  # day of the year [-]
-        self.tstart     = None  # time of the day [h UTC]
-        self.cc         = None  # cloud cover fraction [-]
-        self.Q          = None  # net radiation [W m-2] 
-        self.dFz        = None  # cloud top radiative divergence [W m-2] 
-
-        # land surface parameters
-        self.sw_ls      = None  # land surface switch
-        self.ls_type    = None  # land-surface parameterization ('js' for Jarvis-Stewart or 'ags' for A-Gs)
-        self.wg         = None  # volumetric water content top soil layer [m3 m-3]
-        self.w2         = None  # volumetric water content deeper soil layer [m3 m-3]
-        self.Tsoil      = None  # temperature top soil layer [K]
-        self.T2         = None  # temperature deeper soil layer [K]
-        
-        self.a          = None  # Clapp and Hornberger retention curve parameter a
-        self.b          = None  # Clapp and Hornberger retention curve parameter b
-        self.p          = None  # Clapp and Hornberger retention curve parameter p 
-        self.CGsat      = None  # saturated soil conductivity for heat
-        
-        self.wsat       = None  # saturated volumetric water content ECMWF config [-]
-        self.wfc        = None  # volumetric water content field capacity [-]
-        self.wwilt      = None  # volumetric water content wilting point [-]
-        
-        self.C1sat      = None 
-        self.C2ref      = None
-
-        self.c_beta     = None  # Curvatur plant water-stress factor (0..1) [-]
-        
-        self.LAI        = None  # leaf area index [-]
-        self.gD         = None  # correction factor transpiration for VPD [-]
-        self.rsmin      = None  # minimum resistance transpiration [s m-1]
-        self.rssoilmin  = None  # minimum resistance soil evaporation [s m-1]
-        self.alpha      = None  # surface albedo [-]
-        
-        self.Ts         = None  # initial surface temperature [K]
-        
-        self.cveg       = None  # vegetation fraction [-]
-        self.Wmax       = None  # thickness of water layer on wet vegetation [m]
-        self.Wl         = None  # equivalent water layer depth for wet vegetation [m]
-        
-        self.Lambda     = None  # thermal diffusivity skin layer [-]
-
-        # A-Gs parameters
-        self.c3c4       = None  # Plant type ('c3' or 'c4')
-
-        # Cumulus parameters
-        self.sw_cu      = None  # Cumulus parameterization switch
-        self.dz_h       = None  # Transition layer thickness [m]
-        
-# BEGIN -- HW 20171027
-        # self.cala       = None      # soil heat conductivity [W/(K*m)]
-        # self.crhoc      = None      # soil heat capacity  [J/K*m**3]
-# END -- HW 20171027
diff --git a/dist/class4gl-0.1dev/setup.py b/dist/class4gl-0.1dev/setup.py
deleted file mode 100644
index a806fa0..0000000
--- a/dist/class4gl-0.1dev/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from distutils.core import setup
-
-setup(
-        name='class4gl',
-        version='0.1dev',
-        packages=['lib','bin'],
-        license='GPLv3 licence',
-        long_description=open('README.md').read(),
-)

From 44874f7d5ef7616783fbc913920c002209271d3e Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 18:40:21 +0200
Subject: [PATCH 021/129] get rid of skilearn dependency

---
 class4gl/evaluation/evaluation.py | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 75b3171..4b8c232 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -2,13 +2,24 @@
 
 import pandas as pd
 import sys
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load-globaldata',default=False)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
 from interface_multi import c4gl_interface_soundings,get_record_yaml
 from class4gl import class4gl_input, data_global,class4gl,units
-from sklearn.metrics import mean_squared_error
+#from sklearn.metrics import mean_squared_error
 import matplotlib as mpl
 import matplotlib.pyplot as plt
-import seaborn.apionly as sns
+#import seaborn.apionly as sns
 import pylab as pl
 import numpy as np
 import matplotlib.pyplot as plt
@@ -19,16 +30,7 @@
 # import importlib
 # importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
 
-import argparse
 
-#if __name__ == '__main__':
-parser = argparse.ArgumentParser()
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--experiments')
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load-globaldata',default=False)
-args = parser.parse_args()
 
 
 def abline(slope, intercept,axis):
@@ -76,8 +78,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # which is the case for evaluating eg., mixed-layer estimates)
         y_predicted_temp = y_actual_temp*0. + y_predicted_temp
         
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
 
 
 

From 7e580428cad165edb8ce10ce136475614a5701f3 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 18:47:01 +0200
Subject: [PATCH 022/129] test

---
 class4gl/evaluation/evaluation.py | 31 +++++++++++++++++--------------
 class4gl/interface_multi.py       |  2 +-
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 75b3171..4b8c232 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -2,13 +2,24 @@
 
 import pandas as pd
 import sys
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load-globaldata',default=False)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
 from interface_multi import c4gl_interface_soundings,get_record_yaml
 from class4gl import class4gl_input, data_global,class4gl,units
-from sklearn.metrics import mean_squared_error
+#from sklearn.metrics import mean_squared_error
 import matplotlib as mpl
 import matplotlib.pyplot as plt
-import seaborn.apionly as sns
+#import seaborn.apionly as sns
 import pylab as pl
 import numpy as np
 import matplotlib.pyplot as plt
@@ -19,16 +30,7 @@
 # import importlib
 # importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
 
-import argparse
 
-#if __name__ == '__main__':
-parser = argparse.ArgumentParser()
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--experiments')
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load-globaldata',default=False)
-args = parser.parse_args()
 
 
 def abline(slope, intercept,axis):
@@ -76,8 +78,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # which is the case for evaluating eg., mixed-layer estimates)
         y_predicted_temp = y_actual_temp*0. + y_predicted_temp
         
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
 
 
 
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 6f70487..a9f6ce8 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -8,7 +8,7 @@
 from time import sleep
 
 
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+# sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl,units
 from interface_functions import *
 # from data_soundings import wyoming

From ed9bf770bf48fcb9a7759803bf082a862741d456 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 18:53:12 +0200
Subject: [PATCH 023/129] add eval report

---
 class4gl/evaluation/taylorDiagram.py | 246 +++++++++++++++++++++++++++
 1 file changed, 246 insertions(+)
 create mode 100644 class4gl/evaluation/taylorDiagram.py

diff --git a/class4gl/evaluation/taylorDiagram.py b/class4gl/evaluation/taylorDiagram.py
new file mode 100644
index 0000000..9c51b48
--- /dev/null
+++ b/class4gl/evaluation/taylorDiagram.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# Copyright: This document has been placed in the public domain.
+
+"""
+Taylor diagram (Taylor, 2001) implementation.
+"""
+
+__version__ = "Time-stamp: <2018-05-17 19:41:54 ycopin>"
+__author__ = "Yannick Copin "
+
+import numpy as NP
+import matplotlib.pyplot as PLT
+
+
+class TaylorDiagram(object):
+    """
+    Taylor diagram.
+
+    Plot model standard deviation and correlation to reference (data)
+    sample in a single-quadrant polar plot, with r=stddev and
+    theta=arccos(correlation).
+    """
+
+    def __init__(self, refstd,
+                 fig=None, rect=111, label='_', srange=(0, 1.5), extend=False):
+        """
+        Set up Taylor diagram axes, i.e. single quadrant polar
+        plot, using `mpl_toolkits.axisartist.floating_axes`.
+
+        Parameters:
+
+        * refstd: reference standard deviation to be compared to
+        * fig: input Figure or None
+        * rect: subplot definition
+        * label: reference label
+        * srange: stddev axis extension, in units of *refstd*
+        * extend: extend diagram to negative correlations
+        """
+
+        from matplotlib.projections import PolarAxes
+        import mpl_toolkits.axisartist.floating_axes as FA
+        import mpl_toolkits.axisartist.grid_finder as GF
+
+        self.refstd = refstd            # Reference standard deviation
+
+        tr = PolarAxes.PolarTransform()
+
+        # Correlation labels
+        rlocs = NP.array([0, 0.2, 0.4, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1])
+        if extend:
+            # Diagram extended to negative correlations
+            self.tmax = NP.pi
+            rlocs = NP.concatenate((-rlocs[:0:-1], rlocs))
+        else:
+            # Diagram limited to positive correlations
+            self.tmax = NP.pi/2
+        tlocs = NP.arccos(rlocs)        # Conversion to polar angles
+        gl1 = GF.FixedLocator(tlocs)    # Positions
+        tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))
+
+        # Standard deviation axis extent (in units of reference stddev)
+        self.smin = srange[0] * self.refstd
+        self.smax = srange[1] * self.refstd
+
+        ghelper = FA.GridHelperCurveLinear(
+            tr,
+            extremes=(0, self.tmax, self.smin, self.smax),
+            grid_locator1=gl1, tick_formatter1=tf1)
+
+        if fig is None:
+            fig = PLT.figure()
+
+        ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)
+        fig.add_subplot(ax)
+
+        # Adjust axes
+        ax.axis["top"].set_axis_direction("bottom")   # "Angle axis"
+        ax.axis["top"].toggle(ticklabels=True, label=True)
+        ax.axis["top"].major_ticklabels.set_axis_direction("top")
+        ax.axis["top"].label.set_axis_direction("top")
+        ax.axis["top"].label.set_text("Correlation")
+
+        ax.axis["left"].set_axis_direction("bottom")  # "X axis"
+        #ax.axis["left"].label.set_text("Standard deviation (model)/ Observed (observations)")
+
+        ax.axis["right"].set_axis_direction("top")    # "Y-axis"
+        ax.axis["right"].toggle(ticklabels=True)
+        ax.axis["right"].major_ticklabels.set_axis_direction(
+            "bottom" if extend else "left")
+
+        ax.axis["bottom"].set_visible(False)          # Unused
+
+        self._ax = ax                   # Graphical axes
+        self.ax = ax.get_aux_axes(tr)   # Polar coordinates
+        # # DOESNT WORK!!!!
+        # ax.axes.set_xticks([0.,0.2,0.4,1.])
+        # ax.axes.set_yticks([0.,0.2,0.4,1.])
+
+        # Add reference point and stddev contour
+        l, = self.ax.plot([0], self.refstd, 'k*',
+                          ls='', ms=10, label=label)
+        t = NP.linspace(0, self.tmax)
+        r = NP.zeros_like(t) + self.refstd
+        self.ax.plot(t, r, 'k--', label='_')
+
+        # Collect sample points for latter use (e.g. legend)
+        self.samplePoints = [l]
+
+    def add_sample(self, stddev, corrcoef, *args, **kwargs):
+        """
+        Add sample (*stddev*, *corrcoeff*) to the Taylor
+        diagram. *args* and *kwargs* are directly propagated to the
+        `Figure.plot` command.
+        """
+
+        l, = self.ax.plot(NP.arccos(corrcoef), stddev,
+                          *args, **kwargs)  # (theta, radius)
+        self.samplePoints.append(l)
+
+        return l
+
+    def add_grid(self, *args, **kwargs):
+        """Add a grid."""
+
+        self._ax.grid(*args, **kwargs)
+
+    def add_contours(self, levels=5, **kwargs):
+        """
+        Add constant centered RMS difference contours, defined by *levels*.
+        """
+
+        rs, ts = NP.meshgrid(NP.linspace(self.smin, self.smax),
+                             NP.linspace(0, self.tmax))
+        # Compute centered RMS difference
+        rms = NP.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*NP.cos(ts))
+
+        contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
+
+        return contours
+
+
+def test1():
+    """Display a Taylor diagram in a separate axis."""
+
+    # Reference dataset
+    x = NP.linspace(0, 4*NP.pi, 100)
+    data = NP.sin(x)
+    refstd = data.std(ddof=1)           # Reference standard deviation
+
+    # Generate models
+    m1 = data + 0.2*NP.random.randn(len(x))     # Model 1
+    m2 = 0.8*data + .1*NP.random.randn(len(x))  # Model 2
+    m3 = NP.sin(x-NP.pi/10)                     # Model 3
+
+    # Compute stddev and correlation coefficient of models
+    samples = NP.array([ [m.std(ddof=1), NP.corrcoef(data, m)[0, 1]]
+                         for m in (m1, m2, m3)])
+
+    fig = PLT.figure(figsize=(10, 4))
+
+    ax1 = fig.add_subplot(1, 2, 1, xlabel='X', ylabel='Y')
+    # Taylor diagram
+    dia = TaylorDiagram(refstd, fig=fig, rect=122, label="Reference")
+
+    colors = PLT.matplotlib.cm.jet(NP.linspace(0, 1, len(samples)))
+
+    ax1.plot(x, data, 'ko', label='Data')
+    for i, m in enumerate([m1, m2, m3]):
+        ax1.plot(x, m, c=colors[i], label='Model %d' % (i+1))
+    ax1.legend(numpoints=1, prop=dict(size='small'), loc='best')
+
+    # Add the models to Taylor diagram
+    for i, (stddev, corrcoef) in enumerate(samples):
+        dia.add_sample(stddev, corrcoef,
+                       marker='$%d$' % (i+1), ms=10, ls='',
+                       mfc=colors[i], mec=colors[i],
+                       label="Model %d" % (i+1))
+
+    # Add grid
+    dia.add_grid()
+
+    # Add RMS contours, and label them
+    contours = dia.add_contours(colors='0.5')
+    PLT.clabel(contours, inline=1, fontsize=10, fmt='%.2f')
+
+    # Add a figure legend
+    fig.legend(dia.samplePoints,
+               [ p.get_label() for p in dia.samplePoints ],
+               numpoints=1, prop=dict(size='small'), loc='upper right')
+
+    return dia
+
+
+def test2():
+    """
+    Climatology-oriented example (after iteration w/ Michael A. Rawlins).
+    """
+
+    # Reference std
+    stdref = 48.491
+
+    # Samples std,rho,name
+    samples = [[25.939, 0.385, "Model A"],
+               [29.593, 0.509, "Model B"],
+               [33.125, 0.585, "Model C"],
+               [29.593, 0.509, "Model D"],
+               [71.215, 0.473, "Model E"],
+               [27.062, 0.360, "Model F"],
+               [38.449, 0.342, "Model G"],
+               [35.807, 0.609, "Model H"],
+               [17.831, 0.360, "Model I"]]
+
+    fig = PLT.figure()
+
+    dia = TaylorDiagram(stdref, fig=fig, label='Reference', extend=True)
+    dia.samplePoints[0].set_color('r')  # Mark reference point as a red star
+
+    # Add models to Taylor diagram
+    for i, (stddev, corrcoef, name) in enumerate(samples):
+        dia.add_sample(stddev, corrcoef,
+                       marker='$%d$' % (i+1), ms=10, ls='',
+                       mfc='k', mec='k',
+                       label=name)
+
+    # Add RMS contours, and label them
+    contours = dia.add_contours(levels=5, colors='0.5')  # 5 levels in grey
+    PLT.clabel(contours, inline=1, fontsize=10, fmt='%.0f')
+
+    dia.add_grid()                                  # Add grid
+    dia._ax.axis[:].major_ticks.set_tick_out(True)  # Put ticks outward
+
+    # Add a figure legend and title
+    fig.legend(dia.samplePoints,
+               [ p.get_label() for p in dia.samplePoints ],
+               numpoints=1, prop=dict(size='small'), loc='upper right')
+    fig.suptitle("Taylor diagram", size='x-large')  # Figure title
+
+    return dia
+
+
+if __name__ == '__main__':
+
+    dia = test1()
+    dia = test2()
+
+    PLT.show()

From 2f7e875822cfa4e859247b6578f44c0091dcabd4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:03:27 +0200
Subject: [PATCH 024/129] evaluation

---
 class4gl/evaluation/evaluation.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 4b8c232..8cbc2cb 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -150,7 +150,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     #dia.ax.plot(x99,y99,color='k')
 
     
-    for ikey,key in enumerate(EXPS.keys()):
+    for ikey,key in enumerate(args.experiments.split(';')):
         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
         # clearsky = (cc < 0.05)
         # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
@@ -194,8 +194,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
 i = 0
 for varkey in ['h','theta','q']:                                                    
-    ikey = 2
-    key = list(EXPS.keys())[ikey]
+    ikey = 0
+    key = list(args.experiments.split(';'))[ikey]
     cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
     clearsky = (cc < 0.05)
 
@@ -260,12 +260,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 # legend for different forcing simulations (colors)
 ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
 leg = []
-for ikey,key in enumerate(EXPS.keys()):
+for ikey,key in enumerate(args.experiments.split(';')):
     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
     leg.append(leg1)
 ax.axis('off')
 #leg1 =
-ax.legend(leg,list(EXPS.keys()),loc=2,fontsize=10)
+ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
 
 
 # # legend for different stations (symbols)

From d9594ed6b29937198426e544cddf96828907ff0f Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:11:39 +0200
Subject: [PATCH 025/129] tkagg

---
 class4gl/evaluation/evaluation.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 8cbc2cb..902882e 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -29,6 +29,8 @@
 from matplotlib import ticker
 # import importlib
 # importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+import matplotlib
+matplotlib.use('TkAgg')
 
 
 

From 568a3ab2c25b61b1396afb0cd9e9e056ce1c4797 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:14:27 +0200
Subject: [PATCH 026/129] tkagg

---
 class4gl/evaluation/evaluation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 902882e..12c9e48 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -3,6 +3,8 @@
 import pandas as pd
 import sys
 
+import matplotlib
+matplotlib.use('TkAgg')
 import argparse
 parser = argparse.ArgumentParser()
 parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
@@ -29,8 +31,6 @@
 from matplotlib import ticker
 # import importlib
 # importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
-import matplotlib
-matplotlib.use('TkAgg')
 
 
 

From d6f72e617a9b31c90c5fc977ec9cb76c2b0573be Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:16:28 +0200
Subject: [PATCH 027/129] tkagg

---
 class4gl/evaluation/evaluation.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 12c9e48..742bc9b 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -5,6 +5,7 @@
 
 import matplotlib
 matplotlib.use('TkAgg')
+
 import argparse
 parser = argparse.ArgumentParser()
 parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')

From 25f852a201a00e03e3d989d069ed3c000e3e54d2 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:22:49 +0200
Subject: [PATCH 028/129] evaluation

---
 class4gl/evaluation/evaluation.py | 350 +++++++++++++++---------------
 1 file changed, 175 insertions(+), 175 deletions(-)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/evaluation/evaluation.py
index 742bc9b..7becde2 100644
--- a/class4gl/evaluation/evaluation.py
+++ b/class4gl/evaluation/evaluation.py
@@ -118,184 +118,184 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-fig = plt.figure(figsize=(10,7))   #width,height
-i = 1                                                                           
-axes = {}         
-axes_taylor = {}         
-
-colors = ['r','g','b','m']
-symbols = ['*','x','+']
-dias = {}
-
-for varkey in ['h','theta','q']:                                                    
-    axes[varkey] = fig.add_subplot(2,3,i)                                       
-    #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
-
-    #print(obs.std())
-    dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-    if i == 0:
-        print("hello")
-        dias[varkey]._ax.axis["left"].label.set_text(\
-            "Standard deviation (model) / Standard deviation (observations)")
-        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
-    #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-    # Q95 = obs.quantile(0.95)
-    # Q95 = obs.quantile(0.90)
-    # Add RMS contours, and label them
-    contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
-    dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
-    #dia._ax.set_title(season.capitalize())
-
-    dias[varkey].add_grid()
-
-
-    #dia.ax.plot(x99,y99,color='k')
-
-    
-    for ikey,key in enumerate(args.experiments.split(';')):
-        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-        # clearsky = (cc < 0.05)
-        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-        x, y = obs.values,mod.values
-        print(key,len(obs.values))
-
-        STD_OBS = obs.std()
-        #scores
-        PR = pearsonr(mod,obs)[0]
-        RMSE = rmse(obs,mod)                                               
-        BIAS = np.mean(mod) - np.mean(obs)
-        STD = mod.std()
-        
-        # fit = np.polyfit(x,y,deg=1)
-        # axes[varkey].plot(x, fit[0] * x + fit[1],\
-        #                   color=colors[ikey],alpha=0.8,lw=2,\
-        #                   label=key+", "+\
-        #                               'R = '+str(round(PR,3))+', '+\
-        #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-        #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-        # axes[varkey].legend(fontsize=5)
-        
-        # print(STD)
-        # print(PR)
-        dias[varkey].add_sample(STD/STD_OBS, PR,
-                       marker='o', ms=5, ls='',
-                       #mfc='k', mec='k', # B&W
-                       mfc=colors[ikey], mec=colors[ikey], # Colors
-                       label=key)
-
-    # put ticker position, see
-    # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
-    # dia.ax.axis['bottom'].
-    # dia.ax.axis['left'].
-    # dia.ax.axis['left'].
-
-    i += 1
-
-i = 0
-for varkey in ['h','theta','q']:                                                    
-    ikey = 0
-    key = list(args.experiments.split(';'))[ikey]
-    cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-    clearsky = (cc < 0.05)
-
-    mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-    obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-
-
-    nbins=40       
-    x, y = obs.values,mod.values
-    
-    xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
-    zi = np.zeros_like(xi)*np.nan       
-    for ibin in range(nbins):
-        xmin = x.min() + ibin * (x.max() - x.min())/nbins
-        xmax = xmin + (x.max() - x.min())/nbins
-        in_bin = ((x >= xmin) & (x < xmax))
-        ybin = y[in_bin]
-        xbin = x[in_bin]
-        if len(ybin) > 20:
-            k = kde.gaussian_kde((ybin))
-            zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
-    zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
-    zi_int = zi.cumsum(axis=1) 
-                 #  label=key+", "+\
-                 #                    'R = '+str(round(PR[0],3))+', '+\
-                 #                    'RMSE = '+str(round(RMSE,5))+', '+\
-                 #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-    axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
-            colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
-    axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
-            colors=['darkred'],alpha=0.5,)
-
-
-    latex = {}
-    latex['dthetadt'] =  r'$d \theta / dt $'
-    latex['dqdt'] =      r'$d q / dt $'
-    latex['dhdt'] =      r'$d h / dt $'
-
-    axes[varkey].set_xlabel('observations')     
-    axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-
-    PR = pearsonr(mod,obs)[0]
-    RMSE = rmse(obs,mod)                                               
-    BIAS = np.mean(mod) - np.mean(obs)
-    STD = mod.std()
-
-    axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
-                                  'R = '+str(round(PR,3))+', '+\
-                                  'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-                                  'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
-                         s=0.1,alpha=0.14,color='k')
-    axes[varkey].legend(fontsize=5)
-                   
-    axes[varkey].set_xlabel('observations')     
-    if i==0:                                    
-        axes[varkey].set_ylabel('model')                                            
-    abline(1,0,axis=axes[varkey])
-    i +=1
-
-
-
-# legend for different forcing simulations (colors)
-ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-leg = []
-for ikey,key in enumerate(args.experiments.split(';')):
-    leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
-    leg.append(leg1)
-ax.axis('off')
-#leg1 =
-ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
-
-
-# # legend for different stations (symbols)
-# ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# # the lines below activate TaylorPlots but it is disabled for now
+# fig = plt.figure(figsize=(10,7))   #width,height
+# i = 1                                                                           
+# axes = {}         
+# axes_taylor = {}         
+# 
+# colors = ['r','g','b','m']
+# symbols = ['*','x','+']
+# dias = {}
+# 
+# for varkey in ['h','theta','q']:                                                    
+#     axes[varkey] = fig.add_subplot(2,3,i)                                       
+#     #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+# 
+#     #print(obs.std())
+#     dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+#     if i == 0:
+#         dias[varkey]._ax.axis["left"].label.set_text(\
+#             "Standard deviation (model) / Standard deviation (observations)")
+#         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+#     #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#     # Q95 = obs.quantile(0.95)
+#     # Q95 = obs.quantile(0.90)
+#     # Add RMS contours, and label them
+#     contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+#     dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+#     #dia._ax.set_title(season.capitalize())
+# 
+#     dias[varkey].add_grid()
+# 
+# 
+#     #dia.ax.plot(x99,y99,color='k')
+# 
+#     
+#     for ikey,key in enumerate(args.experiments.split(';')):
+#         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+#         # clearsky = (cc < 0.05)
+#         # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+#         # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+#         mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+#         obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+#         x, y = obs.values,mod.values
+#         print(key,len(obs.values))
+# 
+#         STD_OBS = obs.std()
+#         #scores
+#         PR = pearsonr(mod,obs)[0]
+#         RMSE = rmse(obs,mod)                                               
+#         BIAS = np.mean(mod) - np.mean(obs)
+#         STD = mod.std()
+#         
+#         # fit = np.polyfit(x,y,deg=1)
+#         # axes[varkey].plot(x, fit[0] * x + fit[1],\
+#         #                   color=colors[ikey],alpha=0.8,lw=2,\
+#         #                   label=key+", "+\
+#         #                               'R = '+str(round(PR,3))+', '+\
+#         #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+#         #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+#         # axes[varkey].legend(fontsize=5)
+#         
+#         # print(STD)
+#         # print(PR)
+#         dias[varkey].add_sample(STD/STD_OBS, PR,
+#                        marker='o', ms=5, ls='',
+#                        #mfc='k', mec='k', # B&W
+#                        mfc=colors[ikey], mec=colors[ikey], # Colors
+#                        label=key)
+# 
+#     # put ticker position, see
+#     # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+#     # dia.ax.axis['bottom'].
+#     # dia.ax.axis['left'].
+#     # dia.ax.axis['left'].
+# 
+#     i += 1
+# 
+# i = 0
+# for varkey in ['h','theta','q']:                                                    
+#     ikey = 0
+#     key = list(args.experiments.split(';'))[ikey]
+#     cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+#     clearsky = (cc < 0.05)
+# 
+#     mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+#     obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+# 
+# 
+#     nbins=40       
+#     x, y = obs.values,mod.values
+#     
+#     xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+#     zi = np.zeros_like(xi)*np.nan       
+#     for ibin in range(nbins):
+#         xmin = x.min() + ibin * (x.max() - x.min())/nbins
+#         xmax = xmin + (x.max() - x.min())/nbins
+#         in_bin = ((x >= xmin) & (x < xmax))
+#         ybin = y[in_bin]
+#         xbin = x[in_bin]
+#         if len(ybin) > 20:
+#             k = kde.gaussian_kde((ybin))
+#             zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+#     zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+#     zi_int = zi.cumsum(axis=1) 
+#                  #  label=key+", "+\
+#                  #                    'R = '+str(round(PR[0],3))+', '+\
+#                  #                    'RMSE = '+str(round(RMSE,5))+', '+\
+#                  #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+#     axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+#             colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+#     axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+#             colors=['darkred'],alpha=0.5,)
+# 
+# 
+#     latex = {}
+#     latex['dthetadt'] =  r'$d \theta / dt $'
+#     latex['dqdt'] =      r'$d q / dt $'
+#     latex['dhdt'] =      r'$d h / dt $'
+# 
+#     axes[varkey].set_xlabel('observations')     
+#     axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+# 
+#     PR = pearsonr(mod,obs)[0]
+#     RMSE = rmse(obs,mod)                                               
+#     BIAS = np.mean(mod) - np.mean(obs)
+#     STD = mod.std()
+# 
+#     axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+#                                   'R = '+str(round(PR,3))+', '+\
+#                                   'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+#                                   'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+#                          s=0.1,alpha=0.14,color='k')
+#     axes[varkey].legend(fontsize=5)
+#                    
+#     axes[varkey].set_xlabel('observations')     
+#     if i==0:                                    
+#         axes[varkey].set_ylabel('model')                                            
+#     abline(1,0,axis=axes[varkey])
+#     i +=1
+# 
+# 
+# 
+# # legend for different forcing simulations (colors)
+# ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
 # leg = []
-# isymbol = 0
-# for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-#     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+# for ikey,key in enumerate(args.experiments.split(';')):
+#     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
 #     leg.append(leg1)
-#     isymbol += 1
-# 
-# # symbol for all stations
-# leg1, = ax.plot([],'ko',markersize=10)
-# leg.append(leg1)
-
-
 # ax.axis('off')
-# ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
-
-
-fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
-
-
-#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
-figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
-fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
-fig.show()  
+# #leg1 =
+# ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
+# 
+# 
+# # # legend for different stations (symbols)
+# # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# # leg = []
+# # isymbol = 0
+# # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+# #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+# #     leg.append(leg1)
+# #     isymbol += 1
+# # 
+# # # symbol for all stations
+# # leg1, = ax.plot([],'ko',markersize=10)
+# # leg.append(leg1)
+# 
+# 
+# # ax.axis('off')
+# # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+# 
+# 
+# fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+# 
+# 
+# #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+# # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+# # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+# fig.show()  
 
 
 

From 9ab98caaafba1e5a0c3990445c1f8efa79d57b4a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 19:24:27 +0200
Subject: [PATCH 029/129] evaluation

---
 class4gl/{evaluation/evaluation.py => interface/interface.py} | 0
 class4gl/{evaluation => interface}/taylorDiagram.py           | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename class4gl/{evaluation/evaluation.py => interface/interface.py} (100%)
 rename class4gl/{evaluation => interface}/taylorDiagram.py (100%)

diff --git a/class4gl/evaluation/evaluation.py b/class4gl/interface/interface.py
similarity index 100%
rename from class4gl/evaluation/evaluation.py
rename to class4gl/interface/interface.py
diff --git a/class4gl/evaluation/taylorDiagram.py b/class4gl/interface/taylorDiagram.py
similarity index 100%
rename from class4gl/evaluation/taylorDiagram.py
rename to class4gl/interface/taylorDiagram.py

From 86a6dc9bfd988f029e146ab54fa11b4c6cd7c149 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 22 Aug 2018 20:52:40 +0200
Subject: [PATCH 030/129] wilting point experiment

---
 class4gl/simulations/simulations_wilt.py | 273 +++++++++++++++++++++++
 1 file changed, 273 insertions(+)
 create mode 100644 class4gl/simulations/simulations_wilt.py

diff --git a/class4gl/simulations/simulations_wilt.py b/class4gl/simulations/simulations_wilt.py
new file mode 100644
index 0000000..9e9a8f7
--- /dev/null
+++ b/class4gl/simulations/simulations_wilt.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--first-station-row')
+parser.add_argument('--last-station-row')
+parser.add_argument('--station-id') # run a specific station id
+parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--error-handling',default='dump_on_success')
+parser.add_argument('--experiments')
+parser.add_argument('--split-by',default=-1)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'WILT':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+}
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+# path_soundingsSET = args.path_soundings+'/'+SET+'/'
+
+print("getting stations")
+all_stations = stations(args.path_soundings,suffix='morning',refetch_stations=False)
+
+if args.global_chunk is not None:
+    
+    all_records_morning = get_records(all_stations.table,\
+                                  args.path_soundings,\
+                                  subset='morning',
+                                  refetch_records=False,
+                                  )
+    totalchunks = 0
+    stations_iter = all_stations.table.iterrows()
+    in_current_chunk = False
+    while not in_current_chunk:
+        istation,current_station = stations_iter.__next__()
+        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
+        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
+        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+
+        if in_current_chunk:
+            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+            run_station_chunk = int(args.global_chunk) - totalchunks 
+
+        totalchunks +=chunks_current_station
+
+else:
+    if args.station_id is not None:
+        print("Selecting station by ID")
+        print(all_stations.table)
+        stations_iter = stations_iterator(all_stations)
+        STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+        run_stations = pd.DataFrame([run_station])
+    else:
+        print("Selecting stations from a row range in the table")
+        run_stations = pd.DataFrame(all_stations.table)
+        if args.last_station_row is not None:
+            run_stations = run_stations.iloc[:(int(args.last_station)+1)]
+        if args.first_station_row is not None:
+            run_stations = run_stations.iloc[int(args.first_station):]
+    run_station_chunk = args.station_chunk
+
+#print(all_stations)
+records_morning = get_records(run_stations,\
+                              args.path_soundings,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+records_afternoon = get_records(run_stations,\
+                                args.path_soundings,\
+                                subset='afternoon',
+                                refetch_records=False,
+                                )
+
+# print(records_morning.index)
+# print(records_afternoon.index)
+# align afternoon records with the noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+
+experiments = args.experiments.split(';')
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+            isim = 0
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                    print('starting '+str(isim)+' out of '+\
+                      str(len(records_morning_station_chunk) )+\
+                      ' (station total: ',str(len(records_morning_station)),')')  
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gli_morning.update(source=expname, \
+                                         pars={'wg':c4gli_morning.pars.wwilt,\
+                                               'w2':c4gli_morning.pars.wwilt},
+                                        )
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                        except:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_success':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except:
+                            print('run not succesfull')
+                    isim += 1
+
+
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+

From eb0f601bad4678958cc0d352b9a5e8b2de6e9aaf Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 23 Aug 2018 11:41:30 +0200
Subject: [PATCH 031/129] fix loading class4gl libraries

---
 class4gl/interface_multi.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 6f70487..a9f6ce8 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -8,7 +8,7 @@
 from time import sleep
 
 
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+# sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl,units
 from interface_functions import *
 # from data_soundings import wyoming

From fd3c2ecc77c2efab6f7d048d72787c43d042b704 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 23 Aug 2018 11:43:24 +0200
Subject: [PATCH 032/129] fix loading class4gl libraries

---
 class4gl/interface_multi.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index a9f6ce8..25bde26 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -9,6 +9,7 @@
 
 
 # sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
 from class4gl import class4gl_input, data_global,class4gl,units
 from interface_functions import *
 # from data_soundings import wyoming

From aa571f80fad2757211767add96af5f4bede1e112 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:02:24 +0200
Subject: [PATCH 033/129] batch qsub array jobs + clean ups + revision of
 arguments

---
 class4gl/interface/interface.py               |   8 +-
 class4gl/simulations/batch_simulations.pbs    |  30 +++
 class4gl/simulations/batch_simulations.py     | 144 ++++++++----
 class4gl/simulations/batch_simulations_old.py |  77 +++++++
 class4gl/simulations/simulations.py           | 206 ++++++++++++------
 5 files changed, 347 insertions(+), 118 deletions(-)
 create mode 100644 class4gl/simulations/batch_simulations.pbs
 create mode 100644 class4gl/simulations/batch_simulations_old.py

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 7becde2..f5891f2 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -8,11 +8,11 @@
 
 import argparse
 parser = argparse.ArgumentParser()
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--experiments')
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load-globaldata',default=False)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
new file mode 100644
index 0000000..6a7c2e5
--- /dev/null
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -0,0 +1,30 @@
+#!/bin/bash 
+#
+#PBS -j oe
+#PBS -M hendrik.wouters@ugent.be
+#PBS -m b
+#PBS -m e
+#PBS -m a
+#PBS -N batch_simulation
+
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python
+
+EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
+
+for var in $(compgen -v | grep C4GLJOB_ ); do
+    echo $var
+    if [ "$var" != "C4GLJOB_exec" ]
+    then
+    EXEC_ALL=$EXEC_ALL" --"`echo $var | cut -c9-`"="${!var}
+    fi
+done
+
+
+# EXEC_ALL="python $exec --global-chunk-number $PBS_ARRAYID \
+#                        --split-by $split_by \
+#                        --dataset $dataset \
+#                        --experiments $experiments"
+#                  #      --path-soundings $path_soundings \
+echo Executing: $EXEC_ALL
+$EXEC_ALL
+
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index b5d4cc3..efacd60 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -1,70 +1,126 @@
-
-import argparse
+# -*- coding: utf-8 -*-
 
 import pandas as pd
+import io
 import os
-import math
 import numpy as np
+import datetime as dt
 import sys
+import pytz
 import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
+import argparse
+
+parser = argparse.ArgumentParser()
+#if __name__ == '__main__':
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling')
+parser.add_argument('--subset_forcing',default='morning') 
+                                        # this tells which yaml subset
+                                        # to initialize with.
+                                        # Most common options are
+                                        # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=50)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--path_forcing') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
 from class4gl import class4gl_input, data_global,class4gl
 from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
 
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-df_stations = pd.read_csv(fn_stations)
 
-# if 'path-soundings' in args.__dict__.keys():
-#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-# else:
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
 
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
 
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
-    parser.add_argument('--exec')
-    parser.add_argument('--experiments')#should be ';'-seperated list
-    parser.add_argument('--split-by',default=-1)
-    args = parser.parse_args()
 
-experiments = args.experiments.split(';')
-#SET = 'GLOBAL'
-SET = args.dataset
-print(args.experiments)
+# #SET = 'GLOBAL'
+# SET = args.dataset
 
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+# path_forcingSET = args.path_forcing+'/'+SET+'/'
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
 
-for expname in experiments:
-    #exp = EXP_DEFS[expname]
-    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-    os.system('rm -R '+path_exp)
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,\
+                                         refetch_records=False,\
+                                        )
 
+print('splitting batch in --split_by='+str(args.split_by)+' jobs.')
 totalchunks = 0
-for istation,current_station in all_stations.iterrows():
-    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
-    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
+for istation,current_station in all_stations_select.iterrows():
+    records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+    chunks_current_station = math.ceil(float(len(records_morning_station_select))/float(args.split_by))
     totalchunks +=chunks_current_station
 
+print('total chunks (= size of array-job) per experiment: ' + str(totalchunks))
+
+
 #if sys.argv[1] == 'qsub':
 # with qsub
-os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
-                                       ',split_by='+str(args.split_by)+\
-                                       ',exec='+str(args.exec)+\
-                                       ',experiments='+str(args.experiments))
+for EXP in args.experiments.strip().split(" "):
+
+    command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+                str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
+    # propagate arguments towards the job script
+    for argkey in args.__dict__.keys():
+        if ((argkey not in ['experiments','pbs_string']) and \
+            # default values are specified in the simulation script, so
+            # excluded here
+            (args.__dict__[argkey] is not None)
+           ):
+                command +=',C4GLJOB_'+argkey+'='+str(args.__dict__[argkey])
+
+    print('Submitting array job for experiment '+EXP+': '+command)
+    os.system(command)
+
+
+    #os.system(command)
 # elif sys.argv[1] == 'wsub':
 #     
 #     # with wsub
diff --git a/class4gl/simulations/batch_simulations_old.py b/class4gl/simulations/batch_simulations_old.py
new file mode 100644
index 0000000..b5d4cc3
--- /dev/null
+++ b/class4gl/simulations/batch_simulations_old.py
@@ -0,0 +1,77 @@
+
+import argparse
+
+import pandas as pd
+import os
+import math
+import numpy as np
+import sys
+import math
+sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+fn_stations = odir+'/igra-stations_sel.txt'
+df_stations = pd.read_csv(fn_stations)
+
+# if 'path-soundings' in args.__dict__.keys():
+#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+# else:
+
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dataset')
+    parser.add_argument('--path-soundings')
+    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
+    parser.add_argument('--exec')
+parser.add_argument('--experiments')#should be ';'-separated list
+    parser.add_argument('--split-by',default=-1)
+    args = parser.parse_args()
+
+experiments = args.experiments.split(';')
+#SET = 'GLOBAL'
+SET = args.dataset
+print(args.experiments)
+
+if 'path-soundings' in args.__dict__.keys():
+    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
+else:
+    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+
+all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+records_morning = get_records(all_stations,\
+                              path_soundingsSET,\
+                              subset='morning',
+                              refetch_records=False,
+                              )
+
+for expname in experiments:
+    #exp = EXP_DEFS[expname]
+    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+    os.system('rm -R '+path_exp)
+
+totalchunks = 0
+for istation,current_station in all_stations.iterrows():
+    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
+    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
+    totalchunks +=chunks_current_station
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
+                                       ',split_by='+str(args.split_by)+\
+                                       ',exec='+str(args.exec)+\
+                                       ',experiments='+str(args.experiments))
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index dd3d799..2191c3b 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -13,19 +13,29 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
-parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--first-station-row')
-parser.add_argument('--last-station-row')
-parser.add_argument('--station-id') # run a specific station id
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--error-handling',default='dump_on_success')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
 parser.add_argument('--experiments')
-parser.add_argument('--split-by',default=-1)# station soundings are split
+parser.add_argument('--split_by',default=-1)# station soundings are split
                                             # up in chunks
 
-parser.add_argument('--station-chunk',default=0)
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
 args = parser.parse_args()
 
 sys.path.insert(0, args.c4gl_path_lib)
@@ -57,69 +67,120 @@
 # #SET = 'GLOBAL'
 # SET = args.dataset
 
-# path_soundingsSET = args.path_soundings+'/'+SET+'/'
+# path_forcingSET = args.path_forcing+'/'+SET+'/'
 
 print("getting stations")
-all_stations = stations(args.path_soundings,suffix='morning',refetch_stations=False)
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
-if args.global_chunk is not None:
-    
-    all_records_morning = get_records(all_stations.table,\
-                                  args.path_soundings,\
-                                  subset='morning',
-                                  refetch_records=False,
-                                  )
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global_chunk_number or station_chunk_number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
     totalchunks = 0
-    stations_iter = all_stations.table.iterrows()
+    stations_iter = all_stations_select.iterrows()
     in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iter.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
 
-        totalchunks +=chunks_current_station
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
 
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
 else:
-    if args.station_id is not None:
-        print("Selecting station by ID")
-        print(all_stations.table)
-        stations_iter = stations_iterator(all_stations)
-        STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
-        run_stations = pd.DataFrame([run_station])
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
     else:
-        print("Selecting stations from a row range in the table")
-        run_stations = pd.DataFrame(all_stations.table)
-        if args.last_station_row is not None:
-            run_stations = run_stations.iloc[:(int(args.last_station)+1)]
-        if args.first_station_row is not None:
-            run_stations = run_stations.iloc[int(args.first_station):]
-    run_station_chunk = args.station_chunk
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
 
 #print(all_stations)
+print('Fetching initial/forcing records')
 records_morning = get_records(run_stations,\
-                              args.path_soundings,\
-                              subset='morning',
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
                               refetch_records=False,
                               )
-records_afternoon = get_records(run_stations,\
-                                args.path_soundings,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# print(records_morning.index)
-# print(records_afternoon.index)
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
+
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning.ldatetime.dt.date
+    records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
+
+experiments = args.experiments.strip(' ').split(' ')
 for expname in experiments:
     exp = EXP_DEFS[expname]
     path_exp = args.path_experiments+'/'+expname+'/'
@@ -131,8 +192,8 @@
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -149,7 +210,7 @@
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                    print('starting '+str(isim)+' out of '+\
+                    print('starting '+str(isim+1)+' out of '+\
                       str(len(records_morning_station_chunk) )+\
                       ' (station total: ',str(len(records_morning_station)),')')  
                 
@@ -162,15 +223,20 @@
                     #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
                     
                     
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
+                    if args.runtime == 'from_afternoon_profile':
+                        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                        c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                          record_afternoon.index_start, 
+                                                          record_afternoon.index_end,
+                                                        mode='ini')
+                        runtime = int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())
+                    else:
+                        runtime = int(args.runtime)
+
             
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                                        runtime})
                     c4gli_morning.update(source=expname, pars=exp)
 
                     c4gl = class4gl(c4gli_morning)
@@ -247,7 +313,7 @@
     #     with \
     #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
     #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
     #         for (STNID,index),record_ini in records_iterator(records_ini):
     #             c4gli_ini = get_record_yaml(file_station_ini, 
     #                                         record_ini.index_start, 

From 190a5721c079762b2807d54339f938b36eaa99b4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:03:34 +0200
Subject: [PATCH 034/129] remove old file

---
 class4gl/class4gl.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index c87e0be..51d58e1 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -94,18 +94,26 @@ def __init__(self):
 }
 
 class class4gl_input(object):
-# this was the way it was defined previously.
-#class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+    """
+    this is the class4gl_input. It extends the model_input, which is now
+    assigned to self.pars. It now also includes initial profiles as pandas
+    Dataframes:
+        self.air_balloon: raw profile input for profile of u,v,theta,q (not used)
+        self.air_ap : the same as self.air_balloonm, but for which a mixed
+                      layer is fitted. Thi profile is used as input.
+        self.air_ac : atmospheric circulation profiles for advection and
+                      subsidence
+
+    # FYI this was the way it was defined in an early version:
+    #    class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
+    """
 
     def __init__(self,set_pars_defaults=True,debug_level=None):
 
-
         """ set up logger (see: https://docs.python.org/2/howto/logging.html)
         """
 
-        print('hello')
         self.logger = logging.getLogger('class4gl_input')
-        print(self.logger)
         if debug_level is not None:
             self.logger.setLevel(debug_level)
 

From 804edc18aa8bd4eb2513aa7ef30ca74424837fa4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:12:02 +0200
Subject: [PATCH 035/129] align simulations_wilt.py with simulations.py

---
 class4gl/simulations/simulations_wilt.py | 203 +++++++++++++++--------
 1 file changed, 135 insertions(+), 68 deletions(-)

diff --git a/class4gl/simulations/simulations_wilt.py b/class4gl/simulations/simulations_wilt.py
index 9e9a8f7..bbfac45 100644
--- a/class4gl/simulations/simulations_wilt.py
+++ b/class4gl/simulations/simulations_wilt.py
@@ -13,19 +13,29 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
-parser.add_argument('--global-chunk') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--first-station-row')
-parser.add_argument('--last-station-row')
-parser.add_argument('--station-id') # run a specific station id
-parser.add_argument('--path-experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path-soundings')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--error-handling',default='dump_on_success')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
 parser.add_argument('--experiments')
-parser.add_argument('--split-by',default=-1)# station soundings are split
+parser.add_argument('--split_by',default=-1)# station soundings are split
                                             # up in chunks
 
-parser.add_argument('--station-chunk',default=0)
-parser.add_argument('--c4gl-path-lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
 args = parser.parse_args()
 
 sys.path.insert(0, args.c4gl_path_lib)
@@ -60,67 +70,120 @@
 
 # path_soundingsSET = args.path_soundings+'/'+SET+'/'
 
+
 print("getting stations")
-all_stations = stations(args.path_soundings,suffix='morning',refetch_stations=False)
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
-if args.global_chunk is not None:
-    
-    all_records_morning = get_records(all_stations.table,\
-                                  args.path_soundings,\
-                                  subset='morning',
-                                  refetch_records=False,
-                                  )
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_station_select.iloc[int(args.first_station):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
     totalchunks = 0
-    stations_iter = all_stations.table.iterrows()
+    stations_iter = all_stations_select.iterrows()
     in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iter.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
-
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
 
-        totalchunks +=chunks_current_station
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
 
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
 else:
-    if args.station_id is not None:
-        print("Selecting station by ID")
-        print(all_stations.table)
-        stations_iter = stations_iterator(all_stations)
-        STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
-        run_stations = pd.DataFrame([run_station])
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
     else:
-        print("Selecting stations from a row range in the table")
-        run_stations = pd.DataFrame(all_stations.table)
-        if args.last_station_row is not None:
-            run_stations = run_stations.iloc[:(int(args.last_station)+1)]
-        if args.first_station_row is not None:
-            run_stations = run_stations.iloc[int(args.first_station):]
-    run_station_chunk = args.station_chunk
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
 
 #print(all_stations)
+print('Fetching initial/forcing records')
 records_morning = get_records(run_stations,\
-                              args.path_soundings,\
-                              subset='morning',
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
                               refetch_records=False,
                               )
-records_afternoon = get_records(run_stations,\
-                                args.path_soundings,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# print(records_morning.index)
-# print(records_afternoon.index)
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
+
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning.ldatetime.dt.date
+    records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
+
+experiments = args.experiments.strip(' ').split(' ')
+
 for expname in experiments:
     exp = EXP_DEFS[expname]
     path_exp = args.path_experiments+'/'+expname+'/'
@@ -132,8 +195,8 @@
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(args.path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -150,7 +213,7 @@
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                    print('starting '+str(isim)+' out of '+\
+                    print('starting '+str(isim+1)+' out of '+\
                       str(len(records_morning_station_chunk) )+\
                       ' (station total: ',str(len(records_morning_station)),')')  
                 
@@ -163,15 +226,19 @@
                     #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
                     
                     
-                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                    c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                      record_afternoon.index_start, 
-                                                      record_afternoon.index_end,
-                                                    mode='ini')
+                    if args.runtime == 'from_afternoon_profile':
+                        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                        c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                          record_afternoon.index_start, 
+                                                          record_afternoon.index_end,
+                                                        mode='ini')
+                        runtime = int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())
+                    else:
+                        runtime = int(args.runtime)
             
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                                        runtime})
                     c4gli_morning.update(source=expname, pars=exp)
 
                     c4gli_morning.update(source=expname, \

From faf3b4d053438396b5c2610d2cfbcf3eeb361699 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:24:36 +0200
Subject: [PATCH 036/129] small updates

---
 class4gl/simulations/simulations.py      | 1 -
 class4gl/simulations/simulations_wilt.py | 1 -
 2 files changed, 2 deletions(-)

diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 2191c3b..49eb282 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -67,7 +67,6 @@
 # #SET = 'GLOBAL'
 # SET = args.dataset
 
-# path_forcingSET = args.path_forcing+'/'+SET+'/'
 
 print("getting stations")
 # these are all the stations that are found in the input dataset
diff --git a/class4gl/simulations/simulations_wilt.py b/class4gl/simulations/simulations_wilt.py
index bbfac45..414d35f 100644
--- a/class4gl/simulations/simulations_wilt.py
+++ b/class4gl/simulations/simulations_wilt.py
@@ -68,7 +68,6 @@
 # #SET = 'GLOBAL'
 # SET = args.dataset
 
-# path_soundingsSET = args.path_soundings+'/'+SET+'/'
 
 
 print("getting stations")

From 124d46eccc9c73f8401a8499fdcd883a424b697a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:25:04 +0200
Subject: [PATCH 037/129] small updates

---
 class4gl/simulations/batch_simulations_old.py | 77 -------------------
 1 file changed, 77 deletions(-)
 delete mode 100644 class4gl/simulations/batch_simulations_old.py

diff --git a/class4gl/simulations/batch_simulations_old.py b/class4gl/simulations/batch_simulations_old.py
deleted file mode 100644
index b5d4cc3..0000000
--- a/class4gl/simulations/batch_simulations_old.py
+++ /dev/null
@@ -1,77 +0,0 @@
-
-import argparse
-
-import pandas as pd
-import os
-import math
-import numpy as np
-import sys
-import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
-df_stations = pd.read_csv(fn_stations)
-
-# if 'path-soundings' in args.__dict__.keys():
-#     path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-# else:
-
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    #parser.add_argument('--exec',default='/user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.py')
-    parser.add_argument('--exec')
-    parser.add_argument('--experiments')#should be ';'-seperated list
-    parser.add_argument('--split-by',default=-1)
-    args = parser.parse_args()
-
-experiments = args.experiments.split(';')
-#SET = 'GLOBAL'
-SET = args.dataset
-print(args.experiments)
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
-
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
-records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
-
-for expname in experiments:
-    #exp = EXP_DEFS[expname]
-    path_exp = '/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
-    os.system('rm -R '+path_exp)
-
-totalchunks = 0
-for istation,current_station in all_stations.iterrows():
-    records_morning_query = records_morning.query('STNID == '+str(current_station.name))
-    chunks_current_station = math.ceil(float(len(records_morning_query))/float(args.split_by))
-    totalchunks +=chunks_current_station
-
-#if sys.argv[1] == 'qsub':
-# with qsub
-os.system('qsub /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(totalchunks-1)+" -v dataset="+args.dataset+\
-                                       ',split_by='+str(args.split_by)+\
-                                       ',exec='+str(args.exec)+\
-                                       ',experiments='+str(args.experiments))
-# elif sys.argv[1] == 'wsub':
-#     
-#     # with wsub
-#     STNlist = list(df_stations.iterrows())
-#     NUMSTNS = len(STNlist)
-#     PROCS = NUMSTNS 
-#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-# 
-#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
-

From c9aaa5063195ce23e91d569a4551574886f8cae9 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:30:46 +0200
Subject: [PATCH 038/129] small updates

---
 class4gl/interface_functions.py           | 4 ++--
 class4gl/simulations/batch_simulations.py | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 42dba38..b005ca4 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -213,8 +213,8 @@ def set_row(self,row):
         return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
     def set_STNID(self,STNID):
         self.ix = np.where((self.stations.table.index == STNID))[0][0]
-        print(self.ix)
-        print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
+        #print(self.ix)
+        #print( self.stations.table.index[self.ix], self.stations.table.iloc[self.ix])
         return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix]
 
     def __prev__(self):
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index efacd60..58b12ce 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -62,7 +62,7 @@
 
 # path_forcingSET = args.path_forcing+'/'+SET+'/'
 
-print("getting stations")
+print("getting all stations from --path_forcing")
 # these are all the stations that are found in the input dataset
 all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
@@ -70,12 +70,12 @@
 # these are all the stations that are supposed to run by the whole batch (all
 # chunks). We narrow it down according to the station(s) specified.
 if args.station_id is not None:
-    print("Selecting station by ID")
+    print("Selecting stations by --station_id")
     stations_iter = stations_iterator(all_stations)
     STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
     all_stations_select = pd.DataFrame([run_station])
 else:
-    print("Selecting stations from a row range in the table")
+    print("Selecting stations from a row range in the table [--first_station_row,--last_station_row]")
     all_stations_select = pd.DataFrame(all_stations.table)
     if args.last_station_row is not None:
         all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]

From de6c9cb219a2c4e2c9657ce8f0a7e912be700c5f Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:43:34 +0200
Subject: [PATCH 039/129] small updates

---
 class4gl/simulations/batch_simulations.pbs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index 6a7c2e5..d7bb06c 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -5,7 +5,7 @@
 #PBS -m b
 #PBS -m e
 #PBS -m a
-#PBS -N batch_simulation
+#PBS -N c4gl_sim
 
 module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python
 

From 032acc1f0939525ad6823c2d2bfd2b5909ee0317 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 17:50:47 +0200
Subject: [PATCH 040/129] make it also work for the standard simulation (for
 the tutorial)

---
 class4gl/simulations/simulations_wilt.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/class4gl/simulations/simulations_wilt.py b/class4gl/simulations/simulations_wilt.py
index 414d35f..8da29e4 100644
--- a/class4gl/simulations/simulations_wilt.py
+++ b/class4gl/simulations/simulations_wilt.py
@@ -239,11 +239,12 @@
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
-
-                    c4gli_morning.update(source=expname, \
-                                         pars={'wg':c4gli_morning.pars.wwilt,\
-                                               'w2':c4gli_morning.pars.wwilt},
-                                        )
+                    
+                    if expname == 'WILT':
+                       c4gli_morning.update(source=expname, \
+                                            pars={'wg':c4gli_morning.pars.wwilt,\
+                                                  'w2':c4gli_morning.pars.wwilt},
+                                           )
                     c4gl = class4gl(c4gli_morning)
 
                     if args.error_handling == 'dump_always':

From 40c1982ad4961963580a5703cc719210ec3cc582 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 18:17:21 +0200
Subject: [PATCH 041/129] make tempdir

---
 class4gl/interface_functions.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index b005ca4..5b529d2 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -58,6 +58,7 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
 
     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
 
+    os.system('mkdir -p '+gettempdir())
     filebuffer = open(gettempdir()+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w')
     filebuffer.write(buf)
     filebuffer.close()
@@ -419,6 +420,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             current_tell = next_tell
                             next_record_found = False
                             yaml_file.seek(current_tell)
+                            os.system('mkdir -p '+gettempdir())
                             filebuffer = open(gettempdir()+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                             linebuffer = ''
                             while ( (not next_record_found) and (not end_of_file)):

From f3b282622a0d0e55a8ed42166070fe6506da012b Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 18:40:41 +0200
Subject: [PATCH 042/129]  temp dir interface_functions.py wrong

---
 class4gl/interface_functions.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 5b529d2..e21ed69 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -10,9 +10,6 @@
 from tempfile import gettempdir
 
 
-
-
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl,units
 from interface_functions import *
 #from data_soundings import wyoming

From effb426f444657a2f628a4b7313d2fefd4cade47 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 18:57:21 +0200
Subject: [PATCH 043/129]  temp dir interface_functions.py wrong

---
 class4gl/interface_functions.py | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index e21ed69..3b9fff1 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -24,6 +24,8 @@
 
 from matplotlib.colors import LinearSegmentedColormap
 
+TEMPDIR = gettempdir().replace('[',"").replace(']',"")
+
 class records_iterator(object):
     def __init__(self,records):
             
@@ -55,27 +57,27 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
 
     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
 
-    os.system('mkdir -p '+gettempdir())
-    filebuffer = open(gettempdir()+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w')
+    os.system('mkdir -p '+TEMPDIR
+    filebuffer = open(TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w')
     filebuffer.write(buf)
     filebuffer.close()
     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
     
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start)+' '
+    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)+' '
 
     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
     print(command)
     os.system(command)
-    jsonstream = open(gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
+    jsonstream = open(TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start))
     record_dict = json.load(jsonstream)
     jsonstream.close()
-    os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.yaml.'+str(index_start))
+    os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start))
 
 
     if mode =='mod':
         modelout = class4gl()
         modelout.load_yaml_dict(record_dict)
-        os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
+        os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start))
 
         return modelout
     elif mode == 'ini':
@@ -104,7 +106,7 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
         c4gli = class4gl_input()
         #print(c4gli.logger,'hello')
         c4gli.load_yaml_dict(record_dict)
-        os.system('rm '+gettempdir()+'/'+shortfn+'.buffer.json.'+str(index_start))
+        os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start))
         return c4gli
 
 
@@ -417,8 +419,8 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             current_tell = next_tell
                             next_record_found = False
                             yaml_file.seek(current_tell)
-                            os.system('mkdir -p '+gettempdir())
-                            filebuffer = open(gettempdir()+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
+                            os.system('mkdir -p '+TEMPDIR)
+                            filebuffer = open(TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
                             linebuffer = ''
                             while ( (not next_record_found) and (not end_of_file)):
                                 filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
@@ -433,14 +435,14 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
 
                             
                             #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+gettempdir()+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
+                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                             print(command)
                             
                             os.system(command)
                             #jsonoutput = subprocess.check_output(command,shell=True) 
                             #print(jsonoutput)
                             #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
+                            jsonstream = open(TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
                             record = json.load(jsonstream)
                             dictouttemp = {}
                             for key,value in record['pars'].items():
@@ -458,14 +460,14 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             dictouttemp['chunk'] = chunk
                             dictouttemp['index_start'] = index_start
                             dictouttemp['index_end'] = index_end
-                            os.system('rm '+gettempdir()+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
+                            os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
                             for key,value in dictouttemp.items():
                                 if key not in dictout.keys():
                                     dictout[key] = {}
                                 dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
                             print(' obs record registered')
                             jsonstream.close()
-                            os.system('rm '+gettempdir()+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell))
+                            os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell))
                     records_station = pd.DataFrame.from_dict(dictout)
                     records_station.index.set_names(('STNID','chunk','index'),inplace=True)
                     print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\

From d947a258b8c782248d9e96d315ae357b1c57963a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 18:58:54 +0200
Subject: [PATCH 044/129]  temp dir interface_functions.py wrong

---
 class4gl/interface_functions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 3b9fff1..2615846 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -57,7 +57,7 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
 
     buf =  yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','')
 
-    os.system('mkdir -p '+TEMPDIR
+    os.system('mkdir -p '+TEMPDIR)
     filebuffer = open(TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w')
     filebuffer.write(buf)
     filebuffer.close()

From 286eb0e7d73984d4b89436ac284e06d7ae84dcde Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 19:23:19 +0200
Subject: [PATCH 045/129]  temp dir interface_functions.py wrong

---
 class4gl/interface/interface.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index f5891f2..fa4ba4e 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -109,11 +109,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     globaldata = None
 
 c4gldata = {}
-for key in args.experiments.split(';'):
+for key in args.experiments.strip(' ').split(' '):
     
     c4gldata[key] = c4gl_interface_soundings( \
                       args.path_experiments+'/'+key+'/',\
-                      args.path_soundings+'/',\
+                      args.path_forcing+'/',\
                       globaldata,\
                       refetch_records=False
                     )

From 121462c01c34c98fd845d89eb313652c2b2843d8 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 21:07:02 +0200
Subject: [PATCH 046/129]  temp dir interface_functions.py wrong

---
 class4gl/interface_functions.py            | 3 ++-
 class4gl/simulations/batch_simulations.pbs | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 2615846..26429a2 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -24,7 +24,8 @@
 
 from matplotlib.colors import LinearSegmentedColormap
 
-TEMPDIR = gettempdir().replace('[',"").replace(']',"")
+#TEMPDIR = gettempdir().replace('[',"").replace(']',"")
+TEMPDIR = '/tmp/'
 
 class records_iterator(object):
     def __init__(self,records):
diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index d7bb06c..54e3168 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -7,7 +7,7 @@
 #PBS -m a
 #PBS -N c4gl_sim
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python Ruby
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
 

From d42486fe770f3ddde14f03662a6cc6e2c12065c8 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 21:07:46 +0200
Subject: [PATCH 047/129] fix wrong temp dir handling in interface_functions.py

---
 class4gl/interface_functions.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 26429a2..3791a79 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -24,8 +24,8 @@
 
 from matplotlib.colors import LinearSegmentedColormap
 
-#TEMPDIR = gettempdir().replace('[',"").replace(']',"")
-TEMPDIR = '/tmp/'
+TEMPDIR = gettempdir() #.replace('[',"").replace(']',"")
+#TEMPDIR = '/tmp/'
 
 class records_iterator(object):
     def __init__(self,records):

From e7c973aa453c0d7e01c572fefb8c79b24a0ef8f3 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 21:39:51 +0200
Subject: [PATCH 048/129] fixing ruby

---
 class4gl/interface_functions.py | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 3791a79..43f0be6 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -24,6 +24,25 @@
 
 from matplotlib.colors import LinearSegmentedColormap
 
+def which(program):
+    import os
+    def is_exe(fpath):
+        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+    fpath, fname = os.path.split(program)
+    if fpath:
+        if is_exe(program):
+            return program
+    else:
+        for path in os.environ["PATH"].split(os.pathsep):
+            exe_file = os.path.join(path, program)
+            if is_exe(exe_file):
+                return exe_file
+
+    return None
+
+
+
 TEMPDIR = gettempdir() #.replace('[',"").replace(']',"")
 #TEMPDIR = '/tmp/'
 
@@ -64,7 +83,9 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
     filebuffer.close()
     # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start))
     
-    command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)+' '
+    if which('ruby') is None:
+        raise RuntimeError ('ruby is not found. Aborting...')
+    command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)+' '
 
     #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"'
     print(command)
@@ -435,6 +456,8 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                             index_end = next_tell
 
                             
+                            if which('ruby') is None:
+                                raise RuntimeError ('ruby is not found. Aborting...')
                             #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
                             command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
                             print(command)

From 0e6bae74ae15aaf3346cae994512d60847fd5bba Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 28 Aug 2018 21:48:18 +0200
Subject: [PATCH 049/129] add cleanup_experiments feature

---
 class4gl/simulations/batch_simulations.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index 58b12ce..ce6ae69 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -28,7 +28,8 @@
 # Tuntime is usually specified from the afternoon profile. You can also just
 # specify the simulation length in seconds
 parser.add_argument('--runtime')
-
+# delete folders of experiments before running them
+parser.add_argument('--cleanup_experiments',default=False)
 parser.add_argument('--experiments')
 parser.add_argument('--split_by',default=50)# station soundings are split
                                             # up in chunks
@@ -104,6 +105,8 @@
 #if sys.argv[1] == 'qsub':
 # with qsub
 for EXP in args.experiments.strip().split(" "):
+    if args.cleanup_experiments:
+        os.system("rm -R "+args.path_experiments+'/'+EXP)
 
     command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
                 str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)

From b1edc0d9f4a0348b92fe37a35b956d9e79749b68 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 29 Aug 2018 10:08:41 +0200
Subject: [PATCH 050/129] fix to avoid boolean cleanup_experiments passed
 through pbs job

---
 class4gl/simulations/batch_simulations.py     |   6 +-
 class4gl/simulations/c4gl_sim.o643256-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643256-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643257-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643258-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643268-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643269-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643271-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643272-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643274-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643275-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643276-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643277-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643278-7       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-0       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-1       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-2       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-3       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-4       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-5       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-6       |  28 ++
 class4gl/simulations/c4gl_sim.o643290-7       |  28 ++
 class4gl/simulations/simulations_smchange2.py | 357 ++++++++++++++++++
 105 files changed, 3245 insertions(+), 2 deletions(-)
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643256-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643257-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643258-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643268-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643269-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643271-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643272-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643274-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643275-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643276-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643277-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643278-7
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-0
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-1
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-2
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-3
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-4
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-5
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-6
 create mode 100644 class4gl/simulations/c4gl_sim.o643290-7
 create mode 100644 class4gl/simulations/simulations_smchange2.py

diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index ce6ae69..c5bb7c9 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -101,9 +101,11 @@
 
 print('total chunks (= size of array-job) per experiment: ' + str(totalchunks))
 
-
 #if sys.argv[1] == 'qsub':
 # with qsub
+
+print(args.experiments.strip().split(" "))
+
 for EXP in args.experiments.strip().split(" "):
     if args.cleanup_experiments:
         os.system("rm -R "+args.path_experiments+'/'+EXP)
@@ -112,7 +114,7 @@
                 str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
     # propagate arguments towards the job script
     for argkey in args.__dict__.keys():
-        if ((argkey not in ['experiments','pbs_string']) and \
+        if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
             # default values are specified in the simulation script, so
             # excluded here
             (args.__dict__[argkey] is not None)
diff --git a/class4gl/simulations/c4gl_sim.o643256-0 b/class4gl/simulations/c4gl_sim.o643256-0
new file mode 100644
index 0000000..8cc36ec
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-1 b/class4gl/simulations/c4gl_sim.o643256-1
new file mode 100644
index 0000000..5f3860e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-2 b/class4gl/simulations/c4gl_sim.o643256-2
new file mode 100644
index 0000000..961e883
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-3 b/class4gl/simulations/c4gl_sim.o643256-3
new file mode 100644
index 0000000..c473bda
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-4 b/class4gl/simulations/c4gl_sim.o643256-4
new file mode 100644
index 0000000..3011ce2
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-5 b/class4gl/simulations/c4gl_sim.o643256-5
new file mode 100644
index 0000000..ceb34e5
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-6 b/class4gl/simulations/c4gl_sim.o643256-6
new file mode 100644
index 0000000..4bb288e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-7 b/class4gl/simulations/c4gl_sim.o643256-7
new file mode 100644
index 0000000..a3d1eeb
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643256-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643257-0 b/class4gl/simulations/c4gl_sim.o643257-0
new file mode 100644
index 0000000..23fcd02
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-1 b/class4gl/simulations/c4gl_sim.o643257-1
new file mode 100644
index 0000000..461c192
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-2 b/class4gl/simulations/c4gl_sim.o643257-2
new file mode 100644
index 0000000..7d9eb94
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-3 b/class4gl/simulations/c4gl_sim.o643257-3
new file mode 100644
index 0000000..3dabc1a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-4 b/class4gl/simulations/c4gl_sim.o643257-4
new file mode 100644
index 0000000..ea5fb1d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-5 b/class4gl/simulations/c4gl_sim.o643257-5
new file mode 100644
index 0000000..681b989
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-6 b/class4gl/simulations/c4gl_sim.o643257-6
new file mode 100644
index 0000000..3029ba0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-7 b/class4gl/simulations/c4gl_sim.o643257-7
new file mode 100644
index 0000000..ce30f0a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643257-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643258-0 b/class4gl/simulations/c4gl_sim.o643258-0
new file mode 100644
index 0000000..217999c
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-1 b/class4gl/simulations/c4gl_sim.o643258-1
new file mode 100644
index 0000000..9795172
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-2 b/class4gl/simulations/c4gl_sim.o643258-2
new file mode 100644
index 0000000..2666ea7
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-3 b/class4gl/simulations/c4gl_sim.o643258-3
new file mode 100644
index 0000000..4cb628d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-4 b/class4gl/simulations/c4gl_sim.o643258-4
new file mode 100644
index 0000000..0ad9bd0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-5 b/class4gl/simulations/c4gl_sim.o643258-5
new file mode 100644
index 0000000..ba1d645
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-6 b/class4gl/simulations/c4gl_sim.o643258-6
new file mode 100644
index 0000000..3e2e3da
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-7 b/class4gl/simulations/c4gl_sim.o643258-7
new file mode 100644
index 0000000..a2ad87b
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643258-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643268-0 b/class4gl/simulations/c4gl_sim.o643268-0
new file mode 100644
index 0000000..8cc36ec
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-1 b/class4gl/simulations/c4gl_sim.o643268-1
new file mode 100644
index 0000000..5f3860e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-2 b/class4gl/simulations/c4gl_sim.o643268-2
new file mode 100644
index 0000000..961e883
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-3 b/class4gl/simulations/c4gl_sim.o643268-3
new file mode 100644
index 0000000..c473bda
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-4 b/class4gl/simulations/c4gl_sim.o643268-4
new file mode 100644
index 0000000..3011ce2
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-5 b/class4gl/simulations/c4gl_sim.o643268-5
new file mode 100644
index 0000000..ceb34e5
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-6 b/class4gl/simulations/c4gl_sim.o643268-6
new file mode 100644
index 0000000..4bb288e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-7 b/class4gl/simulations/c4gl_sim.o643268-7
new file mode 100644
index 0000000..a3d1eeb
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643268-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643269-0 b/class4gl/simulations/c4gl_sim.o643269-0
new file mode 100644
index 0000000..23fcd02
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-1 b/class4gl/simulations/c4gl_sim.o643269-1
new file mode 100644
index 0000000..461c192
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-2 b/class4gl/simulations/c4gl_sim.o643269-2
new file mode 100644
index 0000000..7d9eb94
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-3 b/class4gl/simulations/c4gl_sim.o643269-3
new file mode 100644
index 0000000..3dabc1a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-4 b/class4gl/simulations/c4gl_sim.o643269-4
new file mode 100644
index 0000000..ea5fb1d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-5 b/class4gl/simulations/c4gl_sim.o643269-5
new file mode 100644
index 0000000..681b989
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-6 b/class4gl/simulations/c4gl_sim.o643269-6
new file mode 100644
index 0000000..3029ba0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-7 b/class4gl/simulations/c4gl_sim.o643269-7
new file mode 100644
index 0000000..ce30f0a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643269-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643271-0 b/class4gl/simulations/c4gl_sim.o643271-0
new file mode 100644
index 0000000..217999c
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-1 b/class4gl/simulations/c4gl_sim.o643271-1
new file mode 100644
index 0000000..9795172
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-2 b/class4gl/simulations/c4gl_sim.o643271-2
new file mode 100644
index 0000000..2666ea7
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-3 b/class4gl/simulations/c4gl_sim.o643271-3
new file mode 100644
index 0000000..4cb628d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-4 b/class4gl/simulations/c4gl_sim.o643271-4
new file mode 100644
index 0000000..0ad9bd0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-5 b/class4gl/simulations/c4gl_sim.o643271-5
new file mode 100644
index 0000000..ba1d645
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-6 b/class4gl/simulations/c4gl_sim.o643271-6
new file mode 100644
index 0000000..3e2e3da
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-7 b/class4gl/simulations/c4gl_sim.o643271-7
new file mode 100644
index 0000000..a2ad87b
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643271-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643272-0 b/class4gl/simulations/c4gl_sim.o643272-0
new file mode 100644
index 0000000..3cc76c0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-1 b/class4gl/simulations/c4gl_sim.o643272-1
new file mode 100644
index 0000000..a6f9c52
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-2 b/class4gl/simulations/c4gl_sim.o643272-2
new file mode 100644
index 0000000..aa569ec
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-3 b/class4gl/simulations/c4gl_sim.o643272-3
new file mode 100644
index 0000000..a065842
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-4 b/class4gl/simulations/c4gl_sim.o643272-4
new file mode 100644
index 0000000..61b092e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-5 b/class4gl/simulations/c4gl_sim.o643272-5
new file mode 100644
index 0000000..8a030e2
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-6 b/class4gl/simulations/c4gl_sim.o643272-6
new file mode 100644
index 0000000..ac15fba
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-7 b/class4gl/simulations/c4gl_sim.o643272-7
new file mode 100644
index 0000000..95252f1
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643272-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643274-0 b/class4gl/simulations/c4gl_sim.o643274-0
new file mode 100644
index 0000000..02ba97b
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-1 b/class4gl/simulations/c4gl_sim.o643274-1
new file mode 100644
index 0000000..df17474
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-2 b/class4gl/simulations/c4gl_sim.o643274-2
new file mode 100644
index 0000000..d3fdbfd
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-3 b/class4gl/simulations/c4gl_sim.o643274-3
new file mode 100644
index 0000000..b403bf3
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-4 b/class4gl/simulations/c4gl_sim.o643274-4
new file mode 100644
index 0000000..4e68631
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-5 b/class4gl/simulations/c4gl_sim.o643274-5
new file mode 100644
index 0000000..08f9aed
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-6 b/class4gl/simulations/c4gl_sim.o643274-6
new file mode 100644
index 0000000..cfb6f68
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-7 b/class4gl/simulations/c4gl_sim.o643274-7
new file mode 100644
index 0000000..5fb4e4f
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643274-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643275-0 b/class4gl/simulations/c4gl_sim.o643275-0
new file mode 100644
index 0000000..3bcff7a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-1 b/class4gl/simulations/c4gl_sim.o643275-1
new file mode 100644
index 0000000..f65c900
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-2 b/class4gl/simulations/c4gl_sim.o643275-2
new file mode 100644
index 0000000..e418bbd
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-3 b/class4gl/simulations/c4gl_sim.o643275-3
new file mode 100644
index 0000000..d1b0803
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-4 b/class4gl/simulations/c4gl_sim.o643275-4
new file mode 100644
index 0000000..e466956
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-6 b/class4gl/simulations/c4gl_sim.o643275-6
new file mode 100644
index 0000000..3264a04
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-7 b/class4gl/simulations/c4gl_sim.o643275-7
new file mode 100644
index 0000000..b7582e0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643275-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643276-0 b/class4gl/simulations/c4gl_sim.o643276-0
new file mode 100644
index 0000000..bc5f648
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-1 b/class4gl/simulations/c4gl_sim.o643276-1
new file mode 100644
index 0000000..8794be3
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-2 b/class4gl/simulations/c4gl_sim.o643276-2
new file mode 100644
index 0000000..a7da9c0
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-3 b/class4gl/simulations/c4gl_sim.o643276-3
new file mode 100644
index 0000000..69bf48a
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-4 b/class4gl/simulations/c4gl_sim.o643276-4
new file mode 100644
index 0000000..6b44c82
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-5 b/class4gl/simulations/c4gl_sim.o643276-5
new file mode 100644
index 0000000..e189294
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-6 b/class4gl/simulations/c4gl_sim.o643276-6
new file mode 100644
index 0000000..453e682
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-7 b/class4gl/simulations/c4gl_sim.o643276-7
new file mode 100644
index 0000000..a8c3544
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643276-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643277-0 b/class4gl/simulations/c4gl_sim.o643277-0
new file mode 100644
index 0000000..4dae9b5
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-1 b/class4gl/simulations/c4gl_sim.o643277-1
new file mode 100644
index 0000000..385b39b
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-2 b/class4gl/simulations/c4gl_sim.o643277-2
new file mode 100644
index 0000000..77d3895
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-3 b/class4gl/simulations/c4gl_sim.o643277-3
new file mode 100644
index 0000000..27737f2
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-4 b/class4gl/simulations/c4gl_sim.o643277-4
new file mode 100644
index 0000000..91915db
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-5 b/class4gl/simulations/c4gl_sim.o643277-5
new file mode 100644
index 0000000..40dbcf2
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-6 b/class4gl/simulations/c4gl_sim.o643277-6
new file mode 100644
index 0000000..d6ad526
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-7 b/class4gl/simulations/c4gl_sim.o643277-7
new file mode 100644
index 0000000..8fec120
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643277-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643278-0 b/class4gl/simulations/c4gl_sim.o643278-0
new file mode 100644
index 0000000..980578e
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-1 b/class4gl/simulations/c4gl_sim.o643278-1
new file mode 100644
index 0000000..33d1e33
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-2 b/class4gl/simulations/c4gl_sim.o643278-2
new file mode 100644
index 0000000..7d22207
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-3 b/class4gl/simulations/c4gl_sim.o643278-3
new file mode 100644
index 0000000..15b6190
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-4 b/class4gl/simulations/c4gl_sim.o643278-4
new file mode 100644
index 0000000..ca44618
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-5 b/class4gl/simulations/c4gl_sim.o643278-5
new file mode 100644
index 0000000..a9eae55
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-6 b/class4gl/simulations/c4gl_sim.o643278-6
new file mode 100644
index 0000000..327b4d1
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-7 b/class4gl/simulations/c4gl_sim.o643278-7
new file mode 100644
index 0000000..a846ec1
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643278-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643290-0 b/class4gl/simulations/c4gl_sim.o643290-0
new file mode 100644
index 0000000..e6eb9c6
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-0
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-1 b/class4gl/simulations/c4gl_sim.o643290-1
new file mode 100644
index 0000000..273a974
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-1
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-2 b/class4gl/simulations/c4gl_sim.o643290-2
new file mode 100644
index 0000000..bef9d8d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-2
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-3 b/class4gl/simulations/c4gl_sim.o643290-3
new file mode 100644
index 0000000..21bc334
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-3
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-4 b/class4gl/simulations/c4gl_sim.o643290-4
new file mode 100644
index 0000000..fbc5a1d
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-4
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (4)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 4
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-5 b/class4gl/simulations/c4gl_sim.o643290-5
new file mode 100644
index 0000000..89c55da
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-5
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (5)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 5
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-6 b/class4gl/simulations/c4gl_sim.o643290-6
new file mode 100644
index 0000000..55c69ee
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-6
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (6)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 6
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-7 b/class4gl/simulations/c4gl_sim.o643290-7
new file mode 100644
index 0000000..e646e64
--- /dev/null
+++ b/class4gl/simulations/c4gl_sim.o643290-7
@@ -0,0 +1,28 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+getting stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (7)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 7
+Fetching initial/forcing records
+Fetching afternoon records for determining the simulation runtimes
+hello
+396
+396
+aligning morning and afternoon records
+Traceback (most recent call last):
+  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
+    exp = EXP_DEFS[expname]
+KeyError: ''
diff --git a/class4gl/simulations/simulations_smchange2.py b/class4gl/simulations/simulations_smchange2.py
new file mode 100644
index 0000000..99c38a9
--- /dev/null
+++ b/class4gl/simulations/simulations_smchange2.py
@@ -0,0 +1,357 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs is still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'SMa':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.05},
+  'SMb':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.1},
+  'SMc':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.15},
+  'SMd':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.02},
+  'SMe':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.25},
+  'SMf':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.3},
+  'SMg':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.35},
+  'SMh':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.4},
+  'SMi':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.45},
+  'SMj':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.5},
+  'SMk':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.55},
+  'SMl':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.6},
+  'SMm':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.65},
+  'SMo':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.7},
+  'SMp':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.75},
+  'SMq':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.08},
+  'SMr':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.85},
+  'SMs':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.9},
+  'SMt':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 0.95},
+  'SMu':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange': 1.},
+}
+
+##for i in range(1,20):
+##    EXP_DEFS['SM'+str(i)] = {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'wgchange':float(i)/20.}
+
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_station_select.iloc[int(args.first_station):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching initial/forcing records')
+records_morning = get_records(run_stations,\
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
+                              refetch_records=False,
+                              )
+
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning.ldatetime.dt.date
+    records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
+
+experiments = args.experiments.strip(' ').split(' ')
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+            isim = 0
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                    print('starting '+str(isim+1)+' out of '+\
+                      str(len(records_morning_station_chunk) )+\
+                      ' (station total: ',str(len(records_morning_station)),')')  
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+
+                    sm = exp['wgchange']*(c4gli_morning.pars.wfc - c4gli_morning.pars.wwilt) + c4gli_morning.pars.wwilt
+                    c4gli_morning.update(source=expname, pars={'wg': sm})
+                    c4gli_morning.update(source=expname, pars={'w2': sm})
+                    
+                    
+                    if args.runtime == 'from_afternoon_profile':
+                        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                        c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                          record_afternoon.index_start, 
+                                                          record_afternoon.index_end,
+                                                        mode='ini')
+                        runtime = int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())
+                    else:
+                        runtime = int(args.runtime)
+
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        runtime})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                        except:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_success':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except:
+                            print('run not succesfull')
+                    isim += 1
+
+
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+

From 2f26d34cfeb87c924d87a0ac7f2060a968a04b58 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 29 Aug 2018 10:09:17 +0200
Subject: [PATCH 051/129] fix to avoid boolean cleanup_experiments passed
 through pbs job

---
 class4gl/simulations/c4gl_sim.o643256-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643256-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643257-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643258-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643268-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643269-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643271-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643272-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643274-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-4       | 28 -------------------
 ...{c4gl_sim.o643256-5 => c4gl_sim.o643275-5} |  4 +--
 class4gl/simulations/c4gl_sim.o643275-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643275-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643276-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643277-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643278-7       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-0       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-1       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-2       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-3       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-4       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-5       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-6       | 28 -------------------
 class4gl/simulations/c4gl_sim.o643290-7       | 28 -------------------
 103 files changed, 2 insertions(+), 2858 deletions(-)
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643256-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643257-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643258-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643268-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643269-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643271-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643272-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643274-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-4
 rename class4gl/simulations/{c4gl_sim.o643256-5 => c4gl_sim.o643275-5} (93%)
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643276-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643277-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643278-7
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-0
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-1
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-2
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-3
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-4
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-5
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-6
 delete mode 100644 class4gl/simulations/c4gl_sim.o643290-7

diff --git a/class4gl/simulations/c4gl_sim.o643256-0 b/class4gl/simulations/c4gl_sim.o643256-0
deleted file mode 100644
index 8cc36ec..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-1 b/class4gl/simulations/c4gl_sim.o643256-1
deleted file mode 100644
index 5f3860e..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-2 b/class4gl/simulations/c4gl_sim.o643256-2
deleted file mode 100644
index 961e883..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-3 b/class4gl/simulations/c4gl_sim.o643256-3
deleted file mode 100644
index c473bda..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-4 b/class4gl/simulations/c4gl_sim.o643256-4
deleted file mode 100644
index 3011ce2..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-6 b/class4gl/simulations/c4gl_sim.o643256-6
deleted file mode 100644
index 4bb288e..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643256-7 b/class4gl/simulations/c4gl_sim.o643256-7
deleted file mode 100644
index a3d1eeb..0000000
--- a/class4gl/simulations/c4gl_sim.o643256-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643257-0 b/class4gl/simulations/c4gl_sim.o643257-0
deleted file mode 100644
index 23fcd02..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-1 b/class4gl/simulations/c4gl_sim.o643257-1
deleted file mode 100644
index 461c192..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-2 b/class4gl/simulations/c4gl_sim.o643257-2
deleted file mode 100644
index 7d9eb94..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-3 b/class4gl/simulations/c4gl_sim.o643257-3
deleted file mode 100644
index 3dabc1a..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-4 b/class4gl/simulations/c4gl_sim.o643257-4
deleted file mode 100644
index ea5fb1d..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-5 b/class4gl/simulations/c4gl_sim.o643257-5
deleted file mode 100644
index 681b989..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-6 b/class4gl/simulations/c4gl_sim.o643257-6
deleted file mode 100644
index 3029ba0..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643257-7 b/class4gl/simulations/c4gl_sim.o643257-7
deleted file mode 100644
index ce30f0a..0000000
--- a/class4gl/simulations/c4gl_sim.o643257-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643258-0 b/class4gl/simulations/c4gl_sim.o643258-0
deleted file mode 100644
index 217999c..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-1 b/class4gl/simulations/c4gl_sim.o643258-1
deleted file mode 100644
index 9795172..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-2 b/class4gl/simulations/c4gl_sim.o643258-2
deleted file mode 100644
index 2666ea7..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-3 b/class4gl/simulations/c4gl_sim.o643258-3
deleted file mode 100644
index 4cb628d..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-4 b/class4gl/simulations/c4gl_sim.o643258-4
deleted file mode 100644
index 0ad9bd0..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-5 b/class4gl/simulations/c4gl_sim.o643258-5
deleted file mode 100644
index ba1d645..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-6 b/class4gl/simulations/c4gl_sim.o643258-6
deleted file mode 100644
index 3e2e3da..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643258-7 b/class4gl/simulations/c4gl_sim.o643258-7
deleted file mode 100644
index a2ad87b..0000000
--- a/class4gl/simulations/c4gl_sim.o643258-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643268-0 b/class4gl/simulations/c4gl_sim.o643268-0
deleted file mode 100644
index 8cc36ec..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-1 b/class4gl/simulations/c4gl_sim.o643268-1
deleted file mode 100644
index 5f3860e..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-2 b/class4gl/simulations/c4gl_sim.o643268-2
deleted file mode 100644
index 961e883..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-3 b/class4gl/simulations/c4gl_sim.o643268-3
deleted file mode 100644
index c473bda..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-4 b/class4gl/simulations/c4gl_sim.o643268-4
deleted file mode 100644
index 3011ce2..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-5 b/class4gl/simulations/c4gl_sim.o643268-5
deleted file mode 100644
index ceb34e5..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-6 b/class4gl/simulations/c4gl_sim.o643268-6
deleted file mode 100644
index 4bb288e..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643268-7 b/class4gl/simulations/c4gl_sim.o643268-7
deleted file mode 100644
index a3d1eeb..0000000
--- a/class4gl/simulations/c4gl_sim.o643268-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM0'
diff --git a/class4gl/simulations/c4gl_sim.o643269-0 b/class4gl/simulations/c4gl_sim.o643269-0
deleted file mode 100644
index 23fcd02..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-1 b/class4gl/simulations/c4gl_sim.o643269-1
deleted file mode 100644
index 461c192..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-2 b/class4gl/simulations/c4gl_sim.o643269-2
deleted file mode 100644
index 7d9eb94..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-3 b/class4gl/simulations/c4gl_sim.o643269-3
deleted file mode 100644
index 3dabc1a..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-4 b/class4gl/simulations/c4gl_sim.o643269-4
deleted file mode 100644
index ea5fb1d..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-5 b/class4gl/simulations/c4gl_sim.o643269-5
deleted file mode 100644
index 681b989..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-6 b/class4gl/simulations/c4gl_sim.o643269-6
deleted file mode 100644
index 3029ba0..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643269-7 b/class4gl/simulations/c4gl_sim.o643269-7
deleted file mode 100644
index ce30f0a..0000000
--- a/class4gl/simulations/c4gl_sim.o643269-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM1 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM1'
diff --git a/class4gl/simulations/c4gl_sim.o643271-0 b/class4gl/simulations/c4gl_sim.o643271-0
deleted file mode 100644
index 217999c..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-1 b/class4gl/simulations/c4gl_sim.o643271-1
deleted file mode 100644
index 9795172..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-2 b/class4gl/simulations/c4gl_sim.o643271-2
deleted file mode 100644
index 2666ea7..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-3 b/class4gl/simulations/c4gl_sim.o643271-3
deleted file mode 100644
index 4cb628d..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-4 b/class4gl/simulations/c4gl_sim.o643271-4
deleted file mode 100644
index 0ad9bd0..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-5 b/class4gl/simulations/c4gl_sim.o643271-5
deleted file mode 100644
index ba1d645..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-6 b/class4gl/simulations/c4gl_sim.o643271-6
deleted file mode 100644
index 3e2e3da..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643271-7 b/class4gl/simulations/c4gl_sim.o643271-7
deleted file mode 100644
index a2ad87b..0000000
--- a/class4gl/simulations/c4gl_sim.o643271-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM2 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM2'
diff --git a/class4gl/simulations/c4gl_sim.o643272-0 b/class4gl/simulations/c4gl_sim.o643272-0
deleted file mode 100644
index 3cc76c0..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-1 b/class4gl/simulations/c4gl_sim.o643272-1
deleted file mode 100644
index a6f9c52..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-2 b/class4gl/simulations/c4gl_sim.o643272-2
deleted file mode 100644
index aa569ec..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-3 b/class4gl/simulations/c4gl_sim.o643272-3
deleted file mode 100644
index a065842..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-4 b/class4gl/simulations/c4gl_sim.o643272-4
deleted file mode 100644
index 61b092e..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-5 b/class4gl/simulations/c4gl_sim.o643272-5
deleted file mode 100644
index 8a030e2..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-6 b/class4gl/simulations/c4gl_sim.o643272-6
deleted file mode 100644
index ac15fba..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643272-7 b/class4gl/simulations/c4gl_sim.o643272-7
deleted file mode 100644
index 95252f1..0000000
--- a/class4gl/simulations/c4gl_sim.o643272-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM3 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM3'
diff --git a/class4gl/simulations/c4gl_sim.o643274-0 b/class4gl/simulations/c4gl_sim.o643274-0
deleted file mode 100644
index 02ba97b..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-1 b/class4gl/simulations/c4gl_sim.o643274-1
deleted file mode 100644
index df17474..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-2 b/class4gl/simulations/c4gl_sim.o643274-2
deleted file mode 100644
index d3fdbfd..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-3 b/class4gl/simulations/c4gl_sim.o643274-3
deleted file mode 100644
index b403bf3..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-4 b/class4gl/simulations/c4gl_sim.o643274-4
deleted file mode 100644
index 4e68631..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-5 b/class4gl/simulations/c4gl_sim.o643274-5
deleted file mode 100644
index 08f9aed..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-6 b/class4gl/simulations/c4gl_sim.o643274-6
deleted file mode 100644
index cfb6f68..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643274-7 b/class4gl/simulations/c4gl_sim.o643274-7
deleted file mode 100644
index 5fb4e4f..0000000
--- a/class4gl/simulations/c4gl_sim.o643274-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM4 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM4'
diff --git a/class4gl/simulations/c4gl_sim.o643275-0 b/class4gl/simulations/c4gl_sim.o643275-0
deleted file mode 100644
index 3bcff7a..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-1 b/class4gl/simulations/c4gl_sim.o643275-1
deleted file mode 100644
index f65c900..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-2 b/class4gl/simulations/c4gl_sim.o643275-2
deleted file mode 100644
index e418bbd..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-3 b/class4gl/simulations/c4gl_sim.o643275-3
deleted file mode 100644
index d1b0803..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-4 b/class4gl/simulations/c4gl_sim.o643275-4
deleted file mode 100644
index e466956..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643256-5 b/class4gl/simulations/c4gl_sim.o643275-5
similarity index 93%
rename from class4gl/simulations/c4gl_sim.o643256-5
rename to class4gl/simulations/c4gl_sim.o643275-5
index ceb34e5..df8103f 100644
--- a/class4gl/simulations/c4gl_sim.o643256-5
+++ b/class4gl/simulations/c4gl_sim.o643275-5
@@ -6,7 +6,7 @@ C4GLJOB_path_forcing
 C4GLJOB_split_by
 C4GLJOB_station_id
 C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM0 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
 getting stations
 defining all_stations_select
 Selecting station by ID
@@ -25,4 +25,4 @@ aligning morning and afternoon records
 Traceback (most recent call last):
   File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
     exp = EXP_DEFS[expname]
-KeyError: 'SM0'
+KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-6 b/class4gl/simulations/c4gl_sim.o643275-6
deleted file mode 100644
index 3264a04..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643275-7 b/class4gl/simulations/c4gl_sim.o643275-7
deleted file mode 100644
index b7582e0..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/c4gl_sim.o643276-0 b/class4gl/simulations/c4gl_sim.o643276-0
deleted file mode 100644
index bc5f648..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-1 b/class4gl/simulations/c4gl_sim.o643276-1
deleted file mode 100644
index 8794be3..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-2 b/class4gl/simulations/c4gl_sim.o643276-2
deleted file mode 100644
index a7da9c0..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-3 b/class4gl/simulations/c4gl_sim.o643276-3
deleted file mode 100644
index 69bf48a..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-4 b/class4gl/simulations/c4gl_sim.o643276-4
deleted file mode 100644
index 6b44c82..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-5 b/class4gl/simulations/c4gl_sim.o643276-5
deleted file mode 100644
index e189294..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-6 b/class4gl/simulations/c4gl_sim.o643276-6
deleted file mode 100644
index 453e682..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643276-7 b/class4gl/simulations/c4gl_sim.o643276-7
deleted file mode 100644
index a8c3544..0000000
--- a/class4gl/simulations/c4gl_sim.o643276-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM6 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM6'
diff --git a/class4gl/simulations/c4gl_sim.o643277-0 b/class4gl/simulations/c4gl_sim.o643277-0
deleted file mode 100644
index 4dae9b5..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-1 b/class4gl/simulations/c4gl_sim.o643277-1
deleted file mode 100644
index 385b39b..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-2 b/class4gl/simulations/c4gl_sim.o643277-2
deleted file mode 100644
index 77d3895..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-3 b/class4gl/simulations/c4gl_sim.o643277-3
deleted file mode 100644
index 27737f2..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-4 b/class4gl/simulations/c4gl_sim.o643277-4
deleted file mode 100644
index 91915db..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-5 b/class4gl/simulations/c4gl_sim.o643277-5
deleted file mode 100644
index 40dbcf2..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-6 b/class4gl/simulations/c4gl_sim.o643277-6
deleted file mode 100644
index d6ad526..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643277-7 b/class4gl/simulations/c4gl_sim.o643277-7
deleted file mode 100644
index 8fec120..0000000
--- a/class4gl/simulations/c4gl_sim.o643277-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM7 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM7'
diff --git a/class4gl/simulations/c4gl_sim.o643278-0 b/class4gl/simulations/c4gl_sim.o643278-0
deleted file mode 100644
index 980578e..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-1 b/class4gl/simulations/c4gl_sim.o643278-1
deleted file mode 100644
index 33d1e33..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-2 b/class4gl/simulations/c4gl_sim.o643278-2
deleted file mode 100644
index 7d22207..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-3 b/class4gl/simulations/c4gl_sim.o643278-3
deleted file mode 100644
index 15b6190..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-4 b/class4gl/simulations/c4gl_sim.o643278-4
deleted file mode 100644
index ca44618..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-5 b/class4gl/simulations/c4gl_sim.o643278-5
deleted file mode 100644
index a9eae55..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-6 b/class4gl/simulations/c4gl_sim.o643278-6
deleted file mode 100644
index 327b4d1..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643278-7 b/class4gl/simulations/c4gl_sim.o643278-7
deleted file mode 100644
index a846ec1..0000000
--- a/class4gl/simulations/c4gl_sim.o643278-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM9 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM9'
diff --git a/class4gl/simulations/c4gl_sim.o643290-0 b/class4gl/simulations/c4gl_sim.o643290-0
deleted file mode 100644
index e6eb9c6..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-0
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-1 b/class4gl/simulations/c4gl_sim.o643290-1
deleted file mode 100644
index 273a974..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-1
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-2 b/class4gl/simulations/c4gl_sim.o643290-2
deleted file mode 100644
index bef9d8d..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-2
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-3 b/class4gl/simulations/c4gl_sim.o643290-3
deleted file mode 100644
index 21bc334..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-3
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-4 b/class4gl/simulations/c4gl_sim.o643290-4
deleted file mode 100644
index fbc5a1d..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-4
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (4)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 4
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-5 b/class4gl/simulations/c4gl_sim.o643290-5
deleted file mode 100644
index 89c55da..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-6 b/class4gl/simulations/c4gl_sim.o643290-6
deleted file mode 100644
index 55c69ee..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-6
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (6)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 6
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''
diff --git a/class4gl/simulations/c4gl_sim.o643290-7 b/class4gl/simulations/c4gl_sim.o643290-7
deleted file mode 100644
index e646e64..0000000
--- a/class4gl/simulations/c4gl_sim.o643290-7
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments= --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (7)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 7
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: ''

From e443be17ecc9a84cf551ed3af0a388d31de8e991 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 08:49:21 +0200
Subject: [PATCH 052/129] add evaluation interfaces

---
 class4gl/data_global.py                       |   6 +-
 class4gl/interface/interface.py               |   2 +-
 class4gl/interface/interface_eval.py          | 375 ++++++++++++++++++
 class4gl/interface/interface_eval_stations.py | 290 ++++++++++++++
 class4gl/interface/interface_stations.py      | 309 +++++++++++++++
 class4gl/interface_multi.py                   |   2 +-
 6 files changed, 979 insertions(+), 5 deletions(-)
 create mode 100644 class4gl/interface/interface_eval.py
 create mode 100644 class4gl/interface/interface_eval_stations.py
 create mode 100644 class4gl/interface/interface_stations.py

diff --git a/class4gl/data_global.py b/class4gl/data_global.py
index 9c3d9b5..63081f1 100644
--- a/class4gl/data_global.py
+++ b/class4gl/data_global.py
@@ -221,10 +221,10 @@ def __init__(self,sources= {
         # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
         # 'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR',
         # 'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF',
-        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
-        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
+        'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf',
+        'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc:SMroot',
         #'GLEAM:BR'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR',
-        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/EF_*_GLEAM_v3.2a.nc:EF',
+        'GLEAM:EF'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc:EF',
         "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc",
         "GLAS:z0m"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
         "GLAS:z0h"      : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1",
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index fa4ba4e..f6d718f 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -100,7 +100,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 # # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 # }
 
-if args.load_globaldata:
+if str(args.load_globaldata).lower() not in ('false','0',''):
     # iniitialize global data
     globaldata = data_global()
     # ...  and load initial data pages
diff --git a/class4gl/interface/interface_eval.py b/class4gl/interface/interface_eval.py
new file mode 100644
index 0000000..f72d4fc
--- /dev/null
+++ b/class4gl/interface/interface_eval.py
@@ -0,0 +1,375 @@
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=True)
+parser.add_argument('--show_control_parameters',default=True)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--figure_filename_2',default=None)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+
+def abline(slope, intercept,axis):
+    """Plot a line from slope and intercept"""
+    #axis = plt.gca()
+    x_vals = np.array(axis.get_xlim())
+    y_vals = intercept + slope * x_vals
+    axis.plot(x_vals, y_vals, 'k--')
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+if str(args.load_globaldata).lower() not in ('false','0',''):
+    # iniitialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing+'/',\
+                      globaldata,\
+                      refetch_records=False
+                    )
+
+# the lines below activate TaylorPlots but it is disabled for now
+fig = plt.figure(figsize=(10,7))   #width,height
+i = 1                                                                           
+axes = {}         
+axes_taylor = {}         
+
+colors = ['r','g','b','m']
+symbols = ['*','x','+']
+dias = {}
+
+for varkey in ['h','theta','q']:                                                    
+    axes[varkey] = fig.add_subplot(2,3,i)                                       
+    #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+
+    #print(obs.std())
+    dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+    if i == 1:
+        dias[varkey]._ax.axis["left"].label.set_text(\
+            "Standard deviation (model) / Standard deviation (observations)")
+        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+    #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+    # Q95 = obs.quantile(0.95)
+    # Q95 = obs.quantile(0.90)
+    # Add RMS contours, and label them
+    contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+    dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+    #dia._ax.set_title(season.capitalize())
+
+    dias[varkey].add_grid()
+
+
+    #dia.ax.plot(x99,y99,color='k')
+
+    
+    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        # clearsky = (cc < 0.05)
+        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+        x, y = obs.values,mod.values
+        print(key,len(obs.values))
+
+        STD_OBS = obs.std()
+        #scores
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+        
+        # fit = np.polyfit(x,y,deg=1)
+        # axes[varkey].plot(x, fit[0] * x + fit[1],\
+        #                   color=colors[ikey],alpha=0.8,lw=2,\
+        #                   label=key+", "+\
+        #                               'R = '+str(round(PR,3))+', '+\
+        #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+        #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+        # axes[varkey].legend(fontsize=5)
+        
+        # print(STD)
+        # print(PR)
+        dias[varkey].add_sample(STD/STD_OBS, PR,
+                       marker='o', ms=5, ls='',
+                       #mfc='k', mec='k', # B&W
+                       mfc=colors[ikey], mec=colors[ikey], # Colors
+                       label=key)
+
+    # put ticker position, see
+    # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+    # dia.ax.axis['bottom'].
+    # dia.ax.axis['left'].
+    # dia.ax.axis['left'].
+
+    i += 1
+
+i = 0
+for varkey in ['h','theta','q']:                                                    
+    ikey = 0
+    key = list(args.experiments.strip().split(' '))[ikey]
+    cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+    clearsky = (cc < 0.05)
+
+    mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+    obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+
+
+    nbins=40       
+    x, y = obs.values,mod.values
+    
+    xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+    zi = np.zeros_like(xi)*np.nan       
+    for ibin in range(nbins):
+        xmin = x.min() + ibin * (x.max() - x.min())/nbins
+        xmax = xmin + (x.max() - x.min())/nbins
+        in_bin = ((x >= xmin) & (x < xmax))
+        ybin = y[in_bin]
+        xbin = x[in_bin]
+        if len(ybin) > 20:
+            k = kde.gaussian_kde((ybin))
+            zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+    zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+    zi_int = zi.cumsum(axis=1) 
+                 #  label=key+", "+\
+                 #                    'R = '+str(round(PR[0],3))+', '+\
+                 #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                 #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+    axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+            colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+    axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+            colors=['darkred'],alpha=0.5,)
+
+
+    latex = {}
+    latex['dthetadt'] =  r'$d \theta / dt $'
+    latex['dqdt'] =      r'$d q / dt $'
+    latex['dhdt'] =      r'$d h / dt $'
+
+    axes[varkey].set_xlabel('observations')     
+    axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+
+    PR = pearsonr(mod,obs)[0]
+    RMSE = rmse(obs,mod)                                               
+    BIAS = np.mean(mod) - np.mean(obs)
+    STD = mod.std()
+
+    axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+                                  'R = '+str(round(PR,3))+', '+\
+                                  'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                  'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+                         s=0.1,alpha=0.14,color='k')
+    axes[varkey].legend(fontsize=5)
+                   
+    axes[varkey].set_xlabel('observations')     
+    if i==0:                                    
+        axes[varkey].set_ylabel('model')                                            
+    abline(1,0,axis=axes[varkey])
+    i +=1
+
+
+
+# legend for different forcing simulations (colors)
+ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+leg = []
+for ikey,key in enumerate(args.experiments.strip().split(' ')):
+    leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+    leg.append(leg1)
+ax.axis('off')
+#leg1 =
+ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+
+
+# # legend for different stations (symbols)
+# ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# leg = []
+# isymbol = 0
+# for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+#     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+#     leg.append(leg1)
+#     isymbol += 1
+# 
+# # symbol for all stations
+# leg1, = ax.plot([],'ko',markersize=10)
+# leg.append(leg1)
+
+
+# ax.axis('off')
+# ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+
+
+fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+
+
+#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+# figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+# fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+
+if args.figure_filename is not None:
+    fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+fig.show()  
+
+
+if str(args.show_control_parameters).lower() not in ('false','0',''):
+    import seaborn as sns
+    sns.set()
+    fig = pl.figure(figsize=(12,8))
+    i = 1
+    axes = {}
+    data_all = pd.DataFrame()
+    
+    
+    
+    # #for varkey in ['theta','q']:     
+    # EF =\
+    #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
+    #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
+    # EF[EF<0] = np.nan
+    # EF[EF>1] = np.nan
+    
+    # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
+    
+    ikey = 0
+    key = list(args.experiments.strip().split(' '))[ikey]
+    for varkey in ['h','theta','q']:
+        for input_key in ['wg','cc']:
+            data_all = pd.DataFrame()
+            data = pd.DataFrame()
+            data[varkey] = ""
+            data[varkey] = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            data["source"] = "soundings"
+            data_all = pd.concat([data_all,data])
+                
+            data = pd.DataFrame()
+            
+            
+            data[varkey] = ""
+            data[varkey] = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            data["source"] = "model"
+            data_all = pd.concat([data_all,data])
+            data_input = pd.concat([c4gldata[key].frames['stats']['records_all_stations_ini'],
+                                   c4gldata[key].frames['stats']['records_all_stations_ini']],axis=0)
+            input_key_full = input_key + "["+units[input_key]+"]"
+            data_all[input_key_full] =  pd.cut(x=data_input[input_key].values,bins=10)
+            
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+            data_all = data_all.rename(columns={varkey:varkey_full})
+            
+            qvalmax = data_all[varkey_full].quantile(0.999)
+            qvalmin = data_all[varkey_full].quantile(0.001)
+            data_all = data_all[(data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)]
+            
+            ax = fig.add_subplot(3,2,i)
+            sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+            ax.grid()
+            plt.xticks(rotation=45,ha='right')
+            i +=1
+            plt.legend(loc='lower right')
+    fig.tight_layout()
+    if args.figure_filename_2 is not None:
+        fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
+    fig.show()
+
+
+
+
+
+
+
+
+
+
diff --git a/class4gl/interface/interface_eval_stations.py b/class4gl/interface/interface_eval_stations.py
new file mode 100644
index 0000000..49deebc
--- /dev/null
+++ b/class4gl/interface/interface_eval_stations.py
@@ -0,0 +1,290 @@
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=True)
+parser.add_argument('--figure_filename',default=None)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+
+
+latex = {}
+latex['dthetadt'] =  r'$d \theta / dt $'
+latex['dqdt'] =      r'$d q / dt $'
+latex['dhdt'] =      r'$d h / dt $'
+
def abline(slope, intercept, axis):
    """Draw the straight line y = slope * x + intercept on *axis*.

    The line is rendered dashed black ('k--') and spans the current
    x-limits of the axis; the limits themselves are left unchanged.
    """
    endpoints = np.array(axis.get_xlim())
    axis.plot(endpoints, slope * endpoints + intercept, 'k--')
+
def rmse(y_actual, y_predicted, z_actual=None, z_predicted=None, filternan_actual=False):
    """Return the root mean squared error between two datasets.

    INPUT:
        y_actual: reference dataset
        y_predicted: predicting dataset; either a profile sampled at
            z_predicted, or a single scalar value (e.g. a mixed-layer
            estimate) that is compared against every reference value
        z_actual: coordinate values of the reference dataset
        z_predicted: coordinate values of the predicting dataset
        filternan_actual: throw away reference values that have nans
            (the matching z_actual coordinates are dropped as well)

    RAISES:
        ValueError: when only one of z_actual / z_predicted is given.
    """
    y_actual_temp = np.array(y_actual)
    y_predicted_temp = np.array(y_predicted)

    if z_actual is not None:
        z_actual_temp = np.array(z_actual)
    else:
        z_actual_temp = None

    if filternan_actual:
        # Bug fix: compute the NaN mask ONCE on the unfiltered reference
        # values.  The original filtered y first and then re-evaluated the
        # mask on the already-filtered array, which yields a wrong-length
        # mask for z_actual_temp.
        valid = ~np.isnan(y_actual_temp)
        if z_actual_temp is not None:
            z_actual_temp = z_actual_temp[valid]
        y_actual_temp = y_actual_temp[valid]

    if ((z_actual_temp is not None) or (z_predicted is not None)):
        if (z_actual_temp is None) or (z_predicted is None):
            raise ValueError('Input z_actual and z_predicted need \
                              to be specified simultaneously.')
        # resample the prediction onto the reference coordinates
        y_predicted_temp = np.interp(z_actual_temp, z_predicted, y_predicted)

    else:
        # this catches the situation that y_predicted is a single value (eg.,
        # which is the case for evaluating eg., mixed-layer estimates)
        y_predicted_temp = y_actual_temp * 0. + y_predicted_temp

    residual = y_actual_temp - y_predicted_temp
    return np.sqrt(np.mean(residual * residual))
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+# Optionally pre-load the global gridded datasets used as forcing input.
+# NOTE(review): --load_globaldata defaults to the bool True, but a value
+# passed on the command line arrives as a *string*, and any non-empty
+# string (including "False") is truthy -- confirm intended usage.
+if args.load_globaldata:
+    # initialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+# Build one sounding-evaluation interface per experiment listed in the
+# space-separated --experiments argument; records are read from cache
+# (refetch_records=False).
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing+'/',\
+                      globaldata,\
+                      refetch_records=False
+                    )
+
+fig = plt.figure(figsize=(10,7))   #width,height
+i = 1                                                                           
+axes = {}         
+axes_taylor = {}         
+
+colors = ['r','g','b','m']
+symbols = ['*','x','+']
+dias = {}
+
+for varkey in ['h','theta','q']:                                                    
+    axes[varkey] = fig.add_subplot(2,3,i)                                       
+    #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+
+    #print(obs.std())
+    obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+    STD_OBS = obs.std()
+    dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+    if i == 2:
+        dias[varkey]._ax.axis["left"].label.set_text(\
+            "Standard deviation (model) / Standard deviation (observations)")
+        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+    #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+    # Q95 = obs.quantile(0.95)
+    # Q95 = obs.quantile(0.90)
+    # Add RMS contours, and label them
+    contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+    dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+    #dia._ax.set_title(season.capitalize())
+
+    dias[varkey].add_grid()
+
+
+    #dia.ax.plot(x99,y99,color='k')
+
+    
+    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+        x, y = obs.values,mod.values
+        print(key,len(obs.values))
+
+        #scores
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+        
+        fit = np.polyfit(x,y,deg=1)
+        axes[varkey].plot(x, fit[0] * x + fit[1],\
+                          color=colors[ikey],alpha=0.8,lw=2,\
+                          label=key+", "+\
+                                      'R = '+str(round(PR,3))+', '+\
+                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+        axes[varkey].legend(fontsize=5)
+        
+        # print(STD)
+        # print(PR)
+        dias[varkey].add_sample(STD/STD_OBS, PR,
+                       marker='o', ms=5, ls='',
+                       #mfc='k', mec='k', # B&W
+                       mfc=colors[ikey], mec=colors[ikey], # Colors
+                       label=key)
+
+    # put ticker position, see
+    # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+    # dia.ax.axis['bottom'].
+    # dia.ax.axis['left'].
+    # dia.ax.axis['left'].
+
+    i += 1
+
+i = 0
+for varkey in ['h','theta','q']:                                                    
+    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        isymbol = 0
+        for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+            indices =  (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name)
+            station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices]
+            station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices]
+
+            axes[varkey].scatter(station_obs,station_mod,marker=symbols[isymbol],color=colors[ikey])
+                     #  label=key+", "+\
+                     #                    'R = '+str(round(PR[0],3))+', '+\
+                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+
+
+
+        # # pl.scatter(obs,mod,label=key+", "+\
+        # #                              'R = '+str(round(PR[0],3))+', '+\
+        # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+        # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+            
+            dias[varkey].add_sample(station_mod.std()/station_obs.std(),
+                           pearsonr(station_mod,station_obs)[0],
+                           marker=symbols[isymbol], ms=5, ls='',
+                           #mfc='k', mec='k', # B&W
+                           mfc=colors[ikey], mec=colors[ikey], # Colors
+                           label=key)
+            isymbol += 1
+
+
+        axes[varkey].set_xlabel('observations')     
+        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+    if i==0:                                    
+        axes[varkey].set_ylabel('model')                                            
+    abline(1,0,axis=axes[varkey])
+    i +=1
+
+
+
+# legend for different forcing simulations (colors)
+ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+leg = []
+for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+    leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
+    leg.append(leg1)
+ax.axis('off')
+#leg1 =
+ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
+
+
+# legend for different stations (symbols)
+ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+leg = []
+isymbol = 0
+for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+    leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+    leg.append(leg1)
+    isymbol += 1
+
+# symbol for all stations
+leg1, = ax.plot([],'ko',markersize=10)
+leg.append(leg1)
+
+
+ax.axis('off')
+ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+
+
+fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+
+
+#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+#figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/iops_eval_report.png'
+
+if args.figure_filename is not None:
+    fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+fig.show()  
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
new file mode 100644
index 0000000..f6d718f
--- /dev/null
+++ b/class4gl/interface/interface_stations.py
@@ -0,0 +1,309 @@
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+
def abline(slope, intercept, axis):
    """Draw the straight line y = slope * x + intercept on *axis*.

    The line is rendered dashed black ('k--') and spans the current
    x-limits of the axis; the limits themselves are left unchanged.
    """
    endpoints = np.array(axis.get_xlim())
    axis.plot(endpoints, slope * endpoints + intercept, 'k--')
+
def rmse(y_actual, y_predicted, z_actual=None, z_predicted=None, filternan_actual=False):
    """Return the root mean squared error between two datasets.

    INPUT:
        y_actual: reference dataset
        y_predicted: predicting dataset; either a profile sampled at
            z_predicted, or a single scalar value (e.g. a mixed-layer
            estimate) that is compared against every reference value
        z_actual: coordinate values of the reference dataset
        z_predicted: coordinate values of the predicting dataset
        filternan_actual: throw away reference values that have nans
            (the matching z_actual coordinates are dropped as well)

    RAISES:
        ValueError: when only one of z_actual / z_predicted is given.
    """
    y_actual_temp = np.array(y_actual)
    y_predicted_temp = np.array(y_predicted)

    if z_actual is not None:
        z_actual_temp = np.array(z_actual)
    else:
        z_actual_temp = None

    if filternan_actual:
        # Bug fix: compute the NaN mask ONCE on the unfiltered reference
        # values.  The original filtered y first and then re-evaluated the
        # mask on the already-filtered array, which yields a wrong-length
        # mask for z_actual_temp.
        valid = ~np.isnan(y_actual_temp)
        if z_actual_temp is not None:
            z_actual_temp = z_actual_temp[valid]
        y_actual_temp = y_actual_temp[valid]

    if ((z_actual_temp is not None) or (z_predicted is not None)):
        if (z_actual_temp is None) or (z_predicted is None):
            raise ValueError('Input z_actual and z_predicted need \
                              to be specified simultaneously.')
        # resample the prediction onto the reference coordinates
        y_predicted_temp = np.interp(z_actual_temp, z_predicted, y_predicted)

    else:
        # this catches the situation that y_predicted is a single value (eg.,
        # which is the case for evaluating eg., mixed-layer estimates)
        y_predicted_temp = y_actual_temp * 0. + y_predicted_temp

    residual = y_actual_temp - y_predicted_temp
    return np.sqrt(np.mean(residual * residual))
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+# Optionally pre-load the global gridded datasets used as forcing input.
+# NOTE(review): --load_globaldata defaults to the bool False, but a value
+# passed on the command line arrives as a *string*, and bool() of any
+# non-empty string (including "False") is True -- confirm intended usage.
+if bool(args.load_globaldata):
+    # initialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+# Build one sounding-evaluation interface per experiment listed in the
+# space-separated --experiments argument; records are read from cache
+# (refetch_records=False).
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing+'/',\
+                      globaldata,\
+                      refetch_records=False
+                    )
+
+# # the lines below activate TaylorPlots but it is disabled for now
+# fig = plt.figure(figsize=(10,7))   #width,height
+# i = 1                                                                           
+# axes = {}         
+# axes_taylor = {}         
+# 
+# colors = ['r','g','b','m']
+# symbols = ['*','x','+']
+# dias = {}
+# 
+# for varkey in ['h','theta','q']:                                                    
+#     axes[varkey] = fig.add_subplot(2,3,i)                                       
+#     #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+# 
+#     #print(obs.std())
+#     dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+#     if i == 0:
+#         dias[varkey]._ax.axis["left"].label.set_text(\
+#             "Standard deviation (model) / Standard deviation (observations)")
+#         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+#     #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#     # Q95 = obs.quantile(0.95)
+#     # Q95 = obs.quantile(0.90)
+#     # Add RMS contours, and label them
+#     contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+#     dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+#     #dia._ax.set_title(season.capitalize())
+# 
+#     dias[varkey].add_grid()
+# 
+# 
+#     #dia.ax.plot(x99,y99,color='k')
+# 
+#     
+#     for ikey,key in enumerate(args.experiments.split(';')):
+#         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+#         # clearsky = (cc < 0.05)
+#         # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+#         # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+#         mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+#         obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+#         x, y = obs.values,mod.values
+#         print(key,len(obs.values))
+# 
+#         STD_OBS = obs.std()
+#         #scores
+#         PR = pearsonr(mod,obs)[0]
+#         RMSE = rmse(obs,mod)                                               
+#         BIAS = np.mean(mod) - np.mean(obs)
+#         STD = mod.std()
+#         
+#         # fit = np.polyfit(x,y,deg=1)
+#         # axes[varkey].plot(x, fit[0] * x + fit[1],\
+#         #                   color=colors[ikey],alpha=0.8,lw=2,\
+#         #                   label=key+", "+\
+#         #                               'R = '+str(round(PR,3))+', '+\
+#         #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+#         #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+#         # axes[varkey].legend(fontsize=5)
+#         
+#         # print(STD)
+#         # print(PR)
+#         dias[varkey].add_sample(STD/STD_OBS, PR,
+#                        marker='o', ms=5, ls='',
+#                        #mfc='k', mec='k', # B&W
+#                        mfc=colors[ikey], mec=colors[ikey], # Colors
+#                        label=key)
+# 
+#     # put ticker position, see
+#     # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+#     # dia.ax.axis['bottom'].
+#     # dia.ax.axis['left'].
+#     # dia.ax.axis['left'].
+# 
+#     i += 1
+# 
+# i = 0
+# for varkey in ['h','theta','q']:                                                    
+#     ikey = 0
+#     key = list(args.experiments.split(';'))[ikey]
+#     cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+#     clearsky = (cc < 0.05)
+# 
+#     mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+#     obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+# 
+# 
+#     nbins=40       
+#     x, y = obs.values,mod.values
+#     
+#     xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+#     zi = np.zeros_like(xi)*np.nan       
+#     for ibin in range(nbins):
+#         xmin = x.min() + ibin * (x.max() - x.min())/nbins
+#         xmax = xmin + (x.max() - x.min())/nbins
+#         in_bin = ((x >= xmin) & (x < xmax))
+#         ybin = y[in_bin]
+#         xbin = x[in_bin]
+#         if len(ybin) > 20:
+#             k = kde.gaussian_kde((ybin))
+#             zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+#     zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+#     zi_int = zi.cumsum(axis=1) 
+#                  #  label=key+", "+\
+#                  #                    'R = '+str(round(PR[0],3))+', '+\
+#                  #                    'RMSE = '+str(round(RMSE,5))+', '+\
+#                  #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+#     axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+#             colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+#     axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+#             colors=['darkred'],alpha=0.5,)
+# 
+# 
+#     latex = {}
+#     latex['dthetadt'] =  r'$d \theta / dt $'
+#     latex['dqdt'] =      r'$d q / dt $'
+#     latex['dhdt'] =      r'$d h / dt $'
+# 
+#     axes[varkey].set_xlabel('observations')     
+#     axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+# 
+#     PR = pearsonr(mod,obs)[0]
+#     RMSE = rmse(obs,mod)                                               
+#     BIAS = np.mean(mod) - np.mean(obs)
+#     STD = mod.std()
+# 
+#     axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+#                                   'R = '+str(round(PR,3))+', '+\
+#                                   'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+#                                   'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+#                          s=0.1,alpha=0.14,color='k')
+#     axes[varkey].legend(fontsize=5)
+#                    
+#     axes[varkey].set_xlabel('observations')     
+#     if i==0:                                    
+#         axes[varkey].set_ylabel('model')                                            
+#     abline(1,0,axis=axes[varkey])
+#     i +=1
+# 
+# 
+# 
+# # legend for different forcing simulations (colors)
+# ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# leg = []
+# for ikey,key in enumerate(args.experiments.split(';')):
+#     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+#     leg.append(leg1)
+# ax.axis('off')
+# #leg1 =
+# ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
+# 
+# 
+# # # legend for different stations (symbols)
+# # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+# # leg = []
+# # isymbol = 0
+# # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+# #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+# #     leg.append(leg1)
+# #     isymbol += 1
+# # 
+# # # symbol for all stations
+# # leg1, = ax.plot([],'ko',markersize=10)
+# # leg.append(leg1)
+# 
+# 
+# # ax.axis('off')
+# # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+# 
+# 
+# fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+# 
+# 
+# #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+# # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+# # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+# fig.show()  
+
+
+
+
+
+
+
+
+
+
+
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 25bde26..8672499 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -826,7 +826,7 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
         frames = self.frames
         fig = self.fig
  
-        if globaldata is not None:
+        if self.globaldata is not None:
             if (only is None) or ('worldmap' in only):
                 globaldata = self.globaldata
                 if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:

From 41b6f19309a9f3e45f82bdacd491a0968a5bf51e Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 08:53:24 +0200
Subject: [PATCH 053/129] evaluation interfaces

---
 trash/data_ground.py | 393 -------------------------------------------
 1 file changed, 393 deletions(-)
 delete mode 100644 trash/data_ground.py

diff --git a/trash/data_ground.py b/trash/data_ground.py
deleted file mode 100644
index d4e0b5a..0000000
--- a/trash/data_ground.py
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Nov  7 10:51:03 2017
-
-@author: vsc42247
-
-Purpose: Set surface conditions for the CLASS boundary-layer model
-"""
-
-
-import netCDF4 as nc4
-import numpy as np
-import datetime as dt
-#you can install with
-import pynacolada as pcd
-import pandas as pd
-
-def get_class4gl_ground(class_settings,**kwargs):   
-    
-    key = "IGBPDIS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-    
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc"
-        print('reading soil water saturation from '+input_fn)
-
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wsat'] = input_nc.variables['wsat'][ilon,ilat]
-        input_nc.close()
-
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc"
-        print('reading soil water field capacity from '+input_fn)
-    
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wfc'] = input_nc.variables['wfc'][ilon,ilat]
-        input_nc.close()
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc"
-        print('reading soil wilting point from '+input_fn)
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__['wwilt'] = input_nc.variables['wwp'][ilon,ilat]
-        input_nc.close()
-        
-    key = "GLEAM"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-        
-        #INPUT_gleam = gleam() 
-        #INPUT_gleam.path = "/kyukon/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/" 
-        
-        gleam_path = "/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/"
-        print('reading soil-water content for "+str(class_settings,datetime.year)+" from '+gleam_path)
-        
-        gleam_files = {}
-        
-        gleam_vars = ['SMroot','SMsurf']
-        
-        for VAR in gleam_vars:
-            gleam_files[VAR] = nc4.Dataset(gleam_path+'/'+str(class_settings.datetime.year)+'/'+VAR+'_'+str(class_settings.datetime.year)+'_GLEAM_v3.1a.nc','r')
-        
-
-        year = class_settings.datetime.year
-        day = class_settings.datetime.day
-        hour = class_settings.datetime.hour
-  
-        ilat = np.where(gleam_files['SMsurf'].variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(gleam_files['SMsurf'].variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        VAR = 'SMsurf'; class_settings.wg = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
-        VAR = 'SMroot'; class_settings.w2 = gleam_files[VAR].variables[VAR][day-1,ilon,ilat]
-        
-        for VAR in gleam_vars:
-            gleam_files[VAR].close()
-    
-    key = "MOD44B"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-    
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc"
-        print('initializing vegetation fraction from '+input_fn)
-        var = 'cveg'
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        class_settings.__dict__[var] = input_nc.variables['fv'][ilon,ilat]
-        input_nc.close()
-        
-    key = "DSMW"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):
-         # Procedure of the thermal properties:
-         # 1. determine soil texture from DSMW
-         # 2. soil type with look-up table (according to DWD/EXTPAR)
-         # 3. Thermal properties used in the force-restore method (Clapp and Hornberger, 1987) 
-         #    with parameter look-up table from Noilhan and Planton (1989). 
-         #    Note: The look-up table is inspired on DWD/COSMO
-                 
-       
-        #preparing for soil thermal properties
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc"
-        
-        print("deriving soil thermal properties for the force-restore methodes from the soil texture file "+ input_fn)
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        DSMW = input_nc.variables['DSMW'][ilat,ilon]
-        
-        
-        #EXTPAR: zfine   = soil_texslo(soil_unit)%tex_fine
-        SP = {}; SPKEYS = ['tex_coarse', 'tex_medium', 'tex_fine', 'code']
-        for SPKEY in SPKEYS: 
-            SP[SPKEY] = np.array(input_nc.variables[SPKEY][DSMW])
-        input_nc.close()
-        
-        SP['texture'] = (0.5*SP['tex_medium']+1.0*SP['tex_coarse']) /(SP['tex_coarse']+SP['tex_medium']+SP['tex_fine'])
-        
-        if pd.isnull(SP['texture']):
-            print('Warning, texture is invalid> Setting to Ocean')
-            SP['itex'] = 9
-        
-        else:
-            SP['itex'] = int(SP['texture']*100)
-        
-        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-        SP['isoil'] = np.zeros_like(SP['itex'],dtype=np.int)
-        LOOKUP = [
-                  [0 ,7],# fine textured, clay (soil type 7)
-                  [20,6],# medium to fine textured, loamy clay (soil type 6)
-                  [40,5],# medium textured, loam (soil type 5)
-                  [60,4],# coarse to medium textured, sandy loam (soil type 4)
-                  [80,3],# coarse textured, sand (soil type 3)
-                ]
-        for iitex,iisoil in LOOKUP: 
-            SP['isoil'][SP['itex'] >= iitex ] = iisoil 
-        
-        #adopted from mo_agg_soil.f90 (EXTPAR3.0)
-        LOOKUP = [
-                  [9001, 1 ], # ice, glacier (soil type 1) 
-                  [9002, 2 ], # rock, lithosols (soil type 2)
-                  [9003, 3 ], # salt, set soiltype to sand (soil type 3)
-                  [9004, 8 ], # histosol, e.g. peat (soil type 8)
-                  [9,    9 ], # undefined (ocean)
-                  [9005, 3 ], # shifting sands or dunes, set soiltype to sand (soil type 3)
-                  [9000, 9 ], # undefined (inland lake)
-                  [9009, 5 ], #  default_soiltype ! undefined (nodata), set soiltype to loam (soil type )
-                  [9012, 5 ], #  default_soiltype undefined (dominant part undefined), set soiltype to loam (soil type 5)
-                ]
-        # EXTPAR: soil_code = soil_texslo(soil_unit)%dsmw_code # the legend has some special cases for the "soil_code"
-        for icode,iisoil in LOOKUP: 
-            SP['isoil'][SP['code'] == icode] = iisoil 
-        
-        #adopted from data_soil.f90 (COSMO5.0)
-        SP_LOOKUP = { 
-          # soil type:         ice        rock       sand        sandy      loam         clay        clay        peat        sea        sea  
-          # (by index)                                           loam                    loam                                water      ice
-          'cporv'  : [ np.nan, 1.E-10   , 1.E-10   , 0.364     , 0.445     , 0.455     , 0.475     , 0.507     , 0.863     , 1.E-10   , 1.E-10   ],
-          'cfcap'  : [ np.nan, 1.E-10   , 1.E-10   , 0.196     , 0.260     , 0.340     , 0.370     , 0.463     , 0.763     , 1.E-10   , 1.E-10   ],
-          'cpwp'   : [ np.nan, 0.0      , 0.0      , 0.042     , 0.100     , 0.110     , 0.185     , 0.257     , 0.265     , 0.0      ,  0.0     ],
-          'cadp'   : [ np.nan, 0.0      , 0.0      , 0.012     , 0.030     , 0.035     , 0.060     , 0.065     , 0.098     , 0.0      ,  0.0     ],
-          'crhoc'  : [ np.nan, 1.92E6   , 2.10E6   , 1.28E6    , 1.35E6    , 1.42E6    , 1.50E6    , 1.63E6    , 0.58E6    , 4.18E6   , 1.92E6   ],
-          'cik2'   : [ np.nan, 0.0      , 0.0      , 0.0035    , 0.0023    , 0.0010    , 0.0006    , 0.0001    , 0.0002    , 0.0      ,  0.0     ],
-          'ckw0'   : [ np.nan, 0.0      , 0.0      , 479.E-7   , 943.E-8   , 531.E-8   , 764.E-9   , 17.E-9    , 58.E-9    , 0.0      ,  0.0     ],
-          'ckw1'   : [ np.nan, 0.0      , 0.0      , -19.27    , -20.86    , -19.66    , -18.52    , -16.32    , -16.48    , 0.0      ,  0.0     ],
-          'cdw0'   : [ np.nan, 0.0      , 0.0      , 184.E-7   , 346.E-8   , 357.E-8   , 118.E-8   , 442.E-9   , 106.E-9   , 0.0      ,  0.0     ],
-          'cdw1'   : [ np.nan, 0.0      , 0.0      , -8.45     , -9.47     , -7.44     , -7.76     , -6.74     , -5.97     , 0.0      ,  0.0     ],
-          'crock'  : [ np.nan, 0.0      , 0.0      , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 1.0       , 0.0      ,  0.0     ],
-          'cala0'  : [ np.nan, 2.26     , 2.41     , 0.30      , 0.28      , 0.25      , 0.21      , 0.18      , 0.06      , 1.0      ,  2.26    ],
-          'cala1'  : [ np.nan, 2.26     , 2.41     , 2.40      , 2.40      , 1.58      , 1.55      , 1.50      , 0.50      , 1.0      ,  2.26    ],
-          'csalb'  : [ np.nan, 0.70     , 0.30     , 0.30      , 0.25      , 0.25      , 0.25      , 0.25      , 0.20      , 0.07     ,  0.70    ],
-          'csalbw' : [ np.nan, 0.00     , 0.00     , 0.44      , 0.27      , 0.24      , 0.23      , 0.22      , 0.10      , 0.00     ,  0.00    ],
-          'ck0di'  : [ np.nan, 1.E-4    , 1.E-4    , 2.E-4     , 2.E-5     , 6.E-6     , 2.E-6     , 1.E-6     , 1.5E-6    , 0.00     ,  0.00    ],
-          'cbedi'  : [ np.nan, 1.00     , 1.00     , 3.5       , 4.8       , 6.1       , 8.6       , 10.0      , 9.0       , 0.00     ,  0.00    ],
-          'csandf' : [ np.nan, 0.0      , 0.0      , 90.       , 65.       , 40.       , 35.       , 15.       , 90.       , 0.00     ,  0.00    ],
-          'cclayf' : [ np.nan, 0.0      , 0.0      , 5.0       , 10.       , 20.       , 35.       , 70.       , 5.0       , 0.00     ,  0.00    ],
-          #supplement Noihhan andf Planton 1989 soil texture parameters for the force-restore method.
-          'b'      : [ np.nan, np.nan   , np.nan   , 4.05      , 4.90      , 5.39      , 8.52      , 11.40     , np.nan    , np.nan   ,  np.nan  ],
-          #error in table 2 of NP89: values need to be multiplied by e-6
-          'CGsat'  : [ np.nan, np.nan   , np.nan   , 3.222e-6     , 3.560e-6     , 4.111e-6     , 3.995e-6     , 3.600e-6     , np.nan    , np.nan   ,  np.nan  ],
-          'p'  :     [ np.nan, np.nan   , np.nan   , 4.        , 4.        , 6.        , 10.       , 12.       , np.nan    , np.nan   ,  np.nan  ],
-          'a'  :     [ np.nan, np.nan   , np.nan   , 0.387     , 0.219     , 0.148     , 0.084     , 0.083     , np.nan    , np.nan   ,  np.nan  ],
-          'C1sat'  : [ np.nan, np.nan   , np.nan   , 0.082     , 0.132     , 0.191     , 0.227     , 0.342     , np.nan    , np.nan   ,  np.nan  ],
-          'C2ref'  : [ np.nan, np.nan   , np.nan   , 3.9       , 1.8       , 0.8       , 0.6       , 0.3       , np.nan    , np.nan   ,  np.nan  ],
-        }
-        
-        for SPKEY in SP_LOOKUP.keys(): 
-            SP[SPKEY] = np.zeros_like(SP['isoil'],dtype=np.float)
-        
-        for i in range(11):
-            SELECT = (SP['isoil'] == i)
-            for SPKEY in SP_LOOKUP.keys(): 
-                SP[SPKEY][SELECT] = SP_LOOKUP[SPKEY][i]
-        
-        for SPKEY in list(SP_LOOKUP.keys())[-6:]: 
-            var = SPKEY
-            class_settings.__dict__[var] = np.float(SP[SPKEY])
-            
-        # only print the last parameter value in the plot
-        
-        #inputs.append(cp.deepcopy(class_settings))
-        #var = 'cala'
-        #class_settings.__dict__[var] = np.float(SP['cala0'])
-        #valnew = class_settings.__dict__[var]
-        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-        
-        #inputs.append(cp.deepcopy(class_settings))
-        #var = 'crhoc'
-        #class_settings.__dict__[var] = np.float(SP['crhoc'])
-        #valnew = class_settings.__dict__[var]
-        #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-        
-    key = "CERES"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
-        
-        CERES_start_date = dt.datetime(2000,3,1)
-        DT_CERES_START = (CERES_start_date + dt.timedelta(days=(int((class_settings.datetime - CERES_start_date ).days/61) * 61)))
-        DT_CERES_END   = DT_CERES_START +dt.timedelta(days=60)
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_"+DT_CERES_START.strftime("%Y%m%d")+"-"+DT_CERES_END.strftime("%Y%m%d")+".nc"
-        print("Reading afternoon cloud cover for "+str(class_settings.datetime)+" from "+input_fn)
-            
-        var = 'cc'
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        print(class_settings.lat,class_settings.lon)
-        
-        class_settings.__dict__[var] = np.nanmean(input_nc.variables['cldarea_total_1h'][idatetime:(idatetime+class_settings.runtime),ilat,ilon])/100.
-   
-        input_nc.close()
-    
-    key = "GIMMS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):    
-       
-    
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean.nc"
-        print("Reading Leag Area Index from "+input_fn)
-        var = 'LAI'
-        
-        #plt.plot
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        #idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        # divide by cveg, since it only reflects the LAI for the vegetation fraction and not for the entire (satellite) grid cell
-        
-        print('Warning! Dividing by cveg, which is: '+str(class_settings.cveg))
-        tarray = np.array(input_nc.variables['LAI'][:,ilatitude,ilongitude])/class_settings.cveg
-        
-        if np.isnan(tarray[idatetime]):
-            print("interpolating GIMMS cveg nan value")
-            
-            mask = np.isnan(tarray)
-            if np.where(mask)[0].shape[0] < 0.25*mask.shape[0]:
-                tarray[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), tarray[~mask])
-            else:
-                print("Warning. Could not interpolate GIMMS cveg nan value")
-                
-        class_settings.__dict__[var] = tarray[idatetime]
-        
-        input_nc.close()
- 
-    key = "IGBPDIS_ALPHA"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        var = 'alpha'
-        
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc"
-        print("Reading albedo from "+input_fn)
-    
-        input_nc = nc4.Dataset(input_fn,'r')
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][-1]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        
-        landfr = {}
-        for ltype in ['W','B','H','TC']:   
-            landfr[ltype] = input_nc.variables['f'+ltype][0,ilon,ilat]
-        
-        aweights = {'W':0.075,'TC':0.15,'H':0.22,'B':0.30}
-        
-        alpha=0.
-        for ltype in landfr.keys():
-            alpha += landfr[ltype]*aweights[ltype]
-        
-        
-        class_settings.__dict__[var] = alpha
-        input_nc.close()        
-        
-        
-    key = "ERAINT_ST"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly/stl1_'+str(class_settings.datetime.year)+"_3hourly.nc"
-        print("Reading soil temperature from "+input_fn)
-        
-        var = 'Tsoil'
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-        
-        
-        class_settings.__dict__[var] = input_nc.variables['stl1'][idatetime,ilatitude,ilongitude]
-        
-        input_fn = '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly/stl2_'+str(class_settings.datetime.year)+"_3hourly.nc"
-        var = 'T2'
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        idatetime = np.where(np.array(pcd.ncgetdatetime(input_nc))  >= class_settings.datetime)[0][0]
-        
-        ilatitude = np.where(input_nc.variables['latitude'][:] >= class_settings.lat)[0][-1]
-        ilongitude = np.where(input_nc.variables['longitude'][:] >= class_settings.lon)[0][0]
-        
-        
-        class_settings.__dict__[var] = input_nc.variables['stl2'][idatetime,ilatitude,ilongitude]
-        
-        
-        input_nc.close()
-        
-        
-    
-    #inputs.append(cp.deepcopy(class_settings))
-    #var = 'T2'
-    #valold = class_settings.__dict__[var]
-    #
-    #class_settings.__dict__[var] = 305.
-    #class_settings.__dict__['Tsoil'] = 302.
-    #valnew = class_settings.__dict__[var]
-    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-    
-    
-    
-    #inputs.append(cp.deepcopy(class_settings))
-    #
-    #var = 'Lambda'
-    #valold = class_settings.__dict__[var]
-    
-    ## I presume that the skin layer conductivity scales with both LAI and vegetation fraction, which seems ~ valid according to table 10.6 in CLASS-book. 
-    ## I need to ask Chiel.
-    ## I extrapolate from Lambda value of grass with Lambda = 5.9 W m-2 K-1, LAI = 2 and cveg = 0.85
-    #
-    #valnew = 5.9 / 2. / 0.85 * class_settings.__dict__['LAI'] * class_settings.__dict__['cveg'] 
-    #class_settings.__dict__[var] = valnew
-    #labels.append(var+': '+format(valold,"0.2g")+'->'+format(valnew,"0.2g"))
-    
-    
-    
-    key = "GLAS"
-    if ((kwargs == {}) or ((key in kwargs.keys()) and (kwargs[key]))):          
-       
-        input_fn = "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc"
-        print("Reading canopy height for determining roughness length from "+input_fn)
-        var = 'z0m'
-    
-        
-        #plt.plot
-        
-        input_nc = nc4.Dataset(input_fn,'r')
-        
-        ilat = np.where(input_nc.variables['lat'][:] >= class_settings.lat)[0][0]
-        ilon = np.where(input_nc.variables['lon'][:] >= class_settings.lon)[0][0]
-        
-        testval = np.float64(input_nc.variables['Band1'][ilat,ilon])/10.
-        
-        lowerlimit = 0.01
-        if testval < lowerlimit:
-            print('forest canopy height very very small. We take a value of '+str(lowerlimit))
-            class_settings.__dict__[var] = lowerlimit
-        else:
-            class_settings.__dict__[var] = testval
-        
-        class_settings.__dict__['z0h'] =  class_settings.__dict__['z0m']/10.
-        
-        
-        input_nc.close()
-        

From 60ae5ece37a3f681c988552303c1f1bf098795c0 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 09:14:53 +0200
Subject: [PATCH 054/129] modified:   .gitignore modified:  
 class4gl/interface/interface.py deleted:   
 class4gl/interface/interface_eval.py deleted:   
 class4gl/interface/interface_eval_stations.py modified:  
 class4gl/interface/interface_stations.py

---
 .gitignore                                    |   2 +-
 class4gl/interface/interface.py               | 433 ++++++++++--------
 class4gl/interface/interface_eval.py          | 375 ---------------
 class4gl/interface/interface_eval_stations.py | 290 ------------
 class4gl/interface/interface_stations.py      | 343 +++++++-------
 5 files changed, 411 insertions(+), 1032 deletions(-)
 delete mode 100644 class4gl/interface/interface_eval.py
 delete mode 100644 class4gl/interface/interface_eval_stations.py

diff --git a/.gitignore b/.gitignore
index d5a1137..b65667c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,7 +7,7 @@ class4gl/__pycache__/*
 .*
 build/
 dist/
-
+trash/
 */__pychache__/
 *.py[cod]
 *$py.class
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index f6d718f..72b970e 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -13,6 +13,10 @@
 parser.add_argument('--experiments')
 parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
 parser.add_argument('--load_globaldata',default=False)
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--show_control_parameters',default=True)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--figure_filename_2',default=None)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -118,192 +122,249 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-# # the lines below activate TaylorPlots but it is disabled for now
-# fig = plt.figure(figsize=(10,7))   #width,height
-# i = 1                                                                           
-# axes = {}         
-# axes_taylor = {}         
-# 
-# colors = ['r','g','b','m']
-# symbols = ['*','x','+']
-# dias = {}
-# 
-# for varkey in ['h','theta','q']:                                                    
-#     axes[varkey] = fig.add_subplot(2,3,i)                                       
-#     #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
-# 
-#     #print(obs.std())
-#     dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-#     if i == 0:
-#         dias[varkey]._ax.axis["left"].label.set_text(\
-#             "Standard deviation (model) / Standard deviation (observations)")
-#         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-#         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
-#     #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-#     # Q95 = obs.quantile(0.95)
-#     # Q95 = obs.quantile(0.90)
-#     # Add RMS contours, and label them
-#     contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
-#     dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
-#     #dia._ax.set_title(season.capitalize())
-# 
-#     dias[varkey].add_grid()
-# 
-# 
-#     #dia.ax.plot(x99,y99,color='k')
-# 
-#     
-#     for ikey,key in enumerate(args.experiments.split(';')):
-#         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-#         # clearsky = (cc < 0.05)
-#         # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-#         # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-#         mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-#         obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-#         x, y = obs.values,mod.values
-#         print(key,len(obs.values))
-# 
-#         STD_OBS = obs.std()
-#         #scores
-#         PR = pearsonr(mod,obs)[0]
-#         RMSE = rmse(obs,mod)                                               
-#         BIAS = np.mean(mod) - np.mean(obs)
-#         STD = mod.std()
-#         
-#         # fit = np.polyfit(x,y,deg=1)
-#         # axes[varkey].plot(x, fit[0] * x + fit[1],\
-#         #                   color=colors[ikey],alpha=0.8,lw=2,\
-#         #                   label=key+", "+\
-#         #                               'R = '+str(round(PR,3))+', '+\
-#         #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-#         #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-#         # axes[varkey].legend(fontsize=5)
-#         
-#         # print(STD)
-#         # print(PR)
-#         dias[varkey].add_sample(STD/STD_OBS, PR,
-#                        marker='o', ms=5, ls='',
-#                        #mfc='k', mec='k', # B&W
-#                        mfc=colors[ikey], mec=colors[ikey], # Colors
-#                        label=key)
-# 
-#     # put ticker position, see
-#     # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
-#     # dia.ax.axis['bottom'].
-#     # dia.ax.axis['left'].
-#     # dia.ax.axis['left'].
-# 
-#     i += 1
-# 
-# i = 0
-# for varkey in ['h','theta','q']:                                                    
-#     ikey = 0
-#     key = list(args.experiments.split(';'))[ikey]
-#     cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-#     clearsky = (cc < 0.05)
-# 
-#     mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-#     obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-# 
-# 
-#     nbins=40       
-#     x, y = obs.values,mod.values
-#     
-#     xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
-#     zi = np.zeros_like(xi)*np.nan       
-#     for ibin in range(nbins):
-#         xmin = x.min() + ibin * (x.max() - x.min())/nbins
-#         xmax = xmin + (x.max() - x.min())/nbins
-#         in_bin = ((x >= xmin) & (x < xmax))
-#         ybin = y[in_bin]
-#         xbin = x[in_bin]
-#         if len(ybin) > 20:
-#             k = kde.gaussian_kde((ybin))
-#             zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
-#     zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
-#     zi_int = zi.cumsum(axis=1) 
-#                  #  label=key+", "+\
-#                  #                    'R = '+str(round(PR[0],3))+', '+\
-#                  #                    'RMSE = '+str(round(RMSE,5))+', '+\
-#                  #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-#     axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
-#             colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
-#     axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
-#             colors=['darkred'],alpha=0.5,)
-# 
-# 
-#     latex = {}
-#     latex['dthetadt'] =  r'$d \theta / dt $'
-#     latex['dqdt'] =      r'$d q / dt $'
-#     latex['dhdt'] =      r'$d h / dt $'
-# 
-#     axes[varkey].set_xlabel('observations')     
-#     axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-# 
-#     PR = pearsonr(mod,obs)[0]
-#     RMSE = rmse(obs,mod)                                               
-#     BIAS = np.mean(mod) - np.mean(obs)
-#     STD = mod.std()
-# 
-#     axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
-#                                   'R = '+str(round(PR,3))+', '+\
-#                                   'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-#                                   'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
-#                          s=0.1,alpha=0.14,color='k')
-#     axes[varkey].legend(fontsize=5)
-#                    
-#     axes[varkey].set_xlabel('observations')     
-#     if i==0:                                    
-#         axes[varkey].set_ylabel('model')                                            
-#     abline(1,0,axis=axes[varkey])
-#     i +=1
-# 
-# 
-# 
-# # legend for different forcing simulations (colors)
-# ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-# leg = []
-# for ikey,key in enumerate(args.experiments.split(';')):
-#     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
-#     leg.append(leg1)
-# ax.axis('off')
-# #leg1 =
-# ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
-# 
-# 
-# # # legend for different stations (symbols)
-# # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-# # leg = []
-# # isymbol = 0
-# # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-# #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
-# #     leg.append(leg1)
-# #     isymbol += 1
-# # 
-# # # symbol for all stations
-# # leg1, = ax.plot([],'ko',markersize=10)
-# # leg.append(leg1)
-# 
-# 
-# # ax.axis('off')
-# # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
-# 
-# 
-# fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
-# 
-# 
-# #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
-# # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
-# # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
-# fig.show()  
-
-
-
-
-
-
-
-
+if args.show_figures:
+    # the lines below activate TaylorPlots but it is disabled for now
+    fig = plt.figure(figsize=(10,7))   #width,height
+    i = 1                                                                           
+    axes = {}         
+    axes_taylor = {}         
+    
+    colors = ['r','g','b','m']
+    symbols = ['*','x','+']
+    dias = {}
+    
+    for varkey in ['h','theta','q']:                                                    
+        axes[varkey] = fig.add_subplot(2,3,i)                                       
+        #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+    
+        #print(obs.std())
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+        if i == 0:
+            dias[varkey]._ax.axis["left"].label.set_text(\
+                "Standard deviation (model) / Standard deviation (observations)")
+            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # Q95 = obs.quantile(0.95)
+        # Q95 = obs.quantile(0.90)
+        # Add RMS contours, and label them
+        contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+        dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+        #dia._ax.set_title(season.capitalize())
+    
+        dias[varkey].add_grid()
+    
+    
+        #dia.ax.plot(x99,y99,color='k')
+    
+        
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+            # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+            # clearsky = (cc < 0.05)
+            # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+            # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            x, y = obs.values,mod.values
+            print(key,len(obs.values))
+    
+            STD_OBS = obs.std()
+            #scores
+            PR = pearsonr(mod,obs)[0]
+            RMSE = rmse(obs,mod)                                               
+            BIAS = np.mean(mod) - np.mean(obs)
+            STD = mod.std()
+            
+            # fit = np.polyfit(x,y,deg=1)
+            # axes[varkey].plot(x, fit[0] * x + fit[1],\
+            #                   color=colors[ikey],alpha=0.8,lw=2,\
+            #                   label=key+", "+\
+            #                               'R = '+str(round(PR,3))+', '+\
+            #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+            #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+            # axes[varkey].legend(fontsize=5)
+            
+            # print(STD)
+            # print(PR)
+            dias[varkey].add_sample(STD/STD_OBS, PR,
+                           marker='o', ms=5, ls='',
+                           #mfc='k', mec='k', # B&W
+                           mfc=colors[ikey], mec=colors[ikey], # Colors
+                           label=key)
+    
+        # put ticker position, see
+        # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+        # dia.ax.axis['bottom'].
+        # dia.ax.axis['left'].
+        # dia.ax.axis['left'].
+    
+        i += 1
+    
+    i = 0
+    for varkey in ['h','theta','q']:                                                    
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        clearsky = (cc < 0.05)
+    
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+    
+    
+        nbins=40       
+        x, y = obs.values,mod.values
+        
+        xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+        zi = np.zeros_like(xi)*np.nan       
+        for ibin in range(nbins):
+            xmin = x.min() + ibin * (x.max() - x.min())/nbins
+            xmax = xmin + (x.max() - x.min())/nbins
+            in_bin = ((x >= xmin) & (x < xmax))
+            ybin = y[in_bin]
+            xbin = x[in_bin]
+            if len(ybin) > 20:
+                k = kde.gaussian_kde((ybin))
+                zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+        zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+        zi_int = zi.cumsum(axis=1) 
+                     #  label=key+", "+\
+                     #                    'R = '+str(round(PR[0],3))+', '+\
+                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+                colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+                colors=['darkred'],alpha=0.5,)
+    
+    
+        latex = {}
+        latex['dthetadt'] =  r'$d \theta / dt $'
+        latex['dqdt'] =      r'$d q / dt $'
+        latex['dhdt'] =      r'$d h / dt $'
+    
+        axes[varkey].set_xlabel('observations')     
+        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+    
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+    
+        axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+                                      'R = '+str(round(PR,3))+', '+\
+                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+                             s=0.1,alpha=0.14,color='k')
+        axes[varkey].legend(fontsize=5)
+                       
+        axes[varkey].set_xlabel('observations')     
+        if i==0:                                    
+            axes[varkey].set_ylabel('model')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+    
+    
+    
+    # legend for different forcing simulations (colors)
+    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    leg = []
+    for ikey,key in enumerate(args.experiments.strip().split(' ')):
+        leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+        leg.append(leg1)
+    ax.axis('off')
+    #leg1 =
+    ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+    
+    
+    # # legend for different stations (symbols)
+    # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # isymbol = 0
+    # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+    #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+    #     leg.append(leg1)
+    #     isymbol += 1
+    # 
+    # # symbol for all stations
+    # leg1, = ax.plot([],'ko',markersize=10)
+    # leg.append(leg1)
+    
+    
+    # ax.axis('off')
+    # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    
+    
+    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+    
+    
+    #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+    # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+    # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+    
+    if args.figure_filename is not None:
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+    fig.show()  
+    
+    
+    if bool(args.show_control_parameters):
+        import seaborn as sns
+        sns.set()
+        fig = pl.figure(figsize=(12,8))
+        i = 1
+        axes = {}
+        data_all = pd.DataFrame()
+        
+        
+        
+        # #for varkey in ['theta','q']:     
+        # EF =\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
+        # EF[EF<0] = np.nan
+        # EF[EF>1] = np.nan
+        
+        # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
+        
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        for varkey in ['h','theta','q']:
+            for input_key in ['wg','cc']:
+                data_all = pd.DataFrame()
+                data = pd.DataFrame()
+                data[varkey] = ""
+                data[varkey] = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+                data["source"] = "soundings"
+                data_all = pd.concat([data_all,data])
+                    
+                data = pd.DataFrame()
+                
+                
+                data[varkey] = ""
+                data[varkey] = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+                data["source"] = "model"
+                data_all = pd.concat([data_all,data])
+                data_input = pd.concat([c4gldata[key].frames['stats']['records_all_stations_ini'],
+                                       c4gldata[key].frames['stats']['records_all_stations_ini']],axis=0)
+                input_key_full = input_key + "["+units[input_key]+"]"
+                data_all[input_key_full] =  pd.cut(x=data_input[input_key].values,bins=10)
+                
+                varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+                data_all = data_all.rename(columns={varkey:varkey_full})
+                
+                qvalmax = data_all[varkey_full].quantile(0.999)
+                qvalmin = data_all[varkey_full].quantile(0.001)
+                data_all = data_all[(data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)]
+                
+                ax = fig.add_subplot(3,2,i)
+                sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+                ax.grid()
+                plt.xticks(rotation=45,ha='right')
+                i +=1
+                plt.legend(loc='lower right')
+        fig.tight_layout()
+        if args.figure_filename_2 is not None:
+            fig.savefig(args.figure_filename,dpi=200); print("Image file written to:", args.figure_filename)
+        fig.show()
 
 
 
diff --git a/class4gl/interface/interface_eval.py b/class4gl/interface/interface_eval.py
deleted file mode 100644
index f72d4fc..0000000
--- a/class4gl/interface/interface_eval.py
+++ /dev/null
@@ -1,375 +0,0 @@
-import numpy as np
-
-import pandas as pd
-import sys
-
-import matplotlib
-matplotlib.use('TkAgg')
-
-import argparse
-parser = argparse.ArgumentParser()
-parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--experiments')
-parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load_globaldata',default=True)
-parser.add_argument('--show_control_parameters',default=True)
-parser.add_argument('--figure_filename',default=None)
-parser.add_argument('--figure_filename_2',default=None)
-args = parser.parse_args()
-
-print('Adding python library:',args.c4gl_path_lib)
-sys.path.insert(0, args.c4gl_path_lib)
-from interface_multi import c4gl_interface_soundings,get_record_yaml
-from class4gl import class4gl_input, data_global,class4gl,units
-#from sklearn.metrics import mean_squared_error
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-#import seaborn.apionly as sns
-import pylab as pl
-import numpy as np
-import matplotlib.pyplot as plt
-from scipy.stats import kde
-from scipy.stats import pearsonr                                                
-from taylorDiagram import TaylorDiagram
-from matplotlib import ticker
-# import importlib
-# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
-
-
-
-
-def abline(slope, intercept,axis):
-    """Plot a line from slope and intercept"""
-    #axis = plt.gca()
-    x_vals = np.array(axis.get_xlim())
-    y_vals = intercept + slope * x_vals
-    axis.plot(x_vals, y_vals, 'k--')
-
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    rmse_temp = (y_actual_temp - y_predicted_temp)
-    rmse_temp = np.mean(rmse_temp*rmse_temp)
-    return np.sqrt(rmse_temp)
-
-
-
-
-
-# EXPS  =\
-# {
-# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-# }
-
-if args.load_globaldata:
-    # iniitialize global data
-    globaldata = data_global()
-    # ...  and load initial data pages
-    globaldata.load_datasets(recalc=0)
-else:
-    globaldata = None
-
-c4gldata = {}
-for key in args.experiments.strip(' ').split(' '):
-    
-    c4gldata[key] = c4gl_interface_soundings( \
-                      args.path_experiments+'/'+key+'/',\
-                      args.path_forcing+'/',\
-                      globaldata,\
-                      refetch_records=False
-                    )
-
-# the lines below activate TaylorPlots but it is disabled for now
-fig = plt.figure(figsize=(10,7))   #width,height
-i = 1                                                                           
-axes = {}         
-axes_taylor = {}         
-
-colors = ['r','g','b','m']
-symbols = ['*','x','+']
-dias = {}
-
-for varkey in ['h','theta','q']:                                                    
-    axes[varkey] = fig.add_subplot(2,3,i)                                       
-    #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
-
-    #print(obs.std())
-    dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-    if i == 0:
-        dias[varkey]._ax.axis["left"].label.set_text(\
-            "Standard deviation (model) / Standard deviation (observations)")
-        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
-    #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-    # Q95 = obs.quantile(0.95)
-    # Q95 = obs.quantile(0.90)
-    # Add RMS contours, and label them
-    contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
-    dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
-    #dia._ax.set_title(season.capitalize())
-
-    dias[varkey].add_grid()
-
-
-    #dia.ax.plot(x99,y99,color='k')
-
-    
-    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-        # clearsky = (cc < 0.05)
-        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-        x, y = obs.values,mod.values
-        print(key,len(obs.values))
-
-        STD_OBS = obs.std()
-        #scores
-        PR = pearsonr(mod,obs)[0]
-        RMSE = rmse(obs,mod)                                               
-        BIAS = np.mean(mod) - np.mean(obs)
-        STD = mod.std()
-        
-        # fit = np.polyfit(x,y,deg=1)
-        # axes[varkey].plot(x, fit[0] * x + fit[1],\
-        #                   color=colors[ikey],alpha=0.8,lw=2,\
-        #                   label=key+", "+\
-        #                               'R = '+str(round(PR,3))+', '+\
-        #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-        #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-        # axes[varkey].legend(fontsize=5)
-        
-        # print(STD)
-        # print(PR)
-        dias[varkey].add_sample(STD/STD_OBS, PR,
-                       marker='o', ms=5, ls='',
-                       #mfc='k', mec='k', # B&W
-                       mfc=colors[ikey], mec=colors[ikey], # Colors
-                       label=key)
-
-    # put ticker position, see
-    # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
-    # dia.ax.axis['bottom'].
-    # dia.ax.axis['left'].
-    # dia.ax.axis['left'].
-
-    i += 1
-
-i = 0
-for varkey in ['h','theta','q']:                                                    
-    ikey = 0
-    key = list(args.experiments.strip().split(' '))[ikey]
-    cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-    clearsky = (cc < 0.05)
-
-    mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-    obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-
-
-    nbins=40       
-    x, y = obs.values,mod.values
-    
-    xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
-    zi = np.zeros_like(xi)*np.nan       
-    for ibin in range(nbins):
-        xmin = x.min() + ibin * (x.max() - x.min())/nbins
-        xmax = xmin + (x.max() - x.min())/nbins
-        in_bin = ((x >= xmin) & (x < xmax))
-        ybin = y[in_bin]
-        xbin = x[in_bin]
-        if len(ybin) > 20:
-            k = kde.gaussian_kde((ybin))
-            zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
-    zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
-    zi_int = zi.cumsum(axis=1) 
-                 #  label=key+", "+\
-                 #                    'R = '+str(round(PR[0],3))+', '+\
-                 #                    'RMSE = '+str(round(RMSE,5))+', '+\
-                 #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-    axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
-            colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
-    axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
-            colors=['darkred'],alpha=0.5,)
-
-
-    latex = {}
-    latex['dthetadt'] =  r'$d \theta / dt $'
-    latex['dqdt'] =      r'$d q / dt $'
-    latex['dhdt'] =      r'$d h / dt $'
-
-    axes[varkey].set_xlabel('observations')     
-    axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-
-    PR = pearsonr(mod,obs)[0]
-    RMSE = rmse(obs,mod)                                               
-    BIAS = np.mean(mod) - np.mean(obs)
-    STD = mod.std()
-
-    axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
-                                  'R = '+str(round(PR,3))+', '+\
-                                  'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-                                  'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
-                         s=0.1,alpha=0.14,color='k')
-    axes[varkey].legend(fontsize=5)
-                   
-    axes[varkey].set_xlabel('observations')     
-    if i==0:                                    
-        axes[varkey].set_ylabel('model')                                            
-    abline(1,0,axis=axes[varkey])
-    i +=1
-
-
-
-# legend for different forcing simulations (colors)
-ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-leg = []
-for ikey,key in enumerate(args.experiments.strip().split(' ')):
-    leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
-    leg.append(leg1)
-ax.axis('off')
-#leg1 =
-ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
-
-
-# # legend for different stations (symbols)
-# ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-# leg = []
-# isymbol = 0
-# for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-#     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
-#     leg.append(leg1)
-#     isymbol += 1
-# 
-# # symbol for all stations
-# leg1, = ax.plot([],'ko',markersize=10)
-# leg.append(leg1)
-
-
-# ax.axis('off')
-# ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
-
-
-fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
-
-
-#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
-# figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
-# fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
-
-if args.figure_filename is not None:
-    fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
-fig.show()  
-
-
-if bool(args.show_control_parameters):
-    import seaborn as sns
-    sns.set()
-    fig = pl.figure(figsize=(12,8))
-    i = 1
-    axes = {}
-    data_all = pd.DataFrame()
-    
-    
-    
-    # #for varkey in ['theta','q']:     
-    # EF =\
-    #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
-    #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
-    # EF[EF<0] = np.nan
-    # EF[EF>1] = np.nan
-    
-    # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
-    
-    ikey = 0
-    key = list(args.experiments.strip().split(' '))[ikey]
-    for varkey in ['h','theta','q']:
-        for input_key in ['wg','cc']:
-            data_all = pd.DataFrame()
-            data = pd.DataFrame()
-            data[varkey] = ""
-            data[varkey] = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-            data["source"] = "soundings"
-            data_all = pd.concat([data_all,data])
-                
-            data = pd.DataFrame()
-            
-            
-            data[varkey] = ""
-            data[varkey] = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-            data["source"] = "model"
-            data_all = pd.concat([data_all,data])
-            data_input = pd.concat([c4gldata[key].frames['stats']['records_all_stations_ini'],
-                                   c4gldata[key].frames['stats']['records_all_stations_ini']],axis=0)
-            input_key_full = input_key + "["+units[input_key]+"]"
-            data_all[input_key_full] =  pd.cut(x=data_input[input_key].values,bins=10)
-            
-            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
-            data_all = data_all.rename(columns={varkey:varkey_full})
-            
-            qvalmax = data_all[varkey_full].quantile(0.999)
-            qvalmin = data_all[varkey_full].quantile(0.001)
-            data_all = data_all[(data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)]
-            
-            ax = fig.add_subplot(3,2,i)
-            sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
-            ax.grid()
-            plt.xticks(rotation=45,ha='right')
-            i +=1
-            plt.legend(loc='lower right')
-    fig.tight_layout()
-    if args.figure_filename_2 is not None:
-        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:", args.figure_filename)
-    fig.show()
-
-
-
-
-
-
-
-
-
-
diff --git a/class4gl/interface/interface_eval_stations.py b/class4gl/interface/interface_eval_stations.py
deleted file mode 100644
index 49deebc..0000000
--- a/class4gl/interface/interface_eval_stations.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import numpy as np
-
-import pandas as pd
-import sys
-
-import matplotlib
-matplotlib.use('TkAgg')
-
-import argparse
-parser = argparse.ArgumentParser()
-parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--experiments')
-parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load_globaldata',default=True)
-parser.add_argument('--figure_filename',default=None)
-args = parser.parse_args()
-
-print('Adding python library:',args.c4gl_path_lib)
-sys.path.insert(0, args.c4gl_path_lib)
-from interface_multi import c4gl_interface_soundings,get_record_yaml
-from class4gl import class4gl_input, data_global,class4gl,units
-#from sklearn.metrics import mean_squared_error
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-#import seaborn.apionly as sns
-import pylab as pl
-import numpy as np
-import matplotlib.pyplot as plt
-from scipy.stats import kde
-from scipy.stats import pearsonr                                                
-from taylorDiagram import TaylorDiagram
-from matplotlib import ticker
-# import importlib
-# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
-
-
-
-
-
-latex = {}
-latex['dthetadt'] =  r'$d \theta / dt $'
-latex['dqdt'] =      r'$d q / dt $'
-latex['dhdt'] =      r'$d h / dt $'
-
-def abline(slope, intercept,axis):
-    """Plot a line from slope and intercept"""
-    #axis = plt.gca()
-    x_vals = np.array(axis.get_xlim())
-    y_vals = intercept + slope * x_vals
-    axis.plot(x_vals, y_vals, 'k--')
-
-def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
-    """ calculated root mean squared error 
-        
-    
-        INPUT:
-            y_actual: reference dataset
-            y_predicted: predicting dataset
-            z_actual: coordinate values of reference dataset
-            z_predicted: coordinate values of the predicting dataset
-            
-            filternan_actual: throw away reference values that have nans
-    """
-    
-    y_actual_temp = np.array(y_actual)
-    y_predicted_temp = np.array(y_predicted)
-    
-    if z_actual is not None:
-        z_actual_temp = np.array(z_actual)
-    else: 
-        z_actual_temp = None
-        
-    
-    if filternan_actual:
-        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
-        if z_actual_temp is not None:
-            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
-    
-    if ((z_actual_temp is not None) or (z_predicted is not None)):    
-        if (z_actual_temp is None) or (z_predicted is None):
-            raise ValueError('Input z_actual and z_predicted need \
-                              to be specified simultaneously.')
-        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
-    
-    else:
-        # this catches the situation that y_predicted is a single value (eg., 
-        # which is the case for evaluating eg., mixed-layer estimates)
-        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
-        
-    rmse_temp = (y_actual_temp - y_predicted_temp)
-    rmse_temp = np.mean(rmse_temp*rmse_temp)
-    return np.sqrt(rmse_temp)
-
-
-
-
-
-# EXPS  =\
-# {
-# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-# }
-
-if args.load_globaldata:
-    # iniitialize global data
-    globaldata = data_global()
-    # ...  and load initial data pages
-    globaldata.load_datasets(recalc=0)
-else:
-    globaldata = None
-
-c4gldata = {}
-for key in args.experiments.strip(' ').split(' '):
-    
-    c4gldata[key] = c4gl_interface_soundings( \
-                      args.path_experiments+'/'+key+'/',\
-                      args.path_forcing+'/',\
-                      globaldata,\
-                      refetch_records=False
-                    )
-
-fig = plt.figure(figsize=(10,7))   #width,height
-i = 1                                                                           
-axes = {}         
-axes_taylor = {}         
-
-colors = ['r','g','b','m']
-symbols = ['*','x','+']
-dias = {}
-
-for varkey in ['h','theta','q']:                                                    
-    axes[varkey] = fig.add_subplot(2,3,i)                                       
-    #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
-
-    #print(obs.std())
-    obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-    STD_OBS = obs.std()
-    dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-    if i == 2:
-        dias[varkey]._ax.axis["left"].label.set_text(\
-            "Standard deviation (model) / Standard deviation (observations)")
-        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
-    #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-    # Q95 = obs.quantile(0.95)
-    # Q95 = obs.quantile(0.90)
-    # Add RMS contours, and label them
-    contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
-    dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
-    #dia._ax.set_title(season.capitalize())
-
-    dias[varkey].add_grid()
-
-
-    #dia.ax.plot(x99,y99,color='k')
-
-    
-    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-        x, y = obs.values,mod.values
-        print(key,len(obs.values))
-
-        #scores
-        PR = pearsonr(mod,obs)[0]
-        RMSE = rmse(obs,mod)                                               
-        BIAS = np.mean(mod) - np.mean(obs)
-        STD = mod.std()
-        
-        fit = np.polyfit(x,y,deg=1)
-        axes[varkey].plot(x, fit[0] * x + fit[1],\
-                          color=colors[ikey],alpha=0.8,lw=2,\
-                          label=key+", "+\
-                                      'R = '+str(round(PR,3))+', '+\
-                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-        axes[varkey].legend(fontsize=5)
-        
-        # print(STD)
-        # print(PR)
-        dias[varkey].add_sample(STD/STD_OBS, PR,
-                       marker='o', ms=5, ls='',
-                       #mfc='k', mec='k', # B&W
-                       mfc=colors[ikey], mec=colors[ikey], # Colors
-                       label=key)
-
-    # put ticker position, see
-    # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
-    # dia.ax.axis['bottom'].
-    # dia.ax.axis['left'].
-    # dia.ax.axis['left'].
-
-    i += 1
-
-i = 0
-for varkey in ['h','theta','q']:                                                    
-    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-        isymbol = 0
-        for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-            indices =  (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name)
-            station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices]
-            station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices]
-
-            axes[varkey].scatter(station_obs,station_mod,marker=symbols[isymbol],color=colors[ikey])
-                     #  label=key+", "+\
-                     #                    'R = '+str(round(PR[0],3))+', '+\
-                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
-                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-
-
-
-        # # pl.scatter(obs,mod,label=key+", "+\
-        # #                              'R = '+str(round(PR[0],3))+', '+\
-        # #                              'RMSE = '+str(round(RMSE,5))+', '+\
-        # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-            
-            dias[varkey].add_sample(station_mod.std()/station_obs.std(),
-                           pearsonr(station_mod,station_obs)[0],
-                           marker=symbols[isymbol], ms=5, ls='',
-                           #mfc='k', mec='k', # B&W
-                           mfc=colors[ikey], mec=colors[ikey], # Colors
-                           label=key)
-            isymbol += 1
-
-
-        axes[varkey].set_xlabel('observations')     
-        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-    if i==0:                                    
-        axes[varkey].set_ylabel('model')                                            
-    abline(1,0,axis=axes[varkey])
-    i +=1
-
-
-
-# legend for different forcing simulations (colors)
-ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-leg = []
-for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-    leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
-    leg.append(leg1)
-ax.axis('off')
-#leg1 =
-ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
-
-
-# legend for different stations (symbols)
-ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-leg = []
-isymbol = 0
-for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-    leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
-    leg.append(leg1)
-    isymbol += 1
-
-# symbol for all stations
-leg1, = ax.plot([],'ko',markersize=10)
-leg.append(leg1)
-
-
-ax.axis('off')
-ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
-
-
-fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
-
-
-#pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
-#figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/iops_eval_report.png'
-
-if args.figure_filename is not None:
-    fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
-fig.show()  
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index f6d718f..9e6b82a 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -12,7 +12,9 @@
 parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--experiments')
 parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--load_globaldata',default=False)
+parser.add_argument('--load_globaldata',default=False) # load the data needed for the interface
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--figure_filename',default=None)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -36,6 +38,12 @@
 
 
 
+
+latex = {}
+latex['dthetadt'] =  r'$d \theta / dt $'
+latex['dqdt'] =      r'$d q / dt $'
+latex['dhdt'] =      r'$d h / dt $'
+
 def abline(slope, intercept,axis):
     """Plot a line from slope and intercept"""
     #axis = plt.gca()
@@ -100,7 +108,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 # # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 # }
 
-if bool(args.load_globaldata):
+if args.load_globaldata:
     # iniitialize global data
     globaldata = data_global()
     # ...  and load initial data pages
@@ -118,184 +126,159 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-# # the lines below activate TaylorPlots but it is disabled for now
-# fig = plt.figure(figsize=(10,7))   #width,height
-# i = 1                                                                           
-# axes = {}         
-# axes_taylor = {}         
-# 
-# colors = ['r','g','b','m']
-# symbols = ['*','x','+']
-# dias = {}
-# 
-# for varkey in ['h','theta','q']:                                                    
-#     axes[varkey] = fig.add_subplot(2,3,i)                                       
-#     #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
-# 
-#     #print(obs.std())
-#     dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-#     if i == 0:
-#         dias[varkey]._ax.axis["left"].label.set_text(\
-#             "Standard deviation (model) / Standard deviation (observations)")
-#         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-#         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
-#     #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-#     # Q95 = obs.quantile(0.95)
-#     # Q95 = obs.quantile(0.90)
-#     # Add RMS contours, and label them
-#     contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
-#     dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
-#     #dia._ax.set_title(season.capitalize())
-# 
-#     dias[varkey].add_grid()
-# 
-# 
-#     #dia.ax.plot(x99,y99,color='k')
-# 
-#     
-#     for ikey,key in enumerate(args.experiments.split(';')):
-#         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-#         # clearsky = (cc < 0.05)
-#         # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-#         # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-#         mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-#         obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-#         x, y = obs.values,mod.values
-#         print(key,len(obs.values))
-# 
-#         STD_OBS = obs.std()
-#         #scores
-#         PR = pearsonr(mod,obs)[0]
-#         RMSE = rmse(obs,mod)                                               
-#         BIAS = np.mean(mod) - np.mean(obs)
-#         STD = mod.std()
-#         
-#         # fit = np.polyfit(x,y,deg=1)
-#         # axes[varkey].plot(x, fit[0] * x + fit[1],\
-#         #                   color=colors[ikey],alpha=0.8,lw=2,\
-#         #                   label=key+", "+\
-#         #                               'R = '+str(round(PR,3))+', '+\
-#         #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-#         #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-#         # axes[varkey].legend(fontsize=5)
-#         
-#         # print(STD)
-#         # print(PR)
-#         dias[varkey].add_sample(STD/STD_OBS, PR,
-#                        marker='o', ms=5, ls='',
-#                        #mfc='k', mec='k', # B&W
-#                        mfc=colors[ikey], mec=colors[ikey], # Colors
-#                        label=key)
-# 
-#     # put ticker position, see
-#     # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
-#     # dia.ax.axis['bottom'].
-#     # dia.ax.axis['left'].
-#     # dia.ax.axis['left'].
-# 
-#     i += 1
-# 
-# i = 0
-# for varkey in ['h','theta','q']:                                                    
-#     ikey = 0
-#     key = list(args.experiments.split(';'))[ikey]
-#     cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-#     clearsky = (cc < 0.05)
-# 
-#     mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-#     obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-# 
-# 
-#     nbins=40       
-#     x, y = obs.values,mod.values
-#     
-#     xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
-#     zi = np.zeros_like(xi)*np.nan       
-#     for ibin in range(nbins):
-#         xmin = x.min() + ibin * (x.max() - x.min())/nbins
-#         xmax = xmin + (x.max() - x.min())/nbins
-#         in_bin = ((x >= xmin) & (x < xmax))
-#         ybin = y[in_bin]
-#         xbin = x[in_bin]
-#         if len(ybin) > 20:
-#             k = kde.gaussian_kde((ybin))
-#             zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
-#     zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
-#     zi_int = zi.cumsum(axis=1) 
-#                  #  label=key+", "+\
-#                  #                    'R = '+str(round(PR[0],3))+', '+\
-#                  #                    'RMSE = '+str(round(RMSE,5))+', '+\
-#                  #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-#     axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
-#             colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
-#     axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
-#             colors=['darkred'],alpha=0.5,)
-# 
-# 
-#     latex = {}
-#     latex['dthetadt'] =  r'$d \theta / dt $'
-#     latex['dqdt'] =      r'$d q / dt $'
-#     latex['dhdt'] =      r'$d h / dt $'
-# 
-#     axes[varkey].set_xlabel('observations')     
-#     axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-# 
-#     PR = pearsonr(mod,obs)[0]
-#     RMSE = rmse(obs,mod)                                               
-#     BIAS = np.mean(mod) - np.mean(obs)
-#     STD = mod.std()
-# 
-#     axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
-#                                   'R = '+str(round(PR,3))+', '+\
-#                                   'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-#                                   'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
-#                          s=0.1,alpha=0.14,color='k')
-#     axes[varkey].legend(fontsize=5)
-#                    
-#     axes[varkey].set_xlabel('observations')     
-#     if i==0:                                    
-#         axes[varkey].set_ylabel('model')                                            
-#     abline(1,0,axis=axes[varkey])
-#     i +=1
-# 
-# 
-# 
-# # legend for different forcing simulations (colors)
-# ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-# leg = []
-# for ikey,key in enumerate(args.experiments.split(';')):
-#     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
-#     leg.append(leg1)
-# ax.axis('off')
-# #leg1 =
-# ax.legend(leg,list(args.experiments.split(';')),loc=2,fontsize=10)
-# 
-# 
-# # # legend for different stations (symbols)
-# # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-# # leg = []
-# # isymbol = 0
-# # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
-# #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
-# #     leg.append(leg1)
-# #     isymbol += 1
-# # 
-# # # symbol for all stations
-# # leg1, = ax.plot([],'ko',markersize=10)
-# # leg.append(leg1)
-# 
-# 
-# # ax.axis('off')
-# # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
-# 
-# 
-# fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
-# 
-# 
-# #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
-# # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
-# # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
-# fig.show()  
+if bool(args.make_figures):
+    fig = plt.figure(figsize=(10,7))   #width,height
+    i = 1                                                                           
+    axes = {}         
+    axes_taylor = {}         
+    
+    colors = ['r','g','b','m']
+    symbols = ['*','x','+']
+    dias = {}
+    
+    for varkey in ['h','theta','q']:                                                    
+        axes[varkey] = fig.add_subplot(2,3,i)                                       
+        #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+    
+        #print(obs.std())
+        obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+        STD_OBS = obs.std()
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+        if i == 2:
+            dias[varkey]._ax.axis["left"].label.set_text(\
+                "Standard deviation (model) / Standard deviation (observations)")
+            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # Q95 = obs.quantile(0.95)
+        # Q95 = obs.quantile(0.90)
+        # Add RMS contours, and label them
+        contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+        dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+        #dia._ax.set_title(season.capitalize())
+    
+        dias[varkey].add_grid()
+    
+    
+        #dia.ax.plot(x99,y99,color='k')
+    
+        
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            x, y = obs.values,mod.values
+            print(key,len(obs.values))
+    
+            #scores
+            PR = pearsonr(mod,obs)[0]
+            RMSE = rmse(obs,mod)                                               
+            BIAS = np.mean(mod) - np.mean(obs)
+            STD = mod.std()
+            
+            fit = np.polyfit(x,y,deg=1)
+            axes[varkey].plot(x, fit[0] * x + fit[1],\
+                              color=colors[ikey],alpha=0.8,lw=2,\
+                              label=key+", "+\
+                                          'R = '+str(round(PR,3))+', '+\
+                                          'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                          'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+            axes[varkey].legend(fontsize=5)
+            
+            # print(STD)
+            # print(PR)
+            dias[varkey].add_sample(STD/STD_OBS, PR,
+                           marker='o', ms=5, ls='',
+                           #mfc='k', mec='k', # B&W
+                           mfc=colors[ikey], mec=colors[ikey], # Colors
+                           label=key)
+    
+        # put ticker position, see
+        # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+        # dia.ax.axis['bottom'].
+        # dia.ax.axis['left'].
+        # dia.ax.axis['left'].
+    
+        i += 1
+    
+    i = 0
+    for varkey in ['h','theta','q']:                                                    
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+            isymbol = 0
+            for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+                indices =  (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name)
+                station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices]
+                station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices]
+    
+                axes[varkey].scatter(station_obs,station_mod,marker=symbols[isymbol],color=colors[ikey])
+                         #  label=key+", "+\
+                         #                    'R = '+str(round(PR[0],3))+', '+\
+                         #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                         #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+    
+    
+    
+            # # pl.scatter(obs,mod,label=key+", "+\
+            # #                              'R = '+str(round(PR[0],3))+', '+\
+            # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+            # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+                
+                dias[varkey].add_sample(station_mod.std()/station_obs.std(),
+                               pearsonr(station_mod,station_obs)[0],
+                               marker=symbols[isymbol], ms=5, ls='',
+                               #mfc='k', mec='k', # B&W
+                               mfc=colors[ikey], mec=colors[ikey], # Colors
+                               label=key)
+                isymbol += 1
+    
+    
+            axes[varkey].set_xlabel('observations')     
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+        if i==0:                                    
+            axes[varkey].set_ylabel('model')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+    
+    
+    
+    # legend for different forcing simulations (colors)
+    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    leg = []
+    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
+        leg.append(leg1)
+    ax.axis('off')
+    #leg1 =
+    ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
+    
+    
+    # legend for different stations (symbols)
+    ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    leg = []
+    isymbol = 0
+    for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+        leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+        leg.append(leg1)
+        isymbol += 1
+    
+    # symbol for all stations
+    leg1, = ax.plot([],'ko',markersize=10)
+    leg.append(leg1)
+    
+    
+    ax.axis('off')
+    ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    
+    
+    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+    
+    
+    #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+    #figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/iops_eval_report.png'
+    
+    if args.figure_filename is not None:
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+    fig.show()  
+
 
 
 

From 3e7b724243c15e38d3a6eb4e516d0ce9dc7be678 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 09:27:25 +0200
Subject: [PATCH 055/129] 	modified:   class4gl/interface/interface.py

---
 class4gl/interface/interface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 72b970e..4d851ce 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -122,7 +122,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-if args.show_figures:
+if args.make_figures:
     # the lines below activate TaylorPlots but it is disabled for now
     fig = plt.figure(figsize=(10,7))   #width,height
     i = 1                                                                           

From b4b76cce841d95a7f318bcab0c0ec990359d0eda Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 10:06:47 +0200
Subject: [PATCH 056/129] xies in figures

---
 class4gl/interface/interface.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 4d851ce..0685a46 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -231,10 +231,13 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                      #                    'R = '+str(round(PR[0],3))+', '+\
                      #                    'RMSE = '+str(round(RMSE,5))+', '+\
                      #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.5,0.86] ,
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.5,0.86] ,
                 colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
-        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.14,0.86] ,
+        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.84] ,
                 colors=['darkred'],alpha=0.5,)
+
+        axes[varkey].set_xlim((xi.min(),xi.max())
+        axes[varkey].set_ylim((yi.min(),yi.max())
     
     
         latex = {}
@@ -256,7 +259,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                                       'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
                              s=0.1,alpha=0.14,color='k')
         axes[varkey].legend(fontsize=5)
-                       
+        
+
+
+
         axes[varkey].set_xlabel('observations')     
         if i==0:                                    
             axes[varkey].set_ylabel('model')                                            
@@ -363,7 +369,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 plt.legend(loc='lower right')
         fig.tight_layout()
         if args.figure_filename_2 is not None:
-            fig.savefig(args.figure_filename,dpi=200); print("Image file written to:", args.figure_filename)
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file
+                                                               written to:",
+                                                               args.figure_filename_2)
         fig.show()
 
 

From b4202d8a27d89376a7f603eaa869f8ec12664d41 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 10:09:56 +0200
Subject: [PATCH 057/129] figure fixes

---
 class4gl/interface/interface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 0685a46..4ccd1e4 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -237,7 +237,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 colors=['darkred'],alpha=0.5,)
 
         axes[varkey].set_xlim((xi.min(),xi.max())
-        axes[varkey].set_ylim((yi.min(),yi.max())
+        axes[varkey].set_ylim((xi.min(),xi.max())
     
     
         latex = {}

From f74ec72ab00a7cc05f89fde44ae71301ae1d7492 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 10:10:50 +0200
Subject: [PATCH 058/129] figure fixes

---
 class4gl/interface/interface.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 4ccd1e4..0a8a009 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -236,8 +236,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.84] ,
                 colors=['darkred'],alpha=0.5,)
 
-        axes[varkey].set_xlim((xi.min(),xi.max())
-        axes[varkey].set_ylim((xi.min(),xi.max())
+        axes[varkey].set_xlim((xi.min(),xi.max()))
+        axes[varkey].set_ylim((xi.min(),xi.max()))
     
     
         latex = {}

From 91d362d451b4bb96fdb495168dfbb4971a62f61a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 10:11:50 +0200
Subject: [PATCH 059/129] figure fixes

---
 class4gl/interface/interface.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 0a8a009..3d76c13 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -369,9 +369,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 plt.legend(loc='lower right')
         fig.tight_layout()
         if args.figure_filename_2 is not None:
-            fig.savefig(args.figure_filename_2,dpi=200); print("Image file
-                                                               written to:",
-                                                               args.figure_filename_2)
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
         fig.show()
 
 

From b24bc772a40587b75d74b43ef9b1cf24d688d28d Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 10:22:16 +0200
Subject: [PATCH 060/129] figure fixes

---
 class4gl/interface/interface.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 3d76c13..e481bbe 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -231,13 +231,13 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                      #                    'R = '+str(round(PR[0],3))+', '+\
                      #                    'RMSE = '+str(round(RMSE,5))+', '+\
                      #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.5,0.86] ,
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.5,0.84] ,
                 colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
         axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.84] ,
                 colors=['darkred'],alpha=0.5,)
-
-        axes[varkey].set_xlim((xi.min(),xi.max()))
-        axes[varkey].set_ylim((xi.min(),xi.max()))
+        nanxi = xi[zi != np.nan]
+        axes[varkey].set_xlim((nanxi.min(),nanxi.max()))
+        axes[varkey].set_ylim((nanxi.min(),nanxi.max()))
     
     
         latex = {}

From 70e9d665d1eafd87bf127ddc2544eb07601b0bbc Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 11:10:33 +0200
Subject: [PATCH 061/129] modified:   .gitignore

---
 .gitignore | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index b65667c..84f7975 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,20 +1,17 @@
 *.o, 
 *.pyc
 *.class 
-class4gl/__pycache__/
 class4gl/__pycache__/*
 *.log
 .*
-build/
-dist/
-trash/
+build/*
+dist/*
+trash/*
 */__pychache__/
 *.py[cod]
 *$py.class
 .Python
-build/
 develop-eggs/
-dist/
 downloads/
 eggs/
 .eggs/

From 95466045a2ef98e97107a38074b4810393dae7b7 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 30 Aug 2018 17:19:59 +0200
Subject: [PATCH 062/129] figure fixes

---
 class4gl/interface/interface.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index e481bbe..5f55626 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -238,6 +238,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         nanxi = xi[zi != np.nan]
         axes[varkey].set_xlim((nanxi.min(),nanxi.max()))
         axes[varkey].set_ylim((nanxi.min(),nanxi.max()))
+        print(varkey,(nanxi.min(),nanxi.max()))
     
     
         latex = {}

From 51fbba95c8606b6c5d0cea93d42fc738d6289734 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 1 Sep 2018 02:22:16 +0200
Subject: [PATCH 063/129] nicely-organized boxplots

---
 class4gl/interface/interface.py               | 209 +++++++++++++++---
 class4gl/simulations/c4gl_sim.o643275-5       |  28 ---
 ...tions_wilt.py => simulations_wwilt_wfc.py} |  37 ++--
 3 files changed, 202 insertions(+), 72 deletions(-)
 delete mode 100644 class4gl/simulations/c4gl_sim.o643275-5
 rename class4gl/simulations/{simulations_wilt.py => simulations_wwilt_wfc.py} (92%)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 5f55626..cc0febf 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -1,3 +1,5 @@
+'''
+
 import numpy as np
 
 import pandas as pd
@@ -122,7 +124,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
+'''
 if args.make_figures:
+    '''
     # the lines below activate TaylorPlots but it is disabled for now
     fig = plt.figure(figsize=(10,7))   #width,height
     i = 1                                                                           
@@ -311,15 +315,39 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     if args.figure_filename is not None:
         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
     fig.show()  
-    
-    
+   ''' 
     if bool(args.show_control_parameters):
+
         import seaborn as sns
-        sns.set()
+
+        pkmn_type_colors = [
+                                            '#A0A0A0',  # Poison
+                                            '#78C850',  # Grass
+                                            '#F08030',  # Fire
+                                            '#6890F0',  # Water
+                                            '#F08030',  # Fire
+                                            '#C03028',  # Fighting
+                                            '#F85888',  # Psychic
+                                            '#A8B820',  # Bug
+                                            '#A8A878',  # Normal
+                                            '#F8D030',  # Electric
+                                            '#E0C068',  # Ground
+                                            '#EE99AC',  # Fairy
+                                            '#B8A038',  # Rock
+                                            '#705898',  # Ghost
+                                            '#98D8D8',  # Ice
+                                            '#7038F8',  # Dragon
+                                           ]
+
+
+
+        sns.set_style('whitegrid')
+        #sns.set()
         fig = pl.figure(figsize=(12,8))
         i = 1
         axes = {}
         data_all = pd.DataFrame()
+        data_input = pd.DataFrame()
         
         
         
@@ -334,41 +362,166 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         
         ikey = 0
         key = list(args.experiments.strip().split(' '))[ikey]
+        data_all = pd.DataFrame()
+
+        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        tempdatamodstats["source"] = "soundings"
+        tempdatamodstats["source_index"] = "soundings"
+
+        ini_ref = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+        tempdataini_this = pd.DataFrame(ini_ref.copy())
+
+        tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+        tempdatamodstats['STNID']= tempdataini_this.STNID
+        tempdatamodstats['source']= "soundings"
+        tempdatamodstats['source_index']= "soundings"
+        tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+        print('hello')
+
+        tempdataini = pd.DataFrame(ini_ref)
+        tempdataini["source"] = "soundings"
+        tempdataini["source_index"] = "soundings"
+        tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+        print('hello2')
+
+
+        data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+        data_input = pd.concat([data_input,tempdataini],axis=0)
+        print(data_input.shape)
+        print(data_all.shape)
+
+            
+        for key in list(args.experiments.strip().split(' ')):
+
+            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
+            tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+            tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+            tempdatamodstats['STNID']= tempdataini_this.STNID
+            tempdatamodstats['source']= key
+            tempdatamodstats['source_index']= key
+            tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+            print('hello')
+
+
+            tempdataini = pd.DataFrame(ini_ref.copy())
+            tempdataini["source"] = key 
+            tempdataini["source_index"] = key
+            tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+    
+
+            print('hello2')
+            index_intersect = tempdataini.index.intersection(tempdatamodstats.index)
+            print('hello3')
+
+            tempdataini = tempdataini.loc[index_intersect]
+            print('hello4')
+            tempdatamodstats = tempdatamodstats.loc[index_intersect]
+            print('hello5')
+
+
+            # data[varkey] = tempdatamodstats['d'+varkey+'dt']
+            data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+            data_input = pd.concat([data_input, tempdataini],axis=0)
+            print(data_input.shape)
+            print(data_all.shape)
+
+        data_input.cc = data_input.cc.clip(0.,+np.inf)
+
+        for varkey in ['h','theta','q']:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+            data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            print(data_input.shape)
+            print(data_all.shape)
+        print('hello6')
+        print(data_all.columns)
+        print('hello7')
         for varkey in ['h','theta','q']:
             for input_key in ['wg','cc']:
-                data_all = pd.DataFrame()
-                data = pd.DataFrame()
-                data[varkey] = ""
-                data[varkey] = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-                data["source"] = "soundings"
-                data_all = pd.concat([data_all,data])
-                    
-                data = pd.DataFrame()
-                
-                
-                data[varkey] = ""
-                data[varkey] = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-                data["source"] = "model"
-                data_all = pd.concat([data_all,data])
-                data_input = pd.concat([c4gldata[key].frames['stats']['records_all_stations_ini'],
-                                       c4gldata[key].frames['stats']['records_all_stations_ini']],axis=0)
-                input_key_full = input_key + "["+units[input_key]+"]"
-                data_all[input_key_full] =  pd.cut(x=data_input[input_key].values,bins=10)
-                
                 varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
-                data_all = data_all.rename(columns={varkey:varkey_full})
+
+                print('hello8')
+                print(data_input.shape)
+                print(data_all.shape)
+                input_key_full = input_key + "["+units[input_key]+"]"
+                data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2)
+                data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2,)
+                print('hello9')
+                print(data_input.shape)
+                print(data_all.shape)
                 
                 qvalmax = data_all[varkey_full].quantile(0.999)
                 qvalmin = data_all[varkey_full].quantile(0.001)
-                data_all = data_all[(data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)]
+                select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
+                print('hello11')
+                data_all = data_all[select_data]
+                print('hello12')
+                data_input = data_input[select_data.values]
+                print('hello13')
+                print(data_input.shape)
+                print(data_all.shape)
+                print('hello10')
                 
+                sns.set(style="ticks", palette="pastel")
                 ax = fig.add_subplot(3,2,i)
-                sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
-                ax.grid()
-                plt.xticks(rotation=45,ha='right')
+                #sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+                
+                #ax.set_title(input_key_full)
+                sb = sns.boxplot(x=input_key_full, y=varkey_full, hue="source",
+                                 palette=pkmn_type_colors,
+                                # palette=["m", "g",'r','b'],
+                                 linewidth=1.2, data=data_all,sym='')
+                if i ==1:
+                     plt.legend(loc='upper right',fontsize=7.)
+                else:
+                     ax.get_legend().set_visible(False)
+                #     plt.legend('off')
+                if i >= 5:
+                    ax.set_xticklabels(labels=ax.get_xticklabels(),rotation=45.,ha='right')
+                else:
+                    ax.set_xticklabels([])
+                    ax.set_xlabel('')
+
+                if np.mod(i,2) == 0:
+                    ax.set_yticklabels([])
+                    ax.set_ylabel('')
+
+                for j,artist in enumerate(ax.artists):
+                    if np.mod(j,4) !=0:
+                        # Set the linecolor on the artist to the facecolor, and set the facecolor to None
+                        print(j,artist)
+                        col = artist.get_facecolor()
+                        print(j,artist)
+                        artist.set_edgecolor(col)
+                        print(j,artist)
+                        artist.set_facecolor('None')
+                
+                        # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
+                        # Loop over them here, and use the same colour as above
+                        
+                        for k in range(j*5,j*5+5):
+                            line = ax.lines[k]
+                            line.set_color(col)
+                            line.set_mfc(col)
+                            line.set_mec(col)
+                
+                # Also fix the legend
+                j = 0
+                for legpatch in ax.get_legend().get_patches():
+                    if j > 0:
+
+                        col = legpatch.get_facecolor()
+                        legpatch.set_edgecolor(col)
+                        legpatch.set_facecolor('None')
+                    j +=1
+
+
+
+
+                #ax.grid()
+                #sns.despine(offset=10, trim=True)
                 i +=1
-                plt.legend(loc='lower right')
         fig.tight_layout()
+        fig.subplots_adjust( bottom=0.18,left=0.09,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
         if args.figure_filename_2 is not None:
             fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
         fig.show()
diff --git a/class4gl/simulations/c4gl_sim.o643275-5 b/class4gl/simulations/c4gl_sim.o643275-5
deleted file mode 100644
index df8103f..0000000
--- a/class4gl/simulations/c4gl_sim.o643275-5
+++ /dev/null
@@ -1,28 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=SM5 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL --split_by=50 --station_id=74560 --subset_forcing=morning
-getting stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (5)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 5
-Fetching initial/forcing records
-Fetching afternoon records for determining the simulation runtimes
-hello
-396
-396
-aligning morning and afternoon records
-Traceback (most recent call last):
-  File "/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py", line 184, in 
-    exp = EXP_DEFS[expname]
-KeyError: 'SM5'
diff --git a/class4gl/simulations/simulations_wilt.py b/class4gl/simulations/simulations_wwilt_wfc.py
similarity index 92%
rename from class4gl/simulations/simulations_wilt.py
rename to class4gl/simulations/simulations_wwilt_wfc.py
index 8da29e4..6f0afd3 100644
--- a/class4gl/simulations/simulations_wilt.py
+++ b/class4gl/simulations/simulations_wwilt_wfc.py
@@ -57,11 +57,12 @@
 
 EXP_DEFS  =\
 {
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-  'WILT':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_WILT':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_FC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
 
@@ -69,7 +70,6 @@
 # SET = args.dataset
 
 
-
 print("getting stations")
 # these are all the stations that are found in the input dataset
 all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
@@ -182,7 +182,6 @@
     records_afternoon.index = records_morning.index
 
 experiments = args.experiments.strip(' ').split(' ')
-
 for expname in experiments:
     exp = EXP_DEFS[expname]
     path_exp = args.path_experiments+'/'+expname+'/'
@@ -235,24 +234,30 @@
                                              c4gli_morning.pars.datetime_daylight).total_seconds())
                     else:
                         runtime = int(args.runtime)
+
             
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
-                    
-                    if expname == 'WILT':
-                       c4gli_morning.update(source=expname, \
-                                            pars={'wg':c4gli_morning.pars.wwilt,\
-                                                  'w2':c4gli_morning.pars.wwilt},
-                                           )
+                    if expname == 'GLOBAL_NOAC_WILT':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'wg':c4gli_morning.pars.wwilt,\
+                                              'w2':c4gli_morning.pars.wwilt}\
+                                            )
+                    if expname == 'GLOBAL_NOAC_FC':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'wg':c4gli_morning.pars.wfc,\
+                                              'w2':c4gli_morning.pars.wfc}\
+                                            )
+
                     c4gl = class4gl(c4gli_morning)
 
                     if args.error_handling == 'dump_always':
                         try:
                             c4gl.run()
-                            print('run succesfull')
+                            print('run successful')
                         except:
-                            print('run not succesfull')
+                            print('run not successful')
                         onerun = True
 
                         c4gli_morning.dump(file_ini)
@@ -319,7 +324,7 @@
     #     with \
     #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
     #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
     #         for (STNID,index),record_ini in records_iterator(records_ini):
     #             c4gli_ini = get_record_yaml(file_station_ini, 
     #                                         record_ini.index_start, 

From ded0a79fd562044b3ebf6de93ee0f1adee39c3a6 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 1 Sep 2018 02:30:26 +0200
Subject: [PATCH 064/129] nicely-organized boxplots

---
 class4gl/interface/interface.py | 74 ++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 38 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index cc0febf..af989c0 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -1,4 +1,3 @@
-'''
 
 import numpy as np
 
@@ -124,9 +123,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-'''
 if args.make_figures:
-    '''
     # the lines below activate TaylorPlots but it is disabled for now
     fig = plt.figure(figsize=(10,7))   #width,height
     i = 1                                                                           
@@ -315,7 +312,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     if args.figure_filename is not None:
         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
     fig.show()  
-   ''' 
+
     if bool(args.show_control_parameters):
 
         import seaborn as sns
@@ -376,19 +373,19 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         tempdatamodstats['source']= "soundings"
         tempdatamodstats['source_index']= "soundings"
         tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
-        print('hello')
+        #print('hello')
 
         tempdataini = pd.DataFrame(ini_ref)
         tempdataini["source"] = "soundings"
         tempdataini["source_index"] = "soundings"
         tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
-        print('hello2')
+        #print('hello2')
 
 
         data_all = pd.concat([data_all,tempdatamodstats],axis=0)
         data_input = pd.concat([data_input,tempdataini],axis=0)
-        print(data_input.shape)
-        print(data_all.shape)
+        #print(data_input.shape)
+        #print(data_all.shape)
 
             
         for key in list(args.experiments.strip().split(' ')):
@@ -400,7 +397,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             tempdatamodstats['source']= key
             tempdatamodstats['source_index']= key
             tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
-            print('hello')
+            #print('hello')
 
 
             tempdataini = pd.DataFrame(ini_ref.copy())
@@ -409,57 +406,58 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
     
 
-            print('hello2')
+            #print('hello2')
             index_intersect = tempdataini.index.intersection(tempdatamodstats.index)
-            print('hello3')
+            #print('hello3')
 
             tempdataini = tempdataini.loc[index_intersect]
-            print('hello4')
+            #print('hello4')
             tempdatamodstats = tempdatamodstats.loc[index_intersect]
-            print('hello5')
+            #print('hello5')
 
 
             # data[varkey] = tempdatamodstats['d'+varkey+'dt']
             data_all = pd.concat([data_all,tempdatamodstats],axis=0)
             data_input = pd.concat([data_input, tempdataini],axis=0)
-            print(data_input.shape)
-            print(data_all.shape)
+            #print(data_input.shape)
+            #print(data_all.shape)
 
         data_input.cc = data_input.cc.clip(0.,+np.inf)
 
         for varkey in ['h','theta','q']:
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
-            print(data_input.shape)
-            print(data_all.shape)
-        print('hello6')
-        print(data_all.columns)
-        print('hello7')
+            #print(data_input.shape)
+            #print(data_all.shape)
+        #print('hello6')
+        #print(data_all.columns)
+        #print('hello7')
         for varkey in ['h','theta','q']:
-            for input_key in ['wg','cc']:
+            input_keys =['wg','cc']
+            for input_key in input_keys:
                 varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
 
-                print('hello8')
-                print(data_input.shape)
-                print(data_all.shape)
+                #print('hello8')
+                #print(data_input.shape)
+                #print(data_all.shape)
                 input_key_full = input_key + "["+units[input_key]+"]"
                 data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2)
                 data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2,)
-                print('hello9')
-                print(data_input.shape)
-                print(data_all.shape)
+                #print('hello9')
+                #print(data_input.shape)
+                #print(data_all.shape)
                 
                 qvalmax = data_all[varkey_full].quantile(0.999)
                 qvalmin = data_all[varkey_full].quantile(0.001)
                 select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
-                print('hello11')
+                #print('hello11')
                 data_all = data_all[select_data]
-                print('hello12')
+                #print('hello12')
                 data_input = data_input[select_data.values]
-                print('hello13')
-                print(data_input.shape)
-                print(data_all.shape)
-                print('hello10')
+                #print('hello13')
+                #print(data_input.shape)
+                #print(data_all.shape)
+                #print('hello10')
                 
                 sns.set(style="ticks", palette="pastel")
                 ax = fig.add_subplot(3,2,i)
@@ -481,18 +479,18 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                     ax.set_xticklabels([])
                     ax.set_xlabel('')
 
-                if np.mod(i,2) == 0:
+                if np.mod(i,len(input_keys)) == 0:
                     ax.set_yticklabels([])
                     ax.set_ylabel('')
 
                 for j,artist in enumerate(ax.artists):
-                    if np.mod(j,4) !=0:
+                    if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
                         # Set the linecolor on the artist to the facecolor, and set the facecolor to None
-                        print(j,artist)
+                        #print(j,artist)
                         col = artist.get_facecolor()
-                        print(j,artist)
+                        #print(j,artist)
                         artist.set_edgecolor(col)
-                        print(j,artist)
+                        #print(j,artist)
                         artist.set_facecolor('None')
                 
                         # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)

From cfc535dea502eee3e62fb4ce4102a1cf236509ac Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 4 Sep 2018 13:23:57 +0200
Subject: [PATCH 065/129]  batch_update.py and update_yaml.py for updating
 datasets, new interface figures, temporary fix concerning sign of advection,
 lots more ...

---
 class4gl/class4gl.py                          |  15 +-
 class4gl/data_global.py                       |   1 +
 class4gl/interface/interface_koeppen.py       | 530 ++++++++++++++++++
 class4gl/model.py                             |  12 +-
 class4gl/simulations/batch_simulations.py     |   6 +
 class4gl/simulations/batch_update.py          | 154 +++++
 class4gl/simulations/simulations.py           |   9 +-
 class4gl/simulations/simulations_iter.py      |   1 +
 class4gl/simulations/simulations_smchange2.py |   1 +
 class4gl/simulations/simulations_wwilt_wfc.py |   1 +
 class4gl/simulations/update_yaml.py           | 331 +++++++++++
 class4gl/simulations/update_yaml_old.py       | 277 +++++++++
 12 files changed, 1323 insertions(+), 15 deletions(-)
 create mode 100644 class4gl/interface/interface_koeppen.py
 create mode 100644 class4gl/simulations/batch_update.py
 create mode 100644 class4gl/simulations/update_yaml.py
 create mode 100644 class4gl/simulations/update_yaml_old.py

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 51d58e1..7fe3767 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -860,10 +860,6 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
         # by default, we get all dataset keys
         keys = list(globaldata.datasets.keys())
 
-        # We add LAI manually, because it is not listed in the datasets and
-        #they its retreival is hard coded below based on LAIpixel and cveg
-        if ('LAIpixel' in keys) and ('cveg' in keys):
-            keys.append('LAI')
 
         # # In case there is surface pressure, we also calculate the half-level
         # # and full-level pressure fields
@@ -873,9 +869,11 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
 
         # If specified, we only take the keys that are in only_keys
         if only_keys is not None:
-            for key in keys:
+            cycle_keys = list(keys)
+            for key in cycle_keys:
                 if key not in only_keys:
                     keys.remove(key)
+
                 
         # If specified, we take out keys that are in exclude keys
         if exclude_keys is not None:
@@ -883,9 +881,16 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                 if key in exclude_keys:
                     keys.remove(key)
 
+        # We add LAI manually, because it is not listed in the datasets and
+        #they its retreival is hard coded below based on LAIpixel and cveg
+        if ('LAIpixel' in keys) and ('cveg' in keys):
+            keys.append('LAI')
+
         # we set everything to nan first in the pars section (non-profile parameters
         # without lev argument), so that we can check afterwards whether the
         # data is well-fetched or not.
+
+
         for key in keys:
             if not ((key in globaldata.datasets) and \
                 (globaldata.datasets[key].page is not None) and \
diff --git a/class4gl/data_global.py b/class4gl/data_global.py
index 63081f1..73f0dda 100644
--- a/class4gl/data_global.py
+++ b/class4gl/data_global.py
@@ -216,6 +216,7 @@ def browse_page(self,rewind=2,**args):
 
 class data_global(object):
     def __init__(self,sources= {
+        'KOEPPEN:KGC'   : '/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc',
         # # old gleam
         # 'GLEAM:wg'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMsurf_*_GLEAM_v3.1a.nc:SMsurf',
         # 'GLEAM:w2'      : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot',
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
new file mode 100644
index 0000000..6af9a7c
--- /dev/null
+++ b/class4gl/interface/interface_koeppen.py
@@ -0,0 +1,530 @@
+'''
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False)
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--show_control_parameters',default=True)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--figure_filename_2',default=None)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+import xarray as xr
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+
+def abline(slope, intercept,axis):
+    """Plot a line from slope and intercept"""
+    #axis = plt.gca()
+    x_vals = np.array(axis.get_xlim())
+    y_vals = intercept + slope * x_vals
+    axis.plot(x_vals, y_vals, 'k--')
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+if bool(args.load_globaldata):
+    # iniitialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing+'/',\
+                      globaldata,\
+                      refetch_records=False
+                    )
+'''
+if args.make_figures:
+    """
+    # the lines below activate TaylorPlots but it is disabled for now
+    fig = plt.figure(figsize=(10,7))   #width,height
+    i = 1                                                                           
+    axes = {}         
+    axes_taylor = {}         
+    
+    colors = ['r','g','b','m']
+    symbols = ['*','x','+']
+    dias = {}
+    
+    for varkey in ['h','theta','q']:                                                    
+        axes[varkey] = fig.add_subplot(2,3,i)                                       
+        #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+    
+        #print(obs.std())
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+        if i == 0:
+            dias[varkey]._ax.axis["left"].label.set_text(\
+                "Standard deviation (model) / Standard deviation (observations)")
+            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # Q95 = obs.quantile(0.95)
+        # Q95 = obs.quantile(0.90)
+        # Add RMS contours, and label them
+        contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+        dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+        #dia._ax.set_title(season.capitalize())
+    
+        dias[varkey].add_grid()
+    
+    
+        #dia.ax.plot(x99,y99,color='k')
+    
+        
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+            # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+            # clearsky = (cc < 0.05)
+            # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+            # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            x, y = obs.values,mod.values
+            print(key,len(obs.values))
+    
+            STD_OBS = obs.std()
+            #scores
+            PR = pearsonr(mod,obs)[0]
+            RMSE = rmse(obs,mod)                                               
+            BIAS = np.mean(mod) - np.mean(obs)
+            STD = mod.std()
+            
+            # fit = np.polyfit(x,y,deg=1)
+            # axes[varkey].plot(x, fit[0] * x + fit[1],\
+            #                   color=colors[ikey],alpha=0.8,lw=2,\
+            #                   label=key+", "+\
+            #                               'R = '+str(round(PR,3))+', '+\
+            #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+            #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+            # axes[varkey].legend(fontsize=5)
+            
+            # print(STD)
+            # print(PR)
+            dias[varkey].add_sample(STD/STD_OBS, PR,
+                           marker='o', ms=5, ls='',
+                           #mfc='k', mec='k', # B&W
+                           mfc=colors[ikey], mec=colors[ikey], # Colors
+                           label=key)
+    
+        # put ticker position, see
+        # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+        # dia.ax.axis['bottom'].
+        # dia.ax.axis['left'].
+        # dia.ax.axis['left'].
+    
+        i += 1
+    
+    i = 0
+    for varkey in ['h','theta','q']:                                                    
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        clearsky = (cc < 0.05)
+    
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+    
+    
+        nbins=40       
+        x, y = obs.values,mod.values
+        
+        xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+        zi = np.zeros_like(xi)*np.nan       
+        for ibin in range(nbins):
+            xmin = x.min() + ibin * (x.max() - x.min())/nbins
+            xmax = xmin + (x.max() - x.min())/nbins
+            in_bin = ((x >= xmin) & (x < xmax))
+            ybin = y[in_bin]
+            xbin = x[in_bin]
+            if len(ybin) > 20:
+                k = kde.gaussian_kde((ybin))
+                zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+        zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+        zi_int = zi.cumsum(axis=1) 
+                     #  label=key+", "+\
+                     #                    'R = '+str(round(PR[0],3))+', '+\
+                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.5,0.84] ,
+                colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.84] ,
+                colors=['darkred'],alpha=0.5,)
+        nanxi = xi[zi != np.nan]
+        axes[varkey].set_xlim((nanxi.min(),nanxi.max()))
+        axes[varkey].set_ylim((nanxi.min(),nanxi.max()))
+        print(varkey,(nanxi.min(),nanxi.max()))
+    
+    
+        latex = {}
+        latex['dthetadt'] =  r'$d \theta / dt $'
+        latex['dqdt'] =      r'$d q / dt $'
+        latex['dhdt'] =      r'$d h / dt $'
+    
+        axes[varkey].set_xlabel('observations')     
+        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+    
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+    
+        axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
+                                      'R = '+str(round(PR,3))+', '+\
+                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+                             s=0.1,alpha=0.14,color='k')
+        axes[varkey].legend(fontsize=5)
+        
+
+
+
+        axes[varkey].set_xlabel('observations')     
+        if i==0:                                    
+            axes[varkey].set_ylabel('model')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+    
+    
+    
+    # legend for different forcing simulations (colors)
+    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    leg = []
+    for ikey,key in enumerate(args.experiments.strip().split(' ')):
+        leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+        leg.append(leg1)
+    ax.axis('off')
+    #leg1 =
+    ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+    
+    
+    # # legend for different stations (symbols)
+    # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # isymbol = 0
+    # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+    #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+    #     leg.append(leg1)
+    #     isymbol += 1
+    # 
+    # # symbol for all stations
+    # leg1, = ax.plot([],'ko',markersize=10)
+    # leg.append(leg1)
+
+    
+    # ax.axis('off')
+    # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    
+    
+    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+    
+    
+    #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+    # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+    # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+    
+    if args.figure_filename is not None:
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+    fig.show()  
+"""
+    if bool(args.show_control_parameters):
+
+        import seaborn as sns
+
+        pkmn_type_colors = [
+                                            '#A0A0A0',  # Poison
+                                            '#78C850',  # Grass
+                                            '#F08030',  # Fire
+                                            '#6890F0',  # Water
+                                            '#F08030',  # Fire
+                                            '#C03028',  # Fighting
+                                            '#F85888',  # Psychic
+                                            '#A8B820',  # Bug
+                                            '#A8A878',  # Normal
+                                            '#F8D030',  # Electric
+                                            '#E0C068',  # Ground
+                                            '#EE99AC',  # Fairy
+                                            '#B8A038',  # Rock
+                                            '#705898',  # Ghost
+                                            '#98D8D8',  # Ice
+                                            '#7038F8',  # Dragon
+                                           ]
+
+
+
+        sns.set_style('whitegrid')
+        #sns.set()
+        fig = pl.figure(figsize=(12,8))
+        i = 1
+        axes = {}
+        data_all = pd.DataFrame()
+        data_input = pd.DataFrame()
+        
+        
+        
+        # #for varkey in ['theta','q']:     
+        # EF =\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
+        # EF[EF<0] = np.nan
+        # EF[EF>1] = np.nan
+        
+        # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
+        
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        data_all = pd.DataFrame()
+
+        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        tempdatamodstats["source"] = "soundings"
+        tempdatamodstats["source_index"] = "soundings"
+
+        ini_ref = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+        tempdataini_this = pd.DataFrame(ini_ref.copy())
+
+        tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+        tempdatamodstats['STNID']= tempdataini_this.STNID
+        tempdatamodstats['source']= "soundings"
+        tempdatamodstats['source_index']= "soundings"
+        tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+        #print('hello')
+
+        tempdataini = pd.DataFrame(ini_ref)
+        tempdataini["source"] = "soundings"
+        tempdataini["source_index"] = "soundings"
+        tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+        #print('hello2')
+
+
+        data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+        data_input = pd.concat([data_input,tempdataini],axis=0)
+        #print(data_input.shape)
+        #print(data_all.shape)
+
+            
+        for key in list(args.experiments.strip().split(' ')):
+
+            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
+            tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+            tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+            tempdatamodstats['STNID']= tempdataini_this.STNID
+            tempdatamodstats['source']= key
+            tempdatamodstats['source_index']= key
+            tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+            #print('hello')
+
+
+            tempdataini = pd.DataFrame(ini_ref.copy())
+            tempdataini["source"] = key 
+            tempdataini["source_index"] = key
+            tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+    
+
+            #print('hello2')
+            index_intersect = tempdataini.index.intersection(tempdatamodstats.index)
+            #print('hello3')
+
+            tempdataini = tempdataini.loc[index_intersect]
+            #print('hello4')
+            tempdatamodstats = tempdatamodstats.loc[index_intersect]
+            #print('hello5')
+
+
+            # data[varkey] = tempdatamodstats['d'+varkey+'dt']
+            data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+            data_input = pd.concat([data_input, tempdataini],axis=0)
+            #print(data_input.shape)
+            #print(data_all.shape)
+
+        data_input.cc = data_input.cc.clip(0.,+np.inf)
+
+        for varkey in ['h','theta','q']:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+            data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            #print(data_input.shape)
+            #print(data_all.shape)
+        xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
+        lookuptable = pd.Series(xrkoeppen['KGCID'])
+        data_all['KGCname'] = data_input['KGC'].map(lookuptable)
+        #print('hello6')
+        #print(data_all.columns)
+        #print('hello7')
+        for varkey in ['h','theta','q']:
+            #input_keys =['wg','cc']
+            #for input_key in input_keys:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+
+            #print('hello8')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            #input_key_full = input_key + "["+units[input_key]+"]"
+            #print('hello9')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            qvalmax = data_all[varkey_full].quantile(0.999)
+            qvalmin = data_all[varkey_full].quantile(0.001)
+            select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
+            #print('hello11')
+            data_all = data_all[select_data]
+            #print('hello12')
+            data_input = data_input[select_data.values]
+            #print('hello13')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            #print('hello10')
+            
+
+            sns.set(style="ticks", palette="pastel")
+            ax = fig.add_subplot(3,1,i)
+            #sns.violinplot(x='KGC',y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+            
+            #ax.set_title(input_key_full)
+            sb = sns.boxplot(x='KGCname', y=varkey_full, hue="source",
+                             palette=pkmn_type_colors,
+                            # palette=["m", "g",'r','b'],
+                             linewidth=1.2, data=data_all,sym='')
+            if i ==1:
+                 plt.legend(loc='upper right',fontsize=7.)
+            else:
+                 ax.get_legend().set_visible(False)
+            #     plt.legend('off')
+            if i >= 2:
+                ax.set_xticklabels(labels=ax.get_xticklabels(),rotation=45.,ha='right')
+            else:
+                ax.set_xticklabels([])
+                ax.set_xlabel('')
+
+            # ax.set_yticklabels([])
+            # ax.set_ylabel('')
+
+            for j,artist in enumerate(ax.artists):
+                if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
+                    # Set the linecolor on the artist to the facecolor, and set the facecolor to None
+                    #print(j,artist)
+                    col = artist.get_facecolor()
+                    #print(j,artist)
+                    artist.set_edgecolor(col)
+                    #print(j,artist)
+                    artist.set_facecolor('None')
+            
+                    # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
+                    # Loop over them here, and use the same colour as above
+                    
+                    for k in range(j*5,j*5+5):
+                        line = ax.lines[k]
+                        line.set_color(col)
+                        line.set_mfc(col)
+                        line.set_mec(col)
+            
+            # Also fix the legend
+            j = 0
+            for legpatch in ax.get_legend().get_patches():
+                if j > 0:
+
+                    col = legpatch.get_facecolor()
+                    legpatch.set_edgecolor(col)
+                    legpatch.set_facecolor('None')
+                j +=1
+
+
+
+
+            #ax.grid()
+            #sns.despine(offset=10, trim=True)
+            i +=1
+        fig.tight_layout()
+        fig.subplots_adjust( bottom=0.18,left=0.09,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
+        if args.figure_filename_2 is not None:
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
+        fig.show()
+
+
+
diff --git a/class4gl/model.py b/class4gl/model.py
index e6eeb07..471d218 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -965,15 +965,15 @@ def run_mixed_layer(self):
         self.dthetatend = l_entrainment*dthetatend_pre + \
                         (1.-l_entrainment)*0.
         self.thetatend = l_entrainment*thetatend_pre + \
-                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h - self.advtheta)
         self.htend = l_entrainment*htend_pre + \
                      (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
         #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
         #stop
 
 
-        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
-        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h - self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h - self.advCO2
 
 
         # self.qtend = l_entrainment*qtend_pre + \
@@ -1010,8 +1010,8 @@ def run_mixed_layer(self):
      
         # assume u + du = ug, so ug - u = du
         if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h - self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h - self.advv
   
             self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
             self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
@@ -1076,7 +1076,7 @@ def integrate_mixed_layer(self):
 
             # take into account advection for the whole profile
                 
-                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
+                self.air_ap[var] = self.air_ap[var] - self.dtcur * self.air_ap['adv'+var]
 
             var = 'z'
             #print(self.air_ap[var])
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index c5bb7c9..9b6398e 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -37,6 +37,11 @@
 parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
 parser.add_argument('--path_forcing') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--path_experiments') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+
+
+#arguments only used for update_yaml.py
+parser.add_argument('--path_dataset') 
+parser.add_argument('--global_keys') 
 args = parser.parse_args()
 
 sys.path.insert(0, args.c4gl_path_lib)
@@ -110,6 +115,7 @@
     if args.cleanup_experiments:
         os.system("rm -R "+args.path_experiments+'/'+EXP)
 
+    # C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
     command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
                 str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
     # propagate arguments towards the job script
diff --git a/class4gl/simulations/batch_update.py b/class4gl/simulations/batch_update.py
new file mode 100644
index 0000000..d9ff78a
--- /dev/null
+++ b/class4gl/simulations/batch_update.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+
+"""
+Usage:
+python batch_update.py --exec $CLASS4GL/simulations/update_yaml_old.py
+--path_experiments $VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC/ --path_forcing
+$VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC_BACKUP_20180904/ --c4gl_path_lib
+$CLASS4GL --split_by 50 --global_keys "KGC" --subset_forcing ini --experiments
+"GLOBAL_NOAC"
+"""
+
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+parser = argparse.ArgumentParser()
+#if __name__ == '__main__':
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling')
+parser.add_argument('--subset_forcing',default='morning') 
+                                        # this tells which yaml subset
+                                        # to initialize with.
+                                        # Most common options are
+                                        # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime')
+# delete folders of experiments before running them
+parser.add_argument('--cleanup_experiments',default=False)
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=50)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--path_forcing') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+
+
+#arguments only used for update_yaml.py
+parser.add_argument('--path_dataset') 
+parser.add_argument('--global_keys') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+# path_forcingSET = args.path_forcing+'/'+SET+'/'
+
+print("getting all stations from --path_forcing")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting stations by --station_id")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table [--first_station_row,--last_station_row]")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,\
+                                         refetch_records=False,\
+                                        )
+
+print('splitting batch in --split_by='+str(args.split_by)+' jobs.')
+totalchunks = 0
+for istation,current_station in all_stations_select.iterrows():
+    records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+    chunks_current_station = len(records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
+    totalchunks +=chunks_current_station
+
+print('total chunks (= size of array-job) per experiment: ' + str(totalchunks))
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+
+print(args.experiments.strip().split(" "))
+
+for EXP in args.experiments.strip().split(" "):
+    if args.cleanup_experiments:
+        os.system("rm -R "+args.path_experiments+'/'+EXP)
+
+    #C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+    command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+                str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
+    # propagate arguments towards the job script
+    for argkey in args.__dict__.keys():
+        if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
+            # default values are specified in the simulation script, so
+            # excluded here
+            (args.__dict__[argkey] is not None)
+           ):
+                command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+
+    print('Submitting array job for experiment '+EXP+': '+command)
+    os.system(command)
+
+
+    #os.system(command)
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 49eb282..bced8d3 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -13,6 +13,7 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
 parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 parser.add_argument('--first_station_row')
@@ -57,10 +58,10 @@
 
 EXP_DEFS  =\
 {
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
 
diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py
index 5dfbaff..438db32 100644
--- a/class4gl/simulations/simulations_iter.py
+++ b/class4gl/simulations/simulations_iter.py
@@ -39,6 +39,7 @@
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
+    #parser.add_argument('--timestamp')
     parser.add_argument('--global-chunk')
     parser.add_argument('--first-station')
     parser.add_argument('--last-station')
diff --git a/class4gl/simulations/simulations_smchange2.py b/class4gl/simulations/simulations_smchange2.py
index 99c38a9..d79a46f 100644
--- a/class4gl/simulations/simulations_smchange2.py
+++ b/class4gl/simulations/simulations_smchange2.py
@@ -13,6 +13,7 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
 parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 parser.add_argument('--first_station_row')
diff --git a/class4gl/simulations/simulations_wwilt_wfc.py b/class4gl/simulations/simulations_wwilt_wfc.py
index 6f0afd3..1e5450c 100644
--- a/class4gl/simulations/simulations_wwilt_wfc.py
+++ b/class4gl/simulations/simulations_wwilt_wfc.py
@@ -13,6 +13,7 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
 parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 parser.add_argument('--first_station_row')
diff --git a/class4gl/simulations/update_yaml.py b/class4gl/simulations/update_yaml.py
new file mode 100644
index 0000000..1681e88
--- /dev/null
+++ b/class4gl/simulations/update_yaml.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--global_keys') 
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunck from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching initial/forcing records')
+records_morning = get_records(run_stations,\
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
+                              refetch_records=False,
+                              )
+
+# # note that if runtime is an integer number, we don't need to get the afternoon
+# # profiles. 
+# if args.runtime == 'from_afternoon_profile':
+#     print('Fetching afternoon records for determining the simulation runtimes')
+#     records_afternoon = get_records(run_stations,\
+#                                     args.path_forcing,\
+#                                     subset='afternoon',
+#                                     refetch_records=False,
+#                                     )
+#     
+#     # print(records_morning.index)
+#     # print(records_afternoon.index)
+#     # align afternoon records with the noon records, and set same index
+#     print('hello')
+#     print(len(records_afternoon))
+#     print(len(records_morning))
+# 
+#     print("aligning morning and afternoon records")
+#     records_morning['dates'] = records_morning.ldatetime.dt.date
+#     records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+#     records_afternoon.set_index(['STNID','dates'],inplace=True)
+#     ini_index_dates = records_morning.set_index(['STNID','dates']).index
+#     records_afternoon = records_afternoon.loc[ini_index_dates]
+#     records_afternoon.index = records_morning.index
+
+# Run every requested experiment for every selected station chunk.
+experiments = args.experiments.strip(' ').split(' ')
+for expname in experiments:
+    # experiment definition: parameter overrides applied to each morning profile
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    for istation,current_station in run_stations.iterrows():
+        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+            fn_forcing = \
+                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+
+            #file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            file_ini = open(fn_ini,'w')
+
+            #iexp = 0
+            onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+            # slice out the soundings that belong to this station chunk
+            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+            isim = 0
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                    print('starting '+str(isim+1)+' out of '+\
+                      str(len(records_morning_station_chunk) )+\
+                      ' (station total: ',str(len(records_morning_station)),')')  
+                
+            
+                    # NOTE(review): file_morning is never opened in this
+                    # revision (fn_forcing above is computed but unused, and
+                    # the open() call is commented out) -- this raises a
+                    # NameError at runtime; confirm against a later patch.
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    if args.runtime == 'from_afternoon_profile':
+                        # NOTE(review): records_afternoon and file_afternoon
+                        # are only created in the commented-out block above,
+                        # so this branch cannot work in this revision.
+                        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                        c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                          record_afternoon.index_start, 
+                                                          record_afternoon.index_end,
+                                                        mode='ini')
+                        # runtime = daylight time between morning and afternoon sounding
+                        runtime = int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())
+                    else:
+                        runtime = int(args.runtime)
+
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        runtime})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+
+                    if args.error_handling == 'dump_always':
+                        # NOTE(review): bare except silently swallows all
+                        # errors (including KeyboardInterrupt); narrow it.
+                        # "succesfull" is a typo in the runtime string, left
+                        # untouched here.
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                        except:
+                            print('run not succesfull')
+                        onerun = True
+
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        # NOTE(review): file_mod is never opened in this
+                        # revision -- NameError at runtime; confirm intent
+                        # (presumably open(path_exp + ..._mod.yaml, 'w')).
+                        c4gl.dump(file_mod,\
+                                  include_input=False,\
+                                  #timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    # in this case, only the file will dumped if the runs were
+                    # successful
+                    elif args.error_handling == 'dump_on_success':
+                        try:
+                            c4gl.run()
+                            print('run succesfull')
+                            c4gli_morning.dump(file_ini)
+                            
+                            
+                            c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                      #timeseries_only=timeseries_only,\
+                                     )
+                            onerun = True
+                        except:
+                            print('run not succesfull')
+                    isim += 1
+
+
+            file_ini.close()
+            # NOTE(review): file_mod, file_morning and file_afternoon are
+            # never opened in this revision (see notes above), so these
+            # close() calls would raise NameError.
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                # re-index the freshly written ini/mod yaml files
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                # NOTE(review): fn_mod is never assigned in this revision.
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/class4gl/simulations/update_yaml_old.py b/class4gl/simulations/update_yaml_old.py
new file mode 100644
index 0000000..18b3071
--- /dev/null
+++ b/class4gl/simulations/update_yaml_old.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+
+""" 
+Purpose:
+    update variables in class4gl yaml files, eg., when you need new categorical
+    values in the table.
+
+
+"""
+
+
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import dateutil.parser
+
+import argparse
+
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--path_experiments')
+parser.add_argument('--experiments')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--mode',default='ini') # this tells which yaml subset
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to update in the yaml
+                                                      # dataset.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+parser.add_argument('--split_by',default=-1)# station soundings are split
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--global_keys') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# iniitialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_station_select.iloc[int(args.first_station):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+os.system('mkdir -p '+args.path_experiments)
+# only run a specific chunck from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            #chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+
+            chunks_current_station = len(all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
+            print('chunks_current_station',chunks_current_station)
+
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk =all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique()[int(args.global_chunk_number) - totalchunks ]
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching current records')
+records_forcing = get_records(run_stations,\
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
+                              refetch_records=False,
+                              )
+
+# if args.timestamp is None:
+#     backupdir = args.path_forcing+'/'+dt.datetime.now().isoformat()+'/'
+# else: 
+#     backupdir = args.path_forcing+'/'+args.timestamp+'/'
+# print('creating backup dir: '+backupdir)
+# os.system('mkdir -p "'+backupdir+'"')
+
+
+
+
+
+# For each selected station (chunk): read every forcing record, refresh the
+# requested global-data keys, and write the updated records to the
+# experiments directory.
+for istation,current_station in run_stations.iterrows():
+    # all records of this station (used for progress reporting only)
+    records_forcing_station = records_forcing.query('STNID == ' +\
+                                                    str(current_station.name))
+
+    # records of the chunk that this process is responsible for
+    records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+                                                    str(current_station.name)+\
+                                                   '& chunk == '+str(run_station_chunk))
+    print('lenrecords_forcing_station: ',len(records_forcing_station))
+    print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
+    print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
+    
+    # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
+    #     print("warning: outside of profile number range for station "+\
+    #           str(current_station)+". Skipping chunk number for this station.")
+    if len(records_forcing_station_chunk) == 0:
+        print("warning: outside of profile number range for station "+\
+              str(current_station)+". Skipping chunk number for this station.")
+    else:
+        # normal case
+        # NOTE(review): this condition looks inverted -- split_by > 0 OR the
+        # chunked file exists selects the chunk-suffixed naming; confirm the
+        # intended rule for legacy no-chunk file names.
+        if ((int(args.split_by) > 0) or \
+            (os.path.isfile(args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                 str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'))):
+            fn_forcing = \
+                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            file_forcing = \
+                open(fn_forcing,'r')
+            fn_experiment = args.path_experiments+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            file_experiment = \
+                open(fn_experiment,'w')
+            # fn_forcing_pkl is only used by the commented-out backup logic
+            # below; currently dead.
+            fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+        else:
+            print("\
+Warning. We are choosing chunk 0 without specifying it in filename.    \
+ No-chunk naming will be removed in the future."\
+                 )
+
+            # NOTE(review): fn_forcing here still uses the chunk-suffixed name,
+            # identical to the branch above, which contradicts the "no-chunk
+            # naming" warning just printed -- confirm intent.
+            fn_forcing = \
+                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            file_forcing = \
+                open(fn_forcing,'r')
+            fn_experiment = args.path_experiments+'/'+format(current_station.name,'05d')+'_'+\
+                     args.subset_forcing+'.yaml'
+            file_experiment = \
+                open(fn_experiment,'w')
+            # NOTE(review): missing '/' between path_forcing and the file name
+            # (compare the chunked branch above); harmless only while the
+            # backup code below stays commented out.
+            fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
+                     args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          args.subset_forcing+'.pkl'
+
+        onerun = False
+        print('starting station chunk number: '\
+              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+        #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+        # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+        #                                                 str(current_station.name)+\
+        #                                                '& chunk == '+str(run_station_chunk))
+        isim = 0
+        for (STNID,chunk,index),record_forcing in records_forcing_station_chunk.iterrows():
+                print('starting '+str(isim+1)+' out of '+\
+                  str(len(records_forcing_station_chunk) )+\
+                  ' (station total: ',str(len(records_forcing_station)),')')  
+            
+        
+                # read the record from the forcing yaml at its byte offsets
+                c4gli_forcing = get_record_yaml(file_forcing, 
+                                                record_forcing.index_start, 
+                                                record_forcing.index_end,
+                                                mode=args.mode)
+                
+                #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
+                
+                # refresh only the requested global-dataset keys
+                if args.global_keys is not None:
+                    print(args.global_keys.strip(' ').split(' '))
+                    c4gli_forcing.get_global_input(
+                        globaldata, 
+                        only_keys=args.global_keys.strip(' ').split(' ')
+                    )
+        
+
+
+                c4gli_forcing.dump(file_experiment)
+                    
+                    
+                onerun = True
+                isim += 1
+
+
+        file_forcing.close()
+        file_experiment.close()
+
+        if onerun:
+            # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
+            # if os.path.isfile(fn_forcing_pkl):
+            #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
+            # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            # re-index the newly written yaml so the record cache is fresh
+            records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
+                                                       args.path_experiments,\
+                                                       getchunk = int(run_station_chunk),\
+                                                       subset=args.subset_forcing,
+                                                       refetch_records=True,
+                                                       )
+

From dbf3f270a34c8b7b861f070b18426bdabf16a20d Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 4 Sep 2018 21:29:33 +0200
Subject: [PATCH 066/129] correction in advection terms

---
 class4gl/model.py                        | 2 +-
 class4gl/simulations/simulations.py      | 8 ++++++++
 class4gl/simulations/simulations_iter.py | 4 ++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/class4gl/model.py b/class4gl/model.py
index 471d218..6a9d2c3 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -926,7 +926,7 @@ def run_mixed_layer(self):
         htend_pre       = self.we + self.ws + self.wf - self.M
         
         #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h - self.advtheta
         
  
         #print('thetatend_pre',thetatend_pre)
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index bced8d3..e10c1da 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -58,10 +58,18 @@
 
 EXP_DEFS  =\
 {
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
 
diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py
index 438db32..cdc8923 100644
--- a/class4gl/simulations/simulations_iter.py
+++ b/class4gl/simulations/simulations_iter.py
@@ -29,6 +29,10 @@
 
 EXP_DEFS  =\
 {
+  'GLOBAL_ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},

From 659ca0f63dca9ab5909e24103d758cccf8871897 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Fri, 7 Sep 2018 17:49:35 +0200
Subject: [PATCH 067/129] era-interim input

---
 class4gl/class4gl.py                     | 262 ++++++++++++++-
 class4gl/interface/interface.py          |   2 +-
 class4gl/interface/interface_koeppen.py  | 155 +++++++--
 class4gl/interface/interface_stations.py |  10 +-
 class4gl/interface/taylorDiagram.py      |   9 +-
 class4gl/interface_functions.py          |   2 +-
 class4gl/interface_multi.py              | 411 ++++++++++++-----------
 class4gl/setup/setup_era.py              | 270 +++++++++++++++
 class4gl/simulations/simulations.py      |  36 +-
 9 files changed, 913 insertions(+), 244 deletions(-)
 create mode 100644 class4gl/setup/setup_era.py

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 7fe3767..9b5c8b9 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -378,6 +378,7 @@ def update(self,source,**kwargs):
         #print(source,kwargs)
 
         for key,data in kwargs.items():
+            #print('update',key,data)
 
             #print(key)
             # if the key is not in class4gl_input object, then just add it. In
@@ -414,24 +415,37 @@ def update(self,source,**kwargs):
                     
 
             elif type(self.__dict__[key]) == model_input:
-                # if the data type is a model_input, then we update its internal
+                # if the data type is a model_input (pars), then we update its internal
                 # dictionary of parameters
                 if type(data) == model_input:
                     self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
                                                    **data.__dict__}
                     datakeys = list(data.__dict__.keys())
                 elif type(data) == dict:
-                    self.__dict__[key].__dict__ = {**self.__dict__[key].__dict__, \
-                                                   **data}
                     datakeys = list(data.keys())
+                    datavalues = list(data.values())
+                    for idatavalue,datavalue in enumerate(datavalues):
+
+                        # convert numpy to native python value types, so that
+                        # we get clean output in the yaml file
+                        if type(datavalue).__module__ == 'numpy':
+                            datavalues[idatavalue] = datavalue.item()
+
+                    self.__dict__[key].__dict__ = \
+                            {**self.__dict__[key].__dict__, \
+                            **dict(zip(datakeys, datavalues))}
                 else:
                     raise TypeError('input key '+key+' is not of the same type\
                                     as the one in the class4gl_object')
 
+
+
             elif type(self.__dict__[key]) == dict:
                 # if the data type is a dictionary, we update the
                 # dictionary 
+               # print('before update', self.__dict__[key] , data)
                 self.__dict__[key] = {self.__dict__[key] , data}
+               # print('after update',self.__dict__[key] )
                 datakeys = list(data.keys())
 
 
@@ -576,7 +590,7 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
         # # this is an alternative pipe/numpy method
         # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0]
         valid_indices = air_balloon.index[is_valid].values
-        print(valid_indices)
+        #print(valid_indices)
 
         dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
 
@@ -860,6 +874,7 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
         # by default, we get all dataset keys
         keys = list(globaldata.datasets.keys())
 
+        #print('keys orig', keys)
 
         # # In case there is surface pressure, we also calculate the half-level
         # # and full-level pressure fields
@@ -874,6 +889,7 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                 if key not in only_keys:
                     keys.remove(key)
 
+        #print('keys 1', keys)
                 
         # If specified, we take out keys that are in exclude keys
         if exclude_keys is not None:
@@ -901,14 +917,15 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
             #else:
             #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
 
-        self.logger.debug('getting keys "'+', '.join(keys)+'\
-                          from global data')
+        #print('keys 2', keys)
 
         for key in keys:
             # If we find it, then we obtain the variables
+            #print('key 0', key)
             if ((key in globaldata.datasets) and \
                 (globaldata.datasets[key].page is not None)):
 
+                #print('key 1', key)
                 # check first whether the dataset has a height coordinate (3d space)
                 if 'lev' in globaldata.datasets[key].page[key].dims:
 
@@ -926,12 +943,35 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                         
                         # if we have a time dimension, then we look up the required timesteps during the class simulation
                         if 'time' in list(globaldata.datasets[key].page[key].dims):
-                            itimes = ((globaldata.datasets[key].page.time >= \
-                                       classdatetime) & (globaldata.datasets[key].page.time < classdatetime_stop))
+
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+                            
+                            idatetime = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+                            if key not in ['t','u','v','q']:
+                                if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                    idatetime += 1
+                            
+                            DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime_stop))
+                            idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+                            #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+                            if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetime_stop)):
+                                idatetimeend -= 1
+                            idatetime = np.min((idatetime,idatetimeend))
+                            # for GLEAM, we take the previous day's values
+
+                            # in case of soil temperature, we take the exact
+                            # timing (which is the morning)
+                            if key in ['t','u','v','q']:
+                                idatetimeend = idatetime
+                            
+                            itimes = range(idatetime,idatetimeend+1)
+                            #print(key,'itimes',itimes)
+
 
                             # In case we didn't find any correct time, we take the
                             # closest one.
-                            if np.sum(itimes) == 0.:
+                            if len(itimes) == 0:
 
 
                                 classdatetimemean = \
@@ -969,8 +1009,6 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                 else:
                     # this procedure is for reading the ground fields (2d space). 
                     # Actually, the code should be simplified to a similar fasion as the 3d procedure above and tested again.
-
-    
                     if 'time' in list(globaldata.datasets[key].page[key].dims):
     
                        # first, we browse to the correct file
@@ -1029,11 +1067,12 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                             
                             idatetime = np.where((DIST) == np.min(DIST))[0][0]
                             #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
-                            if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
-                                idatetime += 1
+                            if key not in ['Tsoil','T2']:
+                                if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+                                    idatetime += 1
                             
                             classdatetimeend = np.datetime64(\
-                                                             self.pars.datetime +\
+                                                             self.pars.datetime_daylight +\
                                                              dt.timedelta(seconds=self.pars.runtime)\
                                                             ) 
                             DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetimeend))
@@ -1224,6 +1263,23 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
             # self.update(source='globaldata',\
             #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
 
+
+    def query_source(self,var):
+        """ 
+        purpose:
+            this procedure returns the name of the data source for a certain
+            variable
+        
+        input:
+            var: this should be in the format "section:variable", eg.,
+            "pars:h", or "air_ac:theta"
+
+        """
+
+        for source,vars_in_source in self.sources.items():
+            if var in vars_in_source:
+                return source
+
     def check_source(self,source,check_only_sections=None):
         """ this procedure checks whether data of a specified source is valid.
 
@@ -1316,6 +1372,184 @@ def check_source_globaldata(self):
 
         return source_globaldata_ok
 
+    def mixed_layer_fit(self,air_ap,source,mode):
+        """ 
+            Purpose: 
+
+
+        """
+
+
+        # Raise an error in case the input stream is not the correct object
+        # if type(wy_strm) is not wyoming:
+        #    raise TypeError('Not a wyoming type input stream')
+
+        # Let's tell the class_input object that it is a Wyoming fit type
+        self.air_ap_type = source+'_fit'
+        # ... and which mode of fitting we apply
+        self.air_ap_mode = mode
+
+
+        # Therefore, determine the sounding that are valid for 'any' column 
+        is_valid = ~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)
+        #is_valid = (air_ap.z >= 0)
+        # # this is an alternative pipe/numpy method
+        # (~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)).pipe(np.where)[0]
+        valid_indices = air_ap.index[is_valid].values
+        #print(valid_indices)
+
+
+        hvalues = {}
+        if len(valid_indices) > 0:
+            # calculate mixed-layer height from the critical Richardson number of the virtual temperature profile
+            hvalues['h_b'] ,hvalues['h_u'],hvalues['h_l']  = blh(air_ap.z,air_ap.thetav,np.sqrt(air_ap.u**2. + air_ap.u**2.))
+            
+            hvalues['h_b']  = np.max((hvalues['h_b'] ,10.))
+            hvalues['h_u']  = np.max((hvalues['h_u'] ,10.)) #upper limit of mixed layer height
+            hvalues['h_l']  = np.max((hvalues['h_l'] ,10.)) #lower limit of mixed layer height
+            hvalues['h_e']  = np.abs( hvalues['h_u']  - hvalues['h_l'] ) # error of mixed-layer height
+            
+            # the final mixed-layer height that will be used by class. We round it
+            # to 1 decimal so that we get a clean yaml output format
+            hvalues['h']  = np.round(hvalues['h_'+mode],1)
+        else:
+            hvalues['h_u']  =np.nan
+            hvalues['h_l']  =np.nan
+            hvalues['h_e']  =np.nan
+            hvalues['h']    =np.nan
+        self.update(source='fit_from_'+source,pars=hvalues)
+
+        if np.isnan(self.pars.h ):
+            self.pars.Ps  = nan
+
+        mlvalues = {}
+        if ~np.isnan(self.pars.h ):
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_ap.z < self.pars.h)
+            valid_indices_below_h =  air_ap.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_ap[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_ap.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_ap.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_ap)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            mlvalues['theta'] = ml_mean.theta
+            mlvalues['q']     = ml_mean.q
+            mlvalues['u']     = ml_mean.u 
+            mlvalues['v']     = ml_mean.v 
+        else:
+            mlvalues['theta']  = np.nan
+            mlvalues['q']  = np.nan
+            mlvalues['u']  = np.nan
+            mlvalues['v']  = np.nan
+            
+
+        self.update(source='fit_from_'+source,pars=mlvalues)
+
+
+        # First 3 data points of the mixed-layer fit. We create an empty head
+        # first
+        air_ap_head = air_ap[0:0] #pd.DataFrame(columns = air_ap.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_ap[air_ap.z > self.pars.h ]
+        
+        #calculate mixed-layer jump ( this should be larger than 0.1)
+        
+        air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+        #air_ap_head['HGHT'] = air_ap_head['z'] \
+        #                        + \
+        #                        np.round(dpars[ 'Station elevation'],1)
+        
+        # make a row object for defining the jump
+        jump = air_ap_head.iloc[0] * np.nan
+            
+        if air_ap_tail.shape[0] > 1:
+
+            # we originally used THTA, but that has a different definition from
+            # the variable theta that we need, which is the temperature the air
+            # would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (self.pars.h - air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = self.pars.theta 
+        z_low =     self.pars.h 
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                (z_mean > (z_low+10.)) and \
+                (theta_mean > (theta_low+0.2) ) and \
+                (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+
+
+
+
+
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        
+        # we copy the pressure at ground level from the balloon sounding. The
+        # pressure at mixed-layer height will be determined internally by class
+        #print(air_ap['PRES'].iloc[0])
+
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+
+        air_ap['p'].iloc[0] =self.pars.Ps  
+        air_ap['p'].iloc[1] =(self.pars.Ps - rho * g * self.pars.h )
+        air_ap['p'].iloc[2] =(self.pars.Ps - rho * g * self.pars.h -0.1)
+
+        self.update(source='fit_from_'+source,air_ap=air_ap)
+
+
 
 class c4gli_iterator():
     """ this iterator allows to loop through an entire yaml file and load class4gl_input sequentially 
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index af989c0..1af9e4c 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -118,7 +118,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     c4gldata[key] = c4gl_interface_soundings( \
                       args.path_experiments+'/'+key+'/',\
-                      args.path_forcing+'/',\
+                      args.path_forcing,\
                       globaldata,\
                       refetch_records=False
                     )
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index 6af9a7c..cbe89f2 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -123,25 +123,132 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False
                     )
+
 '''
+key = args.experiments.strip(' ').split(' ')[0]
+xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
+koeppenlookuptable = pd.DataFrame()
+koeppenlookuptable['KGCID'] = pd.Series(xrkoeppen['KGCID'])
+c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] =  \
+    c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID'])
+
+koeppenlookuptable['amount'] = ""
+for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+    print(ikoeppen,':',koeppen)
+    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen.KGCID)
+    print(np.sum(kgc_select))
+    koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
+
+koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
+koeppenlookuptable = koeppenlookuptable[:9]
+koeppenlookuptable = koeppenlookuptable.sort_index()
+
+
+kgccolors = {
+    'Dfa':['navy','white'],
+    'Cfb':['green','white']       ,
+    'BSk':['tan','black']      ,
+    'Csb':['lightgreen','black'] ,     
+    'Cfa':['darkgreen','white']  ,    
+    'BWh':['orange','black']      ,
+    'Aw' :['pink','black'],
+    'Dwc':['rebeccapurple','white'] ,    
+    'Dfb':['darkviolet','white']    , 
+}
+kgcnames = {
+    'Dfa':'snow - fully humid - hot summer',
+    'Cfb':'green'       ,
+    'BSk':''      ,
+    'Csb':''      ,
+    'Cfa':'darkgreen' ,     
+    'BWh':''      ,
+    'Aw' :''     ,
+    'Dwc':''     ,
+    'Dfb':''     ,
+    #'Dfa':'',
+}
+
+
+koeppenlookuptable['color'] = ""
+koeppenlookuptable['textcolor'] = ""
+koeppenlookuptable['name'] = ""
+for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+    print(ikoeppen)
+    print(koeppen.KGCID)
+    print(kgccolors[koeppen.KGCID])
+    koeppenlookuptable['color'].loc[ikoeppen] = kgccolors[koeppen.KGCID][0]
+    koeppenlookuptable['textcolor'].loc[ikoeppen] = kgccolors[koeppen.KGCID][1]
+    koeppenlookuptable['name'].loc[ikoeppen] = kgcnames[koeppen.KGCID]
+
+
 if args.make_figures:
-    """
     # the lines below activate TaylorPlots but it is disabled for now
     fig = plt.figure(figsize=(10,7))   #width,height
     i = 1                                                                           
     axes = {}         
     axes_taylor = {}         
     
-    colors = ['r','g','b','m']
+    colors = ['r','g','b','m','y','purple','orange','sienna','navy']
     symbols = ['*','x','+']
     dias = {}
-    
+
+
+
+    i = 1
     for varkey in ['h','theta','q']:                                                    
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
         axes[varkey] = fig.add_subplot(2,3,i)                                       
+
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]):
+            icolor = 0
+            for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+                print(ikoeppen,':',koeppen)
+                kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen.KGCID)
+                
+                koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select]
+                koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select]
+    
+                #axes[varkey].scatter(koeppen_obs,koeppen_mod,marker=symbols[ikoeppen],color=colors[ikey])
+                         #  label=key+", "+\
+                         #                    'R = '+str(round(PR[0],3))+', '+\
+                         #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                         #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+    
+    
+    
+            # # pl.scatter(obs,mod,label=key+", "+\
+            # #                              'R = '+str(round(PR[0],3))+', '+\
+            # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+            # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+                
+                print('hellobla')
+                print(koeppen.KGCID)
+                print(koeppen.color)
+                dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                               pearsonr(koeppen_mod,koeppen_obs)[0],
+                               annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                               bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                               )
+                icolor += 1
+    
+            latex = {}
+            latex['dthetadt'] =  r'$d \theta / dt $'
+            latex['dqdt'] =      r'$d q / dt $'
+            latex['dhdt'] =      r'$d h / dt $'
+    
+            axes[varkey].set_xlabel('observations')     
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+        if i==1:                                    
+            axes[varkey].set_ylabel('model')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+
+    
+    i = 0
+    for varkey in ['h','theta','q']:                                                    
         #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
     
         #print(obs.std())
-        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
         if i == 0:
             dias[varkey]._ax.axis["left"].label.set_text(\
                 "Standard deviation (model) / Standard deviation (observations)")
@@ -161,7 +268,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #dia.ax.plot(x99,y99,color='k')
     
         
-        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        #for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]):
             # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
             # clearsky = (cc < 0.05)
             # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
@@ -189,11 +297,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             
             # print(STD)
             # print(PR)
-            dias[varkey].add_sample(STD/STD_OBS, PR,
-                           marker='o', ms=5, ls='',
-                           #mfc='k', mec='k', # B&W
-                           mfc=colors[ikey], mec=colors[ikey], # Colors
-                           label=key)
+            dias[varkey].add_sample(STD/STD_OBS, PR,\
+                               annotate='All', zorder=100,color='black',weight='bold',fontsize=5.,\
+                                    bbox={'edgecolor':'black','boxstyle':'circle','fc':'lightgrey','alpha':0.6}\
+                            )
     
         # put ticker position, see
         # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
@@ -202,6 +309,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # dia.ax.axis['left'].
     
         i += 1
+
     
     i = 0
     for varkey in ['h','theta','q']:                                                    
@@ -275,15 +383,15 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     
     
-    # legend for different forcing simulations (colors)
-    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-    leg = []
-    for ikey,key in enumerate(args.experiments.strip().split(' ')):
-        leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
-        leg.append(leg1)
-    ax.axis('off')
-    #leg1 =
-    ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+    # # legend for different forcing simulations (colors)
+    # ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # for ikey,key in enumerate(args.experiments.strip().split(' ')):
+    #     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+    #     leg.append(leg1)
+    # ax.axis('off')
+    # #leg1 =
+    # ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
     
     
     # # legend for different stations (symbols)
@@ -314,7 +422,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     if args.figure_filename is not None:
         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
     fig.show()  
-"""
+
     if bool(args.show_control_parameters):
 
         import seaborn as sns
@@ -429,11 +537,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         for varkey in ['h','theta','q']:
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            data_all['KGCname'] = data_input['KGCname']
             #print(data_input.shape)
             #print(data_all.shape)
-        xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
-        lookuptable = pd.Series(xrkoeppen['KGCID'])
-        data_all['KGCname'] = data_input['KGC'].map(lookuptable)
+        # xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
+        # lookuptable = pd.Series(xrkoeppen['KGCID'])
+        # data_all['KGCname'] = data_input['KGC'].map(lookuptable)
         #print('hello6')
         #print(data_all.columns)
         #print('hello7')
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index 9e6b82a..c7ac908 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -203,13 +203,13 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     i = 0
     for varkey in ['h','theta','q']:                                                    
         for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-            isymbol = 0
+            istation = 0
             for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
                 indices =  (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name)
                 station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices]
                 station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices]
     
-                axes[varkey].scatter(station_obs,station_mod,marker=symbols[isymbol],color=colors[ikey])
+                axes[varkey].scatter(station_obs,station_mod,marker=symbols[istation],color=colors[ikey])
                          #  label=key+", "+\
                          #                    'R = '+str(round(PR[0],3))+', '+\
                          #                    'RMSE = '+str(round(RMSE,5))+', '+\
@@ -223,12 +223,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
                 
                 dias[varkey].add_sample(station_mod.std()/station_obs.std(),
-                               pearsonr(station_mod,station_obs)[0],
-                               marker=symbols[isymbol], ms=5, ls='',
+                               pearsonr(station_mod,station_obs)[0],annotate=symbols[istation],
+                               marker=symbols[istation], ms=5, ls='',
                                #mfc='k', mec='k', # B&W
                                mfc=colors[ikey], mec=colors[ikey], # Colors
                                label=key)
-                isymbol += 1
+                istation += 1
     
     
             axes[varkey].set_xlabel('observations')     
diff --git a/class4gl/interface/taylorDiagram.py b/class4gl/interface/taylorDiagram.py
index 9c51b48..d9e9e26 100644
--- a/class4gl/interface/taylorDiagram.py
+++ b/class4gl/interface/taylorDiagram.py
@@ -106,14 +106,19 @@ def __init__(self, refstd,
         # Collect sample points for latter use (e.g. legend)
         self.samplePoints = [l]
 
-    def add_sample(self, stddev, corrcoef, *args, **kwargs):
+    def add_sample(self, stddev, corrcoef, annotate=None,*args, **kwargs):
         """
         Add sample (*stddev*, *corrcoeff*) to the Taylor
         diagram. *args* and *kwargs* are directly propagated to the
         `Figure.plot` command.
         """
+        print(annotate)
 
-        l, = self.ax.plot(NP.arccos(corrcoef), stddev,
+        if annotate is not None:
+            l = self.ax.annotate(annotate,xy=(NP.arccos(corrcoef), stddev),\
+                              *args, **kwargs)  # (theta, radius)
+        else:
+            l, = self.ax.plot(NP.arccos(corrcoef), stddev,
                           *args, **kwargs)  # (theta, radius)
         self.samplePoints.append(l)
 
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 43f0be6..0da9a00 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -178,7 +178,7 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
 
 
 class stations(object):
-    def __init__(self,path,suffix='ini',refetch_stations=False):
+    def __init__(self,path,suffix='ini',refetch_stations=True):
 
         self.path = path
 
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 8672499..6e7cd2f 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -51,7 +51,7 @@
 os.system('module load Ruby')
 
 class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
+    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
         """ creates an interactive interface for analysing class4gl experiments
 
         INPUT:
@@ -117,48 +117,50 @@ def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetc
                                            refetch_records=refetch_records
                                            )
 
-        # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_obs_afternoon'] =\
-                        get_records(self.frames['stats']['stations'].table,\
-                                           self.path_obs,\
-                                           subset='afternoon',\
-                                           refetch_records=refetch_records
-                                           )
+        if self.path_obs is not None:
+            # get its records and load it into the stats frame
+            self.frames['stats']['records_all_stations_obs_afternoon'] =\
+                            get_records(self.frames['stats']['stations'].table,\
+                                               self.path_obs,\
+                                               subset='afternoon',\
+                                               refetch_records=refetch_records
+                                               )
 
         self.frames['stats']['records_all_stations_mod'].index = \
             self.frames['stats']['records_all_stations_ini'].index 
 
         self.frames['stats']['records_all_stations_ini']['dates'] = \
-            self.frames['stats']['records_all_stations_ini'].ldatetime.dt.date
+            self.frames['stats']['records_all_stations_ini']['ldatetime'].dt.date
 
-        self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].ldatetime.dt.date
+        if self.path_obs is not None:
+            self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
+                self.frames['stats']['records_all_stations_obs_afternoon']['ldatetime'].dt.date
 
-        self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
+            self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
 
 
-        ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
+            ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
 
-        self.frames['stats']['records_all_stations_obs_afternoon'] = \
-            self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
+            self.frames['stats']['records_all_stations_obs_afternoon'] = \
+                self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
 
-        self.frames['stats']['records_all_stations_obs_afternoon'].index = \
-            self.frames['stats']['records_all_stations_ini'].index 
+            self.frames['stats']['records_all_stations_obs_afternoon'].index = \
+                self.frames['stats']['records_all_stations_ini'].index 
 
-        self.frames['stats']['viewkeys'] = ['h','theta','q']
-        print('Calculating table statistics')
-        self.frames['stats']['records_all_stations_mod_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_mod'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
-        self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_obs_afternoon'],\
-                           self.frames['stats']['records_all_stations_ini'],\
-                           self.frames['stats']['viewkeys']\
-                          )
+            self.frames['stats']['viewkeys'] = ['h','theta','q']
+            print('Calculating table statistics')
+            self.frames['stats']['records_all_stations_mod_stats'] = \
+                    tendencies(self.frames['stats']['records_all_stations_mod'],\
+                               self.frames['stats']['records_all_stations_obs_afternoon'],\
+                               self.frames['stats']['records_all_stations_ini'],\
+                               self.frames['stats']['viewkeys']\
+                              )
+            self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
+                    tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
+                               self.frames['stats']['records_all_stations_obs_afternoon'],\
+                               self.frames['stats']['records_all_stations_ini'],\
+                               self.frames['stats']['viewkeys']\
+                              )
 
         self.frames['stats']['inputkeys'] = inputkeys
         
@@ -204,21 +206,25 @@ def __init__(self,path_exp,path_obs,globaldata=None,refetch_records=False,refetc
         #               obs = self.frames['stats']['records_all_stations_ini'], \
         #               columns = self.frames['stats']['viewkeys'], \
         #              )
-        indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
+
         
-        print('filtering pathological data')
-        # some observational sounding still seem problematic, which needs to be
-        # investigated. In the meantime, we filter them
-        valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
-
-        # we filter ALL data frames!!!
-        for key in self.frames['stats'].keys():
-            if (type(self.frames['stats'][key]) == pd.DataFrame) and \
-               (self.frames['stats'][key].index.names == indextype):
-                self.frames['stats'][key] = self.frames['stats'][key][valid]
-        print(str(len(valid) - np.sum(valid))+' soundings are filtered')
+
+        if self.path_obs is not None:
+            print('filtering pathological data')
+            indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
+            # some observational sounding still seem problematic, which needs to be
+            # investigated. In the meantime, we filter them
+
+            if self.path_obs is not None:
+                valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
+                        ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
+                        ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
+
+                for key in self.frames['stats'].keys():
+                    if (type(self.frames['stats'][key]) == pd.DataFrame) and \
+                       (self.frames['stats'][key].index.names == indextype):
+                        self.frames['stats'][key] = self.frames['stats'][key][valid]
+                print(str(len(valid) - np.sum(valid))+' soundings are filtered')
 
         self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
 
@@ -300,7 +306,10 @@ def update_station(self):
 
         # create the value table of the records of the current station
         tab_suffixes = \
-                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                ['_mod','_ini','_ini_pct']
+        if self.path_obs is not None:
+            tab_suffixes=tab_suffixes+['_obs_afternoon','_mod_stats','_obs_afternoon_stats']
+
         for tab_suffix in tab_suffixes:
             self.frames['stats']['records_current_station'+tab_suffix] = \
                 self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
@@ -330,8 +339,9 @@ def update_station(self):
             open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
         if 'current_station_file_afternoon' in self.frames['profiles'].keys():
             self.frames['profiles']['current_station_file_afternoon'].close()
-        self.frames['profiles']['current_station_file_afternoon'] = \
-            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+        if self.path_obs is not None:
+            self.frames['profiles']['current_station_file_afternoon'] = \
+                open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
 
         # for the profiles we make a distinct record iterator, so that the
         # stats iterator can move independently
@@ -379,22 +389,23 @@ def update_record(self):
                   (self.frames['profiles']['STNID'] , \
                   self.frames['profiles']['current_record_chunk'],\
                   self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon'] =  \
-            self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
-                  (self.frames['profiles']['STNID'] , \
-                  self.frames['profiles']['current_record_chunk'] , \
-                  self.frames['profiles']['current_record_index'])]
-
-        self.frames['profiles']['current_record_mod_stats'] = \
-                self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
-                    self.frames['profiles']['STNID'], \
-                    self.frames['profiles']['current_record_chunk'], \
-                    self.frames['profiles']['current_record_index'])]
-        self.frames['profiles']['current_record_obs_afternoon_stats'] = \
-                self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
-                    self.frames['profiles']['STNID'],\
-                    self.frames['profiles']['current_record_chunk'],\
-                    self.frames['profiles']['current_record_index'])]
+        if self.path_obs is not None:
+            self.frames['profiles']['current_record_obs_afternoon'] =  \
+                self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
+                      (self.frames['profiles']['STNID'] , \
+                      self.frames['profiles']['current_record_chunk'] , \
+                      self.frames['profiles']['current_record_index'])]
+
+            self.frames['profiles']['current_record_mod_stats'] = \
+                    self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
+                        self.frames['profiles']['STNID'], \
+                        self.frames['profiles']['current_record_chunk'], \
+                        self.frames['profiles']['current_record_index'])]
+            self.frames['profiles']['current_record_obs_afternoon_stats'] = \
+                    self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
+                        self.frames['profiles']['STNID'],\
+                        self.frames['profiles']['current_record_chunk'],\
+                        self.frames['profiles']['current_record_index'])]
         self.frames['profiles']['current_record_ini_pct'] = \
                 self.frames['profiles']['records_all_stations_ini_pct'].loc[(\
                     self.frames['profiles']['STNID'],\
@@ -430,17 +441,18 @@ def update_record(self):
                record_ini.index_end,
                 mode='ini')
 
-        record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
-                       (self.frames['stats']['STNID'] , \
-                        self.frames['stats']['current_record_chunk'] , \
-                        self.frames['stats']['current_record_index'])]
+        if self.path_obs is not None:
+            record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
+                           (self.frames['stats']['STNID'] , \
+                            self.frames['stats']['current_record_chunk'] , \
+                            self.frames['stats']['current_record_index'])]
 
-        self.frames['profiles']['record_yaml_obs_afternoon'] = \
-           get_record_yaml(
-               self.frames['profiles']['current_station_file_afternoon'], \
-               record_afternoon.index_start,
-               record_afternoon.index_end,
-                mode='ini')
+            self.frames['profiles']['record_yaml_obs_afternoon'] = \
+               get_record_yaml(
+                   self.frames['profiles']['current_station_file_afternoon'], \
+                   record_afternoon.index_start,
+                   record_afternoon.index_end,
+                    mode='ini')
 
 
         key = self.frames['worldmap']['inputkey']
@@ -481,20 +493,22 @@ def plot(self):
         btns = {} #buttons
 
         # frames, which sets attributes for a group of axes, buttens, 
-        for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
-            label = 'stats_'+str(key)
-            axes[label] = fig.add_subplot(\
-                            len(self.frames['stats']['viewkeys']),\
-                            5,\
-                            5*ikey+1,label=label)
-            # Actually, the axes should be a part of the frame!
-            #self.frames['stats']['axes'] = axes[
+        if self.path_obs is not None:
+
+            for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
+                label = 'stats_'+str(key)
+                axes[label] = fig.add_subplot(\
+                                len(self.frames['stats']['viewkeys']),\
+                                5,\
+                                5*ikey+1,label=label)
+                # Actually, the axes should be a part of the frame!
+                #self.frames['stats']['axes'] = axes[
 
-            # pointer to the axes' point data
-            axes[label].data = {}
+                # pointer to the axes' point data
+                axes[label].data = {}
 
-            # pointer to the axes' color fields
-            axes[label].fields = {}
+                # pointer to the axes' color fields
+                axes[label].fields = {}
 
 
         fig.tight_layout()
@@ -906,7 +920,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
             # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
 
-        if (only is None) or ('stats' in only) or ('stats_lightupdate' in only):
+        if (self.path_obs is not None) and \
+           ((only is None) or ('stats' in only) or ('stats_lightupdate' in only)):
 
             statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
             store_xlim = {}
@@ -1192,23 +1207,21 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
             #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
             hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                          ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
-            #print('r13')
-            # 
+                           self.frames['profiles']['record_yaml_mod'].out.h[-1]])
+            if self.path_obs is not None:
+                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_obs_afternoon'].pars.h])
 
 
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
+                                    < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+                zco = range(zidxmax)
 
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
+                axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                                 label="obs "+\
+                                 self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
             #print('r14')
             zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values
                                 < 2.*hmax))[0][-1])+2
@@ -1223,29 +1236,30 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
 
             #print('r15')
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
-                                < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
+            if self.path_obs is not None:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+                                    < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+                zco = range(zidxmax)
 
-                          
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
+                              
+                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                                 label="obs "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
 
-            #print('r16')
+                #print('r16')
 
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+                zco = range(zidxmax)
 
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
+                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                                 label="fit "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
 
             #print('r17')
             print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
@@ -1260,7 +1274,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                 axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
                                  self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
                                  label="mod "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 (self.frames['profiles']['record_yaml_ini'].pars.ldatetime
+                                 +dt.timedelta(seconds=self.frames['profiles']['record_yaml_ini'].pars.runtime)).strftime("%H:%M")\
                                  +'LT')
 
             #print('r18')
@@ -1272,7 +1287,11 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             axes[label].clear()
 
             tbox['datetime'].set_text(\
-                self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M")) #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
+                 (self.frames['profiles']['record_yaml_ini'].pars.datetime_daylight+\
+                  dt.timedelta(seconds=self.frames['profiles']['record_yaml_ini'].pars.runtime)).strftime("%Y/%m/%d %H:%M")
+                )
+            
+            #+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M")+ "UTC")
             # 
 
             #print('r19')
@@ -1280,27 +1299,26 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
             # 
             if valid_mod:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+                hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
+                               self.frames['profiles']['record_yaml_mod'].out.h[-1]])
             else:
-                hmax = np.max([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_obs_afternoon'].pars.h,
-                              ])#self.morning_sounding.c4gl.out.h[-1],self.evening_sounding.fit.PARAMS.T.h.value])
+                hmax = self.frames['profiles']['record_yaml_ini'].pars.h
+
+            if self.path_obs is not None:
+                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_obs_afternoon'].pars.h])
             # 
             #print('r20')
 
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
-            zco = range(zidxmax)
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_ini'].air_balloon.z.values)))
+                zco = range(zidxmax)
 
-            axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
-            #print('r21')
+                axes[label].plot(self.frames['profiles']['record_yaml_ini'].air_balloon.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_ini'].air_balloon.z.values[zco],"b*", \
+                                 label="obs "+\
+                                 self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
+                #print('r21')
 
 
             zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_ap.z.values < 2.*hmax))[0][-1])+2
@@ -1313,27 +1331,28 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                              self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
                              +'LT')
 
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
-            zco = range(zidxmax)
+            if self.path_obs is not None:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+                zco = range(zidxmax)
 
 
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
-                             label="obs "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
+                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                                 label="obs "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
 
-            zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-            zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
-            zco = range(zidxmax)
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+                zco = range(zidxmax)
 
-            #print('r23')
-            axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                             self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
-                             label="fit "+\
-                             self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
-                             +'LT')
+                #print('r23')
+                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                                 label="fit "+\
+                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 +'LT')
 
             #print('r24')
             if valid_mod:
@@ -1550,10 +1569,11 @@ def on_pick(self,event):
                         self.frames['profiles']['current_station_file_mod'].close()
                     self.frames['profiles']['current_station_file_mod'] = \
                         open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_afternoon'].close()
-                    self.frames['profiles']['current_station_file_afternoon'] = \
-                        open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+                    if self.path_obs is not None:
+                        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                            self.frames['profiles']['current_station_file_afternoon'].close()
+                        self.frames['profiles']['current_station_file_afternoon'] = \
+                            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
 
                     # go to hovered record of current station
                     self.frames['profiles']['records_iterator'] = \
@@ -1642,22 +1662,22 @@ def on_plot_hover(self,event):
                         # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
                         # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
                         
-
-                        if label[:5] == 'stats':
-                            records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                            (self.frames['stats']['STNID'] ,
-                             self.frames['stats']['current_record_chunk'], 
-                             self.frames['stats']['current_record_index']) = \
-                                records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
-                        # elif label[:5] == 'stats':
-                        #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                        #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
-                        #     records_datetimes = self.frames['stats']['records_all_stations_ini']
-                        #     (self.frames['stats']['STNID'] ,
-                        #      self.frames['stats']['current_record_chunk'], 
-                        #      self.frames['stats']['current_record_index']) = \
-                        #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                        if self.path_obs is not None:
+                            if label[:5] == 'stats':
+                                records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                                records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                                (self.frames['stats']['STNID'] ,
+                                 self.frames['stats']['current_record_chunk'], 
+                                 self.frames['stats']['current_record_index']) = \
+                                    records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                            # elif label[:5] == 'stats':
+                            #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
+                            #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            #     records_datetimes = self.frames['stats']['records_all_stations_ini']
+                            #     (self.frames['stats']['STNID'] ,
+                            #      self.frames['stats']['current_record_chunk'], 
+                            #      self.frames['stats']['current_record_index']) = \
+                            #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
 
 
                         self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
@@ -1699,7 +1719,11 @@ def on_plot_hover(self,event):
 
 
                         tab_suffixes = \
-                                ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                                ['_mod','_ini','_ini_pct']
+                        if self.path_obs is not None:
+                            tab_suffixes += \
+                                ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                            
                         for tab_suffix in tab_suffixes:
                             self.frames['stats']['records_current_station'+tab_suffix] = \
                                 self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
@@ -1735,7 +1759,10 @@ def on_plot_hover(self,event):
 
                         #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
                         tab_suffixes = \
-                                ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                                ['_ini','_ini_pct']
+                        if self.path_obs is not None:
+                            tab_suffixes += \
+                                ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
                         for tab_suffix in tab_suffixes:
                             #print(tab_suffix)
                             #print(self.frames['stats']['records_current_station'+tab_suffix])
@@ -1833,9 +1860,12 @@ def on_plot_hover(self,event):
                                      == \
                                      self.frames['stats']['current_station'].name)
 
-
                             tab_suffixes = \
-                                    ['_mod','_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                                    ['_mod','_ini','_ini_pct']
+                            if self.path_obs is not None:
+                                tab_suffixes += \
+                                    ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+
                             for tab_suffix in tab_suffixes:
                                 self.frames['stats']['records_current_station'+tab_suffix] = \
                                     self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
@@ -1858,13 +1888,12 @@ def on_plot_hover(self,event):
                             self.frames['stats']['current_record_mod'] = \
                                 self.frames['stats']['records_iterator'].__next__()
                         
-
-
-
-                            #print('h10')
-                            # cash the current record
                             tab_suffixes = \
-                                    ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                                    ['_ini','_ini_pct']
+                            if self.path_obs is not None:
+                                tab_suffixes += \
+                                    ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+
                             for tab_suffix in tab_suffixes:
                                 self.frames['stats']['current_record'+tab_suffix] =  \
                                     self.frames['stats']['records_current_station'+tab_suffix].loc[\
@@ -1959,18 +1988,18 @@ def on_plot_hover(self,event):
                          == \
                          self.frames['stats']['current_station'].name)
 
-
+                
                 tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
+                        ['_ini','_ini_pct']
+                if self.path_obs is not None:
+                    tab_suffixes += \
+                        ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+
                 for tab_suffix in tab_suffixes:
                     self.frames['stats']['records_current_station'+tab_suffix] = \
                         self.frames['stats']['records_all_stations'+tab_suffix].iloc[self.frames['stats']['records_current_station_index']]
 
-                
 
-                # cash the records of the current stations
-                tab_suffixes = \
-                        ['_ini','_obs_afternoon','_mod_stats','_obs_afternoon_stats','_ini_pct']
                 for tab_suffix in tab_suffixes:
                     self.frames['stats']['current_record'+tab_suffix] =  \
                         self.frames['stats']['records_current_station'+tab_suffix].loc[\
diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
new file mode 100644
index 0000000..7f259ad
--- /dev/null
+++ b/class4gl/setup/setup_era.py
@@ -0,0 +1,270 @@
+
+
+# -*- coding: utf-8 -*-
+
+import logging
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import importlib
+spam_loader = importlib.find_loader('Pysolar')
+found = spam_loader is not None
+if found:
+    import Pysolar
+    import Pysolar.util.GetSunriseSunset
+else:
+    import pysolar as Pysolar
+    GetSunriseSunset =  Pysolar.util.get_sunrise_sunset
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_YYYYMMDD',default="19810101")
+parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--latitude') # run a specific station id
+parser.add_argument('--longitude') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+parser.add_argument('--subset_experiments',default='ini') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+EXP_DEFS  =\
+{
+  'ERA-INTERIM_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+# initialize global data
+# ===============================
+print("Initializing global data")
+# ===============================
+globaldata = data_global()
+globaldata.sources = {**globaldata.sources,**{
+    
+        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_19830609-19830808_6hourly.nc",
+        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830609-19830808_6hourly.nc",
+        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_19830609-19830808_6hourly.nc",
+        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_19830609-19830808_6hourly.nc",
+    
+#        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
+ #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
+ #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",
+ #       "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v*_6hourly.nc",
+        }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=True)
+
+
+# # ===============================
+# print("Selecting station by ID")
+# # ===============================
+# stations_iter = stations_iterator(all_stations)
+# STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+# all_stations_select = pd.DataFrame([run_station])
+# print(run_station)
+
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if (args.latitude is not None) or (args.longitude is not None):
+    print('custom coordinates not implemented yet, please ask developer.')
+elif args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+#     print("making a custom station according to the coordinates")
+# 
+#     STNID = 43.23
+else:
+     print("Selecting stations from a row range in the table")
+     all_stations_select = pd.DataFrame(all_stations.table)
+     if args.last_station_row is not None:
+         all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+     if args.first_station_row is not None:
+         all_stations_select = all_station_select.iloc[int(args.first_station):]
+
+
+
+
+
+
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+
+dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d",)
+dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d",)
+# ===============================
+print("Creating daily timeseries from", dtfirst," to ", dtlast)
+# ===============================
+DTS = [dtfirst + dt.timedelta(days=iday) for iday in \
+       range(int((dtlast + dt.timedelta(days=1) -
+                  dtfirst).total_seconds()/3600./24.))]
+
+if args.split_by != -1:
+    totalchunks = len(all_stations_select)*len(DTS)/int(args.split_by)
+else:
+    totalchunks = len(all_stations_select)
+
+
+if args.global_chunk_number is not None:
+    run_station_chunk = np.mod(int(args.global_chunk_number),len(DTS)/int(args.split_by))
+else:
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+
+DTS_chunk = DTS[(int(run_station_chunk)*int(args.split_by)):\
+                 (int(run_station_chunk)+1)*int(args.split_by)]
+
+# for the current implementation we only consider one station. Let's upgrade it
+# later for more stations.
+run_station_chunk = int(args.global_chunk_number)
+
+# ===============================
+print('start looping over chunk')
+# ===============================
+
+os.system('mkdir -p '+args.path_experiments)
+
+
+fn_ini = args.path_experiments+'/'+format(run_station.name,'05d')+'_'+\
+        str(int(run_station_chunk))+'_'+args.subset_experiments+'.yaml'
+file_ini = open(fn_ini,'w');print('Writing to: ',fn_ini)
+
+for iDT,DT in enumerate(DTS_chunk):
+    print(iDT,DT)
+    c4gli = class4gl_input(debug_level=logging.INFO)
+    c4gli.update(source='STNID'+format(STNID,'05d'),\
+                 pars=dict(latitude = float(run_station.latitude), \
+                           longitude = float(run_station.longitude),\
+                           STNID=int(STNID)))
+
+    lSunrise, lSunset = GetSunriseSunset(c4gli.pars.latitude,0.,DT)
+
+    #start simulation at sunrise and stop at one hour before sunset
+    runtime = (lSunset - lSunrise).total_seconds() - 3600.*1.
+    ldatetime = lSunrise
+    datetime = ldatetime - dt.timedelta(hours=c4gli.pars.longitude/360.*24.)
+    datetime_daylight = datetime
+    c4gli.update(source='timeseries',   \
+                 pars=dict(\
+                           lSunrise = lSunrise, \
+                           lSunset = lSunset, \
+                           datetime = datetime, \
+                           ldatetime = ldatetime, \
+                           ldatetime_daylight = ldatetime, \
+                           datetime_daylight = datetime, \
+                           doy = datetime.timetuple().tm_yday,\
+                           runtime = runtime,\
+                          ))
+
+    c4gli.get_global_input(globaldata)
+
+    c4gli.update(source='era-interim',pars={'Ps' : c4gli.pars.sp})
+
+    cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+    Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+    Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+    R = (Rd*(1.-c4gli.air_ac.q) + Rv*c4gli.air_ac.q)
+    rho = c4gli.air_ac.p/R/c4gli.air_ac.t
+    dz = c4gli.air_ac.delpdgrav/rho
+    z = [dz.iloc[-1]/2.]
+    for idz in list(reversed(range(0,len(dz)-1,1))):
+        z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.)
+    z = list(reversed(z))
+
+    theta = c4gli.air_ac.t * \
+               (c4gli.pars.sp/(c4gli.air_ac.p))**(R/cp)
+    thetav   = theta*(1. + 0.61 * c4gli.air_ac.q)
+
+    
+    c4gli.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z),
+                                                           'theta':list(theta),
+                                                           'thetav':list(thetav),
+                                                          }))
+    air_ap_input = c4gli.air_ac[::-1].reset_index().drop('index',axis=1)
+    air_ap_mode = 'b'
+    air_ap_input_source = c4gli.query_source('air_ac:theta')
+
+
+    c4gli.mixed_layer_fit(air_ap=air_ap_input,
+                         source=air_ap_input_source,
+                         mode=air_ap_mode)
+
+
+    c4gli.dump(file_ini)
+
+file_ini.close()
+all_records_morning = get_records(pd.DataFrame([run_station]),\
+                              args.path_experiments,\
+                              getchunk = int(run_station_chunk),\
+                              subset=args.subset_experiments,
+                              refetch_records=True,
+                              )
+
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index e10c1da..c9e31ff 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -58,6 +58,7 @@
 
 EXP_DEFS  =\
 {
+  'ERA_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
@@ -76,14 +77,22 @@
 # #SET = 'GLOBAL'
 # SET = args.dataset
 
+# ========================
+print("getting a list of stations")
+# ========================
 
-print("getting stations")
 # these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=True)
 
+# ====================================
 print('defining all_stations_select')
+# ====================================
+
 # these are all the stations that are supposed to run by the whole batch (all
 # chunks). We narrow it down according to the station(s) specified.
+
+
+
 if args.station_id is not None:
     print("Selecting station by ID")
     stations_iter = stations_iterator(all_stations)
@@ -99,6 +108,7 @@
 print("station numbers included in the whole batch "+\
       "(all chunks):",list(all_stations_select.index))
 
+print(all_stations_select)
 print("getting all records of the whole batch")
 all_records_morning_select = get_records(all_stations_select,\
                                          args.path_forcing,\
@@ -181,8 +191,8 @@
     print(len(records_morning))
 
     print("aligning morning and afternoon records")
-    records_morning['dates'] = records_morning.ldatetime.dt.date
-    records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+    records_morning['dates'] = records_morning['ldatetime'].dt.date
+    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
     records_afternoon.set_index(['STNID','dates'],inplace=True)
     ini_index_dates = records_morning.set_index(['STNID','dates']).index
     records_afternoon = records_afternoon.loc[ini_index_dates]
@@ -200,8 +210,17 @@
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+            if os.path.isfile(fn_morning):
+                file_morning = open(fn_morning)
+            else:
+                fn_morning = \
+                     args.path_forcing+'/'+format(current_station.name,'05d')+\
+                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_morning = open(fn_morning)
+
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -239,6 +258,8 @@
                                                         mode='ini')
                         runtime = int((c4gli_afternoon.pars.datetime_daylight - 
                                              c4gli_morning.pars.datetime_daylight).total_seconds())
+                    elif args.runtime == 'from_input':
+                        runtime = c4gli_morning.pars.runtime
                     else:
                         runtime = int(args.runtime)
 
@@ -287,7 +308,8 @@
             file_ini.close()
             file_mod.close()
             file_morning.close()
-            file_afternoon.close()
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon.close()
     
             if onerun:
                 records_ini = get_records(pd.DataFrame([current_station]),\

From 1aebcb674065f751416626da8c6e1f6bbd4a36ba Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 8 Sep 2018 09:32:04 +0200
Subject: [PATCH 068/129] fix permission problem when getting station list

---
 class4gl/setup/setup_era.py         | 2 +-
 class4gl/simulations/simulations.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 7f259ad..9d5a6f0 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -108,7 +108,7 @@
 # ===============================
 print("getting a list of stations")
 # ===============================
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=True)
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
 
 # # ===============================
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index c9e31ff..114d23a 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -82,7 +82,7 @@
 # ========================
 
 # these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=True)
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
 # ====================================
 print('defining all_stations_select')

From 10531915ec9389c1d5e776a98f57ef39fd57482a Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 12 Sep 2018 15:08:56 +0200
Subject: [PATCH 069/129] make setup_global work

---
 class4gl/class4gl.py               |  29 +-
 class4gl/data_air.py               | 482 -----------------------------
 class4gl/data_soundings.py         | 453 +++++++++++++++++++++++++++
 class4gl/setup/batch_setup_era.pbs |  30 ++
 class4gl/setup/batch_setup_era.py  | 188 +++++++++++
 class4gl/setup/setup_era.py        |   6 -
 class4gl/setup/setup_global.py     |  84 +++--
 7 files changed, 745 insertions(+), 527 deletions(-)
 delete mode 100644 class4gl/data_air.py
 create mode 100644 class4gl/data_soundings.py
 create mode 100644 class4gl/setup/batch_setup_era.pbs
 create mode 100644 class4gl/setup/batch_setup_era.py

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 9b5c8b9..805ac35 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -41,8 +41,16 @@
 found = spam_loader is not None
 if found:
     import Pysolar
+    import Pysolar.util as Pysolarutil
+    GetSunriseSunset = Pysolarutil.GetSunriseSunset
+    GetAzimuth = Pysolarutil.GetAzimuth
+    GetAltitude = Pysolarutil.GetAltitude
 else:
     import pysolar as Pysolar
+    Pysolarutil = Pysolar.util
+    GetSunriseSunset = Pysolarutil.get_sunrise_sunset
+    GetAzimuth = Pysolar.solar.get_azimuth
+    GetAltitude = Pysolar.solar.get_altitude
 import yaml
 import logging
 import warnings
@@ -621,9 +629,6 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
         if np.isnan(dpars['h']):
             dpars['Ps'] = np.nan
 
-
-
-
         if ~np.isnan(dpars['h']):
             # determine mixed-layer properties (moisture, potential temperature...) from profile
             
@@ -652,9 +657,6 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
             dpars['u'] = np.nan
             dpars['v'] = np.nan
             
-
-
-
         # First 3 data points of the mixed-layer fit. We create a empty head
         # first
         air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
@@ -761,22 +763,25 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
                             dt.timedelta(minutes=int(dpars['longitude']/360.*24.*60.))
         dpars['doy'] = dpars['datetime'].timetuple().tm_yday
         dpars['SolarAltitude'] = \
-                                Pysolar.GetAltitude(\
+                                GetAltitude(\
                                     dpars['latitude'],\
                                     dpars['longitude'],\
                                     dpars['datetime']\
                                 )
-        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+        dpars['SolarAzimuth'] =  GetAzimuth(\
                                     dpars['latitude'],\
                                     dpars['longitude'],\
                                     dpars['datetime']\
                                 )
         dpars['lSunrise'], dpars['lSunset'] \
-        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+        =  GetSunriseSunset(dpars['latitude'],
                                          0.,
-                                         dpars['ldatetime'],0.)
-        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
-        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+                                         dpars['ldatetime'])
+        #print(dpars['lSunrise'])
+        dpars['lSunrise'] = dpars['lSunrise']
+        dpars['lSunset'] = dpars['lSunset']
+        # dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        # dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
         # This is the nearest datetime when the sun is up (for class)
         dpars['ldatetime_daylight'] = \
                                 np.min(\
diff --git a/class4gl/data_air.py b/class4gl/data_air.py
deleted file mode 100644
index 3860319..0000000
--- a/class4gl/data_air.py
+++ /dev/null
@@ -1,482 +0,0 @@
-import numpy as np
-
-from bs4 import BeautifulSoup
-import pandas as pd
-import datetime as dt
-#import pylab as pl
-import io
-import os
-import calendar
-
-import importlib
-spam_loader = importlib.find_loader('Pysolar')
-found = spam_loader is not None
-if found:
-    import Pysolar
-else:
-    import pysolar as Pysolar
-import Pysolar.util as Pysolarutil
-
-
-
-
-
-#from urllib import request
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / WSPD**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHd = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHd = np.min([BLHd,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHd = np.min([BLHd,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHd = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHd
-
-def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
-    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
-    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
-
-
-#from os import listdir
-#from os.path import isfile #,join
-import glob
-
-
-class wyoming(object):
-    def __init__(self):
-       self.status = 'init'
-       self.found = False
-       self.DT = None
-       self.current = None
-       #self.mode = 'b'
-       self.profile_type = 'wyoming'  
-       self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
-       self.PATH = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-         
-    def set_STNM(self,STNM):
-        self.__init__()
-        self.STNM = STNM
-        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
-        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
-        self.current = None
-        self.found = False
-        self.FILES.sort()
-        
-    def find_first(self,year=None,get_atm=False):
-        self.found = False    
-                
-        # check first file/year or specified year
-        if year == None:
-            self.iFN = 0
-            self.FN = self.FILES[self.iFN]
-        else:
-            self.FN = os.path.realpath(self.PATH+'/'+str(year)+'/SOUNDINGS_'+str(year)+'_'+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-        self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-        self.current = self.sounding_series.find('h2')
-        keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
-        
-        # go through other files and find first sounding when year is not specified
-        self.iFN=self.iFN+1
-        while keepsearching:
-            self.FN = self.FILES[self.iFN]
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            self.iFN=self.iFN+1
-            keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
-        self.found = (self.current is not None)
-
-        self.status = 'fetch'
-        if self.found:
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        
-        if self.found and get_atm:
-            self.get_values_air_input()
-        
-    
-    def find(self,DT,get_atm=False):
-        
-        self.found = False
-        keepsearching = True
-        #print(DT)
-        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
-        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
-            self.DT = DT
-            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
-            self.iFN = self.FILES.index(self.FN)
-            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-            self.current = self.sounding_series.find('h2')
-            
-        keepsearching = (self.current is not None)
-        while keepsearching:
-            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            if DTcurrent == DT:
-                self.found = True
-                keepsearching = False
-                if get_atm:
-                    self.get_values_air_input()
-                    self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-            elif DTcurrent > DT:
-                keepsearching = False
-                self.current = None
-            else:
-                self.current = self.current.find_next('h2')
-                if self.current is None:
-                    keepsearching = False
-        self.found = (self.current is not None)
-        self.status = 'fetch'
-
-    def find_next(self,get_atm=False):
-        self.found = False
-        self.DT = None
-        if self.current is None:
-            self.find_first()
-        else:                
-            self.current = self.current.find_next('h2')
-            self.found = (self.current is not None)
-            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
-            while keepsearching:
-                self.iFN=self.iFN+1
-                self.FN = self.FILES[self.iFN]
-                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
-                self.current = self.sounding_series.find('h2')
-                
-                self.found = (self.current is not None)
-                keepsearching = ((self.current is None) and (self.iFN < len(self.FILES)))
-        if self.found:        
-            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
-        if self.found and get_atm:
-            self.get_values_air_input()
-       
-
-
-    def get_values_air_input(self,latitude=None,longitude=None):
-
-        # for iDT,DT in enumerate(DTS):
-        
-            #websource = urllib.request.urlopen(webpage)
-        #soup = BeautifulSoup(open(webpage), "html.parser")
-        
-       
-        #workaround for ...last line has 
 which results in stringlike first column
-        string = self.current.find_next('pre').text
-        string = string.split('\n')[:-1]
-        string =  '\n'.join(string)
-        columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV']             
-        ONE_COLUMN = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns).iloc[5:-1]
-        #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4])
-        
-        #string =  soup.pre.next_sibling.next_sibling
-        
-        string = self.current.find_next('pre').find_next('pre').text
-
-        PARAMS = pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T
-        #PARAMS.insert(0,'date',DT)
-
-        PARAMS.insert(0,'datetime', dt.datetime.strptime(str(PARAMS['Observation time'][0]),"%y%m%d/%H%M"))
-        PARAMS.insert(0,'STNID',int(PARAMS['Station number']))
-        
-        THTV = np.array(ONE_COLUMN.THTV,dtype='float')
-        #THTA = np.array(ONE_COLUMN.THTA,dtype='float')
-        HGHT = np.array(ONE_COLUMN.HGHT,dtype='float')
-        HAGL = HGHT - np.float(PARAMS['Station elevation'])
-        ONE_COLUMN.insert(0,'HAGL',HAGL)
-
-        
-        
-        
-        MIXR = np.array(ONE_COLUMN.MIXR,dtype='float')
-        QABS = (MIXR/1000.)/(MIXR/1000.+1.)
-        ONE_COLUMN.insert(0,'QABS',QABS)
-        
-        WSPD =0.51444 * np.array(ONE_COLUMN.SKNT,dtype='float')
-
-        #mixed layer potential temperature
-        #THTVM = np.float(VALUE['Mean mixed layer potential temperature'].iloc[0])
-
-        #THTV_0 = THTA[np.where(~np.isnan(THTA))[0][0]]
-        #RiBV = 9.81/THTV_0 * ( THTV - THTV_0) * HGHT / WSPD**2
-        #RiBA = 9.81/THTA_0 * ( THTA - THTA_0) * HGHT / WSPD**2
-
-        BLHV,BLHVu,BLHVd = blh(HAGL,THTV,WSPD)
-        BLHV = np.max((BLHV,10.))
-        BLHVu = np.max((BLHVu,10.))
-        BLHVd = np.max((BLHVd,10.))
-        #BLHA,BLHAu,BLHAd = BLH(HGHT,THTA,WSPD)
-
-        #security values for mixed-layer jump values dthetav, dtheta and dq
-        
-        # fit new profiles taking the above-estimated mixed-layer height
-        ONE_COLUMNNEW = []
-        for BLH in [BLHV,BLHVu,BLHVd]:
-            ONE_COLUMNNEW.append(pd.DataFrame())
-            
-            HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
-            ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
-            
-            listHAGLNEW = list(HAGLNEW)
-            for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
-                
-                # get index of lowest valid observation. This seems to vary
-                idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
-                if len(idxvalid) > 0:
-                    #print('idxvalid',idxvalid)
-                    if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
-                        meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
-                    else:
-                        meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
-                else:
-                    meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
-                    #print(col,meanabl)
-               
-                
-                # if col == 'PRES':
-                #     meanabl =  
-            
-                new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
-                #THTVM = np.nanmean(THTV[HAGL <= BLH])
-                #print("new_pro_h",new_pro_h)
-                # calculate jump ath the top of the mixed layer
-                if col in ['THTA','THTV',]:
-                    #for moisture
-                    #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
-                    #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
-                    if len(listHAGLNEW) > 4:
-                        #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
-                        dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
-                        dtheta = np.max((0.1,dtheta_pre))
-                        #meanabl = meanabl - (dtheta - dtheta_pre)
-                        #print('dtheta_pre',dtheta_pre)
-                        #print('dtheta',dtheta)
-                        #print('meanabl',meanabl)
-                        #stop
-                        
-                    else:
-                        dtheta = np.nan
-                else:
-                    if len(listHAGLNEW) > 4:
-                        #for moisture (it can have both negative and positive slope)
-                        dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
-                    else:
-                        dtheta = np.nan
-                #print('dtheta',dtheta)
-                
-                new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
-            
-                
-                ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
-                
-            #QABSM = np.nanmean(QABS[HAGL <= BLH])
-            #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
-            #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
-            
-        # we just make a copy of the fields, so that it can be read correctly by CLASS 
-        for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
-            dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
-            dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
-            dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
-            
-            angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
-            spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
-        
-            dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
-            dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
-
-
-        # assign fields adopted by CLASS
-        if self.mode == 'o': #original 
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'b':
-            PARAMS.insert(0,'h',   np.float(BLHV))
-        elif self.mode == 'u':
-            PARAMS.insert(0,'h',   BLHVu)
-        elif self.mode == 'd':
-            PARAMS.insert(0,'h',   BLHVd)
-        else:
-            PARAMS.insert(0,'h',   BLHV)
-            
-
-        try:
-            PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
-            PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
-        except:
-            print("could not convert latitude coordinate")
-            PARAMS.insert(0,'latitude', np.nan)
-            PARAMS.insert(0,'lat', np.nan)
-        try:
-            PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
-            # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwhich) 
-            PARAMS.insert(0,'lon', 0.)
-        except:
-            print("could not convert longitude coordinate")
-            PARAMS.insert(0,'longitude', np.nan)
-            PARAMS.insert(0,'lon', 0.)
-
-        if latitude is not None:
-            print('overwriting latitude with specified value')
-            PARAMS['latitude'] = np.float(latitude)
-            PARAMS['lat'] = np.float(latitude)
-        if longitude is not None:
-            print('overwriting longitude with specified value')
-            PARAMS['longitude'] = np.float(longitude)
-        try:
-            #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwhich hence taking lon=0)
-            PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
-            PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolarutil.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            # This is the nearest datetime when sun is up (for class)
-            PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
-            # apply the same time shift for UTC datetime
-            PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
-            
-        except:
-            print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
-            PARAMS['ldatetime'] = dt.datetime(1900,1,1)
-            PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
-            PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolarutil.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
-            PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
-            PARAMS['datetime_daylight'] =PARAMS['datetime'].value
-
-        
-
-        PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
-        # as we are forcing lon equal to zero this is is expressed in local suntime
-        PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
-
-           
-        ONE_COLUMNb = ONE_COLUMNNEW[0]
-        ONE_COLUMNu = ONE_COLUMNNEW[1]
-        ONE_COLUMNd = ONE_COLUMNNEW[2]
-        
-
-        THTVM = np.nanmean(THTV[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
-        
-        QABSM = np.nanmean(QABS[HAGL <= BLHV])
-        PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHV',BLHV)
-        PARAMS.insert(len(PARAMS.columns),'BLHVu',BLHVu)
-        PARAMS.insert(len(PARAMS.columns),'BLHVd',BLHVd)  
-
-        BLHVe = abs(BLHV - BLHVu)
-        BLHVe = max(BLHVe,abs(BLHV - BLHVd))
-
-        #PARAMS.insert(0,'dq',0.)
-        
-        PARAMS.insert(len(PARAMS.columns),'BLHVe',BLHVe)  
-        PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
-        #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
-        #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
-        
-        if self.mode == 'o': #original 
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
-        elif self.mode == 'b': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNb
-            BLCOLUMN = ONE_COLUMNb
-        elif self.mode == 'u': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNu
-            BLCOLUMN = ONE_COLUMNu
-        elif self.mode == 'd': # best BLH
-            USE_ONECOLUMN = ONE_COLUMNd
-            BLCOLUMN = ONE_COLUMNd
-        else:
-            USE_ONECOLUMN = ONE_COLUMN
-            BLCOLUMN = ONE_COLUMNb
-
-        lt6000 = (BLCOLUMN['HAGL'] < 6000.)
-        lt2500 = (BLCOLUMN['HAGL'] < 2500. + BLHV)
-        # print(BLCOLUMN['HAGL'][lt6000])
-        # print(BLCOLUMN['HAGL'][lt2500])
-        # 
-        # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
-
-        #print(BLCOLUMN['HAGL'][lt2500])
-        PARAMS.insert(0,'OK',
-                      ((BLHVe < 200.) and 
-                       ( len(np.where(lt6000)[0]) > 5) and
-                       (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
-                       (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
-                       (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
-                       (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
-                       (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
-                       (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
-                      )
-                     )
-
-        PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
-        PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
-        PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
-        PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
-        PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
-        PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
-        
-        
-        PARAMS = PARAMS.T
-
-        
-        self.PARAMS = PARAMS
-        self.ONE_COLUMN = USE_ONECOLUMN
-        # if self.mode == 'o': #original 
-        #     self.ONE_COLUMN = ONE_COLUMN
-        # elif self.mode == 'b': # best BLH
-        #     self.ONE_COLUMN = ONE_COLUMNb
-        # elif self.mode == 'u':# upper BLH
-        #     self.ONE_COLUMN = ONE_COLUMNu
-        # elif self.mode == 'd': # lower BLH
-        #     self.ONE_COLUMN=ONE_COLUMNd
-        # else:
-        #     self.ONE_COLUMN = ONE_COLUMN
-
diff --git a/class4gl/data_soundings.py b/class4gl/data_soundings.py
new file mode 100644
index 0000000..8e438b4
--- /dev/null
+++ b/class4gl/data_soundings.py
@@ -0,0 +1,453 @@
+import numpy as np
+
+from bs4 import BeautifulSoup
+import pandas as pd
+import datetime as dt
+#import pylab as pl
+import io
+import os
+import calendar
+import warnings
+
+
+
+def dtrange(STARTTIME,ENDTIME,TIMEJUMP=dt.timedelta(hours=24)):
+    STEPS = int((ENDTIME - STARTTIME).total_seconds()/TIMEJUMP.total_seconds())
+    return [STARTTIME + TIMEJUMP*i for i in range(0,STEPS)]
+
+
+#from os import listdir
+#from os.path import isfile #,join
+import glob
+
+
+class wyoming(object):
+    def __init__(self, PATH="/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/",STNM=None):
+
+        self.profile_type = 'wyoming'  
+        self.MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
+        self.PATH = PATH
+        self.reset()
+        if STNM is not None:
+            self.set_STNM(STNM) 
+        else:
+            warnings.warn('warning. No station is set yet. Please use class function set_STNM to set station number')
+
+    def reset(self):
+
+        self.status = 'init'
+        self.found = False
+        self.DT = None
+        self.current = None
+        #self.mode = 'b'
+
+         
+    def set_STNM(self,STNM):
+        self.reset()
+        self.STNM = STNM
+        self.FILES = glob.glob(self.PATH+'/????/SOUNDINGS_????_'+format(STNM,'05d')+".html")
+        self.FILES = [os.path.realpath(FILE) for FILE in self.FILES]
+        self.current = None
+        self.found = False
+        self.FILES.sort()
+        
+    def find_first(self,year=None,get_atm=False):
+        self.found = False    
+        self.current = None
+                
+        # check first file/year or specified year
+        if year == None:
+            self.iFN = 0
+            self.FN = self.FILES[self.iFN]
+        else:
+            # this counter cycles through consecutive candidate years. We
+            # avoid an endless loop by checking at most 100 years ahead.
+            iyear = 0
+            while ((not self.found) and (iyear <  100)):
+
+                FN = os.path.realpath(self.PATH \
+                                      +'/' \
+                                      +str(year+iyear) \
+                                      +'/SOUNDINGS_' \
+                                      +str(year+iyear) \
+                                      +'_' \
+                                      +format(self.STNM,'05d') \
+                                      +".html")
+                if FN in self.FILES:
+                    self.iFN = self.FILES.index(FN)
+                    self.found = True
+                    self.FN = FN
+                else:
+                    self.iFN = -1
+                    self.FN = None
+
+                iyear += 1
+                
+        if self.found:
+            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+            self.current = self.sounding_series.find('h2')
+            keepsearching = (self.current is None) #if we don't want later years, add here: "and (year is None)"
+            
+            # go through other files and find first sounding when year is not specified
+            self.iFN=self.iFN+1
+            while keepsearching:
+                self.FN = self.FILES[self.iFN]
+                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+                self.current = self.sounding_series.find('h2')
+                self.iFN=self.iFN+1
+                keepsearching = (self.current is None) and (self.iFN < len(self.FILES))
+            self.found = (self.current is not None)
+
+        self.status = 'fetch'
+        if self.found:
+            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+        
+        # if self.found and get_atm:
+        #     self.get_values_air_input()
+        
+    
+    def find(self,DT,get_atm=False):
+        
+        self.found = False
+        keepsearching = True
+        #print(DT)
+        # we open a new file only when it's needed. Otherwise we just scroll to the right sounding.  
+        if not ((self.current is not None) and (DT >= self.DT) and (self.DT.year == DT.year)):
+            self.DT = DT
+            self.FN = os.path.realpath(self.PATH+"/"+self.DT.strftime("%Y")+"/SOUNDINGS_"+self.DT.strftime("%Y")+"_"+format(self.STNM,'05d')+".html")
+            self.iFN = self.FILES.index(self.FN)
+            self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+            self.current = self.sounding_series.find('h2')
+            
+        keepsearching = (self.current is not None)
+        while keepsearching:
+            DTcurrent = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+            if DTcurrent == DT:
+                self.found = True
+                keepsearching = False
+                # if get_atm:
+                #     self.get_values_air_input()
+                self.DT = DTcurrent #dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+            elif DTcurrent > DT:
+                keepsearching = False
+                self.current = None
+            else:
+                self.current = self.current.find_next('h2')
+                if self.current is None:
+                    keepsearching = False
+        self.found = (self.current is not None)
+        self.status = 'fetch'
+
+    def find_next(self,get_atm=False):
+        self.found = False
+        self.DT = None
+        if self.current is None:
+            self.find_first()
+        else:                
+            self.current = self.current.find_next('h2')
+            self.found = (self.current is not None)
+            keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
+            while keepsearching:
+                self.iFN=self.iFN+1
+                self.FN = self.FILES[self.iFN]
+                self.sounding_series = BeautifulSoup(open(self.FN), "html.parser")
+                self.current = self.sounding_series.find('h2')
+                
+                self.found = (self.current is not None)
+                keepsearching = ((self.current is None) and ((self.iFN+1) < len(self.FILES)))
+        if self.found:        
+            self.DT = dt.datetime(int(self.current.text[-4:]),self.MONTHS.index(self.current.text[-8:-5])+1,int(self.current.text[-11:-9]),int(self.current.text[-15:-13]))
+        # if self.found and get_atm:
+        #     self.get_values_air_input()
+       
+
+
+
+# # should be placed under class4gl!!!
+# class sounding(object):
+#     #def statistics:
+#     # returns a list of sounding statistics as a dict
+    
+
+#def get_sounding_wyoming(self,wy_strm,latitude=None,longitude=None):
+# input:
+#   wy_strm: wyoming stream
+
+# for iDT,DT in enumerate(DTS):
+
+    #websource = urllib.request.urlopen(webpage)
+#soup = BeautifulSoup(open(webpage), "html.parser")
+
+# __init__(self):
+
+#    sounding_keys
+# list of variables that we get from global ground data
+
+#workaround for ...last line has a NUL character (\x00) which results in stringlike first column
+
+
+
+
+
+# dtheta_pre = air
+# 
+# dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
+# 
+# 
+# dtheta = np.max((0.1,dtheta_pre))
+# 
+# 
+# 
+# 
+# self.air_ap.
+
+
+
+
+# if len(valid_indices) > 0:
+#     #print('valid_indices',valid_indices)
+#     if len(np.where(HAGL[valid_indices[0]:] < BLH)[0]) >= 3:
+#         meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(valid_indices[0]+1):],dtype=np.float))
+#     else:
+#         meanabl = np.nanmean(ONE_COLUMN[col][valid_indices[0]:(valid_indices[0]+1)],dtype=np.float)                    
+# else:
+#     meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
+# #
+
+
+
+
+
+
+
+# # fit new profiles taking the above-estimated mixed-layer height
+# ONE_COLUMNNEW = []
+# for BLH in [self.h,self.h_u,self.h_d]:
+#     ONE_COLUMNNEW.append(pd.DataFrame())
+#     
+#     HAGLNEW = np.array([2.,BLH,BLH]+list(HAGL[HAGL > BLH]),dtype=np.float)
+#     ONE_COLUMNNEW[-1].insert(0,'HAGL',HAGLNEW)
+#     
+#     listHAGLNEW = list(HAGLNEW)
+#     for icol,col in enumerate(['THTA','THTV','QABS','SKNT','DRCT','PRES']):
+#         
+#         # get index of lowest valid observation. This seems to vary
+#         idxvalid = np.where((np.array(HAGL) >= 0) & (~pd.isnull(np.array(ONE_COLUMN[col],dtype=np.float) )))[0]
+#         if len(idxvalid) > 0:
+#             #print('idxvalid',idxvalid)
+#             if len(np.where(HAGL[idxvalid[0]:] < BLH)[0]) >= 3:
+#                 meanabl = np.nanmean(np.array(ONE_COLUMN[col][HAGL < BLH][(idxvalid[0]+1):],dtype=np.float))
+#             else:
+#                 meanabl = np.nanmean(ONE_COLUMN[col][idxvalid[0]:(idxvalid[0]+1)],dtype=np.float)                    
+#         else:
+#             meanabl = np.nanmean(ONE_COLUMN[col][0:1],dtype=np.float)
+#             #print(col,meanabl)
+#        
+#         
+#         # if col == 'PRES':
+#         #     meanabl =  
+#     
+#         new_pro_h = list(np.array(ONE_COLUMN[col][HAGL > BLH],dtype=np.float))
+#         #THTVM = np.nanmean(THTV[HAGL <= BLH])
+#         #print("new_pro_h",new_pro_h)
+#         # calculate jump at the top of the mixed layer
+#         if col in ['THTA','THTV',]:
+#             #for moisture
+#             #print('hello:',(new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]))
+#             #print('hello:',new_pro_h[1] , new_pro_h[0],listHAGLNEW[4] , listHAGLNEW[3],BLH,listHAGLNEW[3])
+#             if len(listHAGLNEW) > 4:
+#                 #print(type(new_pro_h[1]),type(new_pro_h[0]),type(listHAGLNEW[4]),type(listHAGLNEW[3]),type(BLH),type(meanabl))
+#                 dtheta_pre = (new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl 
+#                 dtheta = np.max((0.1,dtheta_pre))
+#                 #meanabl = meanabl - (dtheta - dtheta_pre)
+#                 #print('dtheta_pre',dtheta_pre)
+#                 #print('dtheta',dtheta)
+#                 #print('meanabl',meanabl)
+#                 #stop
+#                 
+#             else:
+#                 dtheta = np.nan
+#         else:
+#             if len(listHAGLNEW) > 4:
+#                 #for moisture (it can have both negative and positive slope)
+#                 dtheta = ((new_pro_h[1] - new_pro_h[0])/(listHAGLNEW[4] - listHAGLNEW[3])*(BLH-listHAGLNEW[3]) + new_pro_h[0] - meanabl ) 
+#             else:
+#                 dtheta = np.nan
+#         #print('dtheta',dtheta)
+#         
+#         new_pro = np.array([meanabl,meanabl,meanabl+dtheta]+new_pro_h,dtype=np.float)
+#     
+#         
+#         ONE_COLUMNNEW[-1].insert(len(ONE_COLUMNNEW[-1].columns),col,new_pro)
+#         
+#     #QABSM = np.nanmean(QABS[HAGL <= BLH])
+#     #QABSNEW = np.array([QABSM,QABSM]+list(QABS[HAGL > BLH]))
+#     #ONE_COLUMNNEW.append(pd.DataFrame(zip(HAGLNEW,THTVNEW,QABSNEW),columns=('HAGL','THTV','QABS')))
+#     
+# # we just make a copy of the fields, so that it can be read correctly by CLASS 
+# for dataonecolumn in ONE_COLUMNNEW+[ONE_COLUMN]:
+#     dataonecolumn.insert(len(dataonecolumn.columns),'p_pro',np.array(dataonecolumn.PRES,dtype=np.float)*100.)
+#     dataonecolumn.insert(len(dataonecolumn.columns),'z_pro',np.array(dataonecolumn.HAGL,dtype=np.float))
+#     dataonecolumn.insert(len(dataonecolumn.columns),'theta_pro',np.array(dataonecolumn.THTA,dtype=np.float))
+#     dataonecolumn.insert(len(dataonecolumn.columns),'thetav_pro',np.array(dataonecolumn.THTV,dtype=np.float))
+#     dataonecolumn.insert(len(dataonecolumn.columns),'q_pro',np.array(dataonecolumn.QABS,dtype=np.float))
+#     
+#     angle_x = (90.-np.array(dataonecolumn.DRCT,dtype=np.float))/180.*np.pi # assuming that wind in direction of the south is 0 degrees.
+#     spd = 0.51444* np.array(dataonecolumn.SKNT,dtype=np.float)
+# 
+#     dataonecolumn.insert(len(dataonecolumn.columns),'u_pro',spd * np.sin(angle_x))
+#     dataonecolumn.insert(len(dataonecolumn.columns),'v_pro',spd * np.cos(angle_x))
+# 
+# 
+# # assign fields adopted by CLASS
+# if self.mode == 'o': #original 
+#     PARAMS.insert(0,'h',   np.float(self.h))
+# elif self.mode == 'b':
+#     PARAMS.insert(0,'h',   np.float(self.h))
+# elif self.mode == 'u':
+#     PARAMS.insert(0,'h',   self.h_u)
+# elif self.mode == 'd':
+#     PARAMS.insert(0,'h',   self.h_d)
+# else:
+#     PARAMS.insert(0,'h',   self.h)
+#     
+# 
+# try:
+#     PARAMS.insert(0,'lat', np.float(PARAMS['Station latitude'][0]))
+#     PARAMS.insert(0,'latitude', np.float(PARAMS['Station latitude'][0]))
+# except:
+#     print("could not convert latitude coordinate")
+#     PARAMS.insert(0,'latitude', np.nan)
+#     PARAMS.insert(0,'lat', np.nan)
+# try:
+#     PARAMS.insert(0,'longitude', np.float(PARAMS['Station longitude'][0]))
+#     # we set the actual input parameter value of lon to zero as we are working in local time (as if we were in Greenwich) 
+#     PARAMS.insert(0,'lon', 0.)
+# except:
+#     print("could not convert longitude coordinate")
+#     PARAMS.insert(0,'longitude', np.nan)
+#     PARAMS.insert(0,'lon', 0.)
+# 
+# if latitude is not None:
+#     print('overwriting latitude with specified value')
+#     PARAMS['latitude'] = np.float(latitude)
+#     PARAMS['lat'] = np.float(latitude)
+# if longitude is not None:
+#     print('overwriting longitude with specified value')
+#     PARAMS['longitude'] = np.float(longitude)
+# try:
+#     #this is the local suntime datetime from which we calculate the hour of the day (assuming we would be in greenwich hence taking lon=0)
+#     PARAMS['ldatetime'] = PARAMS.datetime.value + dt.timedelta(hours=PARAMS.longitude.value/360.*24.) 
+#     PARAMS['SolarAltitude'] = Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+#     PARAMS['SolarAzimuth'] = Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.longitude.value,PARAMS.datetime.value)
+#     PARAMS['lSunrise'], PARAMS['lSunset'] = Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+#     # This is the nearest datetime when sun is up (for class)
+#     PARAMS['ldatetime_daylight'] = np.min(np.max(PARAMS['ldatetime'].value ,PARAMS['lSunrise'].value),PARAMS['lSunset'].value) 
+#     # apply the same time shift for UTC datetime
+#     PARAMS['datetime_daylight'] = PARAMS.datetime.value  + (PARAMS.ldatetime_daylight.value  - PARAMS.ldatetime.value)
+#     
+# except:
+#     print("could not get local times for profile, perhaps because of wrong longitude or latitude in the profile description")
+#     PARAMS['ldatetime'] = dt.datetime(1900,1,1)
+#     PARAMS['SolarAltitude'] = np.nan #Pysolar.GetAltitude(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+#     PARAMS['SolarAzimuth'] = np.nan #Pysolar.GetAzimuth(PARAMS.lat.value,PARAMS.lon.value,PARAMS.datetime.value)
+#     PARAMS['lSunrise'], PARAMS['lSunset'] = dt.datetime(1900,1,1), dt.datetime(1900,1,1) #Pysolar.util.GetSunriseSunset(PARAMS.lat.value,0.,PARAMS.datetime.value,0.)
+#     PARAMS['ldatetime_daylight'] =PARAMS['ldatetime'].value
+#     PARAMS['datetime_daylight'] =PARAMS['datetime'].value
+# 
+# 
+# 
+# PARAMS.insert(0,'day', PARAMS['ldatetime'][0].day)
+# # as we are forcing lon equal to zero this is is expressed in local suntime
+# PARAMS.insert(0,'tstart', PARAMS['ldatetime_daylight'][0].hour + PARAMS['ldatetime_daylight'][0].minute/60. + PARAMS['ldatetime_daylight'][0].second/3600.)
+# 
+#    
+# ONE_COLUMNb = ONE_COLUMNNEW[0]
+# ONE_COLUMNu = ONE_COLUMNNEW[1]
+# ONE_COLUMNd = ONE_COLUMNNEW[2]
+# 
+# 
+# THTVM = np.nanmean(THTV[HAGL <= self.h])
+# PARAMS.insert(len(PARAMS.columns),'THTVM',THTVM)
+# 
+# QABSM = np.nanmean(QABS[HAGL <= self.h])
+# PARAMS.insert(len(PARAMS.columns),'QABSM',QABSM)
+# 
+# PARAMS.insert(len(PARAMS.columns),'self.h',self.h)
+# PARAMS.insert(len(PARAMS.columns),'self.h_u',self.h_u)
+# PARAMS.insert(len(PARAMS.columns),'self.h_d',self.h_d)  
+# 
+# self.he = abs(self.h - self.h_u)
+# self.he = max(self.he,abs(self.h - self.h_d))
+# 
+# #PARAMS.insert(0,'dq',0.)
+# 
+# PARAMS.insert(len(PARAMS.columns),'self.he',self.he)  
+# PARAMS.insert(0,'Ps',np.array(ONE_COLUMN.PRES,dtype='float')[0]*100.)
+# #PARAMS.insert(len(PARAMS.columns),'STNM',STNM)
+# #PARAMS.insert(len(PARAMS.columns),'PATH',webpage)
+# 
+# if self.mode == 'o': #original 
+#     USE_ONECOLUMN = ONE_COLUMN
+#     BLCOLUMN = ONE_COLUMNb # this var is used for investigating whether the original profile is of sufficient quality to be used for analysis or class model input.
+# elif self.mode == 'b': # best BLH
+#     USE_ONECOLUMN = ONE_COLUMNb
+#     BLCOLUMN = ONE_COLUMNb
+# elif self.mode == 'u': # best BLH
+#     USE_ONECOLUMN = ONE_COLUMNu
+#     BLCOLUMN = ONE_COLUMNu
+# elif self.mode == 'd': # best BLH
+#     USE_ONECOLUMN = ONE_COLUMNd
+#     BLCOLUMN = ONE_COLUMNd
+# else:
+#     USE_ONECOLUMN = ONE_COLUMN
+#     BLCOLUMN = ONE_COLUMNb
+# 
+# lt6000 = (BLCOLUMN['HAGL'] < 6000.)
+# lt2500 = (BLCOLUMN['HAGL'] < 2500. + self.h)
+# # print(BLCOLUMN['HAGL'][lt6000])
+# # print(BLCOLUMN['HAGL'][lt2500])
+# # 
+# # print(len(np.where(lt2500)[0]) > 9.) # distance between two points (lower than 2500m) should be smaller than 400 meters
+# 
+# print(BLCOLUMN['HAGL'][lt2500])
+# PARAMS.insert(0,'OK',
+#               ((self.he < 200.) and 
+#                ( len(np.where(lt6000)[0]) > 5) and
+#                (np.array(BLCOLUMN['HAGL'])[-1] >= 6000.) and # the last coordinate had a height higher than 5000.
+#                (not len(np.where(pd.isnull(BLCOLUMN['THTA'][lt6000]))[0]) >0 ) and
+#                (len(np.where(lt2500)[0]) > 10.) and # distance between two points (lower than 2500m) should be smaller than 400 meters
+#                (not len(np.where(pd.isnull(BLCOLUMN['SKNT'][lt6000]))[0]) >0 ) and
+#                (not len(np.where(pd.isnull(BLCOLUMN['DRCT'][lt6000]))[0]) >0 ) and
+#                (not len(np.where(pd.isnull(BLCOLUMN['PRES'][lt6000]))[0]) >0 ) and
+#                (not len(np.where(pd.isnull(BLCOLUMN['QABS'][lt6000]))[0]) >0 ) and
+#                (not (len(np.where(np.array(BLCOLUMN['THTA'][lt6000])[2:] <= np.array(BLCOLUMN['THTA'][lt6000])[1:-1])[0]) >0) ) #absolute increasing
+#               )
+#              )
+# 
+# PARAMS.insert(0,'theta',np.float(list(BLCOLUMN['THTA'])[1]))
+# PARAMS.insert(0,'q',np.float(list(BLCOLUMN['QABS'])[1]))
+# PARAMS.insert(0,'u',np.float(list(BLCOLUMN['u_pro'])[1]))  
+# PARAMS.insert(0,'v',np.float(list(BLCOLUMN['v_pro'])[1]))
+# PARAMS.insert(0,'dtheta',np.float(list(BLCOLUMN['THTA'])[2]-list(BLCOLUMN['THTA'])[1]))
+# PARAMS.insert(0,'dq',np.float(list(BLCOLUMN['QABS'])[2]-list(BLCOLUMN['QABS'])[1]))
+# PARAMS.insert(0,'du',np.float(list(BLCOLUMN['u_pro'])[2]-list(BLCOLUMN['u_pro'])[1]))
+# PARAMS.insert(0,'dv',np.float(list(BLCOLUMN['v_pro'])[2]-list(BLCOLUMN['v_pro'])[1]))
+# 
+# 
+# PARAMS = PARAMS.T
+# 
+# 
+# self.PARAMS = PARAMS
+# self.ONE_COLUMN = USE_ONECOLUMN
+# # if self.mode == 'o': #original 
+# #     self.ONE_COLUMN = ONE_COLUMN
+# # elif self.mode == 'b': # best BLH
+# #     self.ONE_COLUMN = ONE_COLUMNb
+# # elif self.mode == 'u':# upper BLH
+# #     self.ONE_COLUMN = ONE_COLUMNu
+# # elif self.mode == 'd': # lower BLH
+# #     self.ONE_COLUMN=ONE_COLUMNd
+# # else:
+# #     self.ONE_COLUMN = ONE_COLUMN
+
diff --git a/class4gl/setup/batch_setup_era.pbs b/class4gl/setup/batch_setup_era.pbs
new file mode 100644
index 0000000..7735a7a
--- /dev/null
+++ b/class4gl/setup/batch_setup_era.pbs
@@ -0,0 +1,30 @@
+#!/bin/bash 
+#
+#PBS -j oe
+#PBS -M hendrik.wouters@ugent.be
+#PBS -m b
+#PBS -m e
+#PBS -m a
+#PBS -N c4gl_setup
+
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python Ruby
+
+EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
+
+for var in $(compgen -v | grep C4GLJOB_ ); do
+    echo $var
+    if [ "$var" != "C4GLJOB_exec" ]
+    then
+    EXEC_ALL=$EXEC_ALL" --"`echo $var | cut -c9-`"="${!var}
+    fi
+done
+
+
+# EXEC_ALL="python $exec --global-chunk-number $PBS_ARRAYID \
+#                        --split-by $split_by \
+#                        --dataset $dataset \
+#                        --experiments $experiments"
+#                  #      --path-soundings $path_soundings \
+echo Executing: $EXEC_ALL
+$EXEC_ALL
+
diff --git a/class4gl/setup/batch_setup_era.py b/class4gl/setup/batch_setup_era.py
new file mode 100644
index 0000000..7f8c0d0
--- /dev/null
+++ b/class4gl/setup/batch_setup_era.py
@@ -0,0 +1,188 @@
+
+# -*- coding: utf-8 -*-
+
+import logging
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import importlib
+spam_loader = importlib.find_loader('Pysolar')
+found = spam_loader is not None
+if found:
+    import Pysolar
+    import Pysolar.util.GetSunriseSunset
+else:
+    import pysolar as Pysolar
+    GetSunriseSunset =  Pysolar.util.get_sunrise_sunset
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--pbs_string',default='')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_YYYYMMDD',default="19810101")
+parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--latitude') # run a specific station id
+parser.add_argument('--longitude') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+parser.add_argument('--subset_experiments',default='ini') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+EXP_DEFS  =\
+{
+  'ERA-INTERIM_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+# initialize global data
+# ===============================
+print("Initializing global data")
+# ===============================
+globaldata = data_global()
+globaldata.sources = {**globaldata.sources,**{
+    
+        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_19830609-19830808_6hourly.nc",
+        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830609-19830808_6hourly.nc",
+        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_19830609-19830808_6hourly.nc",
+        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_19830609-19830808_6hourly.nc",
+    
+#        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
+ #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
+ #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",
+ #       "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v*_6hourly.nc",
+        }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+
+# # ===============================
+# print("Selecting station by ID")
+# # ===============================
+# stations_iter = stations_iterator(all_stations)
+# STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+# all_stations_select = pd.DataFrame([run_station])
+# print(run_station)
+
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if (args.latitude is not None) or (args.longitude is not None):
+    print('custom coordinates not implemented yet, please ask developer.')
+elif args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+#     print("making a custom station according to the coordinates")
+# 
+#     STNID = 43.23
+else:
+     print("Selecting stations from a row range in the table")
+     all_stations_select = pd.DataFrame(all_stations.table)
+     if args.last_station_row is not None:
+         all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+     if args.first_station_row is not None:
+         all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d",)
+dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d",)
+# ===============================
+print("Creating daily timeseries from", dtfirst," to ", dtlast)
+# ===============================
+DTS = [dtfirst + dt.timedelta(days=iday) for iday in \
+       range(int((dtlast + dt.timedelta(days=1) -
+                  dtfirst).total_seconds()/3600./24.))]
+
+if args.split_by != -1:
+    totalchunks = len(all_stations_select)*math.ceil(len(DTS)/int(args.split_by))
+else:
+    totalchunks = len(all_stations_select)
+
+print(totalchunks)
+
+#if args.cleanup_experiments:
+#    os.system("rm -R "+args.path_experiments+'/')
+
+# C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/setup/batch_setup_era.pbs -t 0-'+\
+            str(totalchunks-1)+" -v "
+# propagate arguments towards the job script
+lfirst = True
+for argkey in args.__dict__.keys():
+    if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
+        # default values are specified in the simulation script, so
+        # excluded here
+        (args.__dict__[argkey] is not None)
+       ):
+        if lfirst:
+            command +=' C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        else:
+            command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        lfirst=False
+
+print('Submitting array job: '+command)
+os.system(command)
diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 9d5a6f0..631e50c 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -144,15 +144,9 @@
      if args.first_station_row is not None:
          all_stations_select = all_station_select.iloc[int(args.first_station):]
 
-
-
-
-
-
 print("station numbers included in the whole batch "+\
       "(all chunks):",list(all_stations_select.index))
 
-
 dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d",)
 dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d",)
 # ===============================
diff --git a/class4gl/setup/setup_global.py b/class4gl/setup/setup_global.py
index 79224d9..08adbb4 100644
--- a/class4gl/setup/setup_global.py
+++ b/class4gl/setup/setup_global.py
@@ -10,7 +10,7 @@
 usage:
     python setup_global.py 
     where  is an integer indicating the row index of the station list
-    under odir+'/'+fn_stations (see below)
+    under args.path_output+'/'+fn_stations (see below)
 
 this scripts should be called from the pbs script setup_global.pbs
 
@@ -29,19 +29,40 @@
 import sys
 #import copy as cp
 import numpy as np
-from sklearn.metrics import mean_squared_error
+#from sklearn.metrics import mean_squared_error
 import logging
 import datetime as dt
 import os
 import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+# parser.add_argument('--first_YYYYMMDD',default="19810101")
+# parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--station_id') # run a specific station id
+# parser.add_argument('--error_handling',default='dump_on_success')
+# parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+
+
+# args.path_output = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
 from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 
-odir = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
-fn_stations = odir+'/igra-stations_sel.txt'
+fn_stations = args.path_input+'/igra-stations.txt'
 
 
 #calculate the root mean square error
+
 def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
     """ calculated root mean squared error 
         
@@ -80,11 +101,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # which is the case for evaluating eg., mixed-layer estimates)
         y_predicted_temp = y_actual_temp*0. + y_predicted_temp
         
-    
-    return np.sqrt(mean_squared_error(y_actual_temp,y_predicted_temp))
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
 
 
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl
 from data_soundings import wyoming
 #from data_global import data_global
@@ -96,26 +117,34 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
 # read the list of stations with valid ground data (list generated with
 # get_valid_stations.py)
-idir = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
-
-df_stations = pd.read_csv(fn_stations)
-
+# args.path_input = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_fwf(fn_stations,names=['Country code',\
+                                               'ID',\
+                                               'Name',\
+                                               'latitude',\
+                                               'longitude',\
+                                               'height',\
+                                               'unknown',\
+                                               'startyear',\
+                                               'endyear'])
+if args.station_id is not None:
+    df_stations = df_stations[df_stations.ID == int(args.station_id)]
+else:
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
+    if args.last_station_row is not None:
+        df_stations = df_stations[:(int(args.last_station_row)+1)]
 
 STNlist = list(df_stations.iterrows())
-NUMSTNS = len(STNlist)
-PROCS = 100
-BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
-
 
-iPROC = int(sys.argv[1])
-
-
-for iSTN,STN in STNlist[iPROC*BATCHSIZE:(iPROC+1)*BATCHSIZE]:  
+os.system('mkdir -p '+args.path_output)
+for iSTN,STN in STNlist:  
     one_run = False
 # for iSTN,STN in STNlist[5:]:  
     
-    fnout = odir+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = odir+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    fnout = args.path_output+"/"+format(STN['ID'],'05d')+"_morning.yaml"
+    fnout_afternoon = args.path_output+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
     
 
     # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
@@ -123,10 +152,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         
     with open(fnout,'w') as fileout, \
          open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=idir, STNM=STN['ID'])
+        wy_strm = wyoming(PATH=args.path_input, STNM=STN['ID'])
         wy_strm.set_STNM(int(STN['ID']))
 
-        # we consider all soundings after 1981
+        # we consider all soundings from 1981 onwards
         wy_strm.find_first(year=1981)
         #wy_strm.find(dt.datetime(2004,10,19,6))
         
@@ -137,7 +166,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         while wy_strm.current is not None:
             
             c4gli.clear()
-            try:
+            try: 
                 c4gli.get_profile_wyoming(wy_strm)
                 #print(STN['ID'],c4gli.pars.datetime)
                 #c4gli.get_global_input(globaldata)
@@ -179,6 +208,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             except:
                 morning_ok =False
                 print('obtain morning not good')
+
             # the next sounding will be used either for an afternoon sounding
             # or for the morning sounding of the next day.
             wy_strm.find_next()
@@ -292,12 +322,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     if one_run:
         STN.name = STN['ID']
         all_records_morning = get_records(pd.DataFrame([STN]),\
-                                      odir,\
+                                      args.path_output,\
                                       subset='morning',
                                       refetch_records=True,
                                       )
         all_records_afternoon = get_records(pd.DataFrame([STN]),\
-                                      odir,\
+                                      args.path_output,\
                                       subset='afternoon',
                                       refetch_records=True,
                                       )

From 82b3c8dedb69ce3c85e5cd168e740a95768e547e Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 15 Sep 2018 12:31:45 +0200
Subject: [PATCH 070/129] use all era-interim data

---
 class4gl/class4gl.py                       |   4 +-
 class4gl/interface/interface_cloudiness.py | 538 +++++++++++++++++++++
 class4gl/interface/interface_koeppen.py    | 149 ++++--
 class4gl/setup/setup_era.py                |  10 +-
 class4gl/setup/setup_global_afternoon.py   | 245 ++++++++++
 5 files changed, 890 insertions(+), 56 deletions(-)
 create mode 100644 class4gl/interface/interface_cloudiness.py
 create mode 100644 class4gl/setup/setup_global_afternoon.py

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 805ac35..1ed64f8 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -43,8 +43,8 @@
     import Pysolar
     import Pysolar.util as Pysolarutil
     GetSunriseSunset = Pysolarutil.GetSunriseSunset
-    GetAzimuth = Pysolarutil.GetAzimuth
-    GetAltitude = Pysolarutil.GetAltitude
+    GetAzimuth = Pysolarutil.solar.GetAzimuth
+    GetAltitude = Pysolarutil.solar.GetAltitude
 else:
     import pysolar as Pysolar
     Pysolarutil = Pysolar.util
diff --git a/class4gl/interface/interface_cloudiness.py b/class4gl/interface/interface_cloudiness.py
new file mode 100644
index 0000000..7dded65
--- /dev/null
+++ b/class4gl/interface/interface_cloudiness.py
@@ -0,0 +1,538 @@
+'''
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False)
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--show_control_parameters',default=True)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--figure_filename_2',default=None)
+parser.add_argument('--experiments_labels',default=None)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+if args.experiments_labels is None:
+    keylabels = args.experiments.strip().split(' ')
+else:
+    keylabels = args.experiments_labels.strip().split(';')
+
+def abline(slope, intercept,axis):
+    """Plot a line from slope and intercept"""
+    #axis = plt.gca()
+    x_vals = np.array(axis.get_xlim())
+    y_vals = intercept + slope * x_vals
+    axis.plot(x_vals, y_vals, 'k--')
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+if bool(args.load_globaldata):
+    # initialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing,\
+                      globaldata,\
+                      refetch_records=False
+                    )
+    '''
+if args.make_figures:
+    # the lines below activate TaylorPlots but it is disabled for now
+    fig = plt.figure(figsize=(10,7))   #width,height
+    i = 1                                                                           
+    axes = {}         
+    axes_taylor = {}         
+    
+    colors = ['r','g','b','m']
+    symbols = ['*','x','+']
+    dias = {}
+    
+    for varkey in ['h','theta','q']:                                                    
+        axes[varkey] = fig.add_subplot(2,3,i)                                       
+        #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+    
+        #print(obs.std())
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+        if i == 0:
+            dias[varkey]._ax.axis["left"].label.set_text(\
+                "Standard deviation (model) / Standard deviation (observations)")
+            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # Q95 = obs.quantile(0.95)
+        # Q95 = obs.quantile(0.90)
+        # Add RMS contours, and label them
+        contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+        dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+        #dia._ax.set_title(season.capitalize())
+    
+        dias[varkey].add_grid()
+    
+    
+        #dia.ax.plot(x99,y99,color='k')
+    
+        
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+            # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+            # clearsky = (cc < 0.05)
+            # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+            # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            x, y = obs.values,mod.values
+            print(key,len(obs.values))
+    
+            STD_OBS = obs.std()
+            #scores
+            PR = pearsonr(mod,obs)[0]
+            RMSE = rmse(obs,mod)                                               
+            BIAS = np.mean(mod) - np.mean(obs)
+            STD = mod.std()
+            
+            # fit = np.polyfit(x,y,deg=1)
+            # axes[varkey].plot(x, fit[0] * x + fit[1],\
+            #                   color=colors[ikey],alpha=0.8,lw=2,\
+            #                   label=key+", "+\
+            #                               'R = '+str(round(PR,3))+', '+\
+            #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+            #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+            # axes[varkey].legend(fontsize=5)
+            
+            # print(STD)
+            # print(PR)
+            dias[varkey].add_sample(STD/STD_OBS, PR,
+                           marker='o', ms=5, ls='',
+                           #mfc='k', mec='k', # B&W
+                           mfc=colors[ikey], mec=colors[ikey], # Colors
+                           label=keylabels[ikey])
+    
+        # put ticker position, see
+        # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+        # dia.ax.axis['bottom'].
+        # dia.ax.axis['left'].
+        # dia.ax.axis['left'].
+    
+        i += 1
+    
+    i = 0
+    for varkey in ['h','theta','q']:                                                    
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        keylabel = keylabels[ikey]
+        cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        clearsky = (cc < 0.05)
+    
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+    
+    
+        nbins=40       
+        x, y = obs.values,mod.values
+        
+        xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+        zi = np.zeros_like(xi)*np.nan       
+        for ibin in range(nbins):
+            xmin = x.min() + ibin * (x.max() - x.min())/nbins
+            xmax = xmin + (x.max() - x.min())/nbins
+            in_bin = ((x >= xmin) & (x < xmax))
+            ybin = y[in_bin]
+            xbin = x[in_bin]
+            if len(ybin) > 20:
+                k = kde.gaussian_kde((ybin))
+                zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+        zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+        zi_int = zi.cumsum(axis=1) 
+                     #  label=key+", "+\
+                     #                    'R = '+str(round(PR[0],3))+', '+\
+                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.5,0.84] ,
+                colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.16,0.84] ,
+                colors=['darkred'],alpha=0.5,)
+        nanxi = xi[zi != np.nan]
+        axes[varkey].set_xlim((nanxi.min(),nanxi.max()))
+        axes[varkey].set_ylim((nanxi.min(),nanxi.max()))
+        print(varkey,(nanxi.min(),nanxi.max()))
+    
+    
+        latex = {}
+        latex['dthetadt'] =  r'$d \theta / dt $'
+        latex['dqdt'] =      r'$d q / dt $'
+        latex['dhdt'] =      r'$d h / dt $'
+    
+        axes[varkey].set_xlabel('observations')     
+        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+    
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+    
+        axes[varkey].scatter(obs,mod, label='(only) '+keylabel+", "+\
+                                      'R = '+str(round(PR,3))+', '+\
+                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
+                             s=0.1,alpha=0.14,color='k')
+        axes[varkey].legend(fontsize=5)
+        
+
+
+
+        axes[varkey].set_xlabel('observations')     
+        if i==0:                                    
+            axes[varkey].set_ylabel('model')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+    
+    
+    
+    # legend for different forcing simulations (colors)
+    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    leg = []
+    for ikey,key in enumerate(args.experiments.strip().split(' ')):
+        
+        leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+        leg.append(leg1)
+    ax.axis('off')
+    #leg1 =
+    ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+    
+    
+    # # legend for different stations (symbols)
+    # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # isymbol = 0
+    # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+    #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+    #     leg.append(leg1)
+    #     isymbol += 1
+    # 
+    # # symbol for all stations
+    # leg1, = ax.plot([],'ko',markersize=10)
+    # leg.append(leg1)
+    
+    
+    # ax.axis('off')
+    # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    
+    
+    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+    
+    
+    #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+    # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+    # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+    
+    if args.figure_filename is not None:
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+    fig.show()  
+
+    if bool(args.show_control_parameters):
+
+        import seaborn as sns
+
+        pkmn_type_colors = [
+                                            '#A0A0A0',  # Poison
+                                            '#78C850',  # Grass
+                                            '#F08030',  # Fire
+                                            '#6890F0',  # Water
+                                            '#F08030',  # Fire
+                                            '#C03028',  # Fighting
+                                            '#F85888',  # Psychic
+                                            '#A8B820',  # Bug
+                                            '#A8A878',  # Normal
+                                            '#F8D030',  # Electric
+                                            '#E0C068',  # Ground
+                                            '#EE99AC',  # Fairy
+                                            '#B8A038',  # Rock
+                                            '#705898',  # Ghost
+                                            '#98D8D8',  # Ice
+                                            '#7038F8',  # Dragon
+                                           ]
+
+
+
+        sns.set_style('whitegrid')
+        #sns.set()
+        fig = pl.figure(figsize=(7,5))
+        i = 1
+        axes = {}
+        data_all = pd.DataFrame()
+        data_input = pd.DataFrame()
+        
+        
+        
+        # #for varkey in ['theta','q']:     
+        # EF =\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
+        # EF[EF<0] = np.nan
+        # EF[EF>1] = np.nan
+        
+        # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
+        
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        data_all = pd.DataFrame()
+
+        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        tempdatamodstats["source"] = "Soundings"
+        tempdatamodstats["source_index"] = "Soundings"
+
+        ini_ref = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+        tempdataini_this = pd.DataFrame(ini_ref.copy())
+
+        tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+        tempdatamodstats['STNID']= tempdataini_this.STNID
+        tempdatamodstats['source']= "Soundings"
+        tempdatamodstats['source_index']= "Soundings"
+        tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+        #print('hello')
+
+        tempdataini = pd.DataFrame(ini_ref)
+        tempdataini["source"] = "Soundings"
+        tempdataini["source_index"] = "Soundings"
+        tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+        #print('hello2')
+
+
+        data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+        data_input = pd.concat([data_input,tempdataini],axis=0)
+        #print(data_input.shape)
+        #print(data_all.shape)
+
+            
+        for ikey,key in enumerate(list(args.experiments.strip().split(' '))):
+            #keylabels=args.experiments.strip().split(' ')
+            keylabel = keylabels[ikey]
+
+            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
+            tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+            tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+            tempdatamodstats['STNID']= tempdataini_this.STNID
+            tempdatamodstats['source']= keylabel
+            tempdatamodstats['source_index']= keylabel
+            tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+            #print('hello')
+
+
+            tempdataini = pd.DataFrame(ini_ref.copy())
+            tempdataini["source"] = keylabel
+            tempdataini["source_index"] = keylabel
+            tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+    
+
+            #print('hello2')
+            index_intersect = tempdataini.index.intersection(tempdatamodstats.index)
+            #print('hello3')
+
+            tempdataini = tempdataini.loc[index_intersect]
+            #print('hello4')
+            tempdatamodstats = tempdatamodstats.loc[index_intersect]
+            #print('hello5')
+
+
+            # data[varkey] = tempdatamodstats['d'+varkey+'dt']
+            data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+            data_input = pd.concat([data_input, tempdataini],axis=0)
+            #print(data_input.shape)
+            #print(data_all.shape)
+
+        data_input.cc = data_input.cc.clip(0.,+np.inf)
+
+        for varkey in ['h','theta','q']:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+            data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            #print(data_input.shape)
+            #print(data_all.shape)
+        #print('hello6')
+        #print(data_all.columns)
+        #print('hello7')
+        for varkey in ['h','theta','q']:
+            input_keys =['cc']
+            for input_key in input_keys:
+                varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+
+                #print('hello8')
+                #print(data_input.shape)
+                #print(data_all.shape)
+                input_key_full = input_key + "["+units[input_key]+"]"
+                data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2)
+                data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2,)
+                #print('hello9')
+                #print(data_input.shape)
+                #print(data_all.shape)
+                
+                qvalmax = data_all[varkey_full].quantile(0.999)
+                qvalmin = data_all[varkey_full].quantile(0.001)
+                select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
+                #print('hello11')
+                data_all = data_all[select_data]
+                #print('hello12')
+                data_input = data_input[select_data.values]
+                #print('hello13')
+                #print(data_input.shape)
+                #print(data_all.shape)
+                #print('hello10')
+                
+                sns.set(style="ticks", palette="pastel")
+                ax = fig.add_subplot(3,len(input_keys),i)
+                #sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+                
+                #ax.set_title(input_key_full)
+                sb = sns.boxplot(x=input_key_full, y=varkey_full, hue="source",
+                                 palette=pkmn_type_colors,
+                                # palette=["m", "g",'r','b'],
+                                 linewidth=1.2, data=data_all,sym='')
+                if i ==1:
+                     plt.legend(loc='upper right',fontsize=7.,frameon=True,framealpha=0.7)
+                else:
+                     ax.get_legend().set_visible(False)
+                #     plt.legend('off')
+                if i >= 3:
+                    ax.set_xticklabels(labels=['['+str(i)+','+str(i+1)+'[' for i in list(range(0,7))]+['[7,8]'])
+                    ax.set_xlabel('Cloudiness [okta]')
+                else:
+                    ax.set_xticklabels([])
+                    ax.set_xlabel('')
+
+                if np.mod(i,len(input_keys)) != 0:
+                    ax.set_yticklabels([])
+                    ax.set_ylabel('')
+
+                for j,artist in enumerate(ax.artists):
+                    if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
+                        # Set the linecolor on the artist to the facecolor, and set the facecolor to None
+                        #print(j,artist)
+                        col = artist.get_facecolor()
+                        #print(j,artist)
+                        artist.set_edgecolor(col)
+                        #print(j,artist)
+                        artist.set_facecolor('None')
+                
+                        # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
+                        # Loop over them here, and use the same colour as above
+                        
+                        for k in range(j*5,j*5+5):
+                            line = ax.lines[k]
+                            line.set_color(col)
+                            line.set_mfc(col)
+                            line.set_mec(col)
+                
+                # Also fix the legend
+                j = 0
+                for legpatch in ax.get_legend().get_patches():
+                    if j > 0:
+
+                        col = legpatch.get_facecolor()
+                        legpatch.set_edgecolor(col)
+                        legpatch.set_facecolor('None')
+                    j +=1
+
+
+
+
+                #ax.grid()
+                #sns.despine(offset=10, trim=True)
+                i +=1
+        fig.tight_layout()
+        fig.subplots_adjust( bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
+        if args.figure_filename_2 is not None:
+            fig.savefig(args.figure_filename_2,dpi=250); print("Image file written to:", args.figure_filename_2)
+        fig.show()
+
+
+
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index cbe89f2..0054c20 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -18,6 +18,7 @@
 parser.add_argument('--show_control_parameters',default=True)
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--figure_filename_2',default=None)
+parser.add_argument('--experiments_labels',default=None)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -41,6 +42,10 @@
 
 
 
+if args.experiments_labels is None:
+    keylabels = args.experiments.strip().split(' ')
+else:
+    keylabels = args.experiments_labels.strip().split(';')
 
 def abline(slope, intercept,axis):
     """Plot a line from slope and intercept"""
@@ -123,25 +128,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False
                     )
-
 '''
 key = args.experiments.strip(' ').split(' ')[0]
 xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
 koeppenlookuptable = pd.DataFrame()
 koeppenlookuptable['KGCID'] = pd.Series(xrkoeppen['KGCID'])
-c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] =  \
-    c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID'])
-
-koeppenlookuptable['amount'] = ""
-for ikoeppen,koeppen in koeppenlookuptable.iterrows():
-    print(ikoeppen,':',koeppen)
-    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen.KGCID)
-    print(np.sum(kgc_select))
-    koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
-
-koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
-koeppenlookuptable = koeppenlookuptable[:9]
-koeppenlookuptable = koeppenlookuptable.sort_index()
 
 
 kgccolors = {
@@ -156,17 +147,22 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     'Dfb':['darkviolet','white']    , 
 }
 kgcnames = {
-    'Dfa':'snow - fully humid - hot summer',
+    'Dfa':'snow \n fully humid \n hot summer',
     'Cfb':'green'       ,
-    'BSk':''      ,
-    'Csb':''      ,
+    'BSk':'4'      ,
+    'Csb':'5'      ,
     'Cfa':'darkgreen' ,     
-    'BWh':''      ,
-    'Aw' :''     ,
-    'Dwc':''     ,
-    'Dfb':''     ,
+    'BWh':'6'      ,
+    'Aw' :'7'     ,
+    'Dwc':'8'     ,
+    'Dfb':'9'     ,
     #'Dfa':'',
 }
+for KGCID in list(pd.Series(xrkoeppen['KGCID'])):
+    if KGCID not in kgcnames.keys():
+        kgcnames[KGCID] = KGCID
+    if KGCID not in kgccolors.keys():
+        kgccolors[KGCID] = ['k','k']
 
 
 koeppenlookuptable['color'] = ""
@@ -181,9 +177,25 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     koeppenlookuptable['name'].loc[ikoeppen] = kgcnames[koeppen.KGCID]
 
 
+
+c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] =  \
+    c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID'])
+
+koeppenlookuptable['amount'] = ""
+for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+    print(ikoeppen,':',koeppen)
+    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
+    print(np.sum(kgc_select))
+    koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
+
+koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
+koeppenlookuptable = koeppenlookuptable[:9]
+koeppenlookuptable = koeppenlookuptable.sort_index()
+
+
 if args.make_figures:
     # the lines below activate TaylorPlots but it is disabled for now
-    fig = plt.figure(figsize=(10,7))   #width,height
+    fig = plt.figure(figsize=(7,5))   #width,height
     i = 1                                                                           
     axes = {}         
     axes_taylor = {}         
@@ -203,7 +215,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             icolor = 0
             for ikoeppen,koeppen in koeppenlookuptable.iterrows():
                 print(ikoeppen,':',koeppen)
-                kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen.KGCID)
+                kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
                 
                 koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select]
                 koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select]
@@ -249,9 +261,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
     
         #print(obs.std())
-        if i == 0:
+        if i == 1:
             dias[varkey]._ax.axis["left"].label.set_text(\
-                "Standard deviation (model) / Standard deviation (observations)")
+                "standard deviation (model) / standard deviation (observations)")
             # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
             # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
         #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
@@ -315,6 +327,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     for varkey in ['h','theta','q']:                                                    
         ikey = 0
         key = list(args.experiments.strip().split(' '))[ikey]
+        keylabel = keylabels[ikey]
         cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
         clearsky = (cc < 0.05)
     
@@ -358,20 +371,39 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         latex['dhdt'] =      r'$d h / dt $'
     
         axes[varkey].set_xlabel('observations')     
-        axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
-    
+
+        if varkey == 'q':
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+r'$10^{-3} \times $'+units['d'+varkey+'dt']+']')        
+        else:
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')     
+       #  c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname']
+
         PR = pearsonr(mod,obs)[0]
         RMSE = rmse(obs,mod)                                               
         BIAS = np.mean(mod) - np.mean(obs)
         STD = mod.std()
     
-        axes[varkey].scatter(obs,mod, label='(only) '+key+", "+\
-                                      'R = '+str(round(PR,3))+', '+\
-                                      'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-                                      'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] ,\
-                             s=0.1,alpha=0.14,color='k')
-        axes[varkey].legend(fontsize=5)
-        
+        axes[varkey].scatter(obs,mod, label='All',s=0.1,alpha=0.14,color='k')
+
+
+
+        #axes[varkey].legend(fontsize=5)
+
+        #trans = ax.get_xaxis_transform() # x in data untis, y in axes fraction
+
+        if varkey == 'q':
+            annotate_text = 'PC = '+str(round(PR,3))+'\n'+\
+                           'RMSE = '+str(round(RMSE*1000.,5))+r'$10^{-3} \times $'+ units['d'+varkey+'dt']+'\n'+\
+                           'BIAS = '+str(round(BIAS*1000.,5))+r'$10^{-3} \times $'+units['d'+varkey+'dt'] 
+        else:
+            annotate_text = 'PC = '+str(round(PR,3))+'\n'+\
+                           'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+'\n'+\
+                           'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] 
+
+
+        ann = axes[varkey].annotate(annotate_text, xy=(0.05, .95 ), xycoords='axes fraction',fontsize=7,
+       horizontalalignment='left', verticalalignment='top' 
+        )
 
 
 
@@ -380,7 +412,26 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             axes[varkey].set_ylabel('model')                                            
         abline(1,0,axis=axes[varkey])
         i +=1
-    
+
+        #axes[varkey].axis('equal')
+        axes[varkey].set_aspect('equal')
+
+        # To specify the number of ticks on both or any single axes
+        # plt.locator_params(axis='x', nbins=6)
+        #plt.locator_params( nbins=10)
+        axes[varkey].xaxis.set_major_locator(ticker.MaxNLocator(4))
+        axes[varkey].yaxis.set_major_locator(ticker.MaxNLocator(4))
+        # axes[varkey].xaxis.set_major_locator(ticker.MultipleLocator(5))
+        # axes[varkey].yaxis.set_major_locator(ticker.MultipleLocator(5))
+
+        if varkey == 'q':
+            ticks = ticker.FuncFormatter(lambda x, pos:
+                                         '{0:g}'.format(x*1000.))
+            axes[varkey].xaxis.set_major_formatter(ticks)
+            axes[varkey].yaxis.set_major_formatter(ticks)
+
+        #     # axes[varkey].set_xticklabels(labels=ax.get_xticklabels()*1000.)
+        #     # axes[varkey].set_yticklabels(labels=ax.get_yticklabels()*1000.)
     
     
     # # legend for different forcing simulations (colors)
@@ -412,7 +463,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
     
     
-    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+    fig.subplots_adjust(top=0.95,bottom=0.09,left=0.08,right=0.94,hspace=0.35,wspace=0.29)
     
     
     #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
@@ -420,7 +471,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
     
     if args.figure_filename is not None:
-        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+        fig.savefig(args.figure_filename,dpi=250); print("Image file written to:",args.figure_filename)
     fig.show()  
 
     if bool(args.show_control_parameters):
@@ -450,7 +501,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
         sns.set_style('whitegrid')
         #sns.set()
-        fig = pl.figure(figsize=(12,8))
+        fig = pl.figure(figsize=(7,5))
         i = 1
         axes = {}
         data_all = pd.DataFrame()
@@ -498,21 +549,22 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #print(data_all.shape)
 
             
-        for key in list(args.experiments.strip().split(' ')):
+        for ikey,key in enumerate(list(args.experiments.strip().split(' '))):
+            keylabel = keylabels[ikey]
 
             tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
             tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
             tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
             tempdatamodstats['STNID']= tempdataini_this.STNID
-            tempdatamodstats['source']= key
-            tempdatamodstats['source_index']= key
+            tempdatamodstats['source']= keylabel
+            tempdatamodstats['source_index']= keylabel
             tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
             #print('hello')
 
 
             tempdataini = pd.DataFrame(ini_ref.copy())
-            tempdataini["source"] = key 
-            tempdataini["source_index"] = key
+            tempdataini["source"] = keylabel
+            tempdataini["source_index"] = keylabel
             tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
     
 
@@ -581,12 +633,13 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                             # palette=["m", "g",'r','b'],
                              linewidth=1.2, data=data_all,sym='')
             if i ==1:
-                 plt.legend(loc='upper right',fontsize=7.)
+                 plt.legend(loc='upper right',fontsize=7.,frameon=True,framealpha=0.7)
             else:
                  ax.get_legend().set_visible(False)
             #     plt.legend('off')
-            if i >= 2:
-                ax.set_xticklabels(labels=ax.get_xticklabels(),rotation=45.,ha='right')
+            if i >= 3:
+                ax.set_xticklabels(labels=ax.get_xticklabels())
+                ax.set_xlabel('Köppen climate class')
             else:
                 ax.set_xticklabels([])
                 ax.set_xlabel('')
@@ -630,9 +683,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             #sns.despine(offset=10, trim=True)
             i +=1
         fig.tight_layout()
-        fig.subplots_adjust( bottom=0.18,left=0.09,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
+        fig.subplots_adjust( bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
         if args.figure_filename_2 is not None:
-            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
+            fig.savefig(args.figure_filename_2,dpi=250); print("Image file written to:", args.figure_filename_2)
         fig.show()
 
 
diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 631e50c..0c8ea96 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -88,12 +88,10 @@
 # ===============================
 globaldata = data_global()
 globaldata.sources = {**globaldata.sources,**{
-    
-        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_19830609-19830808_6hourly.nc",
-        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830609-19830808_6hourly.nc",
-        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_19830609-19830808_6hourly.nc",
-        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_19830609-19830808_6hourly.nc",
-    
+        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_????????-????????_6hourly.nc",
+        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_????????-????????_6hourly.nc",
+        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_????????-????????_6hourly.nc",
+        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_????????-????????_6hourly.nc",
 #        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
  #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
  #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",
diff --git a/class4gl/setup/setup_global_afternoon.py b/class4gl/setup/setup_global_afternoon.py
new file mode 100644
index 0000000..4a49a97
--- /dev/null
+++ b/class4gl/setup/setup_global_afternoon.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <row_index>
+    where <row_index> is an integer indicating the row index of the station list
+    under args.path_output+'/'+fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+#from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+# parser.add_argument('--first_YYYYMMDD',default="19810101")
+# parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--station_id') # run a specific station id
+# parser.add_argument('--error_handling',default='dump_on_success')
+# parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+
+
+# args.path_output = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+fn_stations = args.path_input+'/igra-stations.txt'
+
+
+#calculate the root mean square error
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# iniitialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+# args.path_input = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+df_stations = pd.read_fwf(fn_stations,names=['Country code',\
+                                               'ID',\
+                                               'Name',\
+                                               'latitude',\
+                                               'longitude',\
+                                               'height',\
+                                               'unknown',\
+                                               'startyear',\
+                                               'endyear'])
+if args.station_id is not None:
+    df_stations = df_stations[df_stations.ID == int(args.station_id)]
+else:
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
+    if args.last_station_row is not None:
+        df_stations = df_stations[:(int(args.last_station_row)+1)]
+
+STNlist = list(df_stations.iterrows())
+
+os.system('mkdir -p '+args.path_output)
+for iSTN,STN in STNlist:  
+    one_run = False
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = args.path_output+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout:
+        wy_strm = wyoming(PATH=args.path_input, STNM=STN['ID'])
+        wy_strm.set_STNM(int(STN['ID']))
+
+        # we consider all soundings from 1981 onwards
+        wy_strm.find_first(year=1981)
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        
+        # so we continue as long as we can find a new sounding
+                
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            try: 
+                c4gli.get_profile_wyoming(wy_strm)
+                #print(STN['ID'],c4gli.pars.datetime)
+                #c4gli.get_global_input(globaldata)
+
+                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+                logic = dict()
+
+		# TODO: this filter still selects morning soundings; adapt it for afternoon soundings
+                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+                logic['daylight'] = \
+                    ((c4gli.pars.ldatetime_daylight - 
+                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
+                
+                logic['springsummer'] = (c4gli.pars.theta > 278.)
+                
+                # we take 3000 because previous analysis (ie., HUMPPA) has
+                # focussed towards such altitude
+                le3000 = (c4gli.air_balloon.z <= 3000.)
+                logic['10measurements'] = (np.sum(le3000) >= 5) 
+
+                #leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+                #logic['mlerrlow'] = (\
+                #        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                #        (not np.isnan(c4gli.pars.theta)) and \
+                #        (rmse(c4gli.air_balloon.theta[leh] , \
+                #              c4gli.pars.theta,filternan_actual=True) < 1.)\
+                #              )
+    
+
+                #logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+                    
+                print('logic:', logic)
+                # the result
+                morning_ok = np.mean(list(logic.values()))
+                print(morning_ok,c4gli.pars.ldatetime)
+
+            except:
+                morning_ok =False
+                print('obtain morning not good')
+
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+                                
+            print(morning_ok)
+            if morning_ok == 1.:
+                c4gli.get_global_input(globaldata)
+                print('VERY CLOSE...')
+                if c4gli.check_source_globaldata() and \
+	            (c4gli.check_source(source='wyoming',\
+		                                   check_only_sections='pars')):
+                    c4gli.dump(fileout)
+                    one_run=True		     
+                    print('HIT!!!')
+            wy_strm.find_next()
+                
+    if one_run:
+        STN.name = STN['ID']
+
+        all_records_afternoon = get_records(pd.DataFrame([STN]),\
+                                      args.path_output,\
+                                      subset='afternoon',
+                                      refetch_records=True,
+                                      )
+    else:
+        os.system('rm '+fnout)
+        
+
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+

From efa6280ca03795eb217646aa76b77a5bfe2b45bd Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 15 Sep 2018 12:35:15 +0200
Subject: [PATCH 071/129] use all era-interim data

---
 class4gl/setup/setup_era.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 0c8ea96..43c1c73 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -88,10 +88,10 @@
 # ===============================
 globaldata = data_global()
 globaldata.sources = {**globaldata.sources,**{
-        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_????????-????????_6hourly.nc",
-        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_????????-????????_6hourly.nc",
-        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_????????-????????_6hourly.nc",
-        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_????????-????????_6hourly.nc",
+        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc",
+        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
+        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc",
+        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc",
 #        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
  #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
  #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",

From a1b42c34151df5cd2c04a117ce48ad5f80511162 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 15 Sep 2018 12:37:16 +0200
Subject: [PATCH 072/129] use all era-interim data

---
 class4gl/setup/setup_era.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 43c1c73..96ce88f 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -92,10 +92,6 @@
         "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
         "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc",
         "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc",
-#        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
- #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
- #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",
- #       "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v*_6hourly.nc",
         }}
 
 # ...  and load initial data pages

From f02f4200938c00a7e042a31dd3b68dc4a1eea559 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 18 Sep 2018 15:08:49 +0200
Subject: [PATCH 073/129] Fix Ps input; implement warning message in case of
 invalid input

---
 class4gl/c4gl_setup.o4759290-0             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-1             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-2             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-3             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-4             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-5             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-6             | 1251 ++++++++++++++++++++
 class4gl/c4gl_setup.o4759290-7             |  469 ++++++++
 class4gl/c4gl_sim.o4759326-0               |  227 ++++
 class4gl/c4gl_sim.o4759326-1               |  231 ++++
 class4gl/c4gl_sim.o4759326-2               |  227 ++++
 class4gl/c4gl_sim.o4759326-3               |  227 ++++
 class4gl/c4gl_sim.o4759326-4               |   10 +
 class4gl/c4gl_sim.o4759326-5               |   10 +
 class4gl/c4gl_sim.o4759326-6               |   10 +
 class4gl/c4gl_sim.o4759326-7               |   10 +
 class4gl/class4gl.py                       |   46 +-
 class4gl/interface/interface_cloudiness.py |   18 +-
 class4gl/interface/interface_koeppen.py    |   52 +-
 class4gl/model.py                          |   46 +-
 class4gl/simulations/simulations.py        |   24 +-
 21 files changed, 10307 insertions(+), 57 deletions(-)
 create mode 100644 class4gl/c4gl_setup.o4759290-0
 create mode 100644 class4gl/c4gl_setup.o4759290-1
 create mode 100644 class4gl/c4gl_setup.o4759290-2
 create mode 100644 class4gl/c4gl_setup.o4759290-3
 create mode 100644 class4gl/c4gl_setup.o4759290-4
 create mode 100644 class4gl/c4gl_setup.o4759290-5
 create mode 100644 class4gl/c4gl_setup.o4759290-6
 create mode 100644 class4gl/c4gl_setup.o4759290-7
 create mode 100644 class4gl/c4gl_sim.o4759326-0
 create mode 100644 class4gl/c4gl_sim.o4759326-1
 create mode 100644 class4gl/c4gl_sim.o4759326-2
 create mode 100644 class4gl/c4gl_sim.o4759326-3
 create mode 100644 class4gl/c4gl_sim.o4759326-4
 create mode 100644 class4gl/c4gl_sim.o4759326-5
 create mode 100644 class4gl/c4gl_sim.o4759326-6
 create mode 100644 class4gl/c4gl_sim.o4759326-7

diff --git a/class4gl/c4gl_setup.o4759290-0 b/class4gl/c4gl_setup.o4759290-0
new file mode 100644
index 0000000..45f4cc7
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-0
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml
+0 1981-01-01 00:00:00
+1 1981-01-02 00:00:00
+2 1981-01-03 00:00:00
+3 1981-01-04 00:00:00
+4 1981-01-05 00:00:00
+5 1981-01-06 00:00:00
+6 1981-01-07 00:00:00
+7 1981-01-08 00:00:00
+8 1981-01-09 00:00:00
+9 1981-01-10 00:00:00
+10 1981-01-11 00:00:00
+11 1981-01-12 00:00:00
+12 1981-01-13 00:00:00
+13 1981-01-14 00:00:00
+14 1981-01-15 00:00:00
+15 1981-01-16 00:00:00
+16 1981-01-17 00:00:00
+17 1981-01-18 00:00:00
+18 1981-01-19 00:00:00
+19 1981-01-20 00:00:00
+20 1981-01-21 00:00:00
+21 1981-01-22 00:00:00
+22 1981-01-23 00:00:00
+23 1981-01-24 00:00:00
+24 1981-01-25 00:00:00
+25 1981-01-26 00:00:00
+26 1981-01-27 00:00:00
+27 1981-01-28 00:00:00
+28 1981-01-29 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-01-30 00:00:00
+30 1981-01-31 00:00:00
+31 1981-02-01 00:00:00
+32 1981-02-02 00:00:00
+33 1981-02-03 00:00:00
+34 1981-02-04 00:00:00
+35 1981-02-05 00:00:00
+36 1981-02-06 00:00:00
+37 1981-02-07 00:00:00
+38 1981-02-08 00:00:00
+39 1981-02-09 00:00:00
+40 1981-02-10 00:00:00
+41 1981-02-11 00:00:00
+42 1981-02-12 00:00:00
+43 1981-02-13 00:00:00
+44 1981-02-14 00:00:00
+45 1981-02-15 00:00:00
+46 1981-02-16 00:00:00
+47 1981-02-17 00:00:00
+48 1981-02-18 00:00:00
+49 1981-02-19 00:00:00
+pkl file older than yaml file, so I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 51396
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.51396').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.51396 
+ obs record registered
+ next record: 102622
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.102622').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.102622 
+ obs record registered
+ next record: 155319
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.155319').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.155319 
+ obs record registered
+ next record: 207321
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.207321').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.207321 
+ obs record registered
+ next record: 261499
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.261499').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.261499 
+ obs record registered
+ next record: 312096
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.312096').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.312096 
+ obs record registered
+ next record: 363706
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.363706').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.363706 
+ obs record registered
+ next record: 416568
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.416568').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.416568 
+ obs record registered
+ next record: 470247
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.470247').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.470247 
+ obs record registered
+ next record: 524351
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.524351').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.524351 
+ obs record registered
+ next record: 577126
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.577126').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.577126 
+ obs record registered
+ next record: 629651
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.629651').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.629651 
+ obs record registered
+ next record: 682101
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.682101').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.682101 
+ obs record registered
+ next record: 734044
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.734044').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.734044 
+ obs record registered
+ next record: 786398
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.786398').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.786398 
+ obs record registered
+ next record: 838710
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.838710').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.838710 
+ obs record registered
+ next record: 891166
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.891166').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.891166 
+ obs record registered
+ next record: 943327
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.943327').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.943327 
+ obs record registered
+ next record: 995731
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.995731').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.995731 
+ obs record registered
+ next record: 1050024
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1050024').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1050024 
+ obs record registered
+ next record: 1102851
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1102851').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1102851 
+ obs record registered
+ next record: 1154781
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1154781').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1154781 
+ obs record registered
+ next record: 1208836
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1208836').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1208836 
+ obs record registered
+ next record: 1263823
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1263823').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1263823 
+ obs record registered
+ next record: 1318780
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1318780').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1318780 
+ obs record registered
+ next record: 1368688
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1368688').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1368688 
+ obs record registered
+ next record: 1420916
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1420916').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1420916 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1472567
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1472567').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1472567 
+ obs record registered
+ next record: 1524967
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1524967').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1524967 
+ obs record registered
+ next record: 1576324
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1576324').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1576324 
+ obs record registered
+ next record: 1629313
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1629313').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1629313 
+ obs record registered
+ next record: 1680371
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1680371').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1680371 
+ obs record registered
+ next record: 1731186
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1731186').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1731186 
+ obs record registered
+ next record: 1782240
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1782240').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1782240 
+ obs record registered
+ next record: 1833412
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1833412').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1833412 
+ obs record registered
+ next record: 1886384
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1886384').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1886384 
+ obs record registered
+ next record: 1937565
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1937565').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1937565 
+ obs record registered
+ next record: 1988408
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1988408').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1988408 
+ obs record registered
+ next record: 2038456
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2038456').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2038456 
+ obs record registered
+ next record: 2089432
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2089432').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2089432 
+ obs record registered
+ next record: 2141348
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2141348').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2141348 
+ obs record registered
+ next record: 2192964
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2192964').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2192964 
+ obs record registered
+ next record: 2247428
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2247428').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2247428 
+ obs record registered
+ next record: 2302316
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2302316').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2302316 
+ obs record registered
+ next record: 2356848
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2356848').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2356848 
+ obs record registered
+ next record: 2411058
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2411058').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2411058 
+ obs record registered
+ next record: 2462275
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2462275').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2462275 
+ obs record registered
+ next record: 2516002
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2516002').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2516002 
+ obs record registered
+ next record: 2568449
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2568449').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2568449 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-1 b/class4gl/c4gl_setup.o4759290-1
new file mode 100644
index 0000000..d958506
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-1
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml
+0 1981-02-20 00:00:00
+1 1981-02-21 00:00:00
+2 1981-02-22 00:00:00
+3 1981-02-23 00:00:00
+4 1981-02-24 00:00:00
+5 1981-02-25 00:00:00
+6 1981-02-26 00:00:00
+7 1981-02-27 00:00:00
+8 1981-02-28 00:00:00
+9 1981-03-01 00:00:00
+10 1981-03-02 00:00:00
+11 1981-03-03 00:00:00
+12 1981-03-04 00:00:00
+13 1981-03-05 00:00:00
+14 1981-03-06 00:00:00
+15 1981-03-07 00:00:00
+16 1981-03-08 00:00:00
+17 1981-03-09 00:00:00
+18 1981-03-10 00:00:00
+19 1981-03-11 00:00:00
+20 1981-03-12 00:00:00
+21 1981-03-13 00:00:00
+22 1981-03-14 00:00:00
+23 1981-03-15 00:00:00
+24 1981-03-16 00:00:00
+25 1981-03-17 00:00:00
+26 1981-03-18 00:00:00
+27 1981-03-19 00:00:00
+28 1981-03-20 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-03-21 00:00:00
+30 1981-03-22 00:00:00
+31 1981-03-23 00:00:00
+32 1981-03-24 00:00:00
+33 1981-03-25 00:00:00
+34 1981-03-26 00:00:00
+35 1981-03-27 00:00:00
+36 1981-03-28 00:00:00
+37 1981-03-29 00:00:00
+38 1981-03-30 00:00:00
+39 1981-03-31 00:00:00
+40 1981-04-01 00:00:00
+41 1981-04-02 00:00:00
+42 1981-04-03 00:00:00
+43 1981-04-04 00:00:00
+44 1981-04-05 00:00:00
+45 1981-04-06 00:00:00
+46 1981-04-07 00:00:00
+47 1981-04-08 00:00:00
+48 1981-04-09 00:00:00
+49 1981-04-10 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 53735
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.53735').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.53735 
+ obs record registered
+ next record: 107051
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.107051').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.107051 
+ obs record registered
+ next record: 159809
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.159809').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.159809 
+ obs record registered
+ next record: 210583
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.210583').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.210583 
+ obs record registered
+ next record: 262669
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.262669').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.262669 
+ obs record registered
+ next record: 315998
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.315998').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.315998 
+ obs record registered
+ next record: 368733
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.368733').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.368733 
+ obs record registered
+ next record: 422182
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.422182').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.422182 
+ obs record registered
+ next record: 473690
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.473690').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.473690 
+ obs record registered
+ next record: 525232
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.525232').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.525232 
+ obs record registered
+ next record: 578431
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.578431').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.578431 
+ obs record registered
+ next record: 633046
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.633046').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.633046 
+ obs record registered
+ next record: 686127
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.686127').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.686127 
+ obs record registered
+ next record: 738483
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.738483').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.738483 
+ obs record registered
+ next record: 790644
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.790644').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.790644 
+ obs record registered
+ next record: 844792
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.844792').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.844792 
+ obs record registered
+ next record: 897654
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.897654').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.897654 
+ obs record registered
+ next record: 950524
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.950524').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.950524 
+ obs record registered
+ next record: 1003819
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1003819').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1003819 
+ obs record registered
+ next record: 1057080
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1057080').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1057080 
+ obs record registered
+ next record: 1109138
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1109138').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1109138 
+ obs record registered
+ next record: 1162278
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1162278').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1162278 
+ obs record registered
+ next record: 1216404
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1216404').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1216404 
+ obs record registered
+ next record: 1268030
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1268030').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1268030 
+ obs record registered
+ next record: 1320921
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1320921').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1320921 
+ obs record registered
+ next record: 1372574
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1372574').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1372574 
+ obs record registered
+ next record: 1424704
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1424704').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1424704 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1476503
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1476503').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1476503 
+ obs record registered
+ next record: 1528706
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1528706').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1528706 
+ obs record registered
+ next record: 1583120
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1583120').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1583120 
+ obs record registered
+ next record: 1635666
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1635666').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1635666 
+ obs record registered
+ next record: 1689323
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1689323').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1689323 
+ obs record registered
+ next record: 1741836
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1741836').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1741836 
+ obs record registered
+ next record: 1795229
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1795229').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1795229 
+ obs record registered
+ next record: 1847312
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1847312').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1847312 
+ obs record registered
+ next record: 1900220
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1900220').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1900220 
+ obs record registered
+ next record: 1954053
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1954053').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1954053 
+ obs record registered
+ next record: 2005224
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2005224').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2005224 
+ obs record registered
+ next record: 2055085
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2055085').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2055085 
+ obs record registered
+ next record: 2109238
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2109238').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2109238 
+ obs record registered
+ next record: 2158379
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2158379').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2158379 
+ obs record registered
+ next record: 2213439
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2213439').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2213439 
+ obs record registered
+ next record: 2265091
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2265091').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2265091 
+ obs record registered
+ next record: 2312591
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2312591').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2312591 
+ obs record registered
+ next record: 2363001
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2363001').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2363001 
+ obs record registered
+ next record: 2417249
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2417249').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2417249 
+ obs record registered
+ next record: 2472181
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2472181').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2472181 
+ obs record registered
+ next record: 2523821
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2523821').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2523821 
+ obs record registered
+ next record: 2576934
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2576934').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2576934 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-2 b/class4gl/c4gl_setup.o4759290-2
new file mode 100644
index 0000000..d9771a1
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-2
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml
+0 1981-04-11 00:00:00
+1 1981-04-12 00:00:00
+2 1981-04-13 00:00:00
+3 1981-04-14 00:00:00
+4 1981-04-15 00:00:00
+5 1981-04-16 00:00:00
+6 1981-04-17 00:00:00
+7 1981-04-18 00:00:00
+8 1981-04-19 00:00:00
+9 1981-04-20 00:00:00
+10 1981-04-21 00:00:00
+11 1981-04-22 00:00:00
+12 1981-04-23 00:00:00
+13 1981-04-24 00:00:00
+14 1981-04-25 00:00:00
+15 1981-04-26 00:00:00
+16 1981-04-27 00:00:00
+17 1981-04-28 00:00:00
+18 1981-04-29 00:00:00
+19 1981-04-30 00:00:00
+20 1981-05-01 00:00:00
+21 1981-05-02 00:00:00
+22 1981-05-03 00:00:00
+23 1981-05-04 00:00:00
+24 1981-05-05 00:00:00
+25 1981-05-06 00:00:00
+26 1981-05-07 00:00:00
+27 1981-05-08 00:00:00
+28 1981-05-09 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-05-10 00:00:00
+30 1981-05-11 00:00:00
+31 1981-05-12 00:00:00
+32 1981-05-13 00:00:00
+33 1981-05-14 00:00:00
+34 1981-05-15 00:00:00
+35 1981-05-16 00:00:00
+36 1981-05-17 00:00:00
+37 1981-05-18 00:00:00
+38 1981-05-19 00:00:00
+39 1981-05-20 00:00:00
+40 1981-05-21 00:00:00
+41 1981-05-22 00:00:00
+42 1981-05-23 00:00:00
+43 1981-05-24 00:00:00
+44 1981-05-25 00:00:00
+45 1981-05-26 00:00:00
+46 1981-05-27 00:00:00
+47 1981-05-28 00:00:00
+48 1981-05-29 00:00:00
+49 1981-05-30 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 51067
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.51067').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.51067 
+ obs record registered
+ next record: 104322
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.104322').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.104322 
+ obs record registered
+ next record: 158125
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.158125').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.158125 
+ obs record registered
+ next record: 210001
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.210001').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.210001 
+ obs record registered
+ next record: 263270
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.263270').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.263270 
+ obs record registered
+ next record: 317457
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.317457').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.317457 
+ obs record registered
+ next record: 368268
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.368268').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.368268 
+ obs record registered
+ next record: 422781
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.422781').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.422781 
+ obs record registered
+ next record: 476050
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.476050').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.476050 
+ obs record registered
+ next record: 528316
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.528316').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.528316 
+ obs record registered
+ next record: 580931
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.580931').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.580931 
+ obs record registered
+ next record: 635773
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.635773').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.635773 
+ obs record registered
+ next record: 686190
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.686190').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.686190 
+ obs record registered
+ next record: 736979
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.736979').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.736979 
+ obs record registered
+ next record: 791508
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.791508').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.791508 
+ obs record registered
+ next record: 844468
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.844468').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.844468 
+ obs record registered
+ next record: 899058
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.899058').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.899058 
+ obs record registered
+ next record: 950395
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.950395').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.950395 
+ obs record registered
+ next record: 1001879
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1001879').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1001879 
+ obs record registered
+ next record: 1054711
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1054711').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1054711 
+ obs record registered
+ next record: 1107591
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1107591').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1107591 
+ obs record registered
+ next record: 1161498
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1161498').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1161498 
+ obs record registered
+ next record: 1216461
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1216461').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1216461 
+ obs record registered
+ next record: 1270638
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1270638').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1270638 
+ obs record registered
+ next record: 1323032
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1323032').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1323032 
+ obs record registered
+ next record: 1374748
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1374748').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1374748 
+ obs record registered
+ next record: 1426449
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1426449').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1426449 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1479418
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1479418').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1479418 
+ obs record registered
+ next record: 1533453
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1533453').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1533453 
+ obs record registered
+ next record: 1585360
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1585360').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1585360 
+ obs record registered
+ next record: 1638642
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1638642').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1638642 
+ obs record registered
+ next record: 1693282
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1693282').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1693282 
+ obs record registered
+ next record: 1746969
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1746969').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1746969 
+ obs record registered
+ next record: 1798551
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1798551').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1798551 
+ obs record registered
+ next record: 1851912
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1851912').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1851912 
+ obs record registered
+ next record: 1906431
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1906431').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1906431 
+ obs record registered
+ next record: 1957671
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1957671').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1957671 
+ obs record registered
+ next record: 2008390
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2008390').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2008390 
+ obs record registered
+ next record: 2059270
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2059270').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2059270 
+ obs record registered
+ next record: 2113052
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2113052').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2113052 
+ obs record registered
+ next record: 2167306
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2167306').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2167306 
+ obs record registered
+ next record: 2222303
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2222303').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2222303 
+ obs record registered
+ next record: 2276984
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2276984').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2276984 
+ obs record registered
+ next record: 2328277
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2328277').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2328277 
+ obs record registered
+ next record: 2381273
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2381273').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2381273 
+ obs record registered
+ next record: 2435826
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2435826').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2435826 
+ obs record registered
+ next record: 2489579
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2489579').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2489579 
+ obs record registered
+ next record: 2543769
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2543769').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2543769 
+ obs record registered
+ next record: 2597864
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2597864').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2597864 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-3 b/class4gl/c4gl_setup.o4759290-3
new file mode 100644
index 0000000..17f4526
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-3
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml
+0 1981-05-31 00:00:00
+1 1981-06-01 00:00:00
+2 1981-06-02 00:00:00
+3 1981-06-03 00:00:00
+4 1981-06-04 00:00:00
+5 1981-06-05 00:00:00
+6 1981-06-06 00:00:00
+7 1981-06-07 00:00:00
+8 1981-06-08 00:00:00
+9 1981-06-09 00:00:00
+10 1981-06-10 00:00:00
+11 1981-06-11 00:00:00
+12 1981-06-12 00:00:00
+13 1981-06-13 00:00:00
+14 1981-06-14 00:00:00
+15 1981-06-15 00:00:00
+16 1981-06-16 00:00:00
+17 1981-06-17 00:00:00
+18 1981-06-18 00:00:00
+19 1981-06-19 00:00:00
+20 1981-06-20 00:00:00
+21 1981-06-21 00:00:00
+22 1981-06-22 00:00:00
+23 1981-06-23 00:00:00
+24 1981-06-24 00:00:00
+25 1981-06-25 00:00:00
+26 1981-06-26 00:00:00
+27 1981-06-27 00:00:00
+28 1981-06-28 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-06-29 00:00:00
+30 1981-06-30 00:00:00
+31 1981-07-01 00:00:00
+32 1981-07-02 00:00:00
+33 1981-07-03 00:00:00
+34 1981-07-04 00:00:00
+35 1981-07-05 00:00:00
+36 1981-07-06 00:00:00
+37 1981-07-07 00:00:00
+38 1981-07-08 00:00:00
+39 1981-07-09 00:00:00
+40 1981-07-10 00:00:00
+41 1981-07-11 00:00:00
+42 1981-07-12 00:00:00
+43 1981-07-13 00:00:00
+44 1981-07-14 00:00:00
+45 1981-07-15 00:00:00
+46 1981-07-16 00:00:00
+47 1981-07-17 00:00:00
+48 1981-07-18 00:00:00
+49 1981-07-19 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 54256
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.54256').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.54256 
+ obs record registered
+ next record: 106783
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.106783').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.106783 
+ obs record registered
+ next record: 161041
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.161041').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.161041 
+ obs record registered
+ next record: 213499
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.213499').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.213499 
+ obs record registered
+ next record: 268606
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.268606').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.268606 
+ obs record registered
+ next record: 321942
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.321942').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.321942 
+ obs record registered
+ next record: 376318
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.376318').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.376318 
+ obs record registered
+ next record: 429745
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.429745').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.429745 
+ obs record registered
+ next record: 480238
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.480238').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.480238 
+ obs record registered
+ next record: 534353
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.534353').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.534353 
+ obs record registered
+ next record: 587556
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.587556').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.587556 
+ obs record registered
+ next record: 642546
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.642546').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.642546 
+ obs record registered
+ next record: 696672
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.696672').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.696672 
+ obs record registered
+ next record: 748004
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.748004').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.748004 
+ obs record registered
+ next record: 798522
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.798522').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.798522 
+ obs record registered
+ next record: 849366
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.849366').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.849366 
+ obs record registered
+ next record: 900431
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.900431').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.900431 
+ obs record registered
+ next record: 955333
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.955333').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.955333 
+ obs record registered
+ next record: 1008606
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1008606').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1008606 
+ obs record registered
+ next record: 1062216
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1062216').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1062216 
+ obs record registered
+ next record: 1116153
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1116153').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1116153 
+ obs record registered
+ next record: 1169750
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1169750').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1169750 
+ obs record registered
+ next record: 1220569
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1220569').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1220569 
+ obs record registered
+ next record: 1274715
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1274715').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1274715 
+ obs record registered
+ next record: 1325734
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1325734').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1325734 
+ obs record registered
+ next record: 1378235
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1378235').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1378235 
+ obs record registered
+ next record: 1430796
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1430796').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1430796 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1484617
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1484617').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1484617 
+ obs record registered
+ next record: 1539402
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1539402').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1539402 
+ obs record registered
+ next record: 1591441
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1591441').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1591441 
+ obs record registered
+ next record: 1645914
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1645914').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1645914 
+ obs record registered
+ next record: 1698613
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1698613').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1698613 
+ obs record registered
+ next record: 1751712
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1751712').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1751712 
+ obs record registered
+ next record: 1805578
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1805578').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1805578 
+ obs record registered
+ next record: 1859984
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1859984').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1859984 
+ obs record registered
+ next record: 1912886
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1912886').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1912886 
+ obs record registered
+ next record: 1966234
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1966234').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1966234 
+ obs record registered
+ next record: 2020399
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2020399').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2020399 
+ obs record registered
+ next record: 2071191
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2071191').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2071191 
+ obs record registered
+ next record: 2122985
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2122985').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2122985 
+ obs record registered
+ next record: 2176760
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2176760').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2176760 
+ obs record registered
+ next record: 2230944
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2230944').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2230944 
+ obs record registered
+ next record: 2284495
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2284495').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2284495 
+ obs record registered
+ next record: 2337035
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2337035').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2337035 
+ obs record registered
+ next record: 2390194
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2390194').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2390194 
+ obs record registered
+ next record: 2444461
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2444461').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2444461 
+ obs record registered
+ next record: 2497851
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2497851').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2497851 
+ obs record registered
+ next record: 2551916
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2551916').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2551916 
+ obs record registered
+ next record: 2606609
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2606609').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2606609 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-4 b/class4gl/c4gl_setup.o4759290-4
new file mode 100644
index 0000000..7fc0cea
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-4
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml
+0 1981-07-20 00:00:00
+1 1981-07-21 00:00:00
+2 1981-07-22 00:00:00
+3 1981-07-23 00:00:00
+4 1981-07-24 00:00:00
+5 1981-07-25 00:00:00
+6 1981-07-26 00:00:00
+7 1981-07-27 00:00:00
+8 1981-07-28 00:00:00
+9 1981-07-29 00:00:00
+10 1981-07-30 00:00:00
+11 1981-07-31 00:00:00
+12 1981-08-01 00:00:00
+13 1981-08-02 00:00:00
+14 1981-08-03 00:00:00
+15 1981-08-04 00:00:00
+16 1981-08-05 00:00:00
+17 1981-08-06 00:00:00
+18 1981-08-07 00:00:00
+19 1981-08-08 00:00:00
+20 1981-08-09 00:00:00
+21 1981-08-10 00:00:00
+22 1981-08-11 00:00:00
+23 1981-08-12 00:00:00
+24 1981-08-13 00:00:00
+25 1981-08-14 00:00:00
+26 1981-08-15 00:00:00
+27 1981-08-16 00:00:00
+28 1981-08-17 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-08-18 00:00:00
+30 1981-08-19 00:00:00
+31 1981-08-20 00:00:00
+32 1981-08-21 00:00:00
+33 1981-08-22 00:00:00
+34 1981-08-23 00:00:00
+35 1981-08-24 00:00:00
+36 1981-08-25 00:00:00
+37 1981-08-26 00:00:00
+38 1981-08-27 00:00:00
+39 1981-08-28 00:00:00
+40 1981-08-29 00:00:00
+41 1981-08-30 00:00:00
+42 1981-08-31 00:00:00
+43 1981-09-01 00:00:00
+44 1981-09-02 00:00:00
+45 1981-09-03 00:00:00
+46 1981-09-04 00:00:00
+47 1981-09-05 00:00:00
+48 1981-09-06 00:00:00
+49 1981-09-07 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 51731
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.51731').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.51731 
+ obs record registered
+ next record: 104580
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.104580').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.104580 
+ obs record registered
+ next record: 157565
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.157565').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.157565 
+ obs record registered
+ next record: 210613
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.210613').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.210613 
+ obs record registered
+ next record: 264470
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.264470').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.264470 
+ obs record registered
+ next record: 318729
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.318729').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.318729 
+ obs record registered
+ next record: 370870
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.370870').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.370870 
+ obs record registered
+ next record: 423492
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.423492').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.423492 
+ obs record registered
+ next record: 474750
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.474750').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.474750 
+ obs record registered
+ next record: 529842
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.529842').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.529842 
+ obs record registered
+ next record: 583680
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.583680').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.583680 
+ obs record registered
+ next record: 637895
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.637895').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.637895 
+ obs record registered
+ next record: 692631
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.692631').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.692631 
+ obs record registered
+ next record: 746761
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.746761').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.746761 
+ obs record registered
+ next record: 800591
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.800591').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.800591 
+ obs record registered
+ next record: 854394
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.854394').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.854394 
+ obs record registered
+ next record: 909045
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.909045').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.909045 
+ obs record registered
+ next record: 963060
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.963060').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.963060 
+ obs record registered
+ next record: 1015112
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1015112').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1015112 
+ obs record registered
+ next record: 1067515
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1067515').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1067515 
+ obs record registered
+ next record: 1121265
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1121265').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1121265 
+ obs record registered
+ next record: 1175359
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1175359').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1175359 
+ obs record registered
+ next record: 1227436
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1227436').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1227436 
+ obs record registered
+ next record: 1279990
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1279990').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1279990 
+ obs record registered
+ next record: 1333481
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1333481').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1333481 
+ obs record registered
+ next record: 1388277
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1388277').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1388277 
+ obs record registered
+ next record: 1440447
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1440447').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1440447 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1492950
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1492950').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1492950 
+ obs record registered
+ next record: 1544819
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1544819').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1544819 
+ obs record registered
+ next record: 1597484
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1597484').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1597484 
+ obs record registered
+ next record: 1650523
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1650523').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1650523 
+ obs record registered
+ next record: 1703570
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1703570').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1703570 
+ obs record registered
+ next record: 1755369
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1755369').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1755369 
+ obs record registered
+ next record: 1808308
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1808308').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1808308 
+ obs record registered
+ next record: 1862932
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1862932').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1862932 
+ obs record registered
+ next record: 1917988
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1917988').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1917988 
+ obs record registered
+ next record: 1972660
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1972660').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1972660 
+ obs record registered
+ next record: 2027192
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2027192').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2027192 
+ obs record registered
+ next record: 2080750
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2080750').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2080750 
+ obs record registered
+ next record: 2134876
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2134876').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2134876 
+ obs record registered
+ next record: 2187060
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2187060').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2187060 
+ obs record registered
+ next record: 2240918
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2240918').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2240918 
+ obs record registered
+ next record: 2293459
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2293459').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2293459 
+ obs record registered
+ next record: 2345446
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2345446').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2345446 
+ obs record registered
+ next record: 2399559
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2399559').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2399559 
+ obs record registered
+ next record: 2453394
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2453394').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2453394 
+ obs record registered
+ next record: 2507446
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2507446').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2507446 
+ obs record registered
+ next record: 2560803
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2560803').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2560803 
+ obs record registered
+ next record: 2613551
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2613551').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2613551 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-5 b/class4gl/c4gl_setup.o4759290-5
new file mode 100644
index 0000000..3ffddc2
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-5
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml
+0 1981-09-08 00:00:00
+1 1981-09-09 00:00:00
+2 1981-09-10 00:00:00
+3 1981-09-11 00:00:00
+4 1981-09-12 00:00:00
+5 1981-09-13 00:00:00
+6 1981-09-14 00:00:00
+7 1981-09-15 00:00:00
+8 1981-09-16 00:00:00
+9 1981-09-17 00:00:00
+10 1981-09-18 00:00:00
+11 1981-09-19 00:00:00
+12 1981-09-20 00:00:00
+13 1981-09-21 00:00:00
+14 1981-09-22 00:00:00
+15 1981-09-23 00:00:00
+16 1981-09-24 00:00:00
+17 1981-09-25 00:00:00
+18 1981-09-26 00:00:00
+19 1981-09-27 00:00:00
+20 1981-09-28 00:00:00
+21 1981-09-29 00:00:00
+22 1981-09-30 00:00:00
+23 1981-10-01 00:00:00
+24 1981-10-02 00:00:00
+25 1981-10-03 00:00:00
+26 1981-10-04 00:00:00
+27 1981-10-05 00:00:00
+28 1981-10-06 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-10-07 00:00:00
+30 1981-10-08 00:00:00
+31 1981-10-09 00:00:00
+32 1981-10-10 00:00:00
+33 1981-10-11 00:00:00
+34 1981-10-12 00:00:00
+35 1981-10-13 00:00:00
+36 1981-10-14 00:00:00
+37 1981-10-15 00:00:00
+38 1981-10-16 00:00:00
+39 1981-10-17 00:00:00
+40 1981-10-18 00:00:00
+41 1981-10-19 00:00:00
+42 1981-10-20 00:00:00
+43 1981-10-21 00:00:00
+44 1981-10-22 00:00:00
+45 1981-10-23 00:00:00
+46 1981-10-24 00:00:00
+47 1981-10-25 00:00:00
+48 1981-10-26 00:00:00
+49 1981-10-27 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 53791
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.53791').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.53791 
+ obs record registered
+ next record: 108418
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.108418').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.108418 
+ obs record registered
+ next record: 160595
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.160595').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.160595 
+ obs record registered
+ next record: 213556
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.213556').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.213556 
+ obs record registered
+ next record: 267801
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.267801').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.267801 
+ obs record registered
+ next record: 322353
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.322353').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.322353 
+ obs record registered
+ next record: 376117
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.376117').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.376117 
+ obs record registered
+ next record: 428437
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.428437').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.428437 
+ obs record registered
+ next record: 482412
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.482412').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.482412 
+ obs record registered
+ next record: 534383
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.534383').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.534383 
+ obs record registered
+ next record: 587814
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.587814').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.587814 
+ obs record registered
+ next record: 639583
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.639583').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.639583 
+ obs record registered
+ next record: 692536
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.692536').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.692536 
+ obs record registered
+ next record: 746174
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.746174').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.746174 
+ obs record registered
+ next record: 798143
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.798143').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.798143 
+ obs record registered
+ next record: 850877
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.850877').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.850877 
+ obs record registered
+ next record: 905196
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.905196').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.905196 
+ obs record registered
+ next record: 959041
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.959041').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.959041 
+ obs record registered
+ next record: 1010607
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1010607').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1010607 
+ obs record registered
+ next record: 1063046
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1063046').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1063046 
+ obs record registered
+ next record: 1118074
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1118074').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1118074 
+ obs record registered
+ next record: 1173117
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1173117').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1173117 
+ obs record registered
+ next record: 1225841
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1225841').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1225841 
+ obs record registered
+ next record: 1276984
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1276984').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1276984 
+ obs record registered
+ next record: 1329882
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1329882').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1329882 
+ obs record registered
+ next record: 1384174
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1384174').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1384174 
+ obs record registered
+ next record: 1438389
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1438389').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1438389 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1492962
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1492962').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1492962 
+ obs record registered
+ next record: 1544189
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1544189').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1544189 
+ obs record registered
+ next record: 1598345
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1598345').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1598345 
+ obs record registered
+ next record: 1651761
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1651761').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1651761 
+ obs record registered
+ next record: 1705208
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1705208').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1705208 
+ obs record registered
+ next record: 1759539
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1759539').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1759539 
+ obs record registered
+ next record: 1812463
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1812463').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1812463 
+ obs record registered
+ next record: 1865446
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1865446').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1865446 
+ obs record registered
+ next record: 1918746
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1918746').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1918746 
+ obs record registered
+ next record: 1972414
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1972414').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1972414 
+ obs record registered
+ next record: 2025735
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2025735').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2025735 
+ obs record registered
+ next record: 2079164
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2079164').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2079164 
+ obs record registered
+ next record: 2132791
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2132791').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2132791 
+ obs record registered
+ next record: 2181910
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2181910').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2181910 
+ obs record registered
+ next record: 2233649
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2233649').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2233649 
+ obs record registered
+ next record: 2284947
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2284947').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2284947 
+ obs record registered
+ next record: 2339510
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2339510').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2339510 
+ obs record registered
+ next record: 2391981
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2391981').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2391981 
+ obs record registered
+ next record: 2443387
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2443387').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2443387 
+ obs record registered
+ next record: 2497540
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2497540').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2497540 
+ obs record registered
+ next record: 2552060
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2552060').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2552060 
+ obs record registered
+ next record: 2604997
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2604997').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2604997 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-6 b/class4gl/c4gl_setup.o4759290-6
new file mode 100644
index 0000000..bd1fb54
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-6
@@ -0,0 +1,1251 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml
+0 1981-10-28 00:00:00
+1 1981-10-29 00:00:00
+2 1981-10-30 00:00:00
+3 1981-10-31 00:00:00
+4 1981-11-01 00:00:00
+5 1981-11-02 00:00:00
+6 1981-11-03 00:00:00
+7 1981-11-04 00:00:00
+8 1981-11-05 00:00:00
+9 1981-11-06 00:00:00
+10 1981-11-07 00:00:00
+11 1981-11-08 00:00:00
+12 1981-11-09 00:00:00
+13 1981-11-10 00:00:00
+14 1981-11-11 00:00:00
+15 1981-11-12 00:00:00
+16 1981-11-13 00:00:00
+17 1981-11-14 00:00:00
+18 1981-11-15 00:00:00
+19 1981-11-16 00:00:00
+20 1981-11-17 00:00:00
+21 1981-11-18 00:00:00
+22 1981-11-19 00:00:00
+23 1981-11-20 00:00:00
+24 1981-11-21 00:00:00
+25 1981-11-22 00:00:00
+26 1981-11-23 00:00:00
+27 1981-11-24 00:00:00
+28 1981-11-25 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+
+29 1981-11-26 00:00:00
+30 1981-11-27 00:00:00
+31 1981-11-28 00:00:00
+32 1981-11-29 00:00:00
+33 1981-11-30 00:00:00
+34 1981-12-01 00:00:00
+35 1981-12-02 00:00:00
+36 1981-12-03 00:00:00
+37 1981-12-04 00:00:00
+38 1981-12-05 00:00:00
+39 1981-12-06 00:00:00
+40 1981-12-07 00:00:00
+41 1981-12-08 00:00:00
+42 1981-12-09 00:00:00
+43 1981-12-10 00:00:00
+44 1981-12-11 00:00:00
+45 1981-12-12 00:00:00
+46 1981-12-13 00:00:00
+47 1981-12-14 00:00:00
+48 1981-12-15 00:00:00
+49 1981-12-16 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml"...
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 55140
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.55140').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.55140 
+ obs record registered
+ next record: 108893
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.108893').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.108893 
+ obs record registered
+ next record: 163443
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.163443').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.163443 
+ obs record registered
+ next record: 218397
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.218397').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.218397 
+ obs record registered
+ next record: 271582
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.271582').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.271582 
+ obs record registered
+ next record: 324721
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.324721').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.324721 
+ obs record registered
+ next record: 377607
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.377607').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.377607 
+ obs record registered
+ next record: 430834
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.430834').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.430834 
+ obs record registered
+ next record: 483626
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.483626').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.483626 
+ obs record registered
+ next record: 535205
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.535205').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.535205 
+ obs record registered
+ next record: 589944
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.589944').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.589944 
+ obs record registered
+ next record: 642475
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.642475').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.642475 
+ obs record registered
+ next record: 694024
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.694024').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.694024 
+ obs record registered
+ next record: 748833
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.748833').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.748833 
+ obs record registered
+ next record: 803389
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.803389').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.803389 
+ obs record registered
+ next record: 856629
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.856629').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.856629 
+ obs record registered
+ next record: 910612
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.910612').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.910612 
+ obs record registered
+ next record: 965476
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.965476').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.965476 
+ obs record registered
+ next record: 1020450
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1020450').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1020450 
+ obs record registered
+ next record: 1074651
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1074651').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1074651 
+ obs record registered
+ next record: 1127994
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1127994').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1127994 
+ obs record registered
+ next record: 1182110
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1182110').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1182110 
+ obs record registered
+ next record: 1233619
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1233619').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1233619 
+ obs record registered
+ next record: 1284930
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1284930').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1284930 
+ obs record registered
+ next record: 1337484
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1337484').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1337484 
+ obs record registered
+ next record: 1392464
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1392464').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1392464 
+ obs record registered
+ next record: 1446248
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1446248').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1446248 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ obs record registered
+ next record: 1499604
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1499604').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1499604 
+ obs record registered
+ next record: 1552283
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1552283').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1552283 
+ obs record registered
+ next record: 1606822
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1606822').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1606822 
+ obs record registered
+ next record: 1658571
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1658571').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1658571 
+ obs record registered
+ next record: 1712543
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1712543').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1712543 
+ obs record registered
+ next record: 1766710
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1766710').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1766710 
+ obs record registered
+ next record: 1819637
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1819637').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1819637 
+ obs record registered
+ next record: 1870451
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1870451').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1870451 
+ obs record registered
+ next record: 1921627
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1921627').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1921627 
+ obs record registered
+ next record: 1975018
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1975018').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1975018 
+ obs record registered
+ next record: 2027134
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2027134').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2027134 
+ obs record registered
+ next record: 2079285
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2079285').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2079285 
+ obs record registered
+ next record: 2134131
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2134131').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2134131 
+ obs record registered
+ next record: 2185735
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2185735').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2185735 
+ obs record registered
+ next record: 2237453
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2237453').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2237453 
+ obs record registered
+ next record: 2291618
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2291618').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2291618 
+ obs record registered
+ next record: 2345783
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2345783').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2345783 
+ obs record registered
+ next record: 2399221
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2399221').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2399221 
+ obs record registered
+ next record: 2454054
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2454054').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2454054 
+ obs record registered
+ next record: 2508671
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2508671').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2508671 
+ obs record registered
+ next record: 2563249
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2563249').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2563249 
+ obs record registered
+ next record: 2616063
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2616063').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2616063 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-7 b/class4gl/c4gl_setup.o4759290-7
new file mode 100644
index 0000000..ee5acc9
--- /dev/null
+++ b/class4gl/c4gl_setup.o4759290-7
@@ -0,0 +1,469 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_error_handling
+C4GLJOB_exec
+C4GLJOB_first_YYYYMMDD
+C4GLJOB_last_YYYYMMDD
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_experiments
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
+  warnings.warn('omitting pressure field p and advection')
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  enable_cftimeindex)
+/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
+  return self.func(self.array[key])
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+Initializing global data
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
+setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
+setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
+setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
+calculating texture
+calculating texture type
+calculating soil parameter
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
+opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
+start looping over chunk
+Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml
+0 1981-12-17 00:00:00
+1 1981-12-18 00:00:00
+2 1981-12-19 00:00:00
+3 1981-12-20 00:00:00
+4 1981-12-21 00:00:00
+5 1981-12-22 00:00:00
+6 1981-12-23 00:00:00
+7 1981-12-24 00:00:00
+8 1981-12-25 00:00:00
+9 1981-12-26 00:00:00
+10 1981-12-27 00:00:00
+11 1981-12-28 00:00:00
+12 1981-12-29 00:00:00
+13 1981-12-30 00:00:00
+14 1981-12-31 00:00:00
+15 1982-01-01 00:00:00
+pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml"...
+refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml".../user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame.
+Try using .loc[row_indexer,col_indexer] = value instead
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  air_ap_head[column] = ml_mean[column]
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
+A value is trying to be set on a copy of a slice from a DataFrame
+
+See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
+  mode=air_ap_mode)
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
+  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+ next record: 4
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.4 
+ obs record registered
+ next record: 53111
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.53111').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.53111 
+ obs record registered
+ next record: 106374
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.106374').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.106374 
+ obs record registered
+ next record: 159575
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.159575').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.159575 
+ obs record registered
+ next record: 214479
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.214479').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.214479 
+ obs record registered
+ next record: 265342
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.265342').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.265342 
+ obs record registered
+ next record: 320174
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.320174').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.320174 
+ obs record registered
+ next record: 372684
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.372684').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.372684 
+ obs record registered
+ next record: 423846
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.423846').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.423846 
+ obs record registered
+ next record: 476614
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.476614').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.476614 
+ obs record registered
+ next record: 529909
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.529909').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.529909 
+ obs record registered
+ next record: 581984
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.581984').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.581984 
+ obs record registered
+ next record: 634675
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.634675').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.634675 
+ obs record registered
+ next record: 688831
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.688831').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.688831 
+ obs record registered
+ next record: 743330
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.743330').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.743330 
+ obs record registered
+ next record: 797329
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.797329').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.797329 
+ obs record registered
+writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_sim.o4759326-0 b/class4gl/c4gl_sim.o4759326-0
new file mode 100644
index 0000000..f8ba869
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-0
@@ -0,0 +1,227 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+       Unnamed: 0          filename  latitude  longitude
+74560           0  74560_0_ini.yaml     40.15     -89.33
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (0)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 0
+Fetching initial/forcing records
+starting station chunk number: 0(size: 50 soundings)
+starting 1 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.4 
+Warning: Ps input was provided (99384.96484375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91367.05564375Pa).
+run not succesfull
+starting 2 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.51396').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.51396 
+Warning: Ps input was provided (100124.83463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89846.70143541667Pa).
+run not succesfull
+starting 3 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.102622').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.102622 
+Warning: Ps input was provided (99980.35416666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96094.41696666667Pa).
+run not succesfull
+starting 4 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.155319').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.155319 
+Warning: Ps input was provided (101228.97786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94990.99506458333Pa).
+run not succesfull
+starting 5 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.207321').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.207321 
+Warning: Ps input was provided (100561.96223958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99736.74503958333Pa).
+run not succesfull
+starting 6 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.261499').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.261499 
+Warning: Ps input was provided (98617.18880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89281.99280208333Pa).
+run not succesfull
+starting 7 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.312096').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.312096 
+Warning: Ps input was provided (99861.65104166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91664.80744166666Pa).
+run not succesfull
+starting 8 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.363706').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.363706 
+Warning: Ps input was provided (99783.63932291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96377.99972291668Pa).
+run not succesfull
+starting 9 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.416568').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.416568 
+Warning: Ps input was provided (100101.34765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98246.08045625Pa).
+run not succesfull
+starting 10 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.470247').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.470247 
+Warning: Ps input was provided (101217.34895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100305.01895833333Pa).
+run not succesfull
+starting 11 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.524351').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.524351 
+Warning: Ps input was provided (101140.765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99524.470025Pa).
+run not succesfull
+starting 12 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.577126').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.577126 
+Warning: Ps input was provided (99971.93880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94347.27720208332Pa).
+run not succesfull
+starting 13 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.629651').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.629651 
+Warning: Ps input was provided (98563.27864583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93396.54784583332Pa).
+run not succesfull
+starting 14 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.682101').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.682101 
+Warning: Ps input was provided (98789.33203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97675.70083125Pa).
+run not succesfull
+starting 15 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.734044').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.734044 
+Warning: Ps input was provided (99563.50390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95163.13030625Pa).
+run not succesfull
+starting 16 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.786398').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.786398 
+Warning: Ps input was provided (100669.421875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95027.102275Pa).
+run not succesfull
+starting 17 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.838710').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.838710 
+Warning: Ps input was provided (100948.26692708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96705.63812708332Pa).
+run not succesfull
+starting 18 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.891166').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.891166 
+Warning: Ps input was provided (100156.60026041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92800.27746041668Pa).
+run not succesfull
+starting 19 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.943327').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.943327 
+Warning: Ps input was provided (99349.36979166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93320.92859166667Pa).
+run not succesfull
+starting 20 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.995731').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.995731 
+Warning: Ps input was provided (99228.83203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98584.90363125Pa).
+run not succesfull
+starting 21 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1050024').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1050024 
+Warning: Ps input was provided (99770.28515625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96156.28115625Pa).
+run not succesfull
+starting 22 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1102851').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1102851 
+Warning: Ps input was provided (99380.921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94221.254275Pa).
+run not succesfull
+starting 23 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1154781').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1154781 
+Warning: Ps input was provided (99095.53255208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98209.10095208333Pa).
+run not succesfull
+starting 24 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1208836').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1208836 
+Warning: Ps input was provided (98733.72786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98616.00786458333Pa).
+run not succesfull
+starting 25 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1263823').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1263823 
+Warning: Ps input was provided (97895.48697916667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97777.76697916667Pa).
+run not succesfull
+starting 26 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1318780').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1318780 
+Warning: Ps input was provided (98477.62369791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (78494.65369791667Pa).
+run not succesfull
+starting 27 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1368688').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1368688 
+Warning: Ps input was provided (98800.078125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93290.782125Pa).
+run not succesfull
+starting 28 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1420916').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1420916 
+Warning: Ps input was provided (99248.63541666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90668.02461666668Pa).
+run not succesfull
+starting 29 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1472567').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1472567 
+Warning: Ps input was provided (100427.43880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96005.87560208332Pa).
+run not succesfull
+starting 30 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1524967').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1524967 
+Warning: Ps input was provided (101123.49348958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90677.02068958333Pa).
+run not succesfull
+starting 31 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1576324').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1576324 
+Warning: Ps input was provided (99848.95182291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95586.31062291667Pa).
+run not succesfull
+starting 32 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1629313').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1629313 
+Warning: Ps input was provided (97864.29296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87103.50776875Pa).
+run not succesfull
+starting 33 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1680371').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1680371 
+Warning: Ps input was provided (99503.52864583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87630.28944583333Pa).
+run not succesfull
+starting 34 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1731186').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1731186 
+Warning: Ps input was provided (100169.15364583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86911.52724583333Pa).
+run not succesfull
+starting 35 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1782240').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1782240 
+Warning: Ps input was provided (100522.32291666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88935.14331666667Pa).
+run not succesfull
+starting 36 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1833412').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1833412 
+Warning: Ps input was provided (100123.75260416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96482.67300416667Pa).
+run not succesfull
+starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
+  warnings.warn("Key '"+key+"' may not be implemented.")
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
+  self.L = zsl/self.zeta
+ 366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1886384').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1886384 
+Warning: Ps input was provided (99870.125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89217.6422Pa).
+run not succesfull
+starting 38 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1937565').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1937565 
+Warning: Ps input was provided (98704.2734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86112.9422375Pa).
+run not succesfull
+starting 39 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1988408').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1988408 
+Warning: Ps input was provided (99314.90885416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (81565.08725416668Pa).
+run not succesfull
+starting 40 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2038456').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2038456 
+Warning: Ps input was provided (99467.91145833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84809.41705833333Pa).
+run not succesfull
+starting 41 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2089432').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2089432 
+Warning: Ps input was provided (97532.21354166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91573.22714166668Pa).
+run not succesfull
+starting 42 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2141348').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2141348 
+Warning: Ps input was provided (100356.95442708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92337.86802708333Pa).
+run not succesfull
+starting 43 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2192964').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2192964 
+Warning: Ps input was provided (101625.39453125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101236.91853125Pa).
+run not succesfull
+starting 44 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2247428').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2247428 
+Warning: Ps input was provided (101831.296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101700.627675Pa).
+run not succesfull
+starting 45 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2302316').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2302316 
+Warning: Ps input was provided (101392.83463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101039.67463541667Pa).
+run not succesfull
+starting 46 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2356848').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2356848 
+Warning: Ps input was provided (100442.67317708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99184.24637708333Pa).
+run not succesfull
+starting 47 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2411058').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2411058 
+Warning: Ps input was provided (99915.86848958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88082.65408958333Pa).
+run not succesfull
+starting 48 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2462275').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2462275 
+Warning: Ps input was provided (99762.47265625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97849.52265625Pa).
+run not succesfull
+starting 49 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2516002').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2516002 
+Warning: Ps input was provided (99270.4765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93171.4033625Pa).
+run not succesfull
+starting 50 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2568449').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2568449 
+Warning: Ps input was provided (98692.77994791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97569.73114791667Pa).
+run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-1 b/class4gl/c4gl_sim.o4759326-1
new file mode 100644
index 0000000..87274a1
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-1
@@ -0,0 +1,231 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+       Unnamed: 0          filename  latitude  longitude
+74560           0  74560_0_ini.yaml     40.15     -89.33
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (1)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 1
+Fetching initial/forcing records
+starting station chunk number: 1(size: 50 soundings)
+starting 1 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.4 
+Warning: Ps input was provided (99189.77604166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97712.39004166667Pa).
+run not succesfull
+starting 2 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.53735').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.53735 
+Warning: Ps input was provided (98917.74609375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96435.03129375Pa).
+run not succesfull
+starting 3 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.107051').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.107051 
+Warning: Ps input was provided (97149.79296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93409.82856875Pa).
+run not succesfull
+starting 4 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.159809').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.159809 
+Warning: Ps input was provided (97484.921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (82631.012275Pa).
+run not succesfull
+starting 5 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.210583').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.210583 
+Warning: Ps input was provided (99121.34895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93010.50375833333Pa).
+run not succesfull
+starting 6 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.262669').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.262669 
+Warning: Ps input was provided (99433.81119791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97557.35439791667Pa).
+run not succesfull
+starting 7 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.315998').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.315998 
+Warning: Ps input was provided (100124.39713541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95708.71993541667Pa).
+run not succesfull
+starting 8 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.368733').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.368733 
+Warning: Ps input was provided (99289.96223958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96497.64383958332Pa).
+run not succesfull
+starting 9 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.422182').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.422182 
+Warning: Ps input was provided (99191.98567708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90040.43287708332Pa).
+run not succesfull
+starting 10 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.473690').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.473690 
+Warning: Ps input was provided (99402.77604166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90833.93724166667Pa).
+run not succesfull
+starting 11 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.525232').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.525232 
+Warning: Ps input was provided (99445.84114583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96549.92914583333Pa).
+run not succesfull
+starting 12 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.578431').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.578431 
+Warning: Ps input was provided (99561.35026041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99443.63026041667Pa).
+run not succesfull
+starting 13 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.633046').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.633046 
+Warning: Ps input was provided (98115.078125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97646.552525Pa).
+run not succesfull
+starting 14 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.686127').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.686127 
+Warning: Ps input was provided (98767.19401041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93118.98841041667Pa).
+run not succesfull
+starting 15 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.738483').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.738483 
+Warning: Ps input was provided (99317.18489583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94815.57209583333Pa).
+run not succesfull
+starting 16 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.790644').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.790644 
+Warning: Ps input was provided (100321.02083333333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100132.66883333333Pa).
+run not succesfull
+starting 17 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.844792').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.844792 
+Warning: Ps input was provided (100697.07291666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100287.40731666668Pa).
+run not succesfull
+starting 18 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.897654').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.897654 
+Warning: Ps input was provided (100432.74088541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99341.47648541667Pa).
+run not succesfull
+starting 19 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.950524').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.950524 
+Warning: Ps input was provided (100163.28125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99861.91805Pa).
+run not succesfull
+starting 20 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1003819').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1003819 
+Warning: Ps input was provided (100273.30598958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98120.20718958332Pa).
+run not succesfull
+starting 21 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1057080').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1057080 
+Warning: Ps input was provided (99064.4140625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91138.3264625Pa).
+run not succesfull
+starting 22 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1109138').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1109138 
+Warning: Ps input was provided (99177.81380208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96793.98380208333Pa).
+run not succesfull
+starting 23 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1162278').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1162278 
+Warning: Ps input was provided (99862.77734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99573.18614375Pa).
+run not succesfull
+starting 24 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1216404').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1216404 
+Warning: Ps input was provided (98215.37630208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88103.22830208333Pa).
+run not succesfull
+starting 25 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1268030').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1268030 
+Warning: Ps input was provided (99520.6640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98864.9636625Pa).
+run not succesfull
+starting 26 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1320921').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1320921 
+Warning: Ps input was provided (97935.72135416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90289.80735416667Pa).
+run not succesfull
+starting 27 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1372574').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1372574 
+Warning: Ps input was provided (98352.68619791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92378.39619791668Pa).
+run not succesfull
+starting 28 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1424704').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1424704 
+Warning: Ps input was provided (98539.91276041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92226.58916041667Pa).
+run not succesfull
+starting 29 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1476503').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1476503 
+Warning: Ps input was provided (98725.5390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93864.8802625Pa).
+run not succesfull
+starting 30 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1528706').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1528706 
+Warning: Ps input was provided (98957.06510416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98042.38070416667Pa).
+run not succesfull
+starting 31 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1583120').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1583120 
+Warning: Ps input was provided (99557.94270833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93847.34550833332Pa).
+run not succesfull
+starting 32 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1635666').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1635666 
+Warning: Ps input was provided (99887.29296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98538.22176875Pa).
+run not succesfull
+starting 33 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1689323').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1689323 
+Warning: Ps input was provided (99869.75911458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98305.26031458333Pa).
+run not succesfull
+starting 34 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1741836').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1741836 
+Warning: Ps input was provided (100003.671875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98346.174275Pa).
+run not succesfull
+starting 35 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1795229').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1795229 
+Warning: Ps input was provided (99218.69921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91785.85841875Pa).
+run not succesfull
+starting 36 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1847312').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1847312 
+Warning: Ps input was provided (100100.86328125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96563.37728125Pa).
+run not succesfull
+starting 37 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1900220').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1900220 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
+  warnings.warn("Key '"+key+"' may not be implemented.")
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
+  self.L = zsl/self.zeta
+
+Warning: Ps input was provided (99535.61067708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98012.31387708333Pa).
+run not succesfull
+starting 38 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1954053').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1954053 
+Warning: Ps input was provided (98121.40234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86747.29594375Pa).
+run not succesfull
+starting 39 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2005224').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2005224 
+Warning: Ps input was provided (98072.640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (76835.952625Pa).
+run not succesfull
+starting 40 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2055085').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2055085 
+Warning: Ps input was provided (98379.67578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97726.32978125Pa).
+run not succesfull
+starting 41 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2109238').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2109238 
+Warning: Ps input was provided (98887.31901041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (71017.10901041667Pa).
+run not succesfull
+starting 42 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2158379').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2158379 
+Warning: Ps input was provided (99447.98372395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99166.63292395833Pa).
+run not succesfull
+starting 43 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2213439').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2213439 
+Warning: Ps input was provided (98189.0390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89527.2014625Pa).
+run not succesfull
+starting 44 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2265091').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2265091 
+Warning: Ps input was provided (97722.154296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (49966.681896875Pa).
+LCL calculation not converged!!
+RHlcl = -0.000035, zlcl=7775135.081090, theta=296.584021, q=0.004835
+LCL calculation not converged!!
+RHlcl = -0.000035, zlcl=7775135.081090, theta=296.584021, q=0.004835
+run not succesfull
+starting 45 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2312591').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2312591 
+Warning: Ps input was provided (99512.51041666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83614.42441666668Pa).
+run not succesfull
+starting 46 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2363001').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2363001 
+Warning: Ps input was provided (100458.30859375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100280.55139375Pa).
+run not succesfull
+starting 47 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2417249').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2417249 
+Warning: Ps input was provided (99404.5234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99218.5258375Pa).
+run not succesfull
+starting 48 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2472181').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2472181 
+Warning: Ps input was provided (98729.45247395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90835.14927395833Pa).
+run not succesfull
+starting 49 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2523821').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2523821 
+Warning: Ps input was provided (99818.05989583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97451.88789583332Pa).
+run not succesfull
+starting 50 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2576934').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2576934 
+Warning: Ps input was provided (99569.44205729167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97954.32365729166Pa).
+run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-2 b/class4gl/c4gl_sim.o4759326-2
new file mode 100644
index 0000000..6c90b67
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-2
@@ -0,0 +1,227 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+       Unnamed: 0          filename  latitude  longitude
+74560           0  74560_0_ini.yaml     40.15     -89.33
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (2)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 2
+Fetching initial/forcing records
+starting station chunk number: 2(size: 50 soundings)
+starting 1 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.4 
+Warning: Ps input was provided (99219.09375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87069.21255Pa).
+run not succesfull
+starting 2 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.51067').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.51067 
+Warning: Ps input was provided (99111.41341145833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96887.68261145832Pa).
+run not succesfull
+starting 3 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.104322').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.104322 
+Warning: Ps input was provided (99240.26953125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97729.92193125Pa).
+run not succesfull
+starting 4 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.158125').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.158125 
+Warning: Ps input was provided (100401.01627604167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94004.11147604167Pa).
+run not succesfull
+starting 5 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.210001').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.210001 
+Warning: Ps input was provided (101597.61653645833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98645.19893645833Pa).
+run not succesfull
+starting 6 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.263270').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.263270 
+Warning: Ps input was provided (100343.29231770833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99933.62671770834Pa).
+run not succesfull
+starting 7 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.317457').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.317457 
+Warning: Ps input was provided (99029.62109375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83470.56869375Pa).
+run not succesfull
+starting 8 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.368268').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.368268 
+Warning: Ps input was provided (100035.25390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99644.42350625Pa).
+run not succesfull
+starting 9 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.422781').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.422781 
+Warning: Ps input was provided (99447.54231770833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96552.80751770832Pa).
+run not succesfull
+starting 10 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.476050').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.476050 
+Warning: Ps input was provided (100032.83854166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92228.00254166668Pa).
+run not succesfull
+starting 11 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.528316').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.528316 
+Warning: Ps input was provided (99849.60807291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94654.62447291666Pa).
+run not succesfull
+starting 12 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.580931').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.580931 
+Warning: Ps input was provided (98739.5625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98586.5265Pa).
+run not succesfull
+starting 13 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.635773').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.635773 
+Warning: Ps input was provided (98331.50260416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (82513.46620416667Pa).
+run not succesfull
+starting 14 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.686190').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.686190 
+Warning: Ps input was provided (98651.11783854167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85587.72943854168Pa).
+run not succesfull
+starting 15 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.736979').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.736979 
+Warning: Ps input was provided (99153.72395833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98759.36195833333Pa).
+run not succesfull
+starting 16 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.791508').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.791508 
+Warning: Ps input was provided (98915.3125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95224.7905Pa).
+run not succesfull
+starting 17 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.844468').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.844468 
+Warning: Ps input was provided (99249.35872395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98826.74392395833Pa).
+run not succesfull
+starting 18 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.899058').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.899058 
+Warning: Ps input was provided (98736.89192708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87358.07672708333Pa).
+run not succesfull
+starting 19 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.950395').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.950395 
+Warning: Ps input was provided (98866.00325520833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89488.42805520832Pa).
+run not succesfull
+starting 20 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1001879').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1001879 
+Warning: Ps input was provided (98843.9375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94478.8799Pa).
+run not succesfull
+starting 21 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1054711').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1054711 
+Warning: Ps input was provided (99160.09635416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95736.79875416667Pa).
+run not succesfull
+starting 22 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1107591').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1107591 
+Warning: Ps input was provided (99728.29817708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98725.32377708332Pa).
+run not succesfull
+starting 23 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1161498').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1161498 
+Warning: Ps input was provided (99118.85416666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98987.00776666668Pa).
+run not succesfull
+starting 24 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1216461').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1216461 
+Warning: Ps input was provided (99008.45247395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97757.08887395833Pa).
+run not succesfull
+starting 25 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1270638').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1270638 
+Warning: Ps input was provided (99078.38802083333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93394.86642083334Pa).
+run not succesfull
+starting 26 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1323032').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1323032 
+Warning: Ps input was provided (99683.82486979167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91330.41366979167Pa).
+run not succesfull
+starting 27 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1374748').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1374748 
+Warning: Ps input was provided (99829.79166666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89606.98686666667Pa).
+run not succesfull
+starting 28 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1426449').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1426449 
+Warning: Ps input was provided (99290.98893229167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95774.69253229166Pa).
+run not succesfull
+starting 29 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1479418').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1479418 
+Warning: Ps input was provided (99007.84830729167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98137.89750729167Pa).
+run not succesfull
+starting 30 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1533453').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1533453 
+Warning: Ps input was provided (98365.1796875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91098.3240875Pa).
+run not succesfull
+starting 31 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1585360').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1585360 
+Warning: Ps input was provided (98840.94791666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98285.30951666668Pa).
+run not succesfull
+starting 32 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1638642').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1638642 
+Warning: Ps input was provided (99649.287109375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99080.699509375Pa).
+run not succesfull
+starting 33 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1693282').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1693282 
+Warning: Ps input was provided (99164.91276041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97164.84996041667Pa).
+run not succesfull
+starting 34 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1746969').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1746969 
+Warning: Ps input was provided (98023.13151041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89570.83551041667Pa).
+run not succesfull
+starting 35 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1798551').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1798551 
+Warning: Ps input was provided (99066.55598958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96748.64918958333Pa).
+run not succesfull
+starting 36 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1851912').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1851912 
+Warning: Ps input was provided (99497.17252604167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99370.03492604167Pa).
+run not succesfull
+starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
+  warnings.warn("Key '"+key+"' may not be implemented.")
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
+  self.L = zsl/self.zeta
+ 366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1906431').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1906431 
+Warning: Ps input was provided (99586.98111979167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94123.59591979167Pa).
+run not succesfull
+starting 38 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1957671').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1957671 
+Warning: Ps input was provided (99235.24479166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84176.50239166667Pa).
+run not succesfull
+starting 39 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2008390').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2008390 
+Warning: Ps input was provided (99279.59440104167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85735.90840104167Pa).
+run not succesfull
+starting 40 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2059270').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2059270 
+Warning: Ps input was provided (99774.5234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98984.6222375Pa).
+run not succesfull
+starting 41 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2113052').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2113052 
+Warning: Ps input was provided (99802.658203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98880.910603125Pa).
+run not succesfull
+starting 42 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2167306').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2167306 
+Warning: Ps input was provided (99349.02734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99158.32094375Pa).
+run not succesfull
+starting 43 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2222303').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2222303 
+Warning: Ps input was provided (98955.87239583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98241.31199583333Pa).
+run not succesfull
+starting 44 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2276984').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2276984 
+Warning: Ps input was provided (98735.572265625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86832.903065625Pa).
+run not succesfull
+starting 45 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2328277').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2328277 
+Warning: Ps input was provided (99026.02408854167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95189.52928854167Pa).
+run not succesfull
+starting 46 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2381273').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2381273 
+Warning: Ps input was provided (98960.017578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98404.379178125Pa).
+run not succesfull
+starting 47 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2435826').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2435826 
+Warning: Ps input was provided (98886.17122395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97028.54962395833Pa).
+run not succesfull
+starting 48 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2489579').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2489579 
+Warning: Ps input was provided (99096.7578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97814.7870125Pa).
+run not succesfull
+starting 49 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2543769').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2543769 
+Warning: Ps input was provided (98649.583984375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98244.627184375Pa).
+run not succesfull
+starting 50 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2597864').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2597864 
+Warning: Ps input was provided (98760.32682291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97915.09722291667Pa).
+run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-3 b/class4gl/c4gl_sim.o4759326-3
new file mode 100644
index 0000000..57c4168
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-3
@@ -0,0 +1,227 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
+getting a list of stations
+defining all_stations_select
+Selecting station by ID
+station numbers included in the whole batch (all chunks): [74560]
+       Unnamed: 0          filename  latitude  longitude
+74560           0  74560_0_ini.yaml     40.15     -89.33
+getting all records of the whole batch
+determining the station and its chunk number according global_chunk_number (3)
+chunks_current_station 8
+station =  [74560]
+station chunk number: 3
+Fetching initial/forcing records
+starting station chunk number: 3(size: 50 soundings)
+starting 1 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.4 
+Warning: Ps input was provided (99334.38498263889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98008.8577826389Pa).
+run not succesfull
+starting 2 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.54256').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.54256 
+Warning: Ps input was provided (98974.41840277778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93374.47800277777Pa).
+run not succesfull
+starting 3 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.106783').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.106783 
+Warning: Ps input was provided (98773.94704861111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97569.67144861112Pa).
+run not succesfull
+starting 4 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.161041').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.161041 
+Warning: Ps input was provided (98655.97786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93577.53706458333Pa).
+run not succesfull
+starting 5 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.213499').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.213499 
+Warning: Ps input was provided (99006.80989583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98821.98949583333Pa).
+run not succesfull
+starting 6 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.268606').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.268606 
+Warning: Ps input was provided (98851.09939236111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96326.00539236111Pa).
+run not succesfull
+starting 7 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.321942').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.321942 
+Warning: Ps input was provided (98580.45746527778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97994.21186527779Pa).
+run not succesfull
+starting 8 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.376318').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.376318 
+Warning: Ps input was provided (98692.86935763889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98522.17535763889Pa).
+run not succesfull
+starting 9 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.429745').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.429745 
+Warning: Ps input was provided (97911.22092013889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (80203.77852013889Pa).
+run not succesfull
+starting 10 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.480238').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.480238 
+Warning: Ps input was provided (98075.484375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97173.749175Pa).
+run not succesfull
+starting 11 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.534353').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.534353 
+Warning: Ps input was provided (98681.65972222222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96007.06132222222Pa).
+run not succesfull
+starting 12 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.587556').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.587556 
+Warning: Ps input was provided (99226.03385416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98950.56905416667Pa).
+run not succesfull
+starting 13 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.642546').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.642546 
+Warning: Ps input was provided (99068.84461805556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97983.46621805556Pa).
+run not succesfull
+starting 14 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.696672').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.696672 
+Warning: Ps input was provided (98850.8203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86857.5067125Pa).
+run not succesfull
+starting 15 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.748004').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.748004 
+Warning: Ps input was provided (98882.85069444444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (80400.81069444443Pa).
+run not succesfull
+starting 16 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.798522').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.798522 
+Warning: Ps input was provided (98935.29166666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84607.59046666668Pa).
+run not succesfull
+starting 17 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.849366').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.849366 
+Warning: Ps input was provided (99346.84895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87599.57015833333Pa).
+run not succesfull
+starting 18 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.900431').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.900431 
+Warning: Ps input was provided (99613.65928819444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99492.40768819443Pa).
+run not succesfull
+starting 19 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.955333').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.955333 
+Warning: Ps input was provided (99327.306640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96987.033040625Pa).
+run not succesfull
+starting 20 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1008606').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1008606 
+Warning: Ps input was provided (99090.17664930556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97185.46704930556Pa).
+run not succesfull
+starting 21 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1062216').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1062216 
+Warning: Ps input was provided (98801.10199652778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96923.46799652778Pa).
+run not succesfull
+starting 22 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1116153').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1116153 
+Warning: Ps input was provided (98502.55295138889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97001.6229513889Pa).
+run not succesfull
+starting 23 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1169750').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1169750 
+Warning: Ps input was provided (98958.93836805556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83577.64316805557Pa).
+run not succesfull
+starting 24 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1220569').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1220569 
+Warning: Ps input was provided (99661.33854166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98757.24894166666Pa).
+run not succesfull
+starting 25 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1274715').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1274715 
+Warning: Ps input was provided (99208.20182291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85307.82422291668Pa).
+run not succesfull
+starting 26 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1325734').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1325734 
+Warning: Ps input was provided (99667.44791666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93415.33871666667Pa).
+run not succesfull
+starting 27 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1378235').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1378235 
+Warning: Ps input was provided (99869.86935763889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98178.23295763889Pa).
+run not succesfull
+starting 28 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1430796').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1430796 
+Warning: Ps input was provided (99770.04644097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97653.44084097222Pa).
+run not succesfull
+starting 29 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1484617').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1484617 
+Warning: Ps input was provided (99515.08116319444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99389.12076319444Pa).
+run not succesfull
+starting 30 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1539402').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1539402 
+Warning: Ps input was provided (99617.5703125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92184.7295125Pa).
+run not succesfull
+starting 31 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1591441').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1591441 
+Warning: Ps input was provided (99509.58333333333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98973.95733333332Pa).
+run not succesfull
+starting 32 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1645914').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1645914 
+Warning: Ps input was provided (99475.33116319444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93772.97436319444Pa).
+run not succesfull
+starting 33 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1698613').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1698613 
+Warning: Ps input was provided (99442.09157986111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95909.31437986111Pa).
+run not succesfull
+starting 34 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1751712').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1751712 
+Warning: Ps input was provided (99330.49479166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97372.81119166667Pa).
+run not succesfull
+starting 35 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1805578').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1805578 
+Warning: Ps input was provided (98861.93576388889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98717.1401638889Pa).
+run not succesfull
+starting 36 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1859984').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1859984 
+Warning: Ps input was provided (98695.76085069444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94772.15325069443Pa).
+run not succesfull
+starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
+  warnings.warn("Key '"+key+"' may not be implemented.")
+/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
+  self.L = zsl/self.zeta
+ 366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1912886').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1912886 
+Warning: Ps input was provided (99132.26996527778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96674.27636527778Pa).
+run not succesfull
+starting 38 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1966234').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1966234 
+Warning: Ps input was provided (99497.02994791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98510.53634791667Pa).
+run not succesfull
+starting 39 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2020399').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2020399 
+Warning: Ps input was provided (99728.96788194444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85688.50348194444Pa).
+run not succesfull
+starting 40 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2071191').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2071191 
+Warning: Ps input was provided (99652.02690972222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88797.06570972221Pa).
+run not succesfull
+starting 41 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2122985').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2122985 
+Warning: Ps input was provided (99688.40147569444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98187.47147569444Pa).
+run not succesfull
+starting 42 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2176760').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2176760 
+Warning: Ps input was provided (99471.74782986111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98455.82422986111Pa).
+run not succesfull
+starting 43 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2230944').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2230944 
+Warning: Ps input was provided (99348.51519097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96937.60959097222Pa).
+run not succesfull
+starting 44 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2284495').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2284495 
+Warning: Ps input was provided (99275.59982638889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92649.1410263889Pa).
+run not succesfull
+starting 45 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2337035').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2337035 
+Warning: Ps input was provided (99152.58463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96658.09783541667Pa).
+run not succesfull
+starting 46 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2390194').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2390194 
+Warning: Ps input was provided (98868.35069444444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97950.13469444444Pa).
+run not succesfull
+starting 47 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2444461').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2444461 
+Warning: Ps input was provided (99050.93793402778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96213.88593402778Pa).
+run not succesfull
+starting 48 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2497851').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2497851 
+Warning: Ps input was provided (99175.79644097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97772.57404097222Pa).
+run not succesfull
+starting 49 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2551916').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2551916 
+Warning: Ps input was provided (99178.48090277778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98734.67650277779Pa).
+run not succesfull
+starting 50 out of 50 (station total:  366 )
+ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2606609').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2606609 
+Warning: Ps input was provided (98840.40798611111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94689.60078611111Pa).
+run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-4 b/class4gl/c4gl_sim.o4759326-4
new file mode 100644
index 0000000..1dfedff
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-4
@@ -0,0 +1,10 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-5 b/class4gl/c4gl_sim.o4759326-5
new file mode 100644
index 0000000..6d9339a
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-5
@@ -0,0 +1,10 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-6 b/class4gl/c4gl_sim.o4759326-6
new file mode 100644
index 0000000..d48e632
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-6
@@ -0,0 +1,10 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-7 b/class4gl/c4gl_sim.o4759326-7
new file mode 100644
index 0000000..5976742
--- /dev/null
+++ b/class4gl/c4gl_sim.o4759326-7
@@ -0,0 +1,10 @@
+C4GLJOB_c4gl_path_lib
+C4GLJOB_exec
+C4GLJOB_experiments
+C4GLJOB_path_experiments
+C4GLJOB_path_forcing
+C4GLJOB_runtime
+C4GLJOB_split_by
+C4GLJOB_station_id
+C4GLJOB_subset_forcing
+Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 1ed64f8..a3c8cc8 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -116,7 +116,7 @@ class class4gl_input(object):
     #    class4gl_input = type('class4gl_input', (model_input,gl_input,gl_dia), dict(c='c'))
     """
 
-    def __init__(self,set_pars_defaults=True,debug_level=None):
+    def __init__(self,set_pars_defaults=True,debug_level=logging.WARNING):
 
         """ set up logger (see: https://docs.python.org/2/howto/logging.html)
         """
@@ -125,26 +125,26 @@ def __init__(self,set_pars_defaults=True,debug_level=None):
         if debug_level is not None:
             self.logger.setLevel(debug_level)
 
-        # # create logger
-        # self.logger = logging.getLogger('class4gl_input')
-        # self.logger.setLevel(debug_level)
-
-        # # create console handler and set level to debug
-        # ch = logging.StreamHandler()
-        # ch.setLevel(debug_level)
-
-        # # create formatter
-        # formatter = logging.Formatter('%(asctime)s - \
-        #                                %(name)s - \
-        #                                %(levelname)s - \
-        #                                %(message)s')
-        # add formatter to ch
-        # ch.setFormatter(formatter)
+            # # create logger
+            # #self.logger = logging.getLogger('class4gl_input')
+            # #self.logger.setLevel(debug_level)
+
+            # # create console handler and set level to debug
+            # ch = logging.StreamHandler()
+            # ch.setLevel(debug_level)
+
+            # # create formatter
+            # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+            # # add formatter to ch
+            # ch.setFormatter(formatter)
      
-        # # add ch to logger
-        # self.logger.addHandler(ch)
+            # # add ch to logger
+            # self.logger.addHandler(ch)
+            # # print("TESTTESTSETSTETETS")
+            # # self.logger.warning("testsetsetsttets")
+            # #stop
 
-        # """ end set up logger """
+            # # """ end set up logger """
 
 
 
@@ -1321,6 +1321,7 @@ def check_source(self,source,check_only_sections=None):
                     # self.logger.info('Expected key '+datakey+\
                     #                  ' is not in parameter input')                        
                     source_ok = False                                           
+                    print(datakey)
                 elif (checkdata[datakey] is None) or \
                      (pd.isnull(checkdata[datakey]) is True):                    
         
@@ -1328,6 +1329,7 @@ def check_source(self,source,check_only_sections=None):
                     #                  '" is invalid: ('+ \
                     # str(self.__dict__[section].__dict__[datakey])+')')         
                     source_ok = False
+                    self.logger.warning(datakey+' is invalid: '+ str(checkdata[datakey]))
 
         return source_ok
 
@@ -1346,13 +1348,13 @@ def check_source_globaldata(self):
         # we only allow non-polar stations
         if not (self.pars.lat <= 60.):
             source_globaldata_ok = False
-            self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+            self.logger.warning('cveg  is invalid: ('+str(self.pars.cveg)+')')
         
         # check lat and lon
         if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)):
             source_globaldata_ok = False
-            self.logger.info('lat  is invalid: ('+str(self.pars.lat)+')')
-            self.logger.info('or lon  is invalid: ('+str(self.pars.lon)+')')
+            self.logger.warning('lat  is invalid: ('+str(self.pars.lat)+')')
+            self.logger.warning('or lon  is invalid: ('+str(self.pars.lon)+')')
         else:
             # we only check the ground parameter data (pars section). The 
             # profile data (air_ap section) are supposed to be valid in any 
diff --git a/class4gl/interface/interface_cloudiness.py b/class4gl/interface/interface_cloudiness.py
index 7dded65..82afd1e 100644
--- a/class4gl/interface/interface_cloudiness.py
+++ b/class4gl/interface/interface_cloudiness.py
@@ -127,7 +127,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False
                     )
-    '''
+    
+'''
 if args.make_figures:
     # the lines below activate TaylorPlots but it is disabled for now
     fig = plt.figure(figsize=(10,7))   #width,height
@@ -347,7 +348,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
         sns.set_style('whitegrid')
         #sns.set()
-        fig = pl.figure(figsize=(7,5))
+        fig = pl.figure(figsize=(11,7))
         i = 1
         axes = {}
         data_all = pd.DataFrame()
@@ -493,6 +494,17 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                     ax.set_yticklabels([])
                     ax.set_ylabel('')
 
+                if varkey == 'q':
+                    ticks = ticker.FuncFormatter(lambda x, pos:
+                                                 '{0:g}'.format(x*1000.))
+                    #ax.xaxis.set_major_formatter(ticks)
+                    ax.yaxis.set_major_formatter(ticks)
+
+                    ax.set_ylabel(latex['d'+varkey+'dt']+' ['+r'$10^{-3} \times $'+units['d'+varkey+'dt']+']')        
+                else:
+                    ax.set_ylabel(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')        
+
+
                 for j,artist in enumerate(ax.artists):
                     if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
                         # Set the linecolor on the artist to the facecolor, and set the facecolor to None
@@ -531,7 +543,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         fig.tight_layout()
         fig.subplots_adjust( bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
         if args.figure_filename_2 is not None:
-            fig.savefig(args.figure_filename_2,dpi=250); print("Image file written to:", args.figure_filename_2)
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
         fig.show()
 
 
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index 0054c20..9a71bf8 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -1,4 +1,4 @@
-'''
+
 import numpy as np
 
 import pandas as pd
@@ -41,7 +41,6 @@
 # importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
 
 
-
 if args.experiments_labels is None:
     keylabels = args.experiments.strip().split(' ')
 else:
@@ -128,7 +127,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False
                     )
-'''
+
+
 key = args.experiments.strip(' ').split(' ')[0]
 xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
 koeppenlookuptable = pd.DataFrame()
@@ -195,7 +195,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
 if args.make_figures:
     # the lines below activate TaylorPlots but it is disabled for now
-    fig = plt.figure(figsize=(7,5))   #width,height
+    fig = plt.figure(figsize=(11,7))   #width,height
     i = 1                                                                           
     axes = {}         
     axes_taylor = {}         
@@ -401,12 +401,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                            'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] 
 
 
-        ann = axes[varkey].annotate(annotate_text, xy=(0.05, .95 ), xycoords='axes fraction',fontsize=7,
+        ann = axes[varkey].annotate(annotate_text, xy=(0.05, .95 ), xycoords='axes fraction',fontsize=8,
        horizontalalignment='left', verticalalignment='top' 
         )
 
 
-
         axes[varkey].set_xlabel('observations')     
         if i==0:                                    
             axes[varkey].set_ylabel('model')                                            
@@ -471,7 +470,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
     
     if args.figure_filename is not None:
-        fig.savefig(args.figure_filename,dpi=250); print("Image file written to:",args.figure_filename)
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
     fig.show()  
 
     if bool(args.show_control_parameters):
@@ -501,7 +500,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
         sns.set_style('whitegrid')
         #sns.set()
-        fig = pl.figure(figsize=(7,5))
+        fig = pl.figure(figsize=(11,7))
         i = 1
         axes = {}
         data_all = pd.DataFrame()
@@ -617,6 +616,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             data_all = data_all[select_data]
             #print('hello12')
             data_input = data_input[select_data.values]
+
+            data_input = data_input[data_all.KGCname.isin(list(koeppenlookuptable.KGCID))]
+            data_all = data_all[data_all.KGCname.isin(list(koeppenlookuptable.KGCID))]
             #print('hello13')
             #print(data_input.shape)
             #print(data_all.shape)
@@ -638,7 +640,24 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                  ax.get_legend().set_visible(False)
             #     plt.legend('off')
             if i >= 3:
-                ax.set_xticklabels(labels=ax.get_xticklabels())
+                idx = 0
+                for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+
+                    ax.annotate(koeppen.KGCID,
+                                xy=((idx+.5)/len(koeppenlookuptable),-0.00),
+                                color=koeppen.textcolor, 
+                                xycoords='axes fraction',
+                                weight='bold',
+                                fontsize=8.,
+                                horizontalalignment='center',
+                                verticalalignment='center' ,
+                                bbox={'edgecolor':'black',
+                                      'boxstyle':'circle',
+                                      'fc':koeppen.color,
+                                      'alpha':1.0}
+                               )
+                    idx+=1
+                ax.set_xticklabels([])#labels=ax.get_xticklabels())
                 ax.set_xlabel('Köppen climate class')
             else:
                 ax.set_xticklabels([])
@@ -646,6 +665,18 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
             # ax.set_yticklabels([])
             # ax.set_ylabel('')
+            if varkey == 'q':
+                ticks = ticker.FuncFormatter(lambda x, pos:
+                                             '{0:g}'.format(x*1000.))
+                #ax.xaxis.set_major_formatter(ticks)
+                ax.yaxis.set_major_formatter(ticks)
+
+                ax.set_ylabel(latex['d'+varkey+'dt']+' ['+r'$10^{-3} \times $'+units['d'+varkey+'dt']+']')        
+            else:
+                ax.set_ylabel(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')        
+
+
+
 
             for j,artist in enumerate(ax.artists):
                 if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
@@ -679,13 +710,14 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
 
 
+
             #ax.grid()
             #sns.despine(offset=10, trim=True)
             i +=1
         fig.tight_layout()
         fig.subplots_adjust( bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
         if args.figure_filename_2 is not None:
-            fig.savefig(args.figure_filename_2,dpi=250); print("Image file written to:", args.figure_filename_2)
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
         fig.show()
 
 
diff --git a/class4gl/model.py b/class4gl/model.py
index 6a9d2c3..3993200 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -376,16 +376,23 @@ def init(self):
                                  layer height needs to be equal to the second \
                                  and third \
                                  level of the vertical profile input!")
-            # initialize q from its profile when available
-            p_old = self.Ps
-            p_new = self.air_ap.p[indexh[0][0]]
-            
-            if ((p_old is not None) & (p_old != p_new)):
-                print("Warning: Ps input was provided ("+str(p_old)+\
-                    "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
-                    +str(p_new)+"Pa).")
-                                    
-            self.Ps = p_new
+
+            # # initialize q from its profile when available
+            # p_old = self.Ps
+            # p_new = self.air_ap.p[indexh[0][0]]
+            # print(indexh)
+            # #stop
+            # 
+            # if ((p_old is not None) & (p_old != p_new)):
+            #     print("Warning: Ps input was provided ("+str(p_old)+\
+            #         "Pa), but it is now overwritten by the first level (index 0) of p_pro which is different ("\
+            #         +str(p_new)+"Pa).")
+            #                         
+            # self.Ps = p_new
+
+
+
+
             # these variables/namings are more convenient to work with in the code
             # we will update the original variables afterwards
             #self.air_ap['q'] = self.air_ap.QABS/1000.
@@ -1534,7 +1541,11 @@ def jarvis_stewart(self):
         f3 = 1. / np.exp(- self.gD * (self.esat - self.e) / 100.)
         f4 = 1./ (1. - 0.0016 * (298.0-self.theta)**2.)
   
+        #if np.isnan(self.LAI):
+
         self.rs = self.rsmin / self.LAI * f1 * f2 * f3 * f4
+        # print(self.rs,self.LAI,f1,f2,f3,f4)
+        # stop
 
     def factorial(self,k):
         factorial = 1
@@ -1641,8 +1652,8 @@ def run_land_surface(self):
           self.ra = (self.Cs * ueff)**-1.
         else:
           self.ra = ueff / max(1.e-3, self.ustar)**2.
+        # print(self.ra,self.Cs,ueff)
 
-        #print('ra',self.ra,self.ustar,ueff)
 
         # first calculate essential thermodynamic variables
         self.esat    = esat(self.theta)
@@ -1677,9 +1688,10 @@ def run_land_surface(self):
             / (self.rho * self.cp / self.ra + self.cveg * (1. - self.cliq) * self.rho * self.Lv / (self.ra + self.rs) * self.dqsatdT \
             + (1. - self.cveg) * self.rho * self.Lv / (self.ra + self.rssoil) * self.dqsatdT + self.cveg * self.cliq * self.rho * self.Lv / self.ra * self.dqsatdT + self.Lambda)
 
-        #print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
-        #print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
-        #print('Ts',self.rs)
+        # print('Ts',self.Ts,self.Q,self.rho,self.cp,self.ra,self.theta)
+        # print('Ts',self.cveg, self.cliq,self.Lv,self.Lambda,self.dqsatdT)
+        # print('Ts',self.rs)
+        #print(self.air_ap.p)
 
         esatsurf      = esat(self.Ts)
         self.qsatsurf = qsat(self.Ts, self.Ps)
@@ -1692,7 +1704,11 @@ def run_land_surface(self):
   
         self.LE     = self.LEsoil + self.LEveg + self.LEliq
         self.H      = self.rho * self.cp / self.ra * (self.Ts - self.theta)
-        #print('H',self.ra,self.Ts,self.theta)
+
+        # print('ra',self.ra,self.ustar,ueff)
+        # print(self.Cs)
+        # print('H',self.ra,self.Ts,self.theta)
+
         self.G      = self.Lambda * (self.Ts - self.Tsoil)
         self.LEpot  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv)
         self.LEref  = (self.dqsatdT * (self.Q - self.G) + self.rho * self.cp / self.ra * (self.qsat - self.q)) / (self.dqsatdT + self.cp / self.Lv * (1. + self.rsmin / self.LAI / self.ra))
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 114d23a..7b02734 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -272,8 +272,14 @@
 
                     if args.error_handling == 'dump_always':
                         try:
-                            c4gl.run()
-                            print('run succesfull')
+                            if c4gli_morning.check_source_globaldata():
+                            
+
+                                c4gl.run()
+                                print('run succesfull')
+                            else:
+                                print('global data not ok')
+                                raise ValueError('global data not ok')
                         except:
                             print('run not succesfull')
                         onerun = True
@@ -289,9 +295,13 @@
                     # in this case, only the file will dumped if the runs were
                     # successful
                     elif args.error_handling == 'dump_on_success':
-                        try:
-                            c4gl.run()
-                            print('run succesfull')
+                       try:
+                            if c4gli_morning.check_source_globaldata():
+                                c4gl.run()
+                                print('run succesfull')
+                            else:
+                                print('global data not ok')
+                                raise ValueError('global data not ok')
                             c4gli_morning.dump(file_ini)
                             
                             
@@ -300,8 +310,8 @@
                                       #timeseries_only=timeseries_only,\
                                      )
                             onerun = True
-                        except:
-                            print('run not succesfull')
+                       except:
+                           print('run not succesfull')
                     isim += 1
 
 

From b82d0021f067ec7c678868d6378159620cd13ba2 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 18 Sep 2018 15:09:06 +0200
Subject: [PATCH 074/129] Fix Ps input; implement warning message in case of
 invalid input

---
 class4gl/c4gl_sim.o4759326-0 | 227 ----------------------------------
 class4gl/c4gl_sim.o4759326-1 | 231 -----------------------------------
 class4gl/c4gl_sim.o4759326-2 | 227 ----------------------------------
 class4gl/c4gl_sim.o4759326-3 | 227 ----------------------------------
 class4gl/c4gl_sim.o4759326-4 |  10 --
 class4gl/c4gl_sim.o4759326-5 |  10 --
 class4gl/c4gl_sim.o4759326-6 |  10 --
 class4gl/c4gl_sim.o4759326-7 |  10 --
 8 files changed, 952 deletions(-)
 delete mode 100644 class4gl/c4gl_sim.o4759326-0
 delete mode 100644 class4gl/c4gl_sim.o4759326-1
 delete mode 100644 class4gl/c4gl_sim.o4759326-2
 delete mode 100644 class4gl/c4gl_sim.o4759326-3
 delete mode 100644 class4gl/c4gl_sim.o4759326-4
 delete mode 100644 class4gl/c4gl_sim.o4759326-5
 delete mode 100644 class4gl/c4gl_sim.o4759326-6
 delete mode 100644 class4gl/c4gl_sim.o4759326-7

diff --git a/class4gl/c4gl_sim.o4759326-0 b/class4gl/c4gl_sim.o4759326-0
deleted file mode 100644
index f8ba869..0000000
--- a/class4gl/c4gl_sim.o4759326-0
+++ /dev/null
@@ -1,227 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-       Unnamed: 0          filename  latitude  longitude
-74560           0  74560_0_ini.yaml     40.15     -89.33
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (0)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 0
-Fetching initial/forcing records
-starting station chunk number: 0(size: 50 soundings)
-starting 1 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.4 
-Warning: Ps input was provided (99384.96484375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91367.05564375Pa).
-run not succesfull
-starting 2 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.51396').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.51396 
-Warning: Ps input was provided (100124.83463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89846.70143541667Pa).
-run not succesfull
-starting 3 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.102622').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.102622 
-Warning: Ps input was provided (99980.35416666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96094.41696666667Pa).
-run not succesfull
-starting 4 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.155319').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.155319 
-Warning: Ps input was provided (101228.97786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94990.99506458333Pa).
-run not succesfull
-starting 5 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.207321').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.207321 
-Warning: Ps input was provided (100561.96223958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99736.74503958333Pa).
-run not succesfull
-starting 6 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.261499').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.261499 
-Warning: Ps input was provided (98617.18880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89281.99280208333Pa).
-run not succesfull
-starting 7 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.312096').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.312096 
-Warning: Ps input was provided (99861.65104166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91664.80744166666Pa).
-run not succesfull
-starting 8 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.363706').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.363706 
-Warning: Ps input was provided (99783.63932291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96377.99972291668Pa).
-run not succesfull
-starting 9 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.416568').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.416568 
-Warning: Ps input was provided (100101.34765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98246.08045625Pa).
-run not succesfull
-starting 10 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.470247').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.470247 
-Warning: Ps input was provided (101217.34895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100305.01895833333Pa).
-run not succesfull
-starting 11 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.524351').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.524351 
-Warning: Ps input was provided (101140.765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99524.470025Pa).
-run not succesfull
-starting 12 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.577126').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.577126 
-Warning: Ps input was provided (99971.93880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94347.27720208332Pa).
-run not succesfull
-starting 13 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.629651').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.629651 
-Warning: Ps input was provided (98563.27864583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93396.54784583332Pa).
-run not succesfull
-starting 14 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.682101').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.682101 
-Warning: Ps input was provided (98789.33203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97675.70083125Pa).
-run not succesfull
-starting 15 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.734044').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.734044 
-Warning: Ps input was provided (99563.50390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95163.13030625Pa).
-run not succesfull
-starting 16 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.786398').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.786398 
-Warning: Ps input was provided (100669.421875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95027.102275Pa).
-run not succesfull
-starting 17 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.838710').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.838710 
-Warning: Ps input was provided (100948.26692708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96705.63812708332Pa).
-run not succesfull
-starting 18 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.891166').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.891166 
-Warning: Ps input was provided (100156.60026041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92800.27746041668Pa).
-run not succesfull
-starting 19 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.943327').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.943327 
-Warning: Ps input was provided (99349.36979166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93320.92859166667Pa).
-run not succesfull
-starting 20 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.995731').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.995731 
-Warning: Ps input was provided (99228.83203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98584.90363125Pa).
-run not succesfull
-starting 21 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1050024').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1050024 
-Warning: Ps input was provided (99770.28515625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96156.28115625Pa).
-run not succesfull
-starting 22 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1102851').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1102851 
-Warning: Ps input was provided (99380.921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94221.254275Pa).
-run not succesfull
-starting 23 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1154781').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1154781 
-Warning: Ps input was provided (99095.53255208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98209.10095208333Pa).
-run not succesfull
-starting 24 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1208836').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1208836 
-Warning: Ps input was provided (98733.72786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98616.00786458333Pa).
-run not succesfull
-starting 25 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1263823').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1263823 
-Warning: Ps input was provided (97895.48697916667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97777.76697916667Pa).
-run not succesfull
-starting 26 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1318780').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1318780 
-Warning: Ps input was provided (98477.62369791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (78494.65369791667Pa).
-run not succesfull
-starting 27 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1368688').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1368688 
-Warning: Ps input was provided (98800.078125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93290.782125Pa).
-run not succesfull
-starting 28 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1420916').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1420916 
-Warning: Ps input was provided (99248.63541666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90668.02461666668Pa).
-run not succesfull
-starting 29 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1472567').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1472567 
-Warning: Ps input was provided (100427.43880208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96005.87560208332Pa).
-run not succesfull
-starting 30 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1524967').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1524967 
-Warning: Ps input was provided (101123.49348958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90677.02068958333Pa).
-run not succesfull
-starting 31 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1576324').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1576324 
-Warning: Ps input was provided (99848.95182291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95586.31062291667Pa).
-run not succesfull
-starting 32 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1629313').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1629313 
-Warning: Ps input was provided (97864.29296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87103.50776875Pa).
-run not succesfull
-starting 33 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1680371').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1680371 
-Warning: Ps input was provided (99503.52864583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87630.28944583333Pa).
-run not succesfull
-starting 34 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1731186').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1731186 
-Warning: Ps input was provided (100169.15364583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86911.52724583333Pa).
-run not succesfull
-starting 35 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1782240').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1782240 
-Warning: Ps input was provided (100522.32291666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88935.14331666667Pa).
-run not succesfull
-starting 36 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1833412').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1833412 
-Warning: Ps input was provided (100123.75260416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96482.67300416667Pa).
-run not succesfull
-starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
-  warnings.warn("Key '"+key+"' may not be implemented.")
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
-  self.L = zsl/self.zeta
- 366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1886384').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1886384 
-Warning: Ps input was provided (99870.125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89217.6422Pa).
-run not succesfull
-starting 38 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1937565').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1937565 
-Warning: Ps input was provided (98704.2734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86112.9422375Pa).
-run not succesfull
-starting 39 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1988408').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1988408 
-Warning: Ps input was provided (99314.90885416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (81565.08725416668Pa).
-run not succesfull
-starting 40 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2038456').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2038456 
-Warning: Ps input was provided (99467.91145833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84809.41705833333Pa).
-run not succesfull
-starting 41 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2089432').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2089432 
-Warning: Ps input was provided (97532.21354166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91573.22714166668Pa).
-run not succesfull
-starting 42 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2141348').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2141348 
-Warning: Ps input was provided (100356.95442708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92337.86802708333Pa).
-run not succesfull
-starting 43 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2192964').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2192964 
-Warning: Ps input was provided (101625.39453125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101236.91853125Pa).
-run not succesfull
-starting 44 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2247428').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2247428 
-Warning: Ps input was provided (101831.296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101700.627675Pa).
-run not succesfull
-starting 45 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2302316').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2302316 
-Warning: Ps input was provided (101392.83463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (101039.67463541667Pa).
-run not succesfull
-starting 46 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2356848').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2356848 
-Warning: Ps input was provided (100442.67317708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99184.24637708333Pa).
-run not succesfull
-starting 47 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2411058').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2411058 
-Warning: Ps input was provided (99915.86848958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88082.65408958333Pa).
-run not succesfull
-starting 48 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2462275').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2462275 
-Warning: Ps input was provided (99762.47265625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97849.52265625Pa).
-run not succesfull
-starting 49 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2516002').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2516002 
-Warning: Ps input was provided (99270.4765625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93171.4033625Pa).
-run not succesfull
-starting 50 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2568449').to_json" > /local/4759326[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2568449 
-Warning: Ps input was provided (98692.77994791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97569.73114791667Pa).
-run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-1 b/class4gl/c4gl_sim.o4759326-1
deleted file mode 100644
index 87274a1..0000000
--- a/class4gl/c4gl_sim.o4759326-1
+++ /dev/null
@@ -1,231 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-       Unnamed: 0          filename  latitude  longitude
-74560           0  74560_0_ini.yaml     40.15     -89.33
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (1)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 1
-Fetching initial/forcing records
-starting station chunk number: 1(size: 50 soundings)
-starting 1 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.4 
-Warning: Ps input was provided (99189.77604166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97712.39004166667Pa).
-run not succesfull
-starting 2 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.53735').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.53735 
-Warning: Ps input was provided (98917.74609375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96435.03129375Pa).
-run not succesfull
-starting 3 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.107051').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.107051 
-Warning: Ps input was provided (97149.79296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93409.82856875Pa).
-run not succesfull
-starting 4 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.159809').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.159809 
-Warning: Ps input was provided (97484.921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (82631.012275Pa).
-run not succesfull
-starting 5 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.210583').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.210583 
-Warning: Ps input was provided (99121.34895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93010.50375833333Pa).
-run not succesfull
-starting 6 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.262669').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.262669 
-Warning: Ps input was provided (99433.81119791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97557.35439791667Pa).
-run not succesfull
-starting 7 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.315998').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.315998 
-Warning: Ps input was provided (100124.39713541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95708.71993541667Pa).
-run not succesfull
-starting 8 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.368733').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.368733 
-Warning: Ps input was provided (99289.96223958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96497.64383958332Pa).
-run not succesfull
-starting 9 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.422182').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.422182 
-Warning: Ps input was provided (99191.98567708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90040.43287708332Pa).
-run not succesfull
-starting 10 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.473690').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.473690 
-Warning: Ps input was provided (99402.77604166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90833.93724166667Pa).
-run not succesfull
-starting 11 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.525232').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.525232 
-Warning: Ps input was provided (99445.84114583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96549.92914583333Pa).
-run not succesfull
-starting 12 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.578431').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.578431 
-Warning: Ps input was provided (99561.35026041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99443.63026041667Pa).
-run not succesfull
-starting 13 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.633046').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.633046 
-Warning: Ps input was provided (98115.078125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97646.552525Pa).
-run not succesfull
-starting 14 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.686127').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.686127 
-Warning: Ps input was provided (98767.19401041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93118.98841041667Pa).
-run not succesfull
-starting 15 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.738483').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.738483 
-Warning: Ps input was provided (99317.18489583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94815.57209583333Pa).
-run not succesfull
-starting 16 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.790644').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.790644 
-Warning: Ps input was provided (100321.02083333333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100132.66883333333Pa).
-run not succesfull
-starting 17 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.844792').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.844792 
-Warning: Ps input was provided (100697.07291666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100287.40731666668Pa).
-run not succesfull
-starting 18 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.897654').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.897654 
-Warning: Ps input was provided (100432.74088541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99341.47648541667Pa).
-run not succesfull
-starting 19 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.950524').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.950524 
-Warning: Ps input was provided (100163.28125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99861.91805Pa).
-run not succesfull
-starting 20 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1003819').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1003819 
-Warning: Ps input was provided (100273.30598958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98120.20718958332Pa).
-run not succesfull
-starting 21 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1057080').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1057080 
-Warning: Ps input was provided (99064.4140625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91138.3264625Pa).
-run not succesfull
-starting 22 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1109138').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1109138 
-Warning: Ps input was provided (99177.81380208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96793.98380208333Pa).
-run not succesfull
-starting 23 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1162278').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1162278 
-Warning: Ps input was provided (99862.77734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99573.18614375Pa).
-run not succesfull
-starting 24 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1216404').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1216404 
-Warning: Ps input was provided (98215.37630208333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88103.22830208333Pa).
-run not succesfull
-starting 25 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1268030').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1268030 
-Warning: Ps input was provided (99520.6640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98864.9636625Pa).
-run not succesfull
-starting 26 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1320921').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1320921 
-Warning: Ps input was provided (97935.72135416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90289.80735416667Pa).
-run not succesfull
-starting 27 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1372574').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1372574 
-Warning: Ps input was provided (98352.68619791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92378.39619791668Pa).
-run not succesfull
-starting 28 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1424704').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1424704 
-Warning: Ps input was provided (98539.91276041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92226.58916041667Pa).
-run not succesfull
-starting 29 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1476503').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1476503 
-Warning: Ps input was provided (98725.5390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93864.8802625Pa).
-run not succesfull
-starting 30 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1528706').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1528706 
-Warning: Ps input was provided (98957.06510416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98042.38070416667Pa).
-run not succesfull
-starting 31 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1583120').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1583120 
-Warning: Ps input was provided (99557.94270833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93847.34550833332Pa).
-run not succesfull
-starting 32 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1635666').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1635666 
-Warning: Ps input was provided (99887.29296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98538.22176875Pa).
-run not succesfull
-starting 33 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1689323').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1689323 
-Warning: Ps input was provided (99869.75911458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98305.26031458333Pa).
-run not succesfull
-starting 34 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1741836').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1741836 
-Warning: Ps input was provided (100003.671875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98346.174275Pa).
-run not succesfull
-starting 35 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1795229').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1795229 
-Warning: Ps input was provided (99218.69921875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91785.85841875Pa).
-run not succesfull
-starting 36 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1847312').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1847312 
-Warning: Ps input was provided (100100.86328125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96563.37728125Pa).
-run not succesfull
-starting 37 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1900220').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1900220 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
-  warnings.warn("Key '"+key+"' may not be implemented.")
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
-  self.L = zsl/self.zeta
-
-Warning: Ps input was provided (99535.61067708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98012.31387708333Pa).
-run not succesfull
-starting 38 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1954053').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1954053 
-Warning: Ps input was provided (98121.40234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86747.29594375Pa).
-run not succesfull
-starting 39 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2005224').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2005224 
-Warning: Ps input was provided (98072.640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (76835.952625Pa).
-run not succesfull
-starting 40 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2055085').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2055085 
-Warning: Ps input was provided (98379.67578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97726.32978125Pa).
-run not succesfull
-starting 41 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2109238').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2109238 
-Warning: Ps input was provided (98887.31901041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (71017.10901041667Pa).
-run not succesfull
-starting 42 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2158379').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2158379 
-Warning: Ps input was provided (99447.98372395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99166.63292395833Pa).
-run not succesfull
-starting 43 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2213439').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2213439 
-Warning: Ps input was provided (98189.0390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89527.2014625Pa).
-run not succesfull
-starting 44 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2265091').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2265091 
-Warning: Ps input was provided (97722.154296875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (49966.681896875Pa).
-LCL calculation not converged!!
-RHlcl = -0.000035, zlcl=7775135.081090, theta=296.584021, q=0.004835
-LCL calculation not converged!!
-RHlcl = -0.000035, zlcl=7775135.081090, theta=296.584021, q=0.004835
-run not succesfull
-starting 45 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2312591').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2312591 
-Warning: Ps input was provided (99512.51041666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83614.42441666668Pa).
-run not succesfull
-starting 46 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2363001').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2363001 
-Warning: Ps input was provided (100458.30859375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (100280.55139375Pa).
-run not succesfull
-starting 47 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2417249').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2417249 
-Warning: Ps input was provided (99404.5234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99218.5258375Pa).
-run not succesfull
-starting 48 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2472181').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2472181 
-Warning: Ps input was provided (98729.45247395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (90835.14927395833Pa).
-run not succesfull
-starting 49 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2523821').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2523821 
-Warning: Ps input was provided (99818.05989583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97451.88789583332Pa).
-run not succesfull
-starting 50 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2576934').to_json" > /local/4759326[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2576934 
-Warning: Ps input was provided (99569.44205729167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97954.32365729166Pa).
-run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-2 b/class4gl/c4gl_sim.o4759326-2
deleted file mode 100644
index 6c90b67..0000000
--- a/class4gl/c4gl_sim.o4759326-2
+++ /dev/null
@@ -1,227 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-       Unnamed: 0          filename  latitude  longitude
-74560           0  74560_0_ini.yaml     40.15     -89.33
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (2)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 2
-Fetching initial/forcing records
-starting station chunk number: 2(size: 50 soundings)
-starting 1 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.4 
-Warning: Ps input was provided (99219.09375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87069.21255Pa).
-run not succesfull
-starting 2 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.51067').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.51067 
-Warning: Ps input was provided (99111.41341145833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96887.68261145832Pa).
-run not succesfull
-starting 3 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.104322').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.104322 
-Warning: Ps input was provided (99240.26953125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97729.92193125Pa).
-run not succesfull
-starting 4 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.158125').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.158125 
-Warning: Ps input was provided (100401.01627604167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94004.11147604167Pa).
-run not succesfull
-starting 5 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.210001').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.210001 
-Warning: Ps input was provided (101597.61653645833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98645.19893645833Pa).
-run not succesfull
-starting 6 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.263270').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.263270 
-Warning: Ps input was provided (100343.29231770833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99933.62671770834Pa).
-run not succesfull
-starting 7 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.317457').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.317457 
-Warning: Ps input was provided (99029.62109375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83470.56869375Pa).
-run not succesfull
-starting 8 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.368268').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.368268 
-Warning: Ps input was provided (100035.25390625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99644.42350625Pa).
-run not succesfull
-starting 9 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.422781').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.422781 
-Warning: Ps input was provided (99447.54231770833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96552.80751770832Pa).
-run not succesfull
-starting 10 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.476050').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.476050 
-Warning: Ps input was provided (100032.83854166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92228.00254166668Pa).
-run not succesfull
-starting 11 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.528316').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.528316 
-Warning: Ps input was provided (99849.60807291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94654.62447291666Pa).
-run not succesfull
-starting 12 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.580931').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.580931 
-Warning: Ps input was provided (98739.5625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98586.5265Pa).
-run not succesfull
-starting 13 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.635773').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.635773 
-Warning: Ps input was provided (98331.50260416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (82513.46620416667Pa).
-run not succesfull
-starting 14 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.686190').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.686190 
-Warning: Ps input was provided (98651.11783854167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85587.72943854168Pa).
-run not succesfull
-starting 15 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.736979').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.736979 
-Warning: Ps input was provided (99153.72395833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98759.36195833333Pa).
-run not succesfull
-starting 16 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.791508').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.791508 
-Warning: Ps input was provided (98915.3125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95224.7905Pa).
-run not succesfull
-starting 17 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.844468').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.844468 
-Warning: Ps input was provided (99249.35872395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98826.74392395833Pa).
-run not succesfull
-starting 18 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.899058').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.899058 
-Warning: Ps input was provided (98736.89192708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87358.07672708333Pa).
-run not succesfull
-starting 19 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.950395').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.950395 
-Warning: Ps input was provided (98866.00325520833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89488.42805520832Pa).
-run not succesfull
-starting 20 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1001879').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1001879 
-Warning: Ps input was provided (98843.9375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94478.8799Pa).
-run not succesfull
-starting 21 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1054711').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1054711 
-Warning: Ps input was provided (99160.09635416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95736.79875416667Pa).
-run not succesfull
-starting 22 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1107591').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1107591 
-Warning: Ps input was provided (99728.29817708333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98725.32377708332Pa).
-run not succesfull
-starting 23 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1161498').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1161498 
-Warning: Ps input was provided (99118.85416666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98987.00776666668Pa).
-run not succesfull
-starting 24 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1216461').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1216461 
-Warning: Ps input was provided (99008.45247395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97757.08887395833Pa).
-run not succesfull
-starting 25 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1270638').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1270638 
-Warning: Ps input was provided (99078.38802083333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93394.86642083334Pa).
-run not succesfull
-starting 26 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1323032').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1323032 
-Warning: Ps input was provided (99683.82486979167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91330.41366979167Pa).
-run not succesfull
-starting 27 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1374748').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1374748 
-Warning: Ps input was provided (99829.79166666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89606.98686666667Pa).
-run not succesfull
-starting 28 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1426449').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1426449 
-Warning: Ps input was provided (99290.98893229167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95774.69253229166Pa).
-run not succesfull
-starting 29 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1479418').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1479418 
-Warning: Ps input was provided (99007.84830729167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98137.89750729167Pa).
-run not succesfull
-starting 30 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1533453').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1533453 
-Warning: Ps input was provided (98365.1796875Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (91098.3240875Pa).
-run not succesfull
-starting 31 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1585360').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1585360 
-Warning: Ps input was provided (98840.94791666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98285.30951666668Pa).
-run not succesfull
-starting 32 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1638642').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1638642 
-Warning: Ps input was provided (99649.287109375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99080.699509375Pa).
-run not succesfull
-starting 33 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1693282').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1693282 
-Warning: Ps input was provided (99164.91276041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97164.84996041667Pa).
-run not succesfull
-starting 34 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1746969').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1746969 
-Warning: Ps input was provided (98023.13151041667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (89570.83551041667Pa).
-run not succesfull
-starting 35 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1798551').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1798551 
-Warning: Ps input was provided (99066.55598958333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96748.64918958333Pa).
-run not succesfull
-starting 36 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1851912').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1851912 
-Warning: Ps input was provided (99497.17252604167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99370.03492604167Pa).
-run not succesfull
-starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
-  warnings.warn("Key '"+key+"' may not be implemented.")
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
-  self.L = zsl/self.zeta
- 366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1906431').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1906431 
-Warning: Ps input was provided (99586.98111979167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94123.59591979167Pa).
-run not succesfull
-starting 38 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1957671').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1957671 
-Warning: Ps input was provided (99235.24479166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84176.50239166667Pa).
-run not succesfull
-starting 39 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2008390').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2008390 
-Warning: Ps input was provided (99279.59440104167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85735.90840104167Pa).
-run not succesfull
-starting 40 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2059270').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2059270 
-Warning: Ps input was provided (99774.5234375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98984.6222375Pa).
-run not succesfull
-starting 41 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2113052').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2113052 
-Warning: Ps input was provided (99802.658203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98880.910603125Pa).
-run not succesfull
-starting 42 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2167306').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2167306 
-Warning: Ps input was provided (99349.02734375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99158.32094375Pa).
-run not succesfull
-starting 43 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2222303').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2222303 
-Warning: Ps input was provided (98955.87239583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98241.31199583333Pa).
-run not succesfull
-starting 44 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2276984').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2276984 
-Warning: Ps input was provided (98735.572265625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86832.903065625Pa).
-run not succesfull
-starting 45 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2328277').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2328277 
-Warning: Ps input was provided (99026.02408854167Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95189.52928854167Pa).
-run not succesfull
-starting 46 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2381273').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2381273 
-Warning: Ps input was provided (98960.017578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98404.379178125Pa).
-run not succesfull
-starting 47 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2435826').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2435826 
-Warning: Ps input was provided (98886.17122395833Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97028.54962395833Pa).
-run not succesfull
-starting 48 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2489579').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2489579 
-Warning: Ps input was provided (99096.7578125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97814.7870125Pa).
-run not succesfull
-starting 49 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2543769').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2543769 
-Warning: Ps input was provided (98649.583984375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98244.627184375Pa).
-run not succesfull
-starting 50 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2597864').to_json" > /local/4759326[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2597864 
-Warning: Ps input was provided (98760.32682291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97915.09722291667Pa).
-run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-3 b/class4gl/c4gl_sim.o4759326-3
deleted file mode 100644
index 57c4168..0000000
--- a/class4gl/c4gl_sim.o4759326-3
+++ /dev/null
@@ -1,227 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-       Unnamed: 0          filename  latitude  longitude
-74560           0  74560_0_ini.yaml     40.15     -89.33
-getting all records of the whole batch
-determining the station and its chunk number according global_chunk_number (3)
-chunks_current_station 8
-station =  [74560]
-station chunk number: 3
-Fetching initial/forcing records
-starting station chunk number: 3(size: 50 soundings)
-starting 1 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.4').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.4 
-Warning: Ps input was provided (99334.38498263889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98008.8577826389Pa).
-run not succesfull
-starting 2 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.54256').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.54256 
-Warning: Ps input was provided (98974.41840277778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93374.47800277777Pa).
-run not succesfull
-starting 3 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.106783').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.106783 
-Warning: Ps input was provided (98773.94704861111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97569.67144861112Pa).
-run not succesfull
-starting 4 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.161041').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.161041 
-Warning: Ps input was provided (98655.97786458333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93577.53706458333Pa).
-run not succesfull
-starting 5 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.213499').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.213499 
-Warning: Ps input was provided (99006.80989583333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98821.98949583333Pa).
-run not succesfull
-starting 6 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.268606').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.268606 
-Warning: Ps input was provided (98851.09939236111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96326.00539236111Pa).
-run not succesfull
-starting 7 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.321942').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.321942 
-Warning: Ps input was provided (98580.45746527778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97994.21186527779Pa).
-run not succesfull
-starting 8 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.376318').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.376318 
-Warning: Ps input was provided (98692.86935763889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98522.17535763889Pa).
-run not succesfull
-starting 9 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.429745').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.429745 
-Warning: Ps input was provided (97911.22092013889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (80203.77852013889Pa).
-run not succesfull
-starting 10 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.480238').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.480238 
-Warning: Ps input was provided (98075.484375Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97173.749175Pa).
-run not succesfull
-starting 11 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.534353').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.534353 
-Warning: Ps input was provided (98681.65972222222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96007.06132222222Pa).
-run not succesfull
-starting 12 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.587556').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.587556 
-Warning: Ps input was provided (99226.03385416667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98950.56905416667Pa).
-run not succesfull
-starting 13 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.642546').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.642546 
-Warning: Ps input was provided (99068.84461805556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97983.46621805556Pa).
-run not succesfull
-starting 14 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.696672').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.696672 
-Warning: Ps input was provided (98850.8203125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (86857.5067125Pa).
-run not succesfull
-starting 15 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.748004').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.748004 
-Warning: Ps input was provided (98882.85069444444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (80400.81069444443Pa).
-run not succesfull
-starting 16 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.798522').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.798522 
-Warning: Ps input was provided (98935.29166666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (84607.59046666668Pa).
-run not succesfull
-starting 17 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.849366').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.849366 
-Warning: Ps input was provided (99346.84895833333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (87599.57015833333Pa).
-run not succesfull
-starting 18 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.900431').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.900431 
-Warning: Ps input was provided (99613.65928819444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99492.40768819443Pa).
-run not succesfull
-starting 19 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.955333').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.955333 
-Warning: Ps input was provided (99327.306640625Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96987.033040625Pa).
-run not succesfull
-starting 20 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1008606').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1008606 
-Warning: Ps input was provided (99090.17664930556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97185.46704930556Pa).
-run not succesfull
-starting 21 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1062216').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1062216 
-Warning: Ps input was provided (98801.10199652778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96923.46799652778Pa).
-run not succesfull
-starting 22 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1116153').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1116153 
-Warning: Ps input was provided (98502.55295138889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97001.6229513889Pa).
-run not succesfull
-starting 23 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1169750').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1169750 
-Warning: Ps input was provided (98958.93836805556Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (83577.64316805557Pa).
-run not succesfull
-starting 24 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1220569').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1220569 
-Warning: Ps input was provided (99661.33854166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98757.24894166666Pa).
-run not succesfull
-starting 25 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1274715').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1274715 
-Warning: Ps input was provided (99208.20182291667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85307.82422291668Pa).
-run not succesfull
-starting 26 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1325734').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1325734 
-Warning: Ps input was provided (99667.44791666667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93415.33871666667Pa).
-run not succesfull
-starting 27 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1378235').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1378235 
-Warning: Ps input was provided (99869.86935763889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98178.23295763889Pa).
-run not succesfull
-starting 28 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1430796').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1430796 
-Warning: Ps input was provided (99770.04644097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97653.44084097222Pa).
-run not succesfull
-starting 29 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1484617').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1484617 
-Warning: Ps input was provided (99515.08116319444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (99389.12076319444Pa).
-run not succesfull
-starting 30 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1539402').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1539402 
-Warning: Ps input was provided (99617.5703125Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92184.7295125Pa).
-run not succesfull
-starting 31 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1591441').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1591441 
-Warning: Ps input was provided (99509.58333333333Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98973.95733333332Pa).
-run not succesfull
-starting 32 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1645914').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1645914 
-Warning: Ps input was provided (99475.33116319444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (93772.97436319444Pa).
-run not succesfull
-starting 33 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1698613').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1698613 
-Warning: Ps input was provided (99442.09157986111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (95909.31437986111Pa).
-run not succesfull
-starting 34 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1751712').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1751712 
-Warning: Ps input was provided (99330.49479166667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97372.81119166667Pa).
-run not succesfull
-starting 35 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1805578').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1805578 
-Warning: Ps input was provided (98861.93576388889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98717.1401638889Pa).
-run not succesfull
-starting 36 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1859984').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1859984 
-Warning: Ps input was provided (98695.76085069444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94772.15325069443Pa).
-run not succesfull
-starting 37 out of 50 (station total: /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:345: UserWarning: Key 'index' may not be implemented.
-  warnings.warn("Key '"+key+"' may not be implemented.")
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/model.py:1454: RuntimeWarning: divide by zero encountered in double_scalars
-  self.L = zsl/self.zeta
- 366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1912886').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1912886 
-Warning: Ps input was provided (99132.26996527778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96674.27636527778Pa).
-run not succesfull
-starting 38 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1966234').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1966234 
-Warning: Ps input was provided (99497.02994791667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98510.53634791667Pa).
-run not succesfull
-starting 39 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2020399').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2020399 
-Warning: Ps input was provided (99728.96788194444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (85688.50348194444Pa).
-run not succesfull
-starting 40 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2071191').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2071191 
-Warning: Ps input was provided (99652.02690972222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (88797.06570972221Pa).
-run not succesfull
-starting 41 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2122985').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2122985 
-Warning: Ps input was provided (99688.40147569444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98187.47147569444Pa).
-run not succesfull
-starting 42 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2176760').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2176760 
-Warning: Ps input was provided (99471.74782986111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98455.82422986111Pa).
-run not succesfull
-starting 43 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2230944').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2230944 
-Warning: Ps input was provided (99348.51519097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96937.60959097222Pa).
-run not succesfull
-starting 44 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2284495').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2284495 
-Warning: Ps input was provided (99275.59982638889Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (92649.1410263889Pa).
-run not succesfull
-starting 45 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2337035').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2337035 
-Warning: Ps input was provided (99152.58463541667Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96658.09783541667Pa).
-run not succesfull
-starting 46 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2390194').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2390194 
-Warning: Ps input was provided (98868.35069444444Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97950.13469444444Pa).
-run not succesfull
-starting 47 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2444461').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2444461 
-Warning: Ps input was provided (99050.93793402778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (96213.88593402778Pa).
-run not succesfull
-starting 48 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2497851').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2497851 
-Warning: Ps input was provided (99175.79644097222Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (97772.57404097222Pa).
-run not succesfull
-starting 49 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2551916').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2551916 
-Warning: Ps input was provided (99178.48090277778Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (98734.67650277779Pa).
-run not succesfull
-starting 50 out of 50 (station total:  366 )
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2606609').to_json" > /local/4759326[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2606609 
-Warning: Ps input was provided (98840.40798611111Pa), but it is now overwritten by the first level (index 0) of p_pro which is different (94689.60078611111Pa).
-run not succesfull
diff --git a/class4gl/c4gl_sim.o4759326-4 b/class4gl/c4gl_sim.o4759326-4
deleted file mode 100644
index 1dfedff..0000000
--- a/class4gl/c4gl_sim.o4759326-4
+++ /dev/null
@@ -1,10 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-5 b/class4gl/c4gl_sim.o4759326-5
deleted file mode 100644
index 6d9339a..0000000
--- a/class4gl/c4gl_sim.o4759326-5
+++ /dev/null
@@ -1,10 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-6 b/class4gl/c4gl_sim.o4759326-6
deleted file mode 100644
index d48e632..0000000
--- a/class4gl/c4gl_sim.o4759326-6
+++ /dev/null
@@ -1,10 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini
diff --git a/class4gl/c4gl_sim.o4759326-7 b/class4gl/c4gl_sim.o4759326-7
deleted file mode 100644
index 5976742..0000000
--- a/class4gl/c4gl_sim.o4759326-7
+++ /dev/null
@@ -1,10 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_exec
-C4GLJOB_experiments
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/simulations/simulations.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --experiments=ERA_NOAC --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ --path_forcing=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --runtime=from_input --split_by=50 --station_id=74560 --subset_forcing=ini

From ff9253c76127c79896812130e91867e419174402 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 18 Sep 2018 16:18:20 +0200
Subject: [PATCH 075/129] fix searching for first file of a station.

---
 class4gl/interface_functions.py | 39 ++++++++++++++++++++++++++-------
 class4gl/interface_multi.py     |  4 ++++
 2 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 0da9a00..ca45f7e 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -192,10 +192,26 @@ def __init__(self,path,suffix='ini',refetch_stations=True):
         self.table = self.table.set_index('STNID')
 
     def get_stations(self,suffix):
-        stations_list_files = glob.glob(self.path+'/?????_0_'+suffix+'.yaml')
+        stations_list_files = glob.glob(self.path+'/?????_*_'+suffix+'.yaml')
         if len(stations_list_files) == 0:
             stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml')
+        else:
+            # this weird section retrieves the first file of every station
+            stations_list_files_1 = [station_file[:len(self.path+'/?????')] for \
+                                   station_file in stations_list_files]
+            stations_list_files_2 = [station_file[len(self.path+'/?????'):] for \
+                                   station_file in stations_list_files]
+            print(stations_list_files_1)
+            stations_list_files_new = []
+            stations_list_files_skip = []
+            for istat,stations_file_1 in  enumerate(stations_list_files_1):
+                if stations_file_1 not in stations_list_files_skip:
+                    stations_list_files_skip.append(stations_file_1)
+                    stations_list_files_new.append(stations_file_1+stations_list_files_2[istat])
+            stations_list_files = stations_list_files_new
+            
         stations_list_files.sort()
+
         if len(stations_list_files) == 0:
             raise ValueError('no stations found that match "'+self.path+'/?????[_0]_'+suffix+'.yaml'+'"')
         stations_list = []
@@ -379,13 +395,20 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
             else:
                 chunk = 0
                 end_of_chunks = False
-                while not end_of_chunks:
-                    fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
-                    if os.path.isfile(path_yaml+'/'+fn):
-                        dictfnchunks.append(dict(fn=fn,chunk=chunk))
-                    else:
-                        end_of_chunks = True
-                    chunk += 1
+                station_list_files = glob.glob(path_yaml+'/'+format(STNID,'05d')+'_*_'+subset+'.yaml')
+                station_list_files.sort()
+                for station_path_file in station_list_files:
+                    fn = station_path_file.split('/')[-1]
+                    chunk = int(fn.split('_')[1])
+                    dictfnchunks.append(dict(fn=fn,chunk=chunk))
+
+                # while not end_of_chunks:
+                #     fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
+                #     if os.path.isfile(path_yaml+'/'+fn):
+                #         dictfnchunks.append(dict(fn=fn,chunk=chunk))
+                #     else:
+                #         end_of_chunks = True
+                #     chunk += 1
 
             # globyamlfilenames = path_yaml+'/'+format(STNID,'05d')+'*_'+subset+'.yaml'
             # yamlfilenames = glob.glob(globyamlfilenames)
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 6e7cd2f..25f0f09 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -129,6 +129,10 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
         self.frames['stats']['records_all_stations_mod'].index = \
             self.frames['stats']['records_all_stations_ini'].index 
 
+        
+        if not len(self.frames['stats']['records_all_stations_ini']):
+            raise ValueError('no class records found. Aborting')
+
         self.frames['stats']['records_all_stations_ini']['dates'] = \
             self.frames['stats']['records_all_stations_ini']['ldatetime'].dt.date
 

From b13181353e0ede40a66bf29841c55fed44965ee4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 18 Sep 2018 21:11:26 +0200
Subject: [PATCH 076/129] fix searching for first file of a station.

---
 class4gl/interface/interface.py | 12 +++++++-----
 class4gl/interface_functions.py |  2 +-
 class4gl/interface_multi.py     |  6 +++---
 3 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 1af9e4c..2e1c1be 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -205,11 +205,13 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     for varkey in ['h','theta','q']:                                                    
         ikey = 0
         key = list(args.experiments.strip().split(' '))[ikey]
-        cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-        clearsky = (cc < 0.05)
+        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        # clearsky = (cc < 0.05)
     
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
     
     
         nbins=40       
@@ -433,7 +435,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #print(data_all.columns)
         #print('hello7')
         for varkey in ['h','theta','q']:
-            input_keys =['wg','cc']
+            input_keys =['wg','cc','advt']
             for input_key in input_keys:
                 varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
 
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index ca45f7e..0c0cf32 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -201,7 +201,7 @@ def get_stations(self,suffix):
                                    station_file in stations_list_files]
             stations_list_files_2 = [station_file[len(self.path+'/?????'):] for \
                                    station_file in stations_list_files]
-            print(stations_list_files_1)
+            #print(stations_list_files_1)
             stations_list_files_new = []
             stations_list_files_skip = []
             for istat,stations_file_1 in  enumerate(stations_list_files_1):
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 25f0f09..9509387 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -130,7 +130,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
             self.frames['stats']['records_all_stations_ini'].index 
 
         
-        if len(self.frames['stats']['records_all_stations_ini']):
+        if len(self.frames['stats']['records_all_stations_ini']) ==0:
             raise ValueError('no class records found. Aborting')
 
         self.frames['stats']['records_all_stations_ini']['dates'] = \
@@ -1266,8 +1266,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                                  +'LT')
 
             #print('r17')
-            print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
-            print(hmax)
+            #print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            #print(hmax)
             valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
             if valid_mod:
 

From 861e7f8b7007476d5de959f6377c264c7e7562b9 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 19 Sep 2018 09:45:44 +0200
Subject: [PATCH 077/129] make simulations.py less strict, so that writing out
 data does not fail.

---
 class4gl/class4gl.py                | 11 ++++++-----
 class4gl/simulations/simulations.py | 24 ++++++++++--------------
 2 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index a3c8cc8..7afe040 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -1285,7 +1285,7 @@ def query_source(self,var):
             if var in vars_in_source:
                 return source
 
-    def check_source(self,source,check_only_sections=None):
+    def check_source(self,source,check_only_sections=None,ignore_keys=[]):
         """ this procedure checks whether data of a specified source is valid.
 
         INPUT:
@@ -1321,9 +1321,9 @@ def check_source(self,source,check_only_sections=None):
                     # self.logger.info('Expected key '+datakey+\
                     #                  ' is not in parameter input')                        
                     source_ok = False                                           
-                    print(datakey)
-                elif (checkdata[datakey] is None) or \
-                     (pd.isnull(checkdata[datakey]) is True):                    
+                elif (datakey not in ignore_keys) and \
+                     ((checkdata[datakey] is None) or \
+                     (pd.isnull(checkdata[datakey]) is True)):                    
         
                     # self.logger.info('Key value of "'+datakey+\
                     #                  '" is invalid: ('+ \
@@ -1362,7 +1362,8 @@ def check_source_globaldata(self):
             source_ok = self.check_source(source='globaldata',\
                                           check_only_sections=['air_ac',\
                                                                'air_ap',\
-                                                               'pars'])
+                                                               'pars'],
+                                         ignore_keys=[])
             if not source_ok:
                 source_globaldata_ok = False
         
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 7b02734..444d87b 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -272,14 +272,11 @@
 
                     if args.error_handling == 'dump_always':
                         try:
-                            if c4gli_morning.check_source_globaldata():
-                            
-
-                                c4gl.run()
-                                print('run succesfull')
-                            else:
-                                print('global data not ok')
-                                raise ValueError('global data not ok')
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
+                            c4gl.run()
+                            print('run succesfull')
                         except:
                             print('run not succesfull')
                         onerun = True
@@ -296,12 +293,11 @@
                     # successful
                     elif args.error_handling == 'dump_on_success':
                        try:
-                            if c4gli_morning.check_source_globaldata():
-                                c4gl.run()
-                                print('run succesfull')
-                            else:
-                                print('global data not ok')
-                                raise ValueError('global data not ok')
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
+                            c4gl.run()
+                            print('run succesfull')
                             c4gli_morning.dump(file_ini)
                             
                             

From cfd1914de77ce2547a11fb2acd6ffe4b00355171 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 19 Sep 2018 14:55:05 +0200
Subject: [PATCH 078/129] Fix errors in case there are more than 10 chunk
 files.

---
 class4gl/c4gl_setup.o4759290-0      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-1      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-2      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-3      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-4      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-5      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-6      | 1251 ---------------------------
 class4gl/c4gl_setup.o4759290-7      |  469 ----------
 class4gl/simulations/simulations.py |    4 +-
 9 files changed, 2 insertions(+), 9228 deletions(-)
 delete mode 100644 class4gl/c4gl_setup.o4759290-0
 delete mode 100644 class4gl/c4gl_setup.o4759290-1
 delete mode 100644 class4gl/c4gl_setup.o4759290-2
 delete mode 100644 class4gl/c4gl_setup.o4759290-3
 delete mode 100644 class4gl/c4gl_setup.o4759290-4
 delete mode 100644 class4gl/c4gl_setup.o4759290-5
 delete mode 100644 class4gl/c4gl_setup.o4759290-6
 delete mode 100644 class4gl/c4gl_setup.o4759290-7

diff --git a/class4gl/c4gl_setup.o4759290-0 b/class4gl/c4gl_setup.o4759290-0
deleted file mode 100644
index 45f4cc7..0000000
--- a/class4gl/c4gl_setup.o4759290-0
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 0 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml
-0 1981-01-01 00:00:00
-1 1981-01-02 00:00:00
-2 1981-01-03 00:00:00
-3 1981-01-04 00:00:00
-4 1981-01-05 00:00:00
-5 1981-01-06 00:00:00
-6 1981-01-07 00:00:00
-7 1981-01-08 00:00:00
-8 1981-01-09 00:00:00
-9 1981-01-10 00:00:00
-10 1981-01-11 00:00:00
-11 1981-01-12 00:00:00
-12 1981-01-13 00:00:00
-13 1981-01-14 00:00:00
-14 1981-01-15 00:00:00
-15 1981-01-16 00:00:00
-16 1981-01-17 00:00:00
-17 1981-01-18 00:00:00
-18 1981-01-19 00:00:00
-19 1981-01-20 00:00:00
-20 1981-01-21 00:00:00
-21 1981-01-22 00:00:00
-22 1981-01-23 00:00:00
-23 1981-01-24 00:00:00
-24 1981-01-25 00:00:00
-25 1981-01-26 00:00:00
-26 1981-01-27 00:00:00
-27 1981-01-28 00:00:00
-28 1981-01-29 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-01-30 00:00:00
-30 1981-01-31 00:00:00
-31 1981-02-01 00:00:00
-32 1981-02-02 00:00:00
-33 1981-02-03 00:00:00
-34 1981-02-04 00:00:00
-35 1981-02-05 00:00:00
-36 1981-02-06 00:00:00
-37 1981-02-07 00:00:00
-38 1981-02-08 00:00:00
-39 1981-02-09 00:00:00
-40 1981-02-10 00:00:00
-41 1981-02-11 00:00:00
-42 1981-02-12 00:00:00
-43 1981-02-13 00:00:00
-44 1981-02-14 00:00:00
-45 1981-02-15 00:00:00
-46 1981-02-16 00:00:00
-47 1981-02-17 00:00:00
-48 1981-02-18 00:00:00
-49 1981-02-19 00:00:00
-pkl file older than yaml file, so I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.4 
- obs record registered
- next record: 51396
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.51396').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.51396 
- obs record registered
- next record: 102622
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.102622').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.102622 
- obs record registered
- next record: 155319
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.155319').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.155319 
- obs record registered
- next record: 207321
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.207321').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.207321 
- obs record registered
- next record: 261499
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.261499').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.261499 
- obs record registered
- next record: 312096
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.312096').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.312096 
- obs record registered
- next record: 363706
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.363706').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.363706 
- obs record registered
- next record: 416568
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.416568').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.416568 
- obs record registered
- next record: 470247
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.470247').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.470247 
- obs record registered
- next record: 524351
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.524351').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.524351 
- obs record registered
- next record: 577126
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.577126').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.577126 
- obs record registered
- next record: 629651
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.629651').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.629651 
- obs record registered
- next record: 682101
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.682101').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.682101 
- obs record registered
- next record: 734044
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.734044').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.734044 
- obs record registered
- next record: 786398
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.786398').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.786398 
- obs record registered
- next record: 838710
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.838710').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.838710 
- obs record registered
- next record: 891166
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.891166').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.891166 
- obs record registered
- next record: 943327
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.943327').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.943327 
- obs record registered
- next record: 995731
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.995731').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.995731 
- obs record registered
- next record: 1050024
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1050024').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1050024 
- obs record registered
- next record: 1102851
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1102851').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1102851 
- obs record registered
- next record: 1154781
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1154781').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1154781 
- obs record registered
- next record: 1208836
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1208836').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1208836 
- obs record registered
- next record: 1263823
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1263823').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1263823 
- obs record registered
- next record: 1318780
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1318780').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1318780 
- obs record registered
- next record: 1368688
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1368688').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1368688 
- obs record registered
- next record: 1420916
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1420916').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1420916 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1472567
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1472567').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1472567 
- obs record registered
- next record: 1524967
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1524967').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1524967 
- obs record registered
- next record: 1576324
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1576324').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1576324 
- obs record registered
- next record: 1629313
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1629313').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1629313 
- obs record registered
- next record: 1680371
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1680371').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1680371 
- obs record registered
- next record: 1731186
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1731186').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1731186 
- obs record registered
- next record: 1782240
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1782240').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1782240 
- obs record registered
- next record: 1833412
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1833412').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1833412 
- obs record registered
- next record: 1886384
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1886384').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1886384 
- obs record registered
- next record: 1937565
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1937565').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1937565 
- obs record registered
- next record: 1988408
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.1988408').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.1988408 
- obs record registered
- next record: 2038456
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2038456').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2038456 
- obs record registered
- next record: 2089432
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2089432').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2089432 
- obs record registered
- next record: 2141348
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2141348').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2141348 
- obs record registered
- next record: 2192964
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2192964').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2192964 
- obs record registered
- next record: 2247428
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2247428').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2247428 
- obs record registered
- next record: 2302316
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2302316').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2302316 
- obs record registered
- next record: 2356848
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2356848').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2356848 
- obs record registered
- next record: 2411058
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2411058').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2411058 
- obs record registered
- next record: 2462275
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2462275').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2462275 
- obs record registered
- next record: 2516002
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2516002').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2516002 
- obs record registered
- next record: 2568449
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.yaml.2568449').to_json" > /local/4759290[0].master15.delcatty.gent.vsc/74560_0_ini.yaml.buffer.json.2568449 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_0_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-1 b/class4gl/c4gl_setup.o4759290-1
deleted file mode 100644
index d958506..0000000
--- a/class4gl/c4gl_setup.o4759290-1
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 1 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml
-0 1981-02-20 00:00:00
-1 1981-02-21 00:00:00
-2 1981-02-22 00:00:00
-3 1981-02-23 00:00:00
-4 1981-02-24 00:00:00
-5 1981-02-25 00:00:00
-6 1981-02-26 00:00:00
-7 1981-02-27 00:00:00
-8 1981-02-28 00:00:00
-9 1981-03-01 00:00:00
-10 1981-03-02 00:00:00
-11 1981-03-03 00:00:00
-12 1981-03-04 00:00:00
-13 1981-03-05 00:00:00
-14 1981-03-06 00:00:00
-15 1981-03-07 00:00:00
-16 1981-03-08 00:00:00
-17 1981-03-09 00:00:00
-18 1981-03-10 00:00:00
-19 1981-03-11 00:00:00
-20 1981-03-12 00:00:00
-21 1981-03-13 00:00:00
-22 1981-03-14 00:00:00
-23 1981-03-15 00:00:00
-24 1981-03-16 00:00:00
-25 1981-03-17 00:00:00
-26 1981-03-18 00:00:00
-27 1981-03-19 00:00:00
-28 1981-03-20 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-03-21 00:00:00
-30 1981-03-22 00:00:00
-31 1981-03-23 00:00:00
-32 1981-03-24 00:00:00
-33 1981-03-25 00:00:00
-34 1981-03-26 00:00:00
-35 1981-03-27 00:00:00
-36 1981-03-28 00:00:00
-37 1981-03-29 00:00:00
-38 1981-03-30 00:00:00
-39 1981-03-31 00:00:00
-40 1981-04-01 00:00:00
-41 1981-04-02 00:00:00
-42 1981-04-03 00:00:00
-43 1981-04-04 00:00:00
-44 1981-04-05 00:00:00
-45 1981-04-06 00:00:00
-46 1981-04-07 00:00:00
-47 1981-04-08 00:00:00
-48 1981-04-09 00:00:00
-49 1981-04-10 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.4 
- obs record registered
- next record: 53735
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.53735').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.53735 
- obs record registered
- next record: 107051
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.107051').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.107051 
- obs record registered
- next record: 159809
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.159809').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.159809 
- obs record registered
- next record: 210583
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.210583').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.210583 
- obs record registered
- next record: 262669
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.262669').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.262669 
- obs record registered
- next record: 315998
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.315998').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.315998 
- obs record registered
- next record: 368733
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.368733').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.368733 
- obs record registered
- next record: 422182
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.422182').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.422182 
- obs record registered
- next record: 473690
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.473690').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.473690 
- obs record registered
- next record: 525232
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.525232').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.525232 
- obs record registered
- next record: 578431
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.578431').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.578431 
- obs record registered
- next record: 633046
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.633046').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.633046 
- obs record registered
- next record: 686127
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.686127').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.686127 
- obs record registered
- next record: 738483
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.738483').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.738483 
- obs record registered
- next record: 790644
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.790644').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.790644 
- obs record registered
- next record: 844792
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.844792').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.844792 
- obs record registered
- next record: 897654
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.897654').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.897654 
- obs record registered
- next record: 950524
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.950524').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.950524 
- obs record registered
- next record: 1003819
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1003819').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1003819 
- obs record registered
- next record: 1057080
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1057080').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1057080 
- obs record registered
- next record: 1109138
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1109138').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1109138 
- obs record registered
- next record: 1162278
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1162278').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1162278 
- obs record registered
- next record: 1216404
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1216404').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1216404 
- obs record registered
- next record: 1268030
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1268030').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1268030 
- obs record registered
- next record: 1320921
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1320921').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1320921 
- obs record registered
- next record: 1372574
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1372574').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1372574 
- obs record registered
- next record: 1424704
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1424704').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1424704 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1476503
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1476503').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1476503 
- obs record registered
- next record: 1528706
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1528706').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1528706 
- obs record registered
- next record: 1583120
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1583120').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1583120 
- obs record registered
- next record: 1635666
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1635666').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1635666 
- obs record registered
- next record: 1689323
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1689323').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1689323 
- obs record registered
- next record: 1741836
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1741836').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1741836 
- obs record registered
- next record: 1795229
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1795229').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1795229 
- obs record registered
- next record: 1847312
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1847312').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1847312 
- obs record registered
- next record: 1900220
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1900220').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1900220 
- obs record registered
- next record: 1954053
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.1954053').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.1954053 
- obs record registered
- next record: 2005224
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2005224').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2005224 
- obs record registered
- next record: 2055085
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2055085').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2055085 
- obs record registered
- next record: 2109238
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2109238').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2109238 
- obs record registered
- next record: 2158379
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2158379').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2158379 
- obs record registered
- next record: 2213439
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2213439').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2213439 
- obs record registered
- next record: 2265091
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2265091').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2265091 
- obs record registered
- next record: 2312591
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2312591').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2312591 
- obs record registered
- next record: 2363001
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2363001').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2363001 
- obs record registered
- next record: 2417249
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2417249').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2417249 
- obs record registered
- next record: 2472181
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2472181').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2472181 
- obs record registered
- next record: 2523821
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2523821').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2523821 
- obs record registered
- next record: 2576934
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.yaml.2576934').to_json" > /local/4759290[1].master15.delcatty.gent.vsc/74560_1_ini.yaml.buffer.json.2576934 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_1_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-2 b/class4gl/c4gl_setup.o4759290-2
deleted file mode 100644
index d9771a1..0000000
--- a/class4gl/c4gl_setup.o4759290-2
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 2 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml
-0 1981-04-11 00:00:00
-1 1981-04-12 00:00:00
-2 1981-04-13 00:00:00
-3 1981-04-14 00:00:00
-4 1981-04-15 00:00:00
-5 1981-04-16 00:00:00
-6 1981-04-17 00:00:00
-7 1981-04-18 00:00:00
-8 1981-04-19 00:00:00
-9 1981-04-20 00:00:00
-10 1981-04-21 00:00:00
-11 1981-04-22 00:00:00
-12 1981-04-23 00:00:00
-13 1981-04-24 00:00:00
-14 1981-04-25 00:00:00
-15 1981-04-26 00:00:00
-16 1981-04-27 00:00:00
-17 1981-04-28 00:00:00
-18 1981-04-29 00:00:00
-19 1981-04-30 00:00:00
-20 1981-05-01 00:00:00
-21 1981-05-02 00:00:00
-22 1981-05-03 00:00:00
-23 1981-05-04 00:00:00
-24 1981-05-05 00:00:00
-25 1981-05-06 00:00:00
-26 1981-05-07 00:00:00
-27 1981-05-08 00:00:00
-28 1981-05-09 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-05-10 00:00:00
-30 1981-05-11 00:00:00
-31 1981-05-12 00:00:00
-32 1981-05-13 00:00:00
-33 1981-05-14 00:00:00
-34 1981-05-15 00:00:00
-35 1981-05-16 00:00:00
-36 1981-05-17 00:00:00
-37 1981-05-18 00:00:00
-38 1981-05-19 00:00:00
-39 1981-05-20 00:00:00
-40 1981-05-21 00:00:00
-41 1981-05-22 00:00:00
-42 1981-05-23 00:00:00
-43 1981-05-24 00:00:00
-44 1981-05-25 00:00:00
-45 1981-05-26 00:00:00
-46 1981-05-27 00:00:00
-47 1981-05-28 00:00:00
-48 1981-05-29 00:00:00
-49 1981-05-30 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.4 
- obs record registered
- next record: 51067
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.51067').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.51067 
- obs record registered
- next record: 104322
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.104322').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.104322 
- obs record registered
- next record: 158125
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.158125').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.158125 
- obs record registered
- next record: 210001
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.210001').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.210001 
- obs record registered
- next record: 263270
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.263270').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.263270 
- obs record registered
- next record: 317457
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.317457').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.317457 
- obs record registered
- next record: 368268
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.368268').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.368268 
- obs record registered
- next record: 422781
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.422781').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.422781 
- obs record registered
- next record: 476050
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.476050').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.476050 
- obs record registered
- next record: 528316
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.528316').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.528316 
- obs record registered
- next record: 580931
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.580931').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.580931 
- obs record registered
- next record: 635773
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.635773').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.635773 
- obs record registered
- next record: 686190
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.686190').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.686190 
- obs record registered
- next record: 736979
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.736979').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.736979 
- obs record registered
- next record: 791508
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.791508').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.791508 
- obs record registered
- next record: 844468
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.844468').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.844468 
- obs record registered
- next record: 899058
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.899058').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.899058 
- obs record registered
- next record: 950395
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.950395').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.950395 
- obs record registered
- next record: 1001879
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1001879').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1001879 
- obs record registered
- next record: 1054711
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1054711').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1054711 
- obs record registered
- next record: 1107591
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1107591').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1107591 
- obs record registered
- next record: 1161498
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1161498').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1161498 
- obs record registered
- next record: 1216461
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1216461').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1216461 
- obs record registered
- next record: 1270638
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1270638').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1270638 
- obs record registered
- next record: 1323032
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1323032').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1323032 
- obs record registered
- next record: 1374748
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1374748').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1374748 
- obs record registered
- next record: 1426449
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1426449').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1426449 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1479418
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1479418').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1479418 
- obs record registered
- next record: 1533453
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1533453').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1533453 
- obs record registered
- next record: 1585360
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1585360').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1585360 
- obs record registered
- next record: 1638642
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1638642').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1638642 
- obs record registered
- next record: 1693282
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1693282').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1693282 
- obs record registered
- next record: 1746969
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1746969').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1746969 
- obs record registered
- next record: 1798551
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1798551').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1798551 
- obs record registered
- next record: 1851912
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1851912').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1851912 
- obs record registered
- next record: 1906431
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1906431').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1906431 
- obs record registered
- next record: 1957671
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.1957671').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.1957671 
- obs record registered
- next record: 2008390
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2008390').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2008390 
- obs record registered
- next record: 2059270
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2059270').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2059270 
- obs record registered
- next record: 2113052
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2113052').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2113052 
- obs record registered
- next record: 2167306
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2167306').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2167306 
- obs record registered
- next record: 2222303
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2222303').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2222303 
- obs record registered
- next record: 2276984
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2276984').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2276984 
- obs record registered
- next record: 2328277
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2328277').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2328277 
- obs record registered
- next record: 2381273
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2381273').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2381273 
- obs record registered
- next record: 2435826
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2435826').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2435826 
- obs record registered
- next record: 2489579
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2489579').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2489579 
- obs record registered
- next record: 2543769
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2543769').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2543769 
- obs record registered
- next record: 2597864
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.yaml.2597864').to_json" > /local/4759290[2].master15.delcatty.gent.vsc/74560_2_ini.yaml.buffer.json.2597864 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_2_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-3 b/class4gl/c4gl_setup.o4759290-3
deleted file mode 100644
index 17f4526..0000000
--- a/class4gl/c4gl_setup.o4759290-3
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 3 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml
-0 1981-05-31 00:00:00
-1 1981-06-01 00:00:00
-2 1981-06-02 00:00:00
-3 1981-06-03 00:00:00
-4 1981-06-04 00:00:00
-5 1981-06-05 00:00:00
-6 1981-06-06 00:00:00
-7 1981-06-07 00:00:00
-8 1981-06-08 00:00:00
-9 1981-06-09 00:00:00
-10 1981-06-10 00:00:00
-11 1981-06-11 00:00:00
-12 1981-06-12 00:00:00
-13 1981-06-13 00:00:00
-14 1981-06-14 00:00:00
-15 1981-06-15 00:00:00
-16 1981-06-16 00:00:00
-17 1981-06-17 00:00:00
-18 1981-06-18 00:00:00
-19 1981-06-19 00:00:00
-20 1981-06-20 00:00:00
-21 1981-06-21 00:00:00
-22 1981-06-22 00:00:00
-23 1981-06-23 00:00:00
-24 1981-06-24 00:00:00
-25 1981-06-25 00:00:00
-26 1981-06-26 00:00:00
-27 1981-06-27 00:00:00
-28 1981-06-28 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-06-29 00:00:00
-30 1981-06-30 00:00:00
-31 1981-07-01 00:00:00
-32 1981-07-02 00:00:00
-33 1981-07-03 00:00:00
-34 1981-07-04 00:00:00
-35 1981-07-05 00:00:00
-36 1981-07-06 00:00:00
-37 1981-07-07 00:00:00
-38 1981-07-08 00:00:00
-39 1981-07-09 00:00:00
-40 1981-07-10 00:00:00
-41 1981-07-11 00:00:00
-42 1981-07-12 00:00:00
-43 1981-07-13 00:00:00
-44 1981-07-14 00:00:00
-45 1981-07-15 00:00:00
-46 1981-07-16 00:00:00
-47 1981-07-17 00:00:00
-48 1981-07-18 00:00:00
-49 1981-07-19 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.4 
- obs record registered
- next record: 54256
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.54256').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.54256 
- obs record registered
- next record: 106783
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.106783').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.106783 
- obs record registered
- next record: 161041
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.161041').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.161041 
- obs record registered
- next record: 213499
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.213499').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.213499 
- obs record registered
- next record: 268606
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.268606').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.268606 
- obs record registered
- next record: 321942
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.321942').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.321942 
- obs record registered
- next record: 376318
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.376318').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.376318 
- obs record registered
- next record: 429745
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.429745').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.429745 
- obs record registered
- next record: 480238
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.480238').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.480238 
- obs record registered
- next record: 534353
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.534353').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.534353 
- obs record registered
- next record: 587556
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.587556').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.587556 
- obs record registered
- next record: 642546
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.642546').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.642546 
- obs record registered
- next record: 696672
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.696672').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.696672 
- obs record registered
- next record: 748004
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.748004').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.748004 
- obs record registered
- next record: 798522
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.798522').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.798522 
- obs record registered
- next record: 849366
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.849366').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.849366 
- obs record registered
- next record: 900431
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.900431').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.900431 
- obs record registered
- next record: 955333
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.955333').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.955333 
- obs record registered
- next record: 1008606
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1008606').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1008606 
- obs record registered
- next record: 1062216
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1062216').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1062216 
- obs record registered
- next record: 1116153
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1116153').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1116153 
- obs record registered
- next record: 1169750
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1169750').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1169750 
- obs record registered
- next record: 1220569
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1220569').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1220569 
- obs record registered
- next record: 1274715
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1274715').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1274715 
- obs record registered
- next record: 1325734
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1325734').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1325734 
- obs record registered
- next record: 1378235
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1378235').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1378235 
- obs record registered
- next record: 1430796
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1430796').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1430796 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1484617
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1484617').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1484617 
- obs record registered
- next record: 1539402
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1539402').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1539402 
- obs record registered
- next record: 1591441
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1591441').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1591441 
- obs record registered
- next record: 1645914
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1645914').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1645914 
- obs record registered
- next record: 1698613
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1698613').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1698613 
- obs record registered
- next record: 1751712
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1751712').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1751712 
- obs record registered
- next record: 1805578
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1805578').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1805578 
- obs record registered
- next record: 1859984
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1859984').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1859984 
- obs record registered
- next record: 1912886
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1912886').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1912886 
- obs record registered
- next record: 1966234
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.1966234').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.1966234 
- obs record registered
- next record: 2020399
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2020399').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2020399 
- obs record registered
- next record: 2071191
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2071191').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2071191 
- obs record registered
- next record: 2122985
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2122985').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2122985 
- obs record registered
- next record: 2176760
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2176760').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2176760 
- obs record registered
- next record: 2230944
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2230944').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2230944 
- obs record registered
- next record: 2284495
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2284495').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2284495 
- obs record registered
- next record: 2337035
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2337035').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2337035 
- obs record registered
- next record: 2390194
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2390194').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2390194 
- obs record registered
- next record: 2444461
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2444461').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2444461 
- obs record registered
- next record: 2497851
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2497851').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2497851 
- obs record registered
- next record: 2551916
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2551916').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2551916 
- obs record registered
- next record: 2606609
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.yaml.2606609').to_json" > /local/4759290[3].master15.delcatty.gent.vsc/74560_3_ini.yaml.buffer.json.2606609 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_3_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-4 b/class4gl/c4gl_setup.o4759290-4
deleted file mode 100644
index 7fc0cea..0000000
--- a/class4gl/c4gl_setup.o4759290-4
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 4 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml
-0 1981-07-20 00:00:00
-1 1981-07-21 00:00:00
-2 1981-07-22 00:00:00
-3 1981-07-23 00:00:00
-4 1981-07-24 00:00:00
-5 1981-07-25 00:00:00
-6 1981-07-26 00:00:00
-7 1981-07-27 00:00:00
-8 1981-07-28 00:00:00
-9 1981-07-29 00:00:00
-10 1981-07-30 00:00:00
-11 1981-07-31 00:00:00
-12 1981-08-01 00:00:00
-13 1981-08-02 00:00:00
-14 1981-08-03 00:00:00
-15 1981-08-04 00:00:00
-16 1981-08-05 00:00:00
-17 1981-08-06 00:00:00
-18 1981-08-07 00:00:00
-19 1981-08-08 00:00:00
-20 1981-08-09 00:00:00
-21 1981-08-10 00:00:00
-22 1981-08-11 00:00:00
-23 1981-08-12 00:00:00
-24 1981-08-13 00:00:00
-25 1981-08-14 00:00:00
-26 1981-08-15 00:00:00
-27 1981-08-16 00:00:00
-28 1981-08-17 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-08-18 00:00:00
-30 1981-08-19 00:00:00
-31 1981-08-20 00:00:00
-32 1981-08-21 00:00:00
-33 1981-08-22 00:00:00
-34 1981-08-23 00:00:00
-35 1981-08-24 00:00:00
-36 1981-08-25 00:00:00
-37 1981-08-26 00:00:00
-38 1981-08-27 00:00:00
-39 1981-08-28 00:00:00
-40 1981-08-29 00:00:00
-41 1981-08-30 00:00:00
-42 1981-08-31 00:00:00
-43 1981-09-01 00:00:00
-44 1981-09-02 00:00:00
-45 1981-09-03 00:00:00
-46 1981-09-04 00:00:00
-47 1981-09-05 00:00:00
-48 1981-09-06 00:00:00
-49 1981-09-07 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.4 
- obs record registered
- next record: 51731
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.51731').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.51731 
- obs record registered
- next record: 104580
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.104580').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.104580 
- obs record registered
- next record: 157565
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.157565').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.157565 
- obs record registered
- next record: 210613
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.210613').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.210613 
- obs record registered
- next record: 264470
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.264470').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.264470 
- obs record registered
- next record: 318729
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.318729').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.318729 
- obs record registered
- next record: 370870
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.370870').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.370870 
- obs record registered
- next record: 423492
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.423492').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.423492 
- obs record registered
- next record: 474750
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.474750').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.474750 
- obs record registered
- next record: 529842
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.529842').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.529842 
- obs record registered
- next record: 583680
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.583680').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.583680 
- obs record registered
- next record: 637895
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.637895').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.637895 
- obs record registered
- next record: 692631
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.692631').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.692631 
- obs record registered
- next record: 746761
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.746761').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.746761 
- obs record registered
- next record: 800591
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.800591').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.800591 
- obs record registered
- next record: 854394
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.854394').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.854394 
- obs record registered
- next record: 909045
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.909045').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.909045 
- obs record registered
- next record: 963060
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.963060').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.963060 
- obs record registered
- next record: 1015112
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1015112').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1015112 
- obs record registered
- next record: 1067515
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1067515').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1067515 
- obs record registered
- next record: 1121265
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1121265').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1121265 
- obs record registered
- next record: 1175359
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1175359').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1175359 
- obs record registered
- next record: 1227436
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1227436').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1227436 
- obs record registered
- next record: 1279990
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1279990').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1279990 
- obs record registered
- next record: 1333481
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1333481').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1333481 
- obs record registered
- next record: 1388277
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1388277').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1388277 
- obs record registered
- next record: 1440447
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1440447').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1440447 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1492950
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1492950').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1492950 
- obs record registered
- next record: 1544819
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1544819').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1544819 
- obs record registered
- next record: 1597484
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1597484').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1597484 
- obs record registered
- next record: 1650523
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1650523').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1650523 
- obs record registered
- next record: 1703570
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1703570').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1703570 
- obs record registered
- next record: 1755369
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1755369').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1755369 
- obs record registered
- next record: 1808308
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1808308').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1808308 
- obs record registered
- next record: 1862932
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1862932').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1862932 
- obs record registered
- next record: 1917988
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1917988').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1917988 
- obs record registered
- next record: 1972660
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.1972660').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.1972660 
- obs record registered
- next record: 2027192
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2027192').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2027192 
- obs record registered
- next record: 2080750
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2080750').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2080750 
- obs record registered
- next record: 2134876
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2134876').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2134876 
- obs record registered
- next record: 2187060
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2187060').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2187060 
- obs record registered
- next record: 2240918
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2240918').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2240918 
- obs record registered
- next record: 2293459
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2293459').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2293459 
- obs record registered
- next record: 2345446
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2345446').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2345446 
- obs record registered
- next record: 2399559
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2399559').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2399559 
- obs record registered
- next record: 2453394
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2453394').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2453394 
- obs record registered
- next record: 2507446
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2507446').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2507446 
- obs record registered
- next record: 2560803
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2560803').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2560803 
- obs record registered
- next record: 2613551
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.yaml.2613551').to_json" > /local/4759290[4].master15.delcatty.gent.vsc/74560_4_ini.yaml.buffer.json.2613551 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_4_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-5 b/class4gl/c4gl_setup.o4759290-5
deleted file mode 100644
index 3ffddc2..0000000
--- a/class4gl/c4gl_setup.o4759290-5
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 5 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml
-0 1981-09-08 00:00:00
-1 1981-09-09 00:00:00
-2 1981-09-10 00:00:00
-3 1981-09-11 00:00:00
-4 1981-09-12 00:00:00
-5 1981-09-13 00:00:00
-6 1981-09-14 00:00:00
-7 1981-09-15 00:00:00
-8 1981-09-16 00:00:00
-9 1981-09-17 00:00:00
-10 1981-09-18 00:00:00
-11 1981-09-19 00:00:00
-12 1981-09-20 00:00:00
-13 1981-09-21 00:00:00
-14 1981-09-22 00:00:00
-15 1981-09-23 00:00:00
-16 1981-09-24 00:00:00
-17 1981-09-25 00:00:00
-18 1981-09-26 00:00:00
-19 1981-09-27 00:00:00
-20 1981-09-28 00:00:00
-21 1981-09-29 00:00:00
-22 1981-09-30 00:00:00
-23 1981-10-01 00:00:00
-24 1981-10-02 00:00:00
-25 1981-10-03 00:00:00
-26 1981-10-04 00:00:00
-27 1981-10-05 00:00:00
-28 1981-10-06 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-10-07 00:00:00
-30 1981-10-08 00:00:00
-31 1981-10-09 00:00:00
-32 1981-10-10 00:00:00
-33 1981-10-11 00:00:00
-34 1981-10-12 00:00:00
-35 1981-10-13 00:00:00
-36 1981-10-14 00:00:00
-37 1981-10-15 00:00:00
-38 1981-10-16 00:00:00
-39 1981-10-17 00:00:00
-40 1981-10-18 00:00:00
-41 1981-10-19 00:00:00
-42 1981-10-20 00:00:00
-43 1981-10-21 00:00:00
-44 1981-10-22 00:00:00
-45 1981-10-23 00:00:00
-46 1981-10-24 00:00:00
-47 1981-10-25 00:00:00
-48 1981-10-26 00:00:00
-49 1981-10-27 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.4 
- obs record registered
- next record: 53791
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.53791').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.53791 
- obs record registered
- next record: 108418
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.108418').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.108418 
- obs record registered
- next record: 160595
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.160595').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.160595 
- obs record registered
- next record: 213556
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.213556').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.213556 
- obs record registered
- next record: 267801
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.267801').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.267801 
- obs record registered
- next record: 322353
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.322353').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.322353 
- obs record registered
- next record: 376117
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.376117').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.376117 
- obs record registered
- next record: 428437
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.428437').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.428437 
- obs record registered
- next record: 482412
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.482412').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.482412 
- obs record registered
- next record: 534383
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.534383').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.534383 
- obs record registered
- next record: 587814
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.587814').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.587814 
- obs record registered
- next record: 639583
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.639583').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.639583 
- obs record registered
- next record: 692536
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.692536').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.692536 
- obs record registered
- next record: 746174
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.746174').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.746174 
- obs record registered
- next record: 798143
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.798143').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.798143 
- obs record registered
- next record: 850877
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.850877').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.850877 
- obs record registered
- next record: 905196
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.905196').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.905196 
- obs record registered
- next record: 959041
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.959041').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.959041 
- obs record registered
- next record: 1010607
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1010607').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1010607 
- obs record registered
- next record: 1063046
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1063046').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1063046 
- obs record registered
- next record: 1118074
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1118074').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1118074 
- obs record registered
- next record: 1173117
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1173117').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1173117 
- obs record registered
- next record: 1225841
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1225841').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1225841 
- obs record registered
- next record: 1276984
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1276984').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1276984 
- obs record registered
- next record: 1329882
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1329882').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1329882 
- obs record registered
- next record: 1384174
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1384174').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1384174 
- obs record registered
- next record: 1438389
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1438389').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1438389 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1492962
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1492962').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1492962 
- obs record registered
- next record: 1544189
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1544189').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1544189 
- obs record registered
- next record: 1598345
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1598345').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1598345 
- obs record registered
- next record: 1651761
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1651761').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1651761 
- obs record registered
- next record: 1705208
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1705208').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1705208 
- obs record registered
- next record: 1759539
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1759539').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1759539 
- obs record registered
- next record: 1812463
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1812463').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1812463 
- obs record registered
- next record: 1865446
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1865446').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1865446 
- obs record registered
- next record: 1918746
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1918746').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1918746 
- obs record registered
- next record: 1972414
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.1972414').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.1972414 
- obs record registered
- next record: 2025735
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2025735').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2025735 
- obs record registered
- next record: 2079164
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2079164').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2079164 
- obs record registered
- next record: 2132791
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2132791').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2132791 
- obs record registered
- next record: 2181910
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2181910').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2181910 
- obs record registered
- next record: 2233649
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2233649').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2233649 
- obs record registered
- next record: 2284947
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2284947').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2284947 
- obs record registered
- next record: 2339510
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2339510').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2339510 
- obs record registered
- next record: 2391981
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2391981').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2391981 
- obs record registered
- next record: 2443387
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2443387').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2443387 
- obs record registered
- next record: 2497540
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2497540').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2497540 
- obs record registered
- next record: 2552060
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2552060').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2552060 
- obs record registered
- next record: 2604997
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.yaml.2604997').to_json" > /local/4759290[5].master15.delcatty.gent.vsc/74560_5_ini.yaml.buffer.json.2604997 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_5_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-6 b/class4gl/c4gl_setup.o4759290-6
deleted file mode 100644
index bd1fb54..0000000
--- a/class4gl/c4gl_setup.o4759290-6
+++ /dev/null
@@ -1,1251 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 6 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml
-0 1981-10-28 00:00:00
-1 1981-10-29 00:00:00
-2 1981-10-30 00:00:00
-3 1981-10-31 00:00:00
-4 1981-11-01 00:00:00
-5 1981-11-02 00:00:00
-6 1981-11-03 00:00:00
-7 1981-11-04 00:00:00
-8 1981-11-05 00:00:00
-9 1981-11-06 00:00:00
-10 1981-11-07 00:00:00
-11 1981-11-08 00:00:00
-12 1981-11-09 00:00:00
-13 1981-11-10 00:00:00
-14 1981-11-11 00:00:00
-15 1981-11-12 00:00:00
-16 1981-11-13 00:00:00
-17 1981-11-14 00:00:00
-18 1981-11-15 00:00:00
-19 1981-11-16 00:00:00
-20 1981-11-17 00:00:00
-21 1981-11-18 00:00:00
-22 1981-11-19 00:00:00
-23 1981-11-20 00:00:00
-24 1981-11-21 00:00:00
-25 1981-11-22 00:00:00
-26 1981-11-23 00:00:00
-27 1981-11-24 00:00:00
-28 1981-11-25 00:00:00/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-
-29 1981-11-26 00:00:00
-30 1981-11-27 00:00:00
-31 1981-11-28 00:00:00
-32 1981-11-29 00:00:00
-33 1981-11-30 00:00:00
-34 1981-12-01 00:00:00
-35 1981-12-02 00:00:00
-36 1981-12-03 00:00:00
-37 1981-12-04 00:00:00
-38 1981-12-05 00:00:00
-39 1981-12-06 00:00:00
-40 1981-12-07 00:00:00
-41 1981-12-08 00:00:00
-42 1981-12-09 00:00:00
-43 1981-12-10 00:00:00
-44 1981-12-11 00:00:00
-45 1981-12-12 00:00:00
-46 1981-12-13 00:00:00
-47 1981-12-14 00:00:00
-48 1981-12-15 00:00:00
-49 1981-12-16 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.yaml"...
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.4 
- obs record registered
- next record: 55140
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.55140').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.55140 
- obs record registered
- next record: 108893
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.108893').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.108893 
- obs record registered
- next record: 163443
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.163443').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.163443 
- obs record registered
- next record: 218397
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.218397').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.218397 
- obs record registered
- next record: 271582
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.271582').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.271582 
- obs record registered
- next record: 324721
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.324721').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.324721 
- obs record registered
- next record: 377607
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.377607').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.377607 
- obs record registered
- next record: 430834
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.430834').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.430834 
- obs record registered
- next record: 483626
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.483626').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.483626 
- obs record registered
- next record: 535205
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.535205').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.535205 
- obs record registered
- next record: 589944
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.589944').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.589944 
- obs record registered
- next record: 642475
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.642475').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.642475 
- obs record registered
- next record: 694024
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.694024').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.694024 
- obs record registered
- next record: 748833
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.748833').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.748833 
- obs record registered
- next record: 803389
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.803389').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.803389 
- obs record registered
- next record: 856629
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.856629').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.856629 
- obs record registered
- next record: 910612
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.910612').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.910612 
- obs record registered
- next record: 965476
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.965476').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.965476 
- obs record registered
- next record: 1020450
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1020450').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1020450 
- obs record registered
- next record: 1074651
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1074651').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1074651 
- obs record registered
- next record: 1127994
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1127994').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1127994 
- obs record registered
- next record: 1182110
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1182110').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1182110 
- obs record registered
- next record: 1233619
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1233619').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1233619 
- obs record registered
- next record: 1284930
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1284930').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1284930 
- obs record registered
- next record: 1337484
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1337484').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1337484 
- obs record registered
- next record: 1392464
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1392464').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1392464 
- obs record registered
- next record: 1446248
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1446248').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1446248 /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- obs record registered
- next record: 1499604
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1499604').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1499604 
- obs record registered
- next record: 1552283
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1552283').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1552283 
- obs record registered
- next record: 1606822
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1606822').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1606822 
- obs record registered
- next record: 1658571
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1658571').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1658571 
- obs record registered
- next record: 1712543
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1712543').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1712543 
- obs record registered
- next record: 1766710
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1766710').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1766710 
- obs record registered
- next record: 1819637
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1819637').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1819637 
- obs record registered
- next record: 1870451
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1870451').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1870451 
- obs record registered
- next record: 1921627
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1921627').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1921627 
- obs record registered
- next record: 1975018
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.1975018').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.1975018 
- obs record registered
- next record: 2027134
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2027134').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2027134 
- obs record registered
- next record: 2079285
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2079285').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2079285 
- obs record registered
- next record: 2134131
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2134131').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2134131 
- obs record registered
- next record: 2185735
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2185735').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2185735 
- obs record registered
- next record: 2237453
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2237453').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2237453 
- obs record registered
- next record: 2291618
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2291618').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2291618 
- obs record registered
- next record: 2345783
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2345783').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2345783 
- obs record registered
- next record: 2399221
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2399221').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2399221 
- obs record registered
- next record: 2454054
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2454054').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2454054 
- obs record registered
- next record: 2508671
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2508671').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2508671 
- obs record registered
- next record: 2563249
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2563249').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2563249 
- obs record registered
- next record: 2616063
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.yaml.2616063').to_json" > /local/4759290[6].master15.delcatty.gent.vsc/74560_6_ini.yaml.buffer.json.2616063 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_6_ini.pkl) for station 74560
diff --git a/class4gl/c4gl_setup.o4759290-7 b/class4gl/c4gl_setup.o4759290-7
deleted file mode 100644
index ee5acc9..0000000
--- a/class4gl/c4gl_setup.o4759290-7
+++ /dev/null
@@ -1,469 +0,0 @@
-C4GLJOB_c4gl_path_lib
-C4GLJOB_error_handling
-C4GLJOB_exec
-C4GLJOB_first_YYYYMMDD
-C4GLJOB_last_YYYYMMDD
-C4GLJOB_path_experiments
-C4GLJOB_path_forcing
-C4GLJOB_runtime
-C4GLJOB_split_by
-C4GLJOB_station_id
-C4GLJOB_subset_experiments
-C4GLJOB_subset_forcing
-Executing: python /user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py --global_chunk_number 7 --c4gl_path_lib=/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl --error_handling=dump_on_success --first_YYYYMMDD=19810101 --last_YYYYMMDD=19820101 --path_experiments=/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA/ --path_forcing=/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/ --runtime=from_afternoon_profile --split_by=50 --station_id=74560 --subset_experiments=ini --subset_forcing=morning
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/data_global.py:275: UserWarning: omitting pressure field p and advection
-  warnings.warn('omitting pressure field p and advection')
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/times.py:132: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  enable_cftimeindex)
-/apps/gent/CO7/sandybridge/software/xarray/0.10.8-intel-2018a-Python-3.6.4/lib/python3.6/site-packages/xarray-0.10.8-py3.6.egg/xarray/coding/variables.py:66: SerializationWarning: Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy cftime.datetime objects instead, reason: dates out of range
-  return self.func(self.array[key])
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-Initializing global data
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-setting KGC as KGC from /user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-setting wg as SMsurf from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-setting w2 as SMroot from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-setting EF as EF from /user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-setting fW as fW from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fB as fB from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fH as fH from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc is already in the library.
-setting fTC as fTC from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-setting z0m as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc is already in the library.
-setting z0h as Band1 from /user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-setting wsat as wsat from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wsat.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-setting Ts as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-Warning: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc is already in the library.
-setting Tsoil as stl1 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl1_3hourly_xarray/stl1*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-setting T2 as stl2 from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/stl2_3hourly_xarray/stl2*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-setting cc as tcc from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/tcc_3hourly_xarray/tcc*_3hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-setting wfc as wfc from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wfc.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-setting wwilt as wwp from /user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/wwp.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-setting cveg as fv from /user/data/gent/gvo000/gvo00090/EXT/data/MOD44B/fv.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-setting DSMW as DSMW from /user/data/gent/gvo000/gvo00090/EXT/data/DSMW/FAO_DSMW_DP.nc
-calculating texture
-calculating texture type
-calculating soil parameter
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-setting LAIpixel as LAI from /user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-setting advt_x as advt_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-setting advt_y as advt_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-setting advq_x as advq_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-setting advq_y as advq_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-setting advu_x as advu_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-setting advu_y as advu_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-setting advv_x as advv_x from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-setting advv_y as advv_y from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-setting sp as sp from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-setting wp as w from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-setting t as t from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-setting q as q from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-setting u as u from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc
-opening: /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-setting v as v from /user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc
-getting a list of stations
-defining all_stations_select
-Selecting station by ID
-station numbers included in the whole batch (all chunks): [74560]
-Creating daily timeseries from 1981-01-01 00:00:00  to  1982-01-01 00:00:00
-start looping over chunk
-Writing to:  /user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml
-0 1981-12-17 00:00:00
-1 1981-12-18 00:00:00
-2 1981-12-19 00:00:00
-3 1981-12-20 00:00:00
-4 1981-12-21 00:00:00
-5 1981-12-22 00:00:00
-6 1981-12-23 00:00:00
-7 1981-12-24 00:00:00
-8 1981-12-25 00:00:00
-9 1981-12-26 00:00:00
-10 1981-12-27 00:00:00
-11 1981-12-28 00:00:00
-12 1981-12-29 00:00:00
-13 1981-12-30 00:00:00
-14 1981-12-31 00:00:00
-15 1982-01-01 00:00:00
-pkl file does not exist. I generate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml"...
-refetch_records flag is True. I regenerate "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl" from "/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.yaml".../user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1471: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head['z'] = pd.Series(np.array([2.,self.pars.h ,self.pars.h ]))
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1487: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame.
-Try using .loc[row_indexer,col_indexer] = value instead
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  air_ap_head[column] = ml_mean[column]
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/setup/setup_era.py:246: SettingWithCopyWarning: 
-A value is trying to be set on a copy of a slice from a DataFrame
-
-See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
-  mode=air_ap_mode)
-/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/class4gl.py:1509: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access
-  air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
-
- next record: 4
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.4').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.4 
- obs record registered
- next record: 53111
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.53111').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.53111 
- obs record registered
- next record: 106374
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.106374').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.106374 
- obs record registered
- next record: 159575
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.159575').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.159575 
- obs record registered
- next record: 214479
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.214479').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.214479 
- obs record registered
- next record: 265342
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.265342').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.265342 
- obs record registered
- next record: 320174
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.320174').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.320174 
- obs record registered
- next record: 372684
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.372684').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.372684 
- obs record registered
- next record: 423846
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.423846').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.423846 
- obs record registered
- next record: 476614
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.476614').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.476614 
- obs record registered
- next record: 529909
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.529909').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.529909 
- obs record registered
- next record: 581984
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.581984').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.581984 
- obs record registered
- next record: 634675
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.634675').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.634675 
- obs record registered
- next record: 688831
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.688831').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.688831 
- obs record registered
- next record: 743330
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.743330').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.743330 
- obs record registered
- next record: 797329
-ruby -rjson -ryaml -e "puts YAML.load_file('/local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.yaml.797329').to_json" > /local/4759290[7].master15.delcatty.gent.vsc/74560_7_ini.yaml.buffer.json.797329 
- obs record registered
-writing table file (/user/data/gent/gvo000/gvo00090/vsc42247/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA//74560_7_ini.pkl) for station 74560
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 444d87b..8d91763 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -196,7 +196,7 @@
     records_afternoon.set_index(['STNID','dates'],inplace=True)
     ini_index_dates = records_morning.set_index(['STNID','dates']).index
     records_afternoon = records_afternoon.loc[ini_index_dates]
-    records_afternoon.index = records_morning.index
+    records_afternoon.index = records_morning.inde= run_station_chunkx
 
 experiments = args.experiments.strip(' ').split(' ')
 for expname in experiments:
@@ -233,7 +233,7 @@
             print('starting station chunk number: '\
                   +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
 
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            records_morning_station_chunk = records_morning_station.query('STNID == '+str(current_station.name)+' and chunk == '+str(run_station_chunk)) #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():

From 6fbe86b9e9ac529e21448bb4f34b07e7c2b4ef97 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 19 Sep 2018 15:06:49 +0200
Subject: [PATCH 079/129] Allways sort chunk files in the right way.

---
 class4gl/interface_functions.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 0c0cf32..1de341d 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -397,9 +397,15 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                 end_of_chunks = False
                 station_list_files = glob.glob(path_yaml+'/'+format(STNID,'05d')+'_*_'+subset+'.yaml')
                 station_list_files.sort()
+                chunks = []
                 for station_path_file in station_list_files:
-                    fn = station_path_file.split('/')[-1]
-                    chunk = int(fn.split('_')[1])
+                #    fn = station_path_file.split('/')[-1]
+                    chunks.append(int(fn.split('_')[1]))
+
+                # sort according to chunk number
+                chunks.sort()
+                for chunk in chunks:
+                    fn = glob.glob(path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml')
                     dictfnchunks.append(dict(fn=fn,chunk=chunk))
 
                 # while not end_of_chunks:

From 1f7203595afd97a39ed9a3e1b0c8553407875d2e Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 19 Sep 2018 15:19:05 +0200
Subject: [PATCH 080/129] Allways sort chunk files in the right way.

---
 class4gl/interface_functions.py     | 4 ++--
 class4gl/simulations/simulations.py | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 1de341d..a1a2ad8 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -399,13 +399,13 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                 station_list_files.sort()
                 chunks = []
                 for station_path_file in station_list_files:
-                #    fn = station_path_file.split('/')[-1]
+                    fn = station_path_file.split('/')[-1]
                     chunks.append(int(fn.split('_')[1]))
 
                 # sort according to chunk number
                 chunks.sort()
                 for chunk in chunks:
-                    fn = glob.glob(path_yaml+'/'+format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml')
+                    fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                     dictfnchunks.append(dict(fn=fn,chunk=chunk))
 
                 # while not end_of_chunks:
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 8d91763..ea6b9ba 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -172,6 +172,7 @@
                               subset=args.subset_forcing,
                               refetch_records=False,
                               )
+stop
 
 # note that if runtime is an integer number, we don't need to get the afternoon
 # profiles. 

From dbbe06c08703137818cdcc2365614994db2b1def Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 20 Sep 2018 22:17:57 +0200
Subject: [PATCH 081/129] matplotlib was loaded in a wrong way

---
 class4gl/simulations/batch_simulations.pbs |  2 +-
 class4gl/simulations/update_yaml_old.py    | 20 +++++++++-----------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index 54e3168..92e496c 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -7,7 +7,7 @@
 #PBS -m a
 #PBS -N c4gl_sim
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python Ruby
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
 
diff --git a/class4gl/simulations/update_yaml_old.py b/class4gl/simulations/update_yaml_old.py
index 18b3071..4428cc1 100644
--- a/class4gl/simulations/update_yaml_old.py
+++ b/class4gl/simulations/update_yaml_old.py
@@ -94,8 +94,8 @@
         raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
 
 
-    if not (int(args.split_by) > 0) :
-            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+    # if not (int(args.split_by) > 0) :
+    #         raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
 
     run_station_chunk = None
     print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
@@ -156,9 +156,6 @@
 # os.system('mkdir -p "'+backupdir+'"')
 
 
-
-
-
 for istation,current_station in run_stations.iterrows():
     records_forcing_station = records_forcing.query('STNID == ' +\
                                                     str(current_station.name))
@@ -205,15 +202,15 @@
 
             fn_forcing = \
                     args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                    args.subset_forcing+'.yaml'
             file_forcing = \
                 open(fn_forcing,'r')
             fn_experiment = args.path_experiments+'/'+format(current_station.name,'05d')+'_'+\
-                     args.subset_forcing+'.yaml'
+                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
             file_experiment = \
                 open(fn_experiment,'w')
             fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
-                     args.subset_forcing+'.pkl'
+                     str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
 
             # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
             #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
@@ -235,11 +232,14 @@
                   str(len(records_forcing_station_chunk) )+\
                   ' (station total: ',str(len(records_forcing_station)),')')  
             
-        
                 c4gli_forcing = get_record_yaml(file_forcing, 
                                                 record_forcing.index_start, 
                                                 record_forcing.index_end,
                                                 mode=args.mode)
+                seltropo = (c4gli_forcing.air_ac.p > c4gli_forcing.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                profile_tropo = c4gli_forcing.air_ac[seltropo]
+                mean_advt_tropo = np.mean(profile_tropo.advt_x +profile_tropo.advt_y )
+                c4gli_forcing.update(source='era-interim',pars={'advt_tropo':mean_advt_tropo})
                 
                 #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
                 
@@ -249,8 +249,6 @@
                         globaldata, 
                         only_keys=args.global_keys.strip(' ').split(' ')
                     )
-        
-
 
                 c4gli_forcing.dump(file_experiment)
                     

From 64e1c8bbe4590b240732d7470f8acfa3a235aff0 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 20 Sep 2018 22:19:18 +0200
Subject: [PATCH 082/129] matplotlib was loaded in a wrong way

---
 class4gl/setup/batch_setup_era.pbs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/setup/batch_setup_era.pbs b/class4gl/setup/batch_setup_era.pbs
index 7735a7a..1ee9b9b 100644
--- a/class4gl/setup/batch_setup_era.pbs
+++ b/class4gl/setup/batch_setup_era.pbs
@@ -7,7 +7,7 @@
 #PBS -m a
 #PBS -N c4gl_setup
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray matplotlib Pysolar PyYAML netcdf4-python Ruby
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
 

From 3a3259ffd51ada6c911b1a2127b0e6473b3773a3 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Fri, 21 Sep 2018 17:33:30 +0200
Subject: [PATCH 083/129] fix in interface_multi so that correct chunk is
 loaded in case of next_record()

---
 class4gl/class4gl.py                    |   2 +
 class4gl/interface/interface.py         |  62 ++++---
 class4gl/interface/interface_koeppen.py |   8 +-
 class4gl/interface_multi.py             |  22 +++
 class4gl/simulations/simulations.py     |   9 +-
 class4gl/simulations/update_yaml_old.py | 227 ++++++++++++------------
 6 files changed, 191 insertions(+), 139 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 7afe040..7095194 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -99,6 +99,8 @@ def __init__(self):
          'dqdt':'kg/kg/h',
          'BR': '-',
          'EF': '-',
+         'advt_x': 'K/s',
+         'advt_y': 'K/s',
 }
 
 class class4gl_input(object):
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 2e1c1be..8ef0038 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -1,4 +1,3 @@
-
 import numpy as np
 
 import pandas as pd
@@ -18,6 +17,7 @@
 parser.add_argument('--show_control_parameters',default=True)
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--figure_filename_2',default=None)
+parser.add_argument('--experiments_labels',default=None)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -91,6 +91,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     return np.sqrt(rmse_temp)
 
 
+if args.experiments_labels is None:
+    keylabels = args.experiments.strip().split(' ')
+else:
+    keylabels = args.experiments_labels.strip().split(';')
 
 
 
@@ -134,7 +138,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     symbols = ['*','x','+']
     dias = {}
     
-    for varkey in ['h','theta','q']:                                                    
+    varkeys = ['h','theta','q']
+    for varkey in varkeys:                                                    
         axes[varkey] = fig.add_subplot(2,3,i)                                       
         #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
     
@@ -342,7 +347,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
         sns.set_style('whitegrid')
         #sns.set()
-        fig = pl.figure(figsize=(12,8))
+        fig = pl.figure(figsize=(11,7))
         i = 1
         axes = {}
         data_all = pd.DataFrame()
@@ -364,22 +369,22 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         data_all = pd.DataFrame()
 
         tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
-        tempdatamodstats["source"] = "soundings"
-        tempdatamodstats["source_index"] = "soundings"
+        tempdatamodstats["source"] = "Soundings"
+        tempdatamodstats["source_index"] = "Soundings"
 
         ini_ref = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
         tempdataini_this = pd.DataFrame(ini_ref.copy())
 
         tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
         tempdatamodstats['STNID']= tempdataini_this.STNID
-        tempdatamodstats['source']= "soundings"
-        tempdatamodstats['source_index']= "soundings"
+        tempdatamodstats['source']= "Soundings"
+        tempdatamodstats['source_index']= "Soundings"
         tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
         #print('hello')
 
         tempdataini = pd.DataFrame(ini_ref)
-        tempdataini["source"] = "soundings"
-        tempdataini["source_index"] = "soundings"
+        tempdataini["source"] = "Soundings"
+        tempdataini["source_index"] = "Soundings"
         tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
         #print('hello2')
 
@@ -390,21 +395,22 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #print(data_all.shape)
 
             
-        for key in list(args.experiments.strip().split(' ')):
+        for ikey,key in enumerate(list(args.experiments.strip().split(' '))):
+            keylabel = keylabels[ikey]
 
             tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
             tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
             tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
             tempdatamodstats['STNID']= tempdataini_this.STNID
-            tempdatamodstats['source']= key
-            tempdatamodstats['source_index']= key
+            tempdatamodstats['source']= keylabel
+            tempdatamodstats['source_index']= keylabel
             tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
             #print('hello')
 
 
             tempdataini = pd.DataFrame(ini_ref.copy())
-            tempdataini["source"] = key 
-            tempdataini["source_index"] = key
+            tempdataini["source"] = keylabel
+            tempdataini["source_index"] = keylabel
             tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
     
 
@@ -429,22 +435,25 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         for varkey in ['h','theta','q']:
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            data_all['advt_tropo'] = data_input['advt_tropo']
             #print(data_input.shape)
             #print(data_all.shape)
         #print('hello6')
         #print(data_all.columns)
         #print('hello7')
+        i = 1
         for varkey in ['h','theta','q']:
-            input_keys =['wg','cc','advt']
+            input_keys =['wg','advt_tropo']
             for input_key in input_keys:
                 varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
 
                 #print('hello8')
                 #print(data_input.shape)
                 #print(data_all.shape)
+                units['advt_tropo'] = 'K/s'
                 input_key_full = input_key + "["+units[input_key]+"]"
-                data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2)
-                data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=10,precision=2,)
+                data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2)
+                data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2,)
                 #print('hello9')
                 #print(data_input.shape)
                 #print(data_all.shape)
@@ -462,7 +471,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 #print('hello10')
                 
                 sns.set(style="ticks", palette="pastel")
-                ax = fig.add_subplot(3,2,i)
+                ax = fig.add_subplot(3,len(input_keys),i)
                 #sns.violinplot(x=input_key_full,y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
                 
                 #ax.set_title(input_key_full)
@@ -476,15 +485,28 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                      ax.get_legend().set_visible(False)
                 #     plt.legend('off')
                 if i >= 5:
+                    #ax.set_xticklabels(labels=['['+str(i)+','+str(i+1)+'[' for i in list(range(0,7))]+['[7,8]'])
+
                     ax.set_xticklabels(labels=ax.get_xticklabels(),rotation=45.,ha='right')
                 else:
                     ax.set_xticklabels([])
                     ax.set_xlabel('')
 
-                if np.mod(i,len(input_keys)) == 0:
+                if np.mod(i,len(input_keys)) != 0:
                     ax.set_yticklabels([])
                     ax.set_ylabel('')
 
+                if varkey == 'q':
+                    ticks = ticker.FuncFormatter(lambda x, pos:
+                                                 '{0:g}'.format(x*1000.))
+                    #ax.xaxis.set_major_formatter(ticks)
+                    ax.yaxis.set_major_formatter(ticks)
+
+                    ax.set_ylabel(latex['d'+varkey+'dt']+' ['+r'$10^{-3} \times $'+units['d'+varkey+'dt']+']')        
+                else:
+                    ax.set_ylabel(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')        
+
+
                 for j,artist in enumerate(ax.artists):
                     if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
                         # Set the linecolor on the artist to the facecolor, and set the facecolor to None
@@ -521,7 +543,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 #sns.despine(offset=10, trim=True)
                 i +=1
         fig.tight_layout()
-        fig.subplots_adjust( bottom=0.18,left=0.09,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
+        fig.subplots_adjust( bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.05,hspace=0.05,)
         if args.figure_filename_2 is not None:
             fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
         fig.show()
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index 9a71bf8..1cf134d 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -328,11 +328,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         ikey = 0
         key = list(args.experiments.strip().split(' '))[ikey]
         keylabel = keylabels[ikey]
-        cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
-        clearsky = (cc < 0.05)
+        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        # clearsky = (cc < 0.05)
     
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
     
     
         nbins=40       
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 9509387..907c0f0 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -364,6 +364,9 @@ def update_station(self):
         self.update_record()
 
     def next_record(self,event=None,jump=1):
+        
+        old_chunk =  self.frames['profiles']['current_record_chunk']
+
         with suppress(StopIteration):
             (self.frames['profiles']['STNID'] , \
             self.frames['profiles']['current_record_chunk'] , \
@@ -382,6 +385,25 @@ def next_record(self,event=None,jump=1):
 
         for key in self.frames['profiles'].keys():
             self.frames['stats'][key] = self.frames['profiles'][key]
+
+        # chunk file has changed! So we need to open it!
+        if self.frames['profiles']['current_record_chunk'] != old_chunk:
+
+            STNID = self.frames['profiles']['STNID']
+            chunk = self.frames['profiles']['current_record_chunk']
+
+
+
+            if 'current_station_file_ini' in self.frames['profiles'].keys():
+                self.frames['profiles']['current_station_file_ini'].close()
+            self.frames['profiles']['current_station_file_ini'] = \
+                open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
+
+            if 'current_station_file_mod' in self.frames['profiles'].keys():
+                self.frames['profiles']['current_station_file_mod'].close()
+            self.frames['profiles']['current_station_file_mod'] = \
+                open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+
         self.update_record()
 
     def prev_record(self,event=None):
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index ea6b9ba..4b81d34 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -247,8 +247,13 @@
                                                     record_morning.index_start, 
                                                     record_morning.index_end,
                                                     mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+
+                    # add tropospheric parameters on advection and subsidence
+                    seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                    profile_tropo = c4gli_morning.air_ac[seltropo]
+                    for var in ['t','q','u','v',]:
+                        mean_adv_tropo = np.mean(profile_tropo['adv'+var+'_x']+profile_tropo['adv'+var+'_y'] )
+                        c4gli_morning.update(source='era-interim',pars={'adv'+var+'_tropo':mean_adv_tropo})
                     
                     
                     if args.runtime == 'from_afternoon_profile':
diff --git a/class4gl/simulations/update_yaml_old.py b/class4gl/simulations/update_yaml_old.py
index 4428cc1..aafed46 100644
--- a/class4gl/simulations/update_yaml_old.py
+++ b/class4gl/simulations/update_yaml_old.py
@@ -87,7 +87,6 @@
                                          refetch_records=False,
                                          )
 
-os.system('mkdir -p '+args.path_experiments)
 # only run a specific chunck from the selection
 if args.global_chunk_number is not None:
     if args.station_chunk_number is not None:
@@ -156,120 +155,122 @@
 # os.system('mkdir -p "'+backupdir+'"')
 
 
-for istation,current_station in run_stations.iterrows():
-    records_forcing_station = records_forcing.query('STNID == ' +\
-                                                    str(current_station.name))
-
-    records_forcing_station_chunk = records_forcing.query('STNID == ' +\
-                                                    str(current_station.name)+\
-                                                   '& chunk == '+str(run_station_chunk))
-    print('lenrecords_forcing_station: ',len(records_forcing_station))
-    print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
-    print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
+for EXP in args.experiments.strip().split(" "):
+    os.system('mkdir -p '+args.path_experiments+'/'+EXP+'/')
+    for istation,current_station in run_stations.iterrows():
+        records_forcing_station = records_forcing.query('STNID == ' +\
+                                                        str(current_station.name))
     
-    # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
-    #     print("warning: outside of profile number range for station "+\
-    #           str(current_station)+". Skipping chunk number for this station.")
-    if len(records_forcing_station_chunk) == 0:
-        print("warning: outside of profile number range for station "+\
-              str(current_station)+". Skipping chunk number for this station.")
-    else:
-        # normal case
-        if ((int(args.split_by) > 0) or \
-            (os.path.isfile(args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                 str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'))):
-            fn_forcing = \
-                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-            file_forcing = \
-                open(fn_forcing,'r')
-            fn_experiment = args.path_experiments+'/'+format(current_station.name,'05d')+'_'+\
-                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-            file_experiment = \
-                open(fn_experiment,'w')
-            fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                     str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
-
-            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
-            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
-            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+        records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+                                                        str(current_station.name)+\
+                                                       '& chunk == '+str(run_station_chunk))
+        print('lenrecords_forcing_station: ',len(records_forcing_station))
+        print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
+        print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
+        
+        # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
+        #     print("warning: outside of profile number range for station "+\
+        #           str(current_station)+". Skipping chunk number for this station.")
+        if len(records_forcing_station_chunk) == 0:
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
         else:
-            print("\
-Warning. We are choosing chunk 0 without specifying it in filename.    \
- No-chunk naming will be removed in the future."\
-                 )
-
-            fn_forcing = \
-                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                    args.subset_forcing+'.yaml'
-            file_forcing = \
-                open(fn_forcing,'r')
-            fn_experiment = args.path_experiments+'/'+format(current_station.name,'05d')+'_'+\
-                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-            file_experiment = \
-                open(fn_experiment,'w')
-            fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
-                     str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
-
-            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
-            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
-            #          args.subset_forcing+'.pkl'
-
-        onerun = False
-        print('starting station chunk number: '\
-              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
-
-        #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-
-        # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
-        #                                                 str(current_station.name)+\
-        #                                                '& chunk == '+str(run_station_chunk))
-        isim = 0
-        for (STNID,chunk,index),record_forcing in records_forcing_station_chunk.iterrows():
-                print('starting '+str(isim+1)+' out of '+\
-                  str(len(records_forcing_station_chunk) )+\
-                  ' (station total: ',str(len(records_forcing_station)),')')  
-            
-                c4gli_forcing = get_record_yaml(file_forcing, 
-                                                record_forcing.index_start, 
-                                                record_forcing.index_end,
-                                                mode=args.mode)
-                seltropo = (c4gli_forcing.air_ac.p > c4gli_forcing.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
-                profile_tropo = c4gli_forcing.air_ac[seltropo]
-                mean_advt_tropo = np.mean(profile_tropo.advt_x +profile_tropo.advt_y )
-                c4gli_forcing.update(source='era-interim',pars={'advt_tropo':mean_advt_tropo})
-                
-                #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
+            # normal case
+            if ((int(args.split_by) > 0) or \
+                (os.path.isfile(args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'))):
+                fn_forcing = \
+                        args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                        str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_forcing = \
+                    open(fn_forcing,'r')
+                fn_experiment = args.path_experiments+'/'+EXP+'/'+format(current_station.name,'05d')+'_'+\
+                         str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_experiment = \
+                    open(fn_experiment,'w')
+                fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                         str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+    
+                # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+                #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+                #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+            else:
+                print("\
+    Warning. We are choosing chunk 0 without specifying it in filename.    \
+     No-chunk naming will be removed in the future."\
+                     )
+    
+                fn_forcing = \
+                        args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+                        args.subset_forcing+'.yaml'
+                file_forcing = \
+                    open(fn_forcing,'r')
+                fn_experiment = args.path_experiments+'/'+EXP+'/'+format(current_station.name,'05d')+'_'+\
+                         str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_experiment = \
+                    open(fn_experiment,'w')
+                fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
+                         str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+    
+                # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+                #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+                #          args.subset_forcing+'.pkl'
+    
+            onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+    
+            #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+    
+            # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+            #                                                 str(current_station.name)+\
+            #                                                '& chunk == '+str(run_station_chunk))
+            isim = 0
+            for (STNID,chunk,index),record_forcing in records_forcing_station_chunk.iterrows():
+                    print('starting '+str(isim+1)+' out of '+\
+                      str(len(records_forcing_station_chunk) )+\
+                      ' (station total: ',str(len(records_forcing_station)),')')  
                 
-                if args.global_keys is not None:
-                    print(args.global_keys.strip(' ').split(' '))
-                    c4gli_forcing.get_global_input(
-                        globaldata, 
-                        only_keys=args.global_keys.strip(' ').split(' ')
-                    )
-
-                c4gli_forcing.dump(file_experiment)
+                    c4gli_forcing = get_record_yaml(file_forcing, 
+                                                    record_forcing.index_start, 
+                                                    record_forcing.index_end,
+                                                    mode=args.mode)
+                    seltropo = (c4gli_forcing.air_ac.p > c4gli_forcing.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                    profile_tropo = c4gli_forcing.air_ac[seltropo]
+                    mean_advt_tropo = np.mean(profile_tropo.advt_x +profile_tropo.advt_y )
+                    c4gli_forcing.update(source='era-interim',pars={'advt_tropo':mean_advt_tropo})
                     
+                    #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
                     
-                onerun = True
-                isim += 1
-
-
-        file_forcing.close()
-        file_experiment.close()
-
-        if onerun:
-            # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
-            # if os.path.isfile(fn_forcing_pkl):
-            #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
-            # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
-            # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
-            records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
-                                                       args.path_experiments,\
-                                                       getchunk = int(run_station_chunk),\
-                                                       subset=args.subset_forcing,
-                                                       refetch_records=True,
-                                                       )
-
+                    if args.global_keys is not None:
+                        print(args.global_keys.strip(' ').split(' '))
+                        c4gli_forcing.get_global_input(
+                            globaldata, 
+                            only_keys=args.global_keys.strip(' ').split(' ')
+                        )
+    
+                    c4gli_forcing.dump(file_experiment)
+                        
+                        
+                    onerun = True
+                    isim += 1
+    
+    
+            file_forcing.close()
+            file_experiment.close()
+    
+            if onerun:
+                # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
+                # if os.path.isfile(fn_forcing_pkl):
+                #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
+                # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+                # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+                records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
+                                                           args.path_experiments+'/'+EXP+'/',\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset=args.subset_forcing,
+                                                           refetch_records=True,
+                                                           )
+    

From 4a41cd59241f4b4d7a3a79dd59d0ee387e3d3fe7 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 24 Sep 2018 12:48:12 +0200
Subject: [PATCH 084/129] fix station lat lon in setup_era.py.

---
 class4gl/setup/setup_era.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 96ce88f..8c7e967 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -189,8 +189,10 @@
     print(iDT,DT)
     c4gli = class4gl_input(debug_level=logging.INFO)
     c4gli.update(source='STNID'+format(STNID,'05d'),\
-                 pars=dict(latitude = float(run_station.latitude), \
+                 pars=dict(latitude  = float(run_station.latitude), \
                            longitude = float(run_station.longitude),\
+                           latitude  = float(run_station.latitude), \
+                           lon       = 0.,\
                            STNID=int(STNID)))
 
     lSunrise, lSunset = GetSunriseSunset(c4gli.pars.latitude,0.,DT)

From cddbd892d962649ca26484188ec28b63119b4699 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 24 Sep 2018 12:51:11 +0200
Subject: [PATCH 085/129] fix station lat lon in setup_era.py.

---
 class4gl/setup/setup_era.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 8c7e967..4e58c1d 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -191,9 +191,12 @@
     c4gli.update(source='STNID'+format(STNID,'05d'),\
                  pars=dict(latitude  = float(run_station.latitude), \
                            longitude = float(run_station.longitude),\
-                           latitude  = float(run_station.latitude), \
+                           lat       = float(run_station.latitude), \
+                           # Note the difference between longitude and lon. The
+                           # lon variable should always be zero because we are
+                           # always working in solar time for running CLASS
                            lon       = 0.,\
-                           STNID=int(STNID)))
+                           STNID     = int(STNID)))
 
     lSunrise, lSunset = GetSunriseSunset(c4gli.pars.latitude,0.,DT)
 

From b00c3acc484144aa85c7957ab914c12679700ebd Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 24 Sep 2018 14:31:53 +0200
Subject: [PATCH 086/129] remove stop statement.

---
 class4gl/simulations/simulations.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 4b81d34..bdb890f 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -172,7 +172,6 @@
                               subset=args.subset_forcing,
                               refetch_records=False,
                               )
-stop
 
 # note that if runtime is an integer number, we don't need to get the afternoon
 # profiles. 

From 5dd62f0bd18f316329c484917b2f991d966e0f0d Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 26 Sep 2018 11:36:13 +0200
Subject: [PATCH 087/129] fix simulations.py and implement visualization of 3d
 fields in interface_multi.py

---
 class4gl/interface/interface.py     |   5 +-
 class4gl/interface_multi.py         | 206 ++++++++++++++++++++--------
 class4gl/simulations/simulations.py |   2 +-
 3 files changed, 154 insertions(+), 59 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 8ef0038..8c42180 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -1,3 +1,4 @@
+
 import numpy as np
 
 import pandas as pd
@@ -435,7 +436,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         for varkey in ['h','theta','q']:
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
-            data_all['advt_tropo'] = data_input['advt_tropo']
+            
+        data_input['advt_tropo'] = - data_input['advt_tropo']
+        data_all['advt_tropo'] = data_input['advt_tropo']
             #print(data_input.shape)
             #print(data_all.shape)
         #print('hello6')
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 907c0f0..b9da41c 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -603,47 +603,77 @@ def plot(self):
 
 
         """ buttons definitions """
+        button_height = 0.055
+        button_hspace = 0.005
+        button_width  = 0.095
+        button_wspace = 0.005
+        buttons_upper = 0.28
+        buttons_left = 0.25
+
+        button_types = ['dataset','datetime','level','station','record']
         
-        label = 'bprev_dataset'
-        axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous dataset')
-        btns[label].on_clicked(self.prev_dataset)
+        for ibutton_type,button_type in enumerate(button_types):
+            label='bprev'+button_type
+            axes[label] = fig.add_axes([
+                buttons_left,\
+                buttons_upper-ibutton_type*(button_height+button_hspace),\
+                button_width,\
+                button_height\
+                                                     ])
+            btns[label] = Button(axes[label], 'Previous '+button_type)
+            btns[label].on_clicked(getattr(self, 'prev_'+button_type))
+
+            label='bnext'+button_type
+            axes[label] = fig.add_axes([
+                buttons_left+button_width+button_wspace,\
+                buttons_upper-ibutton_type*(button_height+button_hspace),\
+                button_width,\
+                button_height\
+                                                     ])
+            btns[label] = Button(axes[label], 'Next '+button_type)
+            btns[label].on_clicked(getattr(self, 'next_'+button_type))
 
-        label = 'bnext_dataset'
-        axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next dataset')
-        btns[label].on_clicked(self.next_dataset)
+        
+        # label = 'bprev_dataset'
+        # axes[label] = fig.add_axes([0.25,0.28,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Previous dataset')
+        # btns[label].on_clicked(self.prev_dataset)
+
+        # label = 'bnext_dataset'
+        # axes[label] = fig.add_axes([0.35,0.28,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Next dataset')
+        # btns[label].on_clicked(self.next_dataset)
 
-        label = 'bprev_datetime'
-        axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous datetime')
-        btns[label].on_clicked(self.prev_datetime)
+        # label = 'bprev_datetime'
+        # axes[label] = fig.add_axes([0.25,0.20,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Previous datetime')
+        # btns[label].on_clicked(self.prev_datetime)
 
-        label = 'bnext_datetime'
-        axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next datetime')
-        btns[label].on_clicked(self.next_datetime)
+        # label = 'bnext_datetime'
+        # axes[label] = fig.add_axes([0.35,0.20,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Next datetime')
+        # btns[label].on_clicked(self.next_datetime)
 
 
-        label = 'bprev_station'
-        axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous station')
-        btns[label].on_clicked(self.prev_station)
+        # label = 'bprev_station'
+        # axes[label] = fig.add_axes([0.25,0.12,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Previous station')
+        # btns[label].on_clicked(self.prev_station)
 
-        label = 'bnext_station'
-        axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next station')
-        btns[label].on_clicked(self.next_station)
+        # label = 'bnext_station'
+        # axes[label] = fig.add_axes([0.35,0.12,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Next station')
+        # btns[label].on_clicked(self.next_station)
 
-        label = 'bprev_record'
-        axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Previous record')
-        btns[label].on_clicked(self.prev_record)
+        # label = 'bprev_record'
+        # axes[label] = fig.add_axes([0.25,0.04,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Previous record')
+        # btns[label].on_clicked(self.prev_record)
 
-        label = 'bnext_record'
-        axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
-        btns[label] = Button(axes[label], 'Next record')
-        btns[label].on_clicked(self.next_record)
+        # label = 'bnext_record'
+        # axes[label] = fig.add_axes([0.35,0.04,0.10,0.075])
+        # btns[label] = Button(axes[label], 'Next record')
+        # btns[label].on_clicked(self.next_record)
 
 
         # self.nstatsview = nstatsview
@@ -831,15 +861,68 @@ def prev_dataset(self,event=None):
         ikey = (ikey - 1) % len(self.frames['worldmap']['inputkeys'])
         self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey])
 
-
     def sel_dataset(self,inputkey):
         self.frames['worldmap']['inputkey'] = inputkey
         self.frames['stats']['inputkey'] = self.frames['worldmap']['inputkey'] # this is used for showing the percentiles per station in color.
         self.goto_datetime_worldmap(
             self.frames['profiles']['current_record_ini'].datetime.to_pydatetime(),
             'after')# get nearest datetime of the current dataset to the profile
+
+        print('seldata0')
+        if 'level' not in self.frames['worldmap'].keys():
+            levels = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev']
+            self.frames['worldmap']['level'] = np.max(levels)
+            print('seldata1')
+
+            minlev = np.min(levels)
+            maxlev = np.max(levels)
+            curlev = self.frames['worldmap']['level']
+            curlev = np.max([curlev,np.min(levels)])
+            curlev = np.min([curlev,np.max(levels)])
+            print('seldata2')
+
+            self.frames['worldmap']['level'] = curlev
+            print('seldata3')
+
+
+        print('seldata4')
+        self.sel_level(self.frames['worldmap']['level'])
+
+
+
+    def sel_level(self,level):
+
+        if 'lev' not in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims):
+            raise ValueError('lev dimension not in dataset '+self.frames['worldmap']['inputkey'])
+
+        print('seldata5')
+
+
+        if level > (np.max(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
+            raise ValueError('Level '+str(level)+' exceed those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
+        if level < (np.min(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
+            raise ValueError('Level '+str(level)+' is lower than those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
+        print('seldata6')
+        self.frames['worldmap']['level'] = level
+
+        print(level)
         if "fig" in self.__dict__.keys():
             self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
+
+        print('seldata7')
+
+    def next_level(self,event=None,jump=1):
+        if 'lev' not in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims.keys()):
+            raise ValueError('lev dimension not in dataset'+self.frames['worldmap']['inputkey'])
+        levels =  self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev']
+        level = self.frames['worldmap']['level']
+        level =  ((level + jump - min(levels)) % (max(levels)-min(levels))) + min(levels)
+        self.sel_level(level)
+
+    def prev_level(self,event=None):
+        self.next_level(jump=-1)
+
+        #self.frames['worldmap']['level'] = level: 
        
     # def prev_station(self,event=None):
     #     self.istation = (self.istation - 1) % self.stations.shape[0]
@@ -869,11 +952,14 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
         if self.globaldata is not None:
             if (only is None) or ('worldmap' in only):
                 globaldata = self.globaldata
+                print('hello0')
                 if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
                     globaldata.datasets[frames['worldmap']['inputkey']].browse_page(time=frames['worldmap']['DT'])
                     datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page.isel(time = frames['worldmap']['iDT'])
                 else:
                     datasetxr = globaldata.datasets[frames['worldmap']['inputkey']].page
+                if 'lev' in datasetxr.dims:
+                    datasetxr = datasetxr.isel(lev=self.frames['worldmap']['level'])
                 keystotranspose = ['lat','lon']
                 for key in dict(datasetxr.dims).keys():
                     if key not in keystotranspose:
@@ -881,16 +967,18 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
                 datasetxr = datasetxr.transpose(*keystotranspose)
                 datasetxr = datasetxr.sortby('lat',ascending=False)
+                print('hello1')
 
                 lonleft = datasetxr['lon'].where(datasetxr.lon > 180.,drop=True) 
                 lonleft = lonleft - 360.
+                print('hello2')
                 lonright = datasetxr['lon'].where(datasetxr.lon <= 180.,drop=True) 
                 label = 'worldmap'
                 axes[label].clear()
                 axes[label].lon = xr.concat([lonleft,lonright],'lon').values
                 axes[label].lat = np.sort(globaldata.datasets[frames['worldmap']['inputkey']].page.variables['lat'].values)[::-1] #sortby('lat',ascending=False).values
+                print('hello3')
 
-        if (only is None) or ('worldmap' in only):
             #if 'axmap' not in self.__dict__ :
             #    self.axmap = self.fig.add_axes([0.39,0.5,0.34,0.5])
             #else:
@@ -917,36 +1005,40 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
            #if 'time' in list(dict(self.datasets[self.axes['worldmap'].focus['key']].variables[self.axes['worldmap'].focus['key']].dims).keys()):
 
 
-            fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
-            fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+                fieldleft =  datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon > 180.,drop=True) 
+                fieldright = datasetxr[frames['worldmap']['inputkey']].where(datasetxr.lon <= 180.,drop=True) 
+                print('hello4')
 
-            field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
-            if 'lev' in field.dims:
-                field = field.isel(lev=-1)
+                field =xr.concat([fieldleft,fieldright],'lon') #.sortby('lat',ascending=False).values
 
-            #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
-            axes[label].axis('off')
+                #np.concatenate([viewframe.datasets['cc']['cc'].page.isel(time=0).where(viewframe.datasets['cc'].lon > 180).values,viewframe.datasets['cc']['cc'].isel(time=0).where(viewframe.datasets['cc'].lon <= 180).values],axis=1)
+                axes[label].axis('off')
+                print('hello5')
 
-            from matplotlib import cm
-            axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
-            
-            title=frames['worldmap']['inputkey']
-            if globaldata is not None: 
-                if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
-                    title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
-            axes[label].set_title(title)
-
-            label ='worldmap_colorbar'
-            axes[label].clear()
-            axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
+                from matplotlib import cm
+                axes[label].fields[label] = axes[label].imshow(field[:,:],interpolation='none',cmap = cm.viridis )
+                
+                print('hello6')
+                title=frames['worldmap']['inputkey']
+                if globaldata is not None: 
+                    if 'time' in globaldata.datasets[frames['worldmap']['inputkey']].page.variables[frames['worldmap']['inputkey']].dims:
+                        title = title+' ['+pd.to_datetime(frames['worldmap']['DT']).strftime("%Y/%m/%d %H:%M") +'UTC]'
+                axes[label].set_title(title)
+                print('hello7')
+
+                label ='worldmap_colorbar'
+                axes[label].clear()
+                axes[label].fields[label] = fig.colorbar(axes['worldmap'].fields['worldmap'],cax=axes[label],orientation='horizontal',label=frames['worldmap']['inputkey']+' ['+self.units[frames['worldmap']['inputkey']]+']')
 
 
-            # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
-            # x,y = self.gmap(lons,lats)
-            # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
-            # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+                # lons, lats = np.meshgrid(axes[label].lon,axes[label].lat)
+                # x,y = self.gmap(lons,lats)
+                # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
+                # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
 
         if (self.path_obs is not None) and \
+           (self.frames['worldmap']['inputkey'] in self.frames['stats']['records_all_stations_ini_pct'].keys()) and \
+           (self.path_obs is not None) and \
            ((only is None) or ('stats' in only) or ('stats_lightupdate' in only)):
 
             statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index bdb890f..92f0f01 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -196,7 +196,7 @@
     records_afternoon.set_index(['STNID','dates'],inplace=True)
     ini_index_dates = records_morning.set_index(['STNID','dates']).index
     records_afternoon = records_afternoon.loc[ini_index_dates]
-    records_afternoon.index = records_morning.inde= run_station_chunkx
+    records_afternoon.index = records_morning.index
 
 experiments = args.experiments.strip(' ').split(' ')
 for expname in experiments:

From 857173881ec63389869e3753b4492eb91eff1bb6 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 2 Oct 2018 10:34:19 +0200
Subject: [PATCH 088/129] add switch for calculating tropospheric variables

---
 class4gl/simulations/simulations.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 92f0f01..3746cd4 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -20,6 +20,7 @@
 parser.add_argument('--last_station_row')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=None)
 parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
                                                       # to initialize with.
                                                       # Most common options are
@@ -248,11 +249,17 @@
                                                     mode='ini')
 
                     # add tropospheric parameters on advection and subsidence
-                    seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
-                    profile_tropo = c4gli_morning.air_ac[seltropo]
-                    for var in ['t','q','u','v',]:
-                        mean_adv_tropo = np.mean(profile_tropo['adv'+var+'_x']+profile_tropo['adv'+var+'_y'] )
-                        c4gli_morning.update(source='era-interim',pars={'adv'+var+'_tropo':mean_adv_tropo})
+                    # (for diagnosis)
+
+                    if args.diag_tropo is not None:
+                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                        profile_tropo = c4gli_morning.air_ac[seltropo]
+                        for var in diag_tropo:#['t','q','u','v',]:
+                            if var[:3] == 'adv':
+                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                            else:
+                                print("warning: tropospheric variable "+var+" not recognized")
                     
                     
                     if args.runtime == 'from_afternoon_profile':

From 5e1c6b0d9da85f96f453e795572db4283c6ba4f4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 3 Oct 2018 10:10:32 +0200
Subject: [PATCH 089/129] put warning messages instead of info messages when
 input is missing

---
 class4gl/class4gl.py | 113 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 109 insertions(+), 4 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 7095194..204990b 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -1271,6 +1271,104 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
             #             air_ach=pd.DataFrame({'wrho':list(wrho)}))
 
 
+    # def get_idx_in_dataset(self,
+    #                        globaldata,
+    #                        latspan = 0.5):
+    #                        lonspan = 0.5):
+    #     """ 
+    #     purpose:
+    #         get the xarray indices that are representative between the starting and
+    #         stopping time of the class simulations
+
+    #     input:
+    #         self: definition of the class input
+    #         globaldata: book of class4gl global dataset
+    #         key: key variable in the global dataset
+    #         latspan: the span of the lat coordinate
+    #         lonspan: the span of the lon coordinate
+
+
+    #     output:
+    #         itimes: time coordinates during of the class simulatios
+    #         lats: 
+    #         lons:
+    #         """
+
+    #     # first, we browse to the correct file that has the current time
+    #     if 'time' in list(globaldata.datasets[key].page[key].dims):
+    #         globaldata.datasets[key].browse_page(time=classdatetime)
+    #     
+    #     if (globaldata.datasets[key].page is not None):
+    #         # find longitude and latitude coordinates
+    #         ilats = (np.abs(globaldata.datasets[key].page.lat -
+    #                         self.pars.latitude) < latspan)
+    #         # In case we didn't find any latitude in the allowed range, we take the closest one.
+    #         if len(ilats) == 0:
+    #             ilats = np.where(\
+    #                      globaldata.datasets[key].page.lat.isin(
+    #                       globaldata.datasets[key].page.lat.sel(lat=self.pars.latitude)\
+    #                      ))[0]
+    #         ilons = (np.abs(globaldata.datasets[key].page.lon -
+    #                         self.pars.longitude) < lonspan)
+    #         # In case we didn't find any longitude in the allowed range, we take the closest one.
+    #         if len(ilon) == 0:
+    #             ilon = np.where(\
+    #                      globaldata.datasets[key].page.lon.isin(
+    #                       globaldata.datasets[key].page.lon.sel(lon=self.pars.longitude)\
+    #                      ))[0]
+    #         
+    #         # if we have a time dimension, then we look up the required timesteps during the class simulation
+    #         if 'time' in list(globaldata.datasets[key].page[key].dims):
+
+    #             DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
+    #             
+    #             idatetime = np.where((DIST) == np.min(DIST))[0][0]
+    #             #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime)
+    #             if key not in ['t','u','v','q']:
+    #                 if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ):
+    #                     idatetime += 1
+    #             
+    #             DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime_stop))
+    #             idatetimeend = np.where((DIST) == np.min(DIST))[0][0]
+    #             #print('idatetimeend',idatetimeend,globaldata.datasets[key].variables['time'].values[idatetime],classdatetimeend)
+    #             if ((globaldata.datasets[key].page.variables['time'].values[idatetimeend] > classdatetime_stop)):
+    #                 idatetimeend -= 1
+    #             idatetime = np.min((idatetime,idatetimeend))
+    #             #for gleam, we take the previous day values
+
+    #             # in case of soil temperature, we take the exact
+    #             # timing (which is the morning)
+    #             if key in ['t','u','v','q']:
+    #                 idatetimeend = idatetime
+    #             
+    #             itimes = range(idatetime,idatetimeend+1)
+    #             #print(key,'itimes',itimes)
+
+
+    #             # In case we didn't find any correct time, we take the
+    #             # closest one.
+    #             if len(itimes) == 0:
+
+
+    #                 classdatetimemean = \
+    #                     np.datetime64(self.pars.datetime_daylight + \
+    #                     dt.timedelta(seconds=int(self.pars.runtime/2.)
+    #                                 ))
+
+    #                 dstimes = globaldata.datasets[key].page.time
+    #                 time = dstimes.sel(time=classdatetimemean,method='nearest')
+    #                 itimes = (globaldata.datasets[key].page.time ==
+    #                           time)
+    #                 
+    #         else:
+    #             # we don't have a time coordinate so it doesn't matter
+    #             # what itimes is
+    #             itimes = 0
+
+    #         #multiplication by 1 is a trick to remove the array()-type in case of zero dimensions (single value).
+    #       return itimes,ilats,ilons
+
+
     def query_source(self,var):
         """ 
         purpose:
@@ -1368,16 +1466,17 @@ def check_source_globaldata(self):
                                          ignore_keys=[])
             if not source_ok:
                 source_globaldata_ok = False
+                self.logger.warning('something was wrong with the profiles')
         
             # Additional check: we exclude desert-like
             if ((self.pars.cveg is None) or pd.isnull(self.pars.cveg)):
                 source_globaldata_ok = False
-                self.logger.info('cveg  is invalid: ('+str(self.pars.cveg)+')')
+                self.logger.warning('cveg  is invalid: ('+str(self.pars.cveg)+')')
             if ((self.pars.LAI is None) or pd.isnull(self.pars.LAI)):
                 source_globaldata_ok = False
-                self.logger.info('LAI  is invalid: ('+str(self.pars.LAI)+')')
+                self.logger.warning('LAI  is invalid: ('+str(self.pars.LAI)+')')
             elif self.pars.cveg < 0.02:
-                self.logger.info('cveg  is too low: ('+str(self.pars.cveg)+')')
+                self.logger.warning('cveg  is too low: ('+str(self.pars.cveg)+')')
                 source_globaldata_ok = False
 
         return source_globaldata_ok
@@ -1632,13 +1731,19 @@ def get_lifted_index(self,timestep=-1):
 
 
 
-def blh(HAGL,THTV,WSPD,RiBc = 0.5,RiBce = 0.25):
+def blh(HAGL,THTV,WSPD,RiBc = 0.31,RiBce = 0.08):
     """ Calculate mixed-layer height from temperature and wind speed profile
 
         Input:
             HAGL: height coordinates [m]
             THTV: virtual potential temperature profile [K]
             WSPD: wind speed profile [m/s]
+            RIBc: critical Richardson Number. 
+                According to Zhang et al., 2014 (GMD), it should  equal to 0.24
+                for strongly stable boundary layers, 0.31 for weakly stable
+                boundary layers, and 0.39 for unstable boundary layers. By
+                default, it is set to the average of the three cases and an
+                error RiBce value that comprises all values.
 
         Output:
             BLH: best-guess mixed-layer height

From 3b639535fdbcefc55a825cf87c1ac6b0e6865231 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 3 Oct 2018 11:51:03 +0200
Subject: [PATCH 090/129] fix era profile input in case of missing (advection)
 values

---
 class4gl/class4gl.py           | 160 +++++++++++++++++----------------
 class4gl/setup/setup_global.py |  18 +++-
 2 files changed, 97 insertions(+), 81 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 204990b..24a4076 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -1484,6 +1484,10 @@ def check_source_globaldata(self):
     def mixed_layer_fit(self,air_ap,source,mode):
         """ 
             Purpose: 
+                make a profile fit and write it to the air_ap section of the
+                class4gl_input object (self).
+            Input:
+                air_ap: input profile
 
 
         """
@@ -1500,18 +1504,18 @@ def mixed_layer_fit(self,air_ap,source,mode):
 
 
         # Therefore, determine the sounding that are valid for 'any' column 
-        is_valid = ~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)
-        #is_valid = (air_ap.z >= 0)
+        # is_valid = ~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)
+        is_valid = (air_ap.z >= 0)
         # # this is an alternative pipe/numpy method
         # (~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)).pipe(np.where)[0]
         valid_indices = air_ap.index[is_valid].values
-        #print(valid_indices)
+        print(valid_indices)
 
 
         hvalues = {}
         if len(valid_indices) > 0:
             #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile
-            hvalues['h_b'] ,hvalues['h_u'],hvalues['h_l']  = blh(air_ap.z,air_ap.thetav,np.sqrt(air_ap.u**2. + air_ap.u**2.))
+            hvalues['h_b'] ,hvalues['h_u'],hvalues['h_l']  = blh(air_ap.z,air_ap.thetav,np.sqrt(air_ap.u**2. + air_ap.v**2.))
             
             hvalues['h_b']  = np.max((hvalues['h_b'] ,10.))
             hvalues['h_u']  = np.max((hvalues['h_u'] ,10.)) #upper limit of mixed layer height
@@ -1526,10 +1530,11 @@ def mixed_layer_fit(self,air_ap,source,mode):
             hvalues['h_l']  =np.nan
             hvalues['h_e']  =np.nan
             hvalues['h']    =np.nan
+
         self.update(source='fit_from_'+source,pars=hvalues)
 
         if np.isnan(self.pars.h ):
-            self.pars.Ps  = nan
+            self.pars.Ps  = np.nan
 
         mlvalues = {}
         if ~np.isnan(self.pars.h ):
@@ -1731,78 +1736,6 @@ def get_lifted_index(self,timestep=-1):
 
 
 
-def blh(HAGL,THTV,WSPD,RiBc = 0.31,RiBce = 0.08):
-    """ Calculate mixed-layer height from temperature and wind speed profile
-
-        Input:
-            HAGL: height coordinates [m]
-            THTV: virtual potential temperature profile [K]
-            WSPD: wind speed profile [m/s]
-            RIBc: critical Richardson Number. 
-                According to Zhang et al., 2014 (GMD), it should  equal to 0.24
-                for strongly stable boundary layers, 0.31 for weakly stable
-                boundary layers, and 0.39 for unstable boundary layers. By
-                default, it is set to the average of the three cases and an
-                error RiBce value that comprises all values.
-
-        Output:
-            BLH: best-guess mixed-layer height
-            BLHu: upper limit of mixed-layer height
-            BLHl: lower limit of mixed-layer height
-
-    """
-    
-    #initialize error BLH
-    BLHe = 0.
-    eps = 2.#security limit
-    iTHTV_0 = np.where(~np.isnan(THTV))[0]
-    if len(iTHTV_0) > 0:
-        iTHTV_0 = iTHTV_0[0]
-        THTV_0 = THTV[iTHTV_0]
-    else:
-        THTV_0 = np.nan
-
-    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.
-    
-    
-    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
-    #RiB - RiBc = 0
-    
-    #best guess of BLH
-    
-    #print("RiB: ",RiB)
-    #print("RiBc: ",RiBc)
-    
-    
-    
-    BLHi = np.where(RiB > RiBc)[0]
-    if len(BLHi ) > 0:
-        BLHi = BLHi[0]
-        #print("BLHi: ",BLHi)
-        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-        
-        # possible error is calculated as the difference height levels used for the interpolation
-        BLHu = np.max([BLH,HAGL[BLHi]-eps])
-        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])
-        # calculate an alternative BLH based on another critical Richardson number (RiBce):
-        BLHi =np.where(RiB > RiBce)[0]
-        if len(BLHi ) > 0:    
-            BLHi = BLHi[0]
-                
-            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
-            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
-            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])
-            
-            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
-            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
-        
-        else:
-            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-
-    else:
-        BLH,BLHu,BLHl = np.nan, np.nan,np.nan
-        
-    return BLH,BLHu,BLHl
 
 
 
@@ -1890,6 +1823,79 @@ def get_lifted_index(startp,startt,startqv,pres,theta,endp=50000.):
     #print(endtemp)
     return endtempenv - endtemp
 
+def blh(HAGL,THTV,WSPD,RiBc = 0.31,RiBce = 0.08):
+    """ Calculate mixed-layer height from temperature and wind speed profile
+
+        Input:
+            HAGL: height coordinates [m]
+            THTV: virtual potential temperature profile [K]
+            WSPD: wind speed profile [m/s]
+            RIBc: critical Richardson Number. 
+                According to Zhang et al., 2014 (GMD), it should equal 0.24
+                for strongly stable boundary layers, 0.31 for weakly stable
+                boundary layers, and 0.39 for unstable boundary layers. By
+                default, it is set to the average of the three cases and an
+                error RiBce value that comprises all values.
+
+        Output:
+            BLH: best-guess mixed-layer height
+            BLHu: upper limit of mixed-layer height
+            BLHl: lower limit of mixed-layer height
+
+    """
+    
+    #initialize error BLH
+    BLHe = 0.
+    eps = 2.#security limit
+    iTHTV_0 = np.where(~np.isnan(THTV))[0]
+    if len(iTHTV_0) > 0:
+        iTHTV_0 = iTHTV_0[0]
+        THTV_0 = THTV[iTHTV_0]
+    else:
+        THTV_0 = np.nan
+
+    RiB = 9.81/THTV_0 * ( THTV - THTV_0) * HAGL / np.clip(WSPD,a_min=0.1,a_max=None)**2.
+
+    
+    
+    #RiB = 9.81/THTV_0 * ( THTV[i-1] +  (HGHT[i] - HGHT[i-1])/ - THTV_0) * HAGL / WSPD**2
+    #RiB - RiBc = 0
+    
+    #best guess of BLH
+    
+    #print("RiB: ",RiB)
+    #print("RiBc: ",RiBc)
+    
+    
+    
+    BLHi = np.where(RiB > RiBc)[0]
+    if len(BLHi ) > 0:
+        BLHi = BLHi[0]
+        #print("BLHi: ",BLHi)
+        BLH = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+        
+        # possible error is calculated as the difference height levels used for the interpolation
+        BLHu = np.max([BLH,HAGL[BLHi]-eps])
+        BLHl = np.min([BLH,HAGL[BLHi-1]+eps])
+        # calculate an alternative BLH based on another critical Richardson number (RiBce):
+        BLHi =np.where(RiB > RiBce)[0]
+        if len(BLHi ) > 0:    
+            BLHi = BLHi[0]
+                
+            BLHa = (HAGL[BLHi] - HAGL[BLHi-1])/(RiB[BLHi] -RiB[BLHi-1]) * (RiBc - RiB[BLHi-1]) + HAGL[BLHi-1]
+            BLHu = np.max([BLHu,HAGL[BLHi]-eps])
+            BLHl = np.min([BLHl,HAGL[BLHi-1]+eps])
+            
+            BLHu = np.max([BLHu,BLH + abs(BLH-BLHa)])
+            BLHl = np.min([BLHl,BLH - abs(BLH-BLHa)])
+        
+        else:
+            BLH,BLHu,BLHl = np.nan, np.nan,np.nan
+
+    else:
+        BLH,BLHu,BLHl = np.nan, np.nan,np.nan
+        
+    return BLH,BLHu,BLHl
 
 class class4gl(model):
     """ the extension of the 'class model' class """
diff --git a/class4gl/setup/setup_global.py b/class4gl/setup/setup_global.py
index 08adbb4..b5396a5 100644
--- a/class4gl/setup/setup_global.py
+++ b/class4gl/setup/setup_global.py
@@ -175,9 +175,14 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
                 logic = dict()
                 logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+
+                # The sounding should have taken place no earlier than 3 hours
+                # before sunrise. Note that the actual simulation only starts at
+                # sunrise (specified by ldatetime_daylight), so the ABL cooling
+                # during the time before sunrise is ignored by the simulation.
                 logic['daylight'] = \
-                    ((c4gli.pars.ldatetime_daylight - 
-                      c4gli.pars.ldatetime).total_seconds()/3600. <= 4.)
+                    ((c4gli.pars.ldatetime - 
+                      c4gli.pars.lSunrise).total_seconds()/3600. >= -3.)
                 
                 logic['springsummer'] = (c4gli.pars.theta > 278.)
                 
@@ -245,10 +250,15 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
                         logic_afternoon['afternoon'] = \
                             (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                        # The sounding should have taken place at least 2 hours
+                        # before sunset. This is to minimize the chance that a
+                        # stable boundary layer (yielding very low mixed-layer
+                        # heights) has formed, which cannot be represented by
+                        # CLASS.
                         logic_afternoon['daylight'] = \
                           ((c4gli_afternoon.pars.ldatetime - \
-                            c4gli_afternoon.pars.ldatetime_daylight \
-                           ).total_seconds()/3600. <= 0.)
+                            c4gli_afternoon.pars.lSunset \
+                           ).total_seconds()/3600. <= -2.)
 
 
                         le3000_afternoon = \

From 182d94e28e56901960489a0623a0d723a66e63e8 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 3 Oct 2018 12:01:21 +0200
Subject: [PATCH 091/129] fix wind speed in blh calculation

---
 class4gl/class4gl.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 24a4076..9da9ba7 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -1505,6 +1505,11 @@ def mixed_layer_fit(self,air_ap,source,mode):
 
         # Therefore, determine the sounding that are valid for 'any' column 
         # is_valid = ~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)
+        
+        if len(~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)) == 0.:
+            self.logger.warning('Warning, not all profile input is valid!  Please check input fields!', air_ap)
+
+
         is_valid = (air_ap.z >= 0)
         # # this is an alternative pipe/numpy method
         # (~np.isnan(air_ap).any(axis=1) & (air_ap.z >= 0)).pipe(np.where)[0]

From 92192ad12b3da78b115243923ea36feb59d4e658 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 3 Oct 2018 12:16:45 +0200
Subject: [PATCH 092/129] check input source in get_era.py

---
 class4gl/setup/setup_era.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py
index 4e58c1d..1674d80 100644
--- a/class4gl/setup/setup_era.py
+++ b/class4gl/setup/setup_era.py
@@ -250,6 +250,8 @@
                          source=air_ap_input_source,
                          mode=air_ap_mode)
 
+    if not c4gli.check_source_globaldata():
+        print('Warning: some input sources appear invalid')
 
     c4gli.dump(file_ini)
 

From 36b0d2eba54b5e5a809703f59552e299d290ae79 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 3 Oct 2018 16:50:01 +0200
Subject: [PATCH 093/129] full update

---
 class4gl/era_advection.py | 160 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 class4gl/era_advection.py

diff --git a/class4gl/era_advection.py b/class4gl/era_advection.py
new file mode 100644
index 0000000..a6c8e8d
--- /dev/null
+++ b/class4gl/era_advection.py
@@ -0,0 +1,160 @@
+
+
+    self.get_idx_in_dataset(globaldata)
+    xrin = globaldata['t'].page['t']
+
+    # 3. prepare artificial xarray input datasets which will allow to make gradient calculations along the W-E directions with xarray on the fly with just one rule
+    # 3.1 we make 'left' and 'right' datasets which will be substracted for calculating gradients
+    # dataset of ilon = 1,2,3...
+    xrleft = xrin.sel(lon=xrin.lon[:-25])
+    # dataset of ilon = 0,1,2...
+    xrright = xrin.sel(lon=xrin.lon[25:])
+    
+    # 3.2 The output will be on a staggered grid with the lon coordinate to the half-level calculated just hereafer in 3.3. 
+    #     Still, we will need to full-level longitude values in the xarray dataset for calculating the grid spacing for the gradients.
+    xrright['flon'] = (('lon',), xrright.lon.values)
+    xrleft['flon'] = (('lon',), xrleft.lon.values)
+    
+    
+    # 3.3 In order to make xarray doing the calculation advection correctly, the 'left' and 'right' values that we need on each grid cell requires equal underlying longitude coordinate values. 
+    xrleft.lon.values = (xrleft.lon.values+xrright.lon.values)/2.
+    xrright.lon.values = xrleft.lon.values
+
+
+    # 4. We do similar preparations for S-N direction. Please note that the advection results for S-N and W-E direction are on different grids, that are also different from the original grid.
+    xrbottom = xrin.sel(lat=xrin.lat[:-25])
+    xrtop = xrin.sel(lat=xrin.lat[25:])
+    xrtop['flat'] = (('lat',), xrtop.lat.values)
+    xrbottom['flat'] = (('lat',), xrbottom.lat.values)
+    xrbottom.lat.values = (xrbottom.lat.values+xrtop.lat.values)/2.
+    xrtop.lat.values = xrbottom.lat.values
+    
+    
+    dia_earth = 40000000.
+
+    # for input variables (COSMO naming)
+    VARS_COSMO = ['QV','U','V','T']
+    # for output variables (ECMWF naming)
+    vars_ECMWF = ['q','u','v','t']
+
+
+    # some netcdf polishing: add units and description to netcdf output. 
+    units = dict(
+          advq_x='kg kg-1 s-1', advq_y='kg kg-1 s-1',
+          advt_x='K s-1',       advt_y='K s-1',
+          advu_x='m s-2',       advu_y='m s-2',
+          advv_x='m s-2',       advv_y='m s-2',
+          divU_x='s-1',         divU_y='s-1',
+                )
+    long_names = dict(
+          advq_x='zonal advection of specific humidity',        
+          advt_x='zonal advection of heat',                     
+          advu_x='zonal advection of zonal wind component',     
+          advv_x='zonal advection of meridional wind component',
+          divU_x='horizontal wind divergence in the zonal direction',
+          advq_y='meridional advection of specific humidity',
+          advt_y='meridional advection of heat',                            
+          advu_y='meridional advection of zonal wind component',            
+          advv_y='meridional advection of meridional wind component',
+          divU_y='horizontal wind divergence in the meridional direction',
+                )
+    #print((xrtop.flat - xrbottom.flat)/360.*dia_earth)
+    # 5. loop over each variable
+
+    # make the selections
+    xrleft_sel = xrleft.isel(time=itimes,lat=ilats,lon=ilons)
+    xrright_sel = xrright.isel(time=itimes,lat=ilats,lon=ilons)
+    xrtop_sel = xrtop.isel(time=itimes,lat=ilats,lon=ilons)
+    xrbottom_sel = xrbottom.isel(time=itimes,lat=ilats,lon=ilons)
+
+    for ivar,var in enumerate(vars_ECMWF):
+        VAR = VARS_COSMO[ivar]
+    
+
+        dims = globaldata.datasets[key].page[key].dims
+        namesmean = list(dims)
+        namesmean.remove('lev')
+        idxmean = [dims.index(namemean) for namemean in namesmean]
+        # over which dimensions we take a mean:
+        dims = globaldata.datasets[key].page[key].dims
+        namesmean = list(dims)
+        namesmean.remove('lev')
+        idxmean = [dims.index(namemean) for namemean in namesmean]
+        #6. actual calculation for the W-E direction
+        #######################################################
+        print('calculation of advection')
+        #######################################################
+
+        if var == 't':
+            self.update(source='era-interim_calc',pars={'adv'+var+'_x':\
+
+
+        ( - (xrright_sel.p**(Rdcp) * xrright_sel.u*xrright[VAR] -
+             xrleft_sel.p**(Rdcp) * xrleft_sel.u*rleft[VAR]) /\
+                ((xrright.flon - xrleft.flon) /360.*dia_earth *np.cos(xrright.lat/180.*np.pi)) /\
+                ((xrright.p**(Rdcp)+xrleft.p**(Rdcp))/2.)
+
+
+
+        self.update(source='era-interim_calc',pars={'adv'+var+'_x':\
+                               (- (xrright_sel.u*xrright_sel[var] - 
+                                   xrleft_sel.u * xrleft_sel[var]) 
+                                  /\
+                                  ((xrright_sel.flon - xrleft_sel.flon) /360.*dia_earth 
+                                   *np.cos(xrright_sel.lat/180.*np.pi))).mean(axis=tuple(idxmean)).values *1.\
+        self.update(source='era-interim_calc',pars={'adv'+var+'_x':\
+                                +\
+                               (- (xrtop_sel.u*xrtop_sel[var] - 
+                                   xrbottom_sel.u * xrbottom_sel[var]) 
+                                  /\
+                                  ((xrtop_sel.flon - xrbottom_sel.flon)\
+                                   /360.*dia_earth).mean(axis=tuple(idxmean)).values *1.
+                                                   })
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+          # over which dimensions we take a mean:
+          dims = globaldata.datasets[key].page[key].dims
+          namesmean = list(dims)
+          namesmean.remove('lev')
+          idxmean = [dims.index(namemean) for namemean in namesmean]
+          
+          value = \
+          globaldata.datasets[key].page[key].isel(time=itimes,
+                                                  lat=ilats,lon=ilons).mean(axis=tuple(idxmean)).values * 1.
+
+          # Ideally, source should be equal to the datakey of globaldata.library 
+          # or globaldata.datasets (eg., DSMW, IGBP-DIS, ERA-INTERIM etc.) 
+          #  but therefore the globaldata class requires a revision to make this work
+          self.update(source='globaldata',air_ac=pd.DataFrame({key:list(value)})) 
+

From 345e33363846ce718df26e05f870183c6b44c20d Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 24 Oct 2018 18:46:02 +0200
Subject: [PATCH 094/129] lots of updates, including load performance increase
 with unified pkl data

---
 class4gl/class4gl.py                          |    3 +-
 class4gl/interface/interface.py               |    8 +-
 class4gl/interface/interface_koeppen.py       |   87 +-
 class4gl/interface/interface_new_koeppen.py   | 1056 +++++++++++++++++
 class4gl/interface/interface_stations.py      |  121 +-
 class4gl/interface/test.test                  |  276 +++++
 class4gl/interface_functions.py               |  241 ++--
 class4gl/interface_multi.py                   |   52 +-
 class4gl/model.py                             |   14 +-
 ...up_global.py => batch_setup_global_old.py} |    0
 class4gl/setup/batch_setup_igra.pbs           |   30 +
 class4gl/setup/batch_setup_igra.py            |  187 +++
 .../{simulations => setup}/batch_update.py    |   73 +-
 class4gl/setup/setup_bllast.py                |    4 +-
 class4gl/setup/setup_global_afternoon.py      |    4 +-
 class4gl/setup/setup_goamazon.py              |    4 +-
 class4gl/setup/setup_humppa.py                |    4 +-
 .../setup/{setup_global.py => setup_igra.py}  |   48 +-
 class4gl/setup/setup_igra_pkl.py              |  359 ++++++
 class4gl/setup/update_setup.py                |  327 +++++
 class4gl/simulations/batch_simulations.py     |    2 +-
 class4gl/simulations/simulations.py           |   21 +-
 class4gl/simulations/simulations_iter.py      |  266 +++--
 .../simulations/simulations_iter_bowen.py     |  475 ++++++++
 .../{update_yaml.py => simulations_veg.py}    |  177 ++-
 class4gl/simulations/simulations_wwilt_wfc.py |   75 +-
 class4gl/simulations/update_yaml_old.py       |  276 -----
 27 files changed, 3515 insertions(+), 675 deletions(-)
 create mode 100644 class4gl/interface/interface_new_koeppen.py
 create mode 100644 class4gl/interface/test.test
 rename class4gl/setup/{batch_setup_global.py => batch_setup_global_old.py} (100%)
 create mode 100644 class4gl/setup/batch_setup_igra.pbs
 create mode 100644 class4gl/setup/batch_setup_igra.py
 rename class4gl/{simulations => setup}/batch_update.py (71%)
 rename class4gl/setup/{setup_global.py => setup_igra.py} (91%)
 create mode 100644 class4gl/setup/setup_igra_pkl.py
 create mode 100644 class4gl/setup/update_setup.py
 create mode 100644 class4gl/simulations/simulations_iter_bowen.py
 rename class4gl/simulations/{update_yaml.py => simulations_veg.py} (64%)
 delete mode 100644 class4gl/simulations/update_yaml_old.py

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 9da9ba7..e0a307a 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -913,7 +913,6 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
         # without lev argument), so that we can check afterwards whether the
         # data is well-fetched or not.
 
-
         for key in keys:
             if not ((key in globaldata.datasets) and \
                 (globaldata.datasets[key].page is not None) and \
@@ -967,7 +966,7 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                             idatetime = np.min((idatetime,idatetimeend))
                             #for gleam, we take the previous day values
 
-                            # in case of soil temperature, we take the exact
+                            # in case of soil temperature or profile temperature, we take the exact
                             # timing (which is the morning)
                             if key in ['t','u','v','q']:
                                 idatetimeend = idatetime
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 8c42180..b895ea4 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -135,8 +135,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     axes = {}         
     axes_taylor = {}         
     
-    colors = ['r','g','b','m']
-    symbols = ['*','x','+']
+    colors = ['r','g','b','m','y','c']
+    symbols = ['*','x','+','o']
     dias = {}
     
     varkeys = ['h','theta','q']
@@ -437,7 +437,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
             
-        data_input['advt_tropo'] = - data_input['advt_tropo']
+        data_input['advt_tropo'] = data_input['advt_tropo'] * 3600.
         data_all['advt_tropo'] = data_input['advt_tropo']
             #print(data_input.shape)
             #print(data_all.shape)
@@ -453,7 +453,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                 #print('hello8')
                 #print(data_input.shape)
                 #print(data_all.shape)
-                units['advt_tropo'] = 'K/s'
+                units['advt_tropo'] = 'K/h'
                 input_key_full = input_key + "["+units[input_key]+"]"
                 data_all[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2)
                 data_input[input_key_full] = pd.cut(x=data_input[input_key].values,bins=8,precision=2,)
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index 1cf134d..ec77bb8 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -1,4 +1,3 @@
-
 import numpy as np
 
 import pandas as pd
@@ -128,24 +127,48 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       refetch_records=False
                     )
 
-
 key = args.experiments.strip(' ').split(' ')[0]
 xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
 koeppenlookuptable = pd.DataFrame()
 koeppenlookuptable['KGCID'] = pd.Series(xrkoeppen['KGCID'])
 
 
-kgccolors = {
-    'Dfa':['navy','white'],
-    'Cfb':['green','white']       ,
-    'BSk':['tan','black']      ,
-    'Csb':['lightgreen','black'] ,     
-    'Cfa':['darkgreen','white']  ,    
-    'BWh':['orange','black']      ,
-    'Aw' :['pink','black'],
-    'Dwc':['rebeccapurple','white'] ,    
-    'Dfb':['darkviolet','white']    , 
-}
+
+
+KGCID=    ['Af', 'Am', 'As', 'Aw', 'BSh', 'BSk', 'BWh', 'BWk', 'Cfa', 'Cfb','Cfc', 'Csa', 'Csb', 'Csc', 'Cwa','Cwb', 'Cwc', 'Dfa', 'Dfb', 'Dfc','Dfd', 'Dsa', 'Dsb', 'Dsc', 'Dsd','Dwa', 'Dwb', 'Dwc', 'Dwd', 'EF','ET', 'Ocean'] 
+KGCcolors=["#960000", "#FF0000", "#FF6E6E", "#FFCCCC", "#CC8D14", "#CCAA54", "#FFCC00", "#FFFF64", "#007800", "#005000", "#003200", "#96FF00", "#00D700", "#00AA00", "#BEBE00", "#8C8C00", "#5A5A00", "#550055", "#820082", "#C800C8", "#FF6EFF", "#646464", "#8C8C8C", "#BEBEBE", "#E6E6E6", "#6E28B4", "#B464FA", "#C89BFA", "#C8C8FF", "#6496FF", "#64FFFF", "#F5FFFF"]
+
+def brightness(rrggbb):
+    """ W3C brightness definition
+        input:
+            hexadecimal color in the format:
+            #RRGGBB
+        output: value between 0 and 1
+    """
+    print(rrggbb)
+    rr = int(rrggbb[1:3],16)/int('FF',16)
+    gg = int(rrggbb[3:5],16)/int('FF',16)
+    bb = int(rrggbb[5:7],16)/int('FF',16)
+    #rr = math.floor(rrggbb/10000.)
+    #gg = math.floor((rrggbb - rr*10000.)/100.)
+    #bb = rrggbb - rr*10000 - gg*100
+    return (rr * 299. + gg * 587. + bb * 114.) / 1000.
+
+kgccolors = {}
+for iKGC,KGCname in enumerate(KGCID):
+    kgccolors[KGCname] = [KGCcolors[iKGC],'white' if (brightness(KGCcolors[iKGC])<0.5) else 'black']
+
+# kgccolors = {
+#     'Dfa':['navy','white'],
+#     'Cfb':['green','white']       ,
+#     'BSk':['tan','black']      ,
+#     'Csb':['lightgreen','black'] ,     
+#     'Cfa':['darkgreen','white']  ,    
+#     'BWh':['orange','black']      ,
+#     'Aw' :['pink','black'],
+#     'Dwc':['rebeccapurple','white'] ,    
+#     'Dfb':['darkviolet','white']    , 
+# }
 kgcnames = {
     'Dfa':'snow \n fully humid \n hot summer',
     'Cfb':'green'       ,
@@ -181,12 +204,20 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] =  \
     c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID'])
 
+print('sort the climate classes according to the amount ')
 koeppenlookuptable['amount'] = ""
+
+exclude_koeppen = ['Dfc','Cwb']
+
 for ikoeppen,koeppen in koeppenlookuptable.iterrows():
-    print(ikoeppen,':',koeppen)
-    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
-    print(np.sum(kgc_select))
-    koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
+
+    if koeppen['KGCID'] not in exclude_koeppen:
+        print(ikoeppen,':',koeppen)
+        kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
+        print(np.sum(kgc_select))
+        koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
+    else:
+        koeppenlookuptable.iloc[ikoeppen]['amount'] = 0
 
 koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
 koeppenlookuptable = koeppenlookuptable[:9]
@@ -288,6 +319,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
             mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
             obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+
+            print ('filtering classes (showing bad performance)', exclude_koeppen,' from results!')
+            filter_classes = ~(c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(exclude_koeppen))
+            mod = mod.loc[filter_classes]
+            obs = obs.loc[filter_classes]
             x, y = obs.values,mod.values
             print(key,len(obs.values))
     
@@ -331,9 +367,15 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
         # clearsky = (cc < 0.05)
     
+        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+    
         mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
         obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
-    
+        print ('filtering classes (showing bad performance)', exclude_koeppen,' from results!')
+        filter_classess = ~(c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(exclude_koeppen))
+        mod = mod.loc[filter_classes]
+        obs = obs.loc[filter_classes]
     
         nbins=40       
         x, y = obs.values,mod.values
@@ -522,6 +564,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         data_all = pd.DataFrame()
 
         tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        
+
         tempdatamodstats["source"] = "soundings"
         tempdatamodstats["source_index"] = "soundings"
 
@@ -553,6 +597,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
             tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
             tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+
             tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
             tempdatamodstats['STNID']= tempdataini_this.STNID
             tempdatamodstats['source']= keylabel
@@ -577,6 +622,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             #print('hello5')
 
 
+            print ('filtering classes (showing bad performance)', exclude_koeppen,' from results!')
             # data[varkey] = tempdatamodstats['d'+varkey+'dt']
             data_all = pd.concat([data_all,tempdatamodstats],axis=0)
             data_input = pd.concat([data_input, tempdataini],axis=0)
@@ -589,6 +635,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
             data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
             data_all['KGCname'] = data_input['KGCname']
+
+
+
+
             #print(data_input.shape)
             #print(data_all.shape)
         # xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
@@ -609,6 +659,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             #print('hello9')
             #print(data_input.shape)
             #print(data_all.shape)
+            print ('Excluding extreme values from the classes plots')
             qvalmax = data_all[varkey_full].quantile(0.999)
             qvalmin = data_all[varkey_full].quantile(0.001)
             select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py
new file mode 100644
index 0000000..a7bd245
--- /dev/null
+++ b/class4gl/interface/interface_new_koeppen.py
@@ -0,0 +1,1056 @@
+import numpy as np
+import pandas as pd
+import sys
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False)
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--show_control_parameters',default=True)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--figure_filename_2',default=None)
+parser.add_argument('--experiments_labels',default=None)
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+import xarray as xr
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+if args.experiments_labels is None:
+    keylabels = args.experiments.strip().split(' ')
+else:
+    keylabels = args.experiments_labels.strip().split(';')
+
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+if bool(args.load_globaldata):
+    # iniitialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+c4gldata = {}
+for key in args.experiments.strip(' ').split(' '):
+    
+    c4gldata[key] = c4gl_interface_soundings( \
+                      args.path_experiments+'/'+key+'/',\
+                      args.path_forcing+'/',\
+                      globaldata,\
+                      refetch_records=False
+                    )
+
+sns.reset_orig()
+key = args.experiments.strip(' ').split(' ')[0]
+xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
+koeppenlookuptable = pd.DataFrame()
+koeppenlookuptable['KGCID'] = pd.Series(xrkoeppen['KGCID'])
+
def abline(slope, intercept, axis):
    """Draw the line ``y = slope * x + intercept`` on *axis* as 'k--'.

    The two endpoints span the union of the current x- and y-limits of
    *axis*, so a 1:1 reference line always crosses the whole plot area.
    """
    xlim = axis.get_xlim()
    ylim = axis.get_ylim()
    lo = np.min([xlim[0], ylim[0]])
    hi = np.max([xlim[1], ylim[1]])
    x_vals = np.array([lo, hi])
    y_vals = intercept + slope * x_vals
    axis.plot(x_vals, y_vals, 'k--')
+
# Koeppen-Geiger class identifiers and their conventional map colours
# (two parallel, index-aligned lists; 'Ocean' last).
KGCID=    ['Af', 'Am', 'As', 'Aw', 'BSh', 'BSk', 'BWh', 'BWk', 'Cfa', 'Cfb','Cfc', 'Csa', 'Csb', 'Csc', 'Cwa','Cwb', 'Cwc', 'Dfa', 'Dfb', 'Dfc','Dfd', 'Dsa', 'Dsb', 'Dsc', 'Dsd','Dwa', 'Dwb', 'Dwc', 'Dwd', 'EF','ET', 'Ocean'] 
KGCcolors=["#960000", "#FF0000", "#FF6E6E", "#FFCCCC", "#CC8D14", "#CCAA54", "#FFCC00", "#FFFF64", "#007800", "#005000", "#003200", "#96FF00", "#00D700", "#00AA00", "#BEBE00", "#8C8C00", "#5A5A00", "#550055", "#820082", "#C800C8", "#FF6EFF", "#646464", "#8C8C8C", "#BEBEBE", "#E6E6E6", "#6E28B4", "#B464FA", "#C89BFA", "#C8C8FF", "#6496FF", "#64FFFF", "#F5FFFF"]
+
def brightness(rrggbb):
    """W3C perceived brightness of a '#RRGGBB' hexadecimal colour.

    Parameters
    ----------
    rrggbb : str
        Colour in the format '#RRGGBB'.

    Returns
    -------
    float
        Brightness between 0 (black) and 1 (white), using the W3C
        luminance weights 299/587/114 for R/G/B.
    """
    # Fix: dropped the leftover debug print(rrggbb) that wrote one line to
    # stdout for every colour in the lookup-table loop below.
    rr = int(rrggbb[1:3], 16) / int('FF', 16)
    gg = int(rrggbb[3:5], 16) / int('FF', 16)
    bb = int(rrggbb[5:7], 16) / int('FF', 16)
    return (rr * 299. + gg * 587. + bb * 114.) / 1000.
+
# Per-class [face colour, label colour]: white labels on dark backgrounds,
# black on light ones, decided by the W3C brightness of the face colour.
kgccolors = {
    classname: [KGCcolors[idx],
                'white' if brightness(KGCcolors[idx]) < 0.5 else 'black']
    for idx, classname in enumerate(KGCID)
}
+
+# kgccolors = {
+#     'Dfa':['navy','white'],
+#     'Cfb':['green','white']       ,
+#     'BSk':['tan','black']      ,
+#     'Csb':['lightgreen','black'] ,     
+#     'Cfa':['darkgreen','white']  ,    
+#     'BWh':['orange','black']      ,
+#     'Aw' :['pink','black'],
+#     'Dwc':['rebeccapurple','white'] ,    
+#     'Dfb':['darkviolet','white']    , 
+# }
# Human-readable label per Koeppen class; only a few are customized and the
# remainder default to the class ID itself in the loop below.
kgcnames = {
    'Dfa':'snow \n fully humid \n hot summer',
    'Cfb':'green'       ,
    'BSk':'4'      ,
    'Csb':'5'      ,
    'Cfa':'darkgreen' ,     
    'BWh':'6'      ,
    'Aw' :'7'     ,
    'Dwc':'8'     ,
    'Dfb':'9'     ,
    #'Dfa':'',
}
# Fill in defaults for every class present in the Koeppen dataset.
# Fix: the original loop variable was also named KGCID, clobbering the
# module-level KGCID list of class identifiers defined above.
for kgc_id in list(pd.Series(xrkoeppen['KGCID'])):
    if kgc_id not in kgcnames.keys():
        kgcnames[kgc_id] = kgc_id
    if kgc_id not in kgccolors.keys():
        kgccolors[kgc_id] = ['k','k']
+
+
# Attach face colour, label colour and display name to every row of the
# Koeppen lookup table.
koeppenlookuptable['color'] = ""
koeppenlookuptable['textcolor'] = ""
koeppenlookuptable['name'] = ""
for ikoeppen,koeppen in koeppenlookuptable.iterrows():
    print(ikoeppen)
    print(koeppen.KGCID)
    print(kgccolors[koeppen.KGCID])
    # Fix: write cells with .loc[row, col] instead of the original chained
    # frame[col].loc[row] = ... form, which assigns through an intermediate
    # Series and is not guaranteed to write back to the frame
    # (pandas SettingWithCopyWarning).
    koeppenlookuptable.loc[ikoeppen, 'color'] = kgccolors[koeppen.KGCID][0]
    koeppenlookuptable.loc[ikoeppen, 'textcolor'] = kgccolors[koeppen.KGCID][1]
    koeppenlookuptable.loc[ikoeppen, 'name'] = kgcnames[koeppen.KGCID]
+
+
+
# Map each sounding's numeric KGC code to its class name.
c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] =  \
    c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID'])

print('sort the climate classes according to the amount ')
koeppenlookuptable['amount'] = ""

#exclude_koeppen = ['Dfc','Cwb']

# Count soundings per Koeppen class; classes with fewer than 200 samples are
# dropped, the rest ordered by decreasing sample count.
for ikoeppen,koeppen in koeppenlookuptable.iterrows():

    print(ikoeppen,':',koeppen)
    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
    print(np.sum(kgc_select))
    # Fix: was koeppenlookuptable.iloc[ikoeppen]['amount'] = ..., a chained
    # assignment through a positionally indexed row copy (while iterrows
    # yields index labels, not positions); .loc[label, col] writes back
    # reliably.
    koeppenlookuptable.loc[ikoeppen, 'amount'] = np.sum(kgc_select)

koeppenlookuptable = koeppenlookuptable[koeppenlookuptable.amount >= 200]
koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
# koeppenlookuptable = koeppenlookuptable[:9]
include_koeppen = list(koeppenlookuptable.KGCID)
+
+
+if args.make_figures:
+    fig = plt.figure(figsize=(11,7))   #width,height
+    i = 1                                                                           
+    axes = {}         
+    axes_taylor = {}         
+    
+    colors = ['r','g','b','m','y','purple','orange','sienna','navy']
+    symbols = ['*','x','+']
+    dias = {}
+
+
+    i = 1
+    for varkey in ['h','theta','q']:                                                    
+        dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+        axes[varkey] = fig.add_subplot(2,3,i)                                       
+        #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+    
+        #print(obs.std())
+        #if i == 2:
+        dias[varkey]._ax.axis["left"].label.set_text(\
+            "Normalized standard deviation")
+        if i == 1:
+            axes[varkey].annotate('Normalized standard deviation',\
+                        xy= (0.05,0.27),
+                        color='black',
+                        rotation=90.,
+                        xycoords='figure fraction',
+                        weight='normal',
+                        fontsize=10.,
+                        horizontalalignment='center',
+                        verticalalignment='center' ,
+                        #bbox={'edgecolor':'black',
+                        #      'boxstyle':'circle',
+                        #      'fc':koeppen.color,
+                        #      'alpha':1.0}
+                       )
+
+
+
+            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # Q95 = obs.quantile(0.95)
+        # Q95 = obs.quantile(0.90)
+        # Add RMS contours, and label them
+        contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+        dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+        #dia._ax.set_title(season.capitalize())
+    
+        dias[varkey].add_grid()
+    
+    
+        #dia.ax.plot(x99,y99,color='k')
+        i += 1
+    
+    i = 1
+    for varkey in ['h','theta','q']:                                                    
+        #for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]):
+            # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+            # clearsky = (cc < 0.05)
+            # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+            # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+
+            print ('filtering classes that have sufficient samples: ', include_koeppen)
+            filter_classes = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen))
+            mod = mod.loc[filter_classes]
+            obs = obs.loc[filter_classes]
+            x, y = obs.values,mod.values
+            print(key,len(obs.values))
+    
+            STD_OBS = obs.std()
+            #scores
+            PR = pearsonr(mod,obs)[0]
+            RMSE = rmse(obs,mod)                                               
+            BIAS = np.mean(mod) - np.mean(obs)
+            STD = mod.std()
+            
+            # fit = np.polyfit(x,y,deg=1)
+            # axes[varkey].plot(x, fit[0] * x + fit[1],\
+            #                   color=colors[ikey],alpha=0.8,lw=2,\
+            #                   label=key+", "+\
+            #                               'R = '+str(round(PR,3))+', '+\
+            #                               'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
+            #                               'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
+            # axes[varkey].legend(fontsize=5)
+            
+            # print(STD)
+            # print(PR)
+            dias[varkey].add_sample(STD/STD_OBS, PR,\
+                           marker='o',ls='', mfc='white',mec='black',
+                           zorder=-100,
+                           ms=10.*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
+                                np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                           # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                           # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                           )
+            dias[varkey].add_sample(STD/STD_OBS, PR,\
+                           marker='o',ls='', mfc='none',mec='black',
+                           zorder=700,
+                           ms=10.*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
+                                np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                           # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                           # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                           )
+            dias[varkey].add_sample(STD/STD_OBS, PR,\
+                           marker='o',ls='', mfc='none',mec='black',
+                           zorder=700,
+                           ms=1.
+                           # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                           # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                           )
+            # dias[varkey].add_sample(STD/STD_OBS, PR,\
+            #                    annotate='All', alpha=1.,color='black',weight='bold',fontsize=5.,\
+            #                 zorder=100,\
+            #                         bbox={'edgecolor':'black','boxstyle':'circle','alpha':0.0}\
+            #                 )
+    
+        # put ticker position, see
+        # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+        # dia.ax.axis['bottom'].
+        # dia.ax.axis['left'].
+        # dia.ax.axis['left'].
+    
+        i += 1
+
+    i = 1
+    for varkey in ['h','theta','q']:                                                    
+
+        for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]):
+            icolor = 0
+            for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+                print(ikoeppen,':',koeppen)
+                kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
+                koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select]
+                koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select]
+    
+                #axes[varkey].scatter(koeppen_obs,koeppen_mod,marker=symbols[ikoeppen],color=colors[ikey])
+                         #  label=key+", "+\
+                         #                    'R = '+str(round(PR[0],3))+', '+\
+                         #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                         #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+    
+    
+    
+            # # pl.scatter(obs,mod,label=key+", "+\
+            # #                              'R = '+str(round(PR[0],3))+', '+\
+            # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+            # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+                
+                dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                               pearsonr(koeppen_mod,koeppen_obs)[0],
+                               marker='o',linewidth=0.5,
+                                        mfc=koeppen.color,mec='black',#koeppen.color,
+                                        zorder=300+icolor,
+                               ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                               # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                               # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                               )
+                dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                               pearsonr(koeppen_mod,koeppen_obs)[0],
+                               marker='o',linewidth=0.5,
+                                        mfc=koeppen.color,mec='black',#koeppen.color,
+                                        zorder=301+icolor, ms=1
+                               # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                               # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                               )
+
+
+                # dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                #                pearsonr(koeppen_mod,koeppen_obs)[0],
+                #                         marker='o',linewidth=0.5, mfc='none',mec=str(koeppen.color),
+                #                         zorder=600+icolor,
+                #                ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                #                # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                #                # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                #                )
+
+                icolor += 1
+    
+            latex = {}
+            latex['dthetadt'] =  r'$d \theta / dt $'
+            latex['dqdt'] =      r'$d q / dt $'
+            latex['dhdt'] =      r'$d h / dt $'
+    
+            axes[varkey].set_xlabel('Observed')     
+
+
+            if varkey == 'q':
+                units_final = r'[$g\, kg^{-1}\, h^{-1}$]'
+            elif varkey == 'theta':
+                units_final = r'[$K\, h^{-1}$]'
+            elif varkey == 'h':
+                units_final = r'[$m\, h^{-1}$]'
+
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12.)                                     
+        if i==1:                                    
+            axes[varkey].set_ylabel('Modelled')                                            
+        abline(1,0,axis=axes[varkey])
+        i +=1
+
+    
+
+    i = 1
+    for varkey in ['h','theta','q']:                                                    
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        keylabel = keylabels[ikey]
+        # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
+        # clearsky = (cc < 0.05)
+    
+        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
+    
+        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+        print ('filtering classes that have sufficient samples: ', include_koeppen)
+        filter_classess = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen))
+        mod = mod.loc[filter_classes]
+        obs = obs.loc[filter_classes]
+    
+        nbins=40       
+        x, y = obs.values,mod.values
+        
+        xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
+        zi = np.zeros_like(xi)*np.nan       
+        for ibin in range(nbins):
+            xmin = x.min() + ibin * (x.max() - x.min())/nbins
+            xmax = xmin + (x.max() - x.min())/nbins
+            in_bin = ((x >= xmin) & (x < xmax))
+            ybin = y[in_bin]
+            xbin = x[in_bin]
+            if len(ybin) > 20:
+                k = kde.gaussian_kde((ybin))
+                zi[ibin] = k(np.vstack([yi[ibin].flatten()]))
+        zi = zi/np.sum(zi,axis=1)[:,np.newaxis]
+        zi_int = zi.cumsum(axis=1) 
+                     #  label=key+", "+\
+                     #                    'R = '+str(round(PR[0],3))+', '+\
+                     #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                     #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+        axes[varkey].contour(xi, yi, zi_int.reshape(xi.shape),levels=[0.25,0.5,0.75] ,
+                colors=['darkred','lightgreen','darkred'],linewidths=[1,2,1])
+        axes[varkey].contourf(xi, yi, zi_int.reshape(xi.shape),levels=[0.25,0.75] ,
+                colors=['darkred'],alpha=0.5,)
+
+        # if varkey == 'q':
+        nani = np.concatenate([xi[zi != np.nan],yi[zi != np.nan]])
+        # axes[varkey].set_ylim((np.percentile(nani,20),np.percentile(nani,80)))
+        # #axes[varkey].set_ylim((nani.min(),nani.max()))
+        # print(varkey,(nani.min(),nani.max()))
+    
+    
+        latex = {}
+        latex['dthetadt'] =  r'$d \theta / dt $'
+        latex['dqdt'] =      r'$d q / dt $'
+        latex['dhdt'] =      r'$d h / dt $'
+    
+        axes[varkey].set_xlabel('observations')     
+
+        if varkey == 'q':
+            units_final = r'[$\mathrm{g\, kg^{-1}\, h^{-1}}$]'
+        elif varkey == 'theta':
+            units_final = r'[$\mathrm{K\, h^{-1}}$]'
+        elif varkey == 'h':
+            units_final = r'[$\mathrm{m\, h^{-1}}$]'
+
+        if varkey == 'q':
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=15)        
+        elif varkey == 'theta':
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=15)
+        elif varkey == 'h':
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=15)
+       #  c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname']
+
+        PR = pearsonr(mod,obs)[0]
+        RMSE = rmse(obs,mod)                                               
+        BIAS = np.mean(mod) - np.mean(obs)
+        STD = mod.std()
+    
+        
+        axes[varkey].scatter(obs,mod, label='All',s=0.1,alpha=0.14,color='k')
+
+
+
+        #axes[varkey].legend(fontsize=5)
+
+        #trans = ax.get_xaxis_transform() # x in data untis, y in axes fraction
+        if varkey == 'q':
+            annotate_text = \
+                           'RMSE = '+format((RMSE*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+ '\n'+\
+                           'Bias = '+format((BIAS*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+' \n'+\
+                           r'$R$ = '+format(PR,'0.2f')
+        elif varkey == 'h':
+            annotate_text = \
+                            'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                            'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                            r'$R$ = '+format(PR,'0.2f')
+        else:
+            annotate_text = \
+                            'RMSE = '+format(RMSE,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                            'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                            r'$R$ = '+format(PR,'0.2f')
+
+
+        ann = axes[varkey].annotate(annotate_text, xy=(0.05, .98 ), xycoords='axes fraction',fontsize=9,
+       horizontalalignment='left', verticalalignment='top' 
+        )
+
+        if varkey == 'q':
+            print('get_xlim not working well...STRANGE')
+            limits =  [np.percentile(nani,1),np.percentile(nani,99)]
+        else:
+            limits =  [np.percentile(nani,1.0),np.percentile(nani,99.0)]
+
+        axes[varkey].set_xlabel('Observed')     
+        if i==1:                                    
+            axes[varkey].set_ylabel('Modelled')                                            
+        abline(1,0,axis=axes[varkey])
+
+
+        i +=1
+
+        #axes[varkey].axis('equal')
+        axes[varkey].set_aspect('equal')
+        axes[varkey].set_xlim(limits)
+        axes[varkey].set_ylim(limits)
+
+        # To specify the number of ticks on both or any single axes
+        # plt.locator_params(axis='x', nbins=6)
+        #plt.locator_params( nbins=10)
+        axes[varkey].xaxis.set_major_locator(ticker.MaxNLocator(4))
+        axes[varkey].yaxis.set_major_locator(ticker.MaxNLocator(4))
+        # axes[varkey].xaxis.set_major_locator(ticker.MultipleLocator(5))
+        # axes[varkey].yaxis.set_major_locator(ticker.MultipleLocator(5))
+
+        if varkey == 'q':
+            ticks = ticker.FuncFormatter(lambda x, pos:
+                                         '{0:g}'.format(x*1000.))
+            axes[varkey].xaxis.set_major_formatter(ticks)
+            axes[varkey].yaxis.set_major_formatter(ticks)
+
+        #     # axes[varkey].set_xticklabels(labels=ax.get_xticklabels()*1000.)
+        #     # axes[varkey].set_yticklabels(labels=ax.get_yticklabels()*1000.)
+    
+    
+    # # legend for different forcing simulations (colors)
+    # ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # for ikey,key in enumerate(args.experiments.strip().split(' ')):
+    #     leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10)
+    #     leg.append(leg1)
+    # ax.axis('off')
+    # #leg1 =
+    # ax.legend(leg,list(args.experiments.strip().split(' ')),loc=2,fontsize=10)
+    
+    
+    # # legend for different stations (symbols)
+    # ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # isymbol = 0
+    # for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+    #     leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+    #     leg.append(leg1)
+    #     isymbol += 1
+    # 
+    # # symbol for all stations
+    # leg1, = ax.plot([],'ko',markersize=10)
+    # leg.append(leg1)
+    
+    # ax.axis('off')
+    # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    
+    
+    fig.subplots_adjust(top=0.95,bottom=0.09,left=0.08,right=0.94,hspace=0.35,wspace=0.29)
+    
+    
+    #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+    # figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/global_eval_report_cs.png'
+    # fig.savefig(figfn,dpi=200); print("Image file written to:", figfn)
+    
+    if args.figure_filename is not None:
+        fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+    fig.show()  
+
+    koeppenlookuptable = koeppenlookuptable.sort_index()
+    if bool(args.show_control_parameters):
+
+
+        pkmn_type_colors = [
+                                            '#A0A0A0',  # Poison
+                                            '#78C850',  # Grass
+                                            '#F08030',  # Fire
+                                            '#6890F0',  # Water
+                                            '#F08030',  # Fire
+                                            '#C03028',  # Fighting
+                                            '#F85888',  # Psychic
+                                            '#A8B820',  # Bug
+                                            '#A8A878',  # Normal
+                                            '#F8D030',  # Electric
+                                            '#E0C068',  # Ground
+                                            '#EE99AC',  # Fairy
+                                            '#B8A038',  # Rock
+                                            '#705898',  # Ghost
+                                            '#98D8D8',  # Ice
+                                            '#7038F8',  # Dragon
+                                           ]
+
+
+
+        #sns.set()
+        #fig = pl.figure(figsize=(11,7))
+        i = 1
+        #axes = {}
+        data_all = pd.DataFrame()
+        data_input = pd.DataFrame()
+        
+        
+        
+        # #for varkey in ['theta','q']:     
+        # EF =\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR/(1.+\
+        #     c4gldata[key].frames['stats']['records_all_stations_ini'].BR)
+        # EF[EF<0] = np.nan
+        # EF[EF>1] = np.nan
+        
+        # c4gldata[key].frames['stats']['records_all_stations_ini']['EF'] = EF
+        
+        ikey = 0
+        key = list(args.experiments.strip().split(' '))[ikey]
+        data_all = pd.DataFrame()
+
+        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        
+
+        tempdatamodstats["source"] = "soundings"
+        tempdatamodstats["source_index"] = "soundings"
+
+        ini_ref = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+        tempdataini_this = pd.DataFrame(ini_ref.copy())
+
+        tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+        tempdatamodstats['STNID']= tempdataini_this.STNID
+        tempdatamodstats['source']= "soundings"
+        tempdatamodstats['source_index']= "soundings"
+        tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+        #print('hello')
+
+        tempdataini = pd.DataFrame(ini_ref)
+        tempdataini["source"] = "soundings"
+        tempdataini["source_index"] = "soundings"
+        tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+        #print('hello2')
+
+
+        data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+        data_input = pd.concat([data_input,tempdataini],axis=0)
+        #print(data_input.shape)
+        #print(data_all.shape)
+
+            
+        for ikey,key in enumerate(list(args.experiments.strip().split(' '))):
+            keylabel = keylabels[ikey]
+
+            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
+            tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
+
+            tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
+            tempdatamodstats['STNID']= tempdataini_this.STNID
+            tempdatamodstats['source']= keylabel
+            tempdatamodstats['source_index']= keylabel
+            tempdatamodstats.set_index(['source_index','STNID','dates'],inplace=True)
+            #print('hello')
+
+
+            tempdataini = pd.DataFrame(ini_ref.copy())
+            tempdataini["source"] = keylabel
+            tempdataini["source_index"] = keylabel
+            tempdataini = tempdataini.set_index(['source_index','STNID','dates'])
+    
+
+            #print('hello2')
+            index_intersect = tempdataini.index.intersection(tempdatamodstats.index)
+            #print('hello3')
+
+            tempdataini = tempdataini.loc[index_intersect]
+            #print('hello4')
+            tempdatamodstats = tempdatamodstats.loc[index_intersect]
+            #print('hello5
+
+            # data[varkey] = tempdatamodstats['d'+varkey+'dt']
+            data_all = pd.concat([data_all,tempdatamodstats],axis=0)
+            data_input = pd.concat([data_input, tempdataini],axis=0)
+            #print(data_input.shape)
+            #print(data_all.shape)
+
+        data_input.cc = data_input.cc.clip(0.,+np.inf)
+
+        for varkey in ['h','theta','q']:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+            data_all = data_all.rename(columns={'d'+varkey+'dt':varkey_full})
+            data_all['KGCname'] = data_input['KGCname']
+
+
+
+
+            #print(data_input.shape)
+            #print(data_all.shape)
+        # xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
+        # lookuptable = pd.Series(xrkoeppen['KGCID'])
+        # data_all['KGCname'] = data_input['KGC'].map(lookuptable)
+        #print('hello6')
+        #print(data_all.columns)
+        #print('hello7')
+
+        
+
+
+        varkeys = ['h','theta','q']
+        for varkey in varkeys:
+            #input_keys =['wg','cc']
+            #for input_key in input_keys:
+            varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+
+            #print('hello8')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            #input_key_full = input_key + "["+units[input_key]+"]"
+            #print('hello9')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            print ('Excluding extreme values from the classes plots')
+            qvalmax = data_all[varkey_full].quantile(0.999)
+            qvalmin = data_all[varkey_full].quantile(0.001)
+            select_data = (data_all[varkey_full] >= qvalmin) & (data_all[varkey_full] < qvalmax)
+            #print('hello11')
+            data_all = data_all[select_data]
+            #print('hello12')
+            data_input = data_input[select_data.values]
+
+            data_input = data_input[data_all.KGCname.isin(list(koeppenlookuptable.KGCID))]
+            data_all = data_all[data_all.KGCname.isin(list(koeppenlookuptable.KGCID))]
+            #print('hello13')
+            #print(data_input.shape)
+            #print(data_all.shape)
+            #print('hello10')
+            
+
+        #sns.set(style="ticks", palette="deep")
+
+        
+        exppairs = {'obs|ref'     :['soundings','GLOBAL_ADV'],
+                    'fcap|wilt'   :['GLOBAL_ADV_FC'  ,'GLOBAL_ADV_WILT'],
+                    'allveg|noveg':['GLOBAL_ADV_VMAX','GLOBAL_ADV_VMIN']
+                   }
+        current_palette = sns.color_palette('deep')
+        exppalettes = {'obs|ref'     :['white','grey'],
+                    'fcap|wilt'   :[current_palette[0],current_palette[3]],
+                       'allveg|noveg':[current_palette[2],current_palette[8]]
+                   }
+
+        data_all['expname'] = ""
+        print('making alternative names for legends')
+        expnames = {'soundings':'obs',\
+                    'GLOBAL_ADV':'ref',\
+                    'GLOBAL_ADV_WILT':'dry',\
+                    'GLOBAL_ADV_FC':'wet',\
+                    'GLOBAL_ADV_VMIN':'noveg',\
+                    'GLOBAL_ADV_VMAX':'fullveg',\
+                   }
+        for expname_orig,expname in expnames.items():
+            data_all['expname'][data_all['source'] == expname_orig] = expname
+
+        data_all['exppair'] = ""
+        for exppairname,exppair in exppairs.items():
+            data_all['exppair'][  data_all['source'].isin(exppair)  ] = exppairname
+
+        icolor = 0
+        
+        fig, axes = plt.subplots(nrows=len(varkeys)*len(koeppenlookuptable), \
+                                 ncols=len(exppairs), \
+                                 figsize=(8, 13), #width, height
+                                 sharex='col',
+                                 #gridspec_kw=dict(height_ratios=(1, 3), 
+                                 gridspec_kw=dict(hspace=0.20,wspace=0.08,top=0.94,bottom=0.06,left=0.15,right=0.99))
+
+        data_all['d'+varkey+'dt ['+units[varkey]+'/h]'] *= 1000.  
+
+        icol = 0
+        irow = 0
+        sns.set_style('whitegrid')
+        for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+            for exppairname,exppair in exppairs.items():
+                for varkey in varkeys:
+                    varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+                    ax = axes[irow,icol]
+
+                 #   axes[i] = fig.add_subplot(len(varkeys)*len(koeppenlookuptable),len(exppairs),icolor)
+            #sns.violinplot(x='KGC',y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+            
+            #ax.set_title(input_key_full)
+                    current_data = data_all[(data_all['exppair'] == exppairname) & (data_all['KGCname']  == koeppen.KGCID)]
+                    sns.violinplot(y='exppair', x=varkey_full,
+                                        hue="expname",split=True,
+                             palette=exppalettes[exppairname],
+                            # palette=["m", "g",'r','b'],
+                             linewidth=1.0,inner='quart',
+                                        data=current_data,sym='',legend=False,ax=ax)
+                    ax.legend("")
+                    ax.legend_.draw_frame(False)
+                    ax.set_yticks([])
+                    ax.set_ylabel("")
+
+                    # if varkey == 'q':
+                    #     ticks = ticker.FuncFormatter(lambda x, pos:
+                    #                                  '{0:g}'.format(x*1000.))
+                    #     ax.xaxis.set_major_formatter(ticks)
+
+                    if varkey == 'q':
+                        title_final = r'$dq/dt$'
+                        xlabel_final = r'[$\mathrm{g\, kg^{-1}\, h^{-1}}$]'
+                    elif varkey == 'theta':
+                        title_final = r'$d\theta/dt$'
+                        xlabel_final = r'[$\mathrm{K\, h^{-1}}$]'
+                    elif varkey == 'h':
+                        title_final = r'$dh/dt$'
+                        xlabel_final = r'[$\mathrm{m\, h^{-1}}$]'
+
+
+                    ax.set_xlabel("")
+                    #sns.despine(left=True, right=True, bottom=False, top=False)
+                    if irow == (len(varkeys)*len(koeppenlookuptable)-1):
+                        #ax.set_frame_on(False)
+
+                        ax.set_xlabel(xlabel_final)
+                        ax.tick_params(top='off', bottom='on', left='off',
+                                        right='off', labelleft='off',
+                                        labeltop='off',
+                                        labelbottom='on'
+                                      )
+                        ax.spines['top'].set_visible(False)
+                        ax.spines['bottom'].set_visible(True)
+                        ax.spines['left'].set_visible(True)
+                        ax.spines['right'].set_visible(True)
+                        #sns.despine(left=True, right=True, bottom=True, top=False)
+                    elif irow == 0:
+                        ax.set_title(title_final,fontsize=17.)
+                        ax.tick_params(top='off', bottom='off', left='off',
+                                        right='off', labelleft='off',
+                                        labelbottom='off')
+                        #ax.set_frame_on(False)
+                        # ax.spines['left'].set_visible(True)
+                        # ax.spines['right'].set_visible(True)
+                        ax.spines['top'].set_visible(True)
+                        ax.spines['bottom'].set_visible(False)
+                        ax.spines['left'].set_visible(True)
+                        ax.spines['right'].set_visible(True)
+                        #sns.despine(left=True, right=True, bottom=False, top=True)
+                        #ax.axis("off")
+                    elif np.mod(irow,len(exppairs)) == 0:
+                        ax.tick_params(top='off', bottom='off', left='off',
+                                        right='off', labelleft='off',
+                                        labelbottom='off')
+                        #ax.set_frame_on(False)
+                        # ax.spines['left'].set_visible(True)
+                        # ax.spines['right'].set_visible(True)
+                        ax.spines['top'].set_visible(True)
+                        ax.spines['bottom'].set_visible(False)
+                        ax.spines['left'].set_visible(True)
+                        ax.spines['right'].set_visible(True)
+                        #sns.despine(left=True, right=True, bottom=False, top=True)
+                        #ax.axis("off")
+                    elif np.mod(irow,len(exppairs)) == 2:
+                        ax.tick_params(top='off', bottom='on', left='off',
+                                        right='off', labelleft='off',
+                                        labelbottom='off')
+                        #ax.set_frame_on(False)
+                        # ax.spines['left'].set_visible(True)
+                        # ax.spines['right'].set_visible(True)
+                        ax.spines['top'].set_visible(False)
+                        ax.spines['bottom'].set_visible(True)
+                        ax.spines['left'].set_visible(True)
+                        ax.spines['right'].set_visible(True)
+                        #sns.despine(left=True, right=True, bottom=False, top=True)
+                        #ax.axis("off")
+                    else:
+                        ax.tick_params(top='off', bottom='off', left='off',
+                                        right='off', labelleft='off',
+                                        labelbottom='off')
+                        #ax.set_frame_on(False)
+                        #ax.spines['left'].set_visible(True)
+                        #ax.spines['right'].set_visible(True)
+                        ax.spines['top'].set_visible(False)
+                        ax.spines['bottom'].set_visible(False)
+                        ax.spines['left'].set_visible(True)
+                        ax.spines['right'].set_visible(True)
+                        #ax.axis("off")
+                    icol +=1
+                irow +=1
+                icol=0
+
+        idx = 0
+        for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+            ax.annotate(koeppen.KGCID,
+                        xy= (0.01,0.09 + (1.-0.12)*(1. - (idx+.5)/len(koeppenlookuptable))),
+                        color=koeppen.textcolor, 
+                        family='monospace',
+                        xycoords='figure fraction',
+                        weight='bold',
+                        fontsize=8.,
+                        horizontalalignment='top',
+                        verticalalignment='left' ,
+                        bbox={'edgecolor':'black',
+                              'boxstyle':'circle',
+                              'fc':koeppen.color,
+                              'alpha':1.0}
+                       )
+            data_select = data_input[(data_input['source'] == 'soundings') & (data_input['KGCname']  == koeppen.KGCID)]
+            ax.annotate('#:   '+r'$'+str(koeppen.amount)+' '+'$'+'\n'+\
+                        'sm: '+r'$'+str(round(data_select.wg.mean(),2))+'$'+r'$\, \mathrm{m^{3}\, m^{-3}}$'+'\n'+\
+                        'vf: '+str(round(data_select.cveg.mean(),2))+'\n'+\
+                        'cc:  '+str(round(data_select.cc.mean(),2))+'\n'+\
+                        'lat: '+r'$'+str(int(data_select.latitude.mean()))+r'\, ^\circ$'+' \n',\
+                        xy= (0.01,0.015 + (1.-0.12)*(1. - (idx+.5)/len(koeppenlookuptable))),
+                        color='black',
+                        family='monospace',
+                        xycoords='figure fraction',
+                        weight='normal',
+                        fontsize=8.,
+                        horizontalalignment='top',
+                        verticalalignment='left' ,
+                        #bbox={'edgecolor':'black',
+                        #      'boxstyle':'circle',
+                        #      'fc':koeppen.color,
+                        #      'alpha':1.0}
+                       )
+            idx+=1
+
+
+
+
+            # if i ==1:
+            #      plt.legend(loc='upper right',fontsize=7.,frameon=True,framealpha=0.7)
+            # else:
+            #      ax.get_legend().set_visible(False)
+            # #     plt.legend('off')
+            # if i >= 3:
+            #     idx = 0
+            #     for ikoeppen,koeppen in koeppenlookuptable.iterrows():
+
+            #         ax.annotate(koeppen.KGCID,
+            #                     xy=((idx+.5)/len(koeppenlookuptable),-0.00),
+            #                     color=koeppen.textcolor, 
+            #                     xycoords='axes fraction',
+            #                     weight='bold',
+            #                     fontsize=8.,
+            #                     horizontalalignment='center',
+            #                     verticalalignment='center' ,
+            #                     bbox={'edgecolor':'black',
+            #                           'boxstyle':'circle',
+            #                           'fc':koeppen.color,
+            #                           'alpha':1.0}
+            #                    )
+            #         idx+=1
+            #     ax.set_xticklabels([])#labels=ax.get_xticklabels())
+            #     ax.set_xlabel('Köppen climate class')
+            # else:
+            #     ax.set_xticklabels([])
+            #     ax.set_xlabel('')
+
+            # # ax.set_yticklabels([])
+            # # ax.set_ylabel('')
+            # if varkey == 'q':
+            #     ticks = ticker.FuncFormatter(lambda x, pos:
+            #                                  '{0:g}'.format(x*1000.))
+            #     #ax.xaxis.set_major_formatter(ticks)
+            #     ax.yaxis.set_major_formatter(ticks)
+
+            #     ax.set_ylabel(latex['d'+varkey+'dt']+' ['+r'$10^{-3} \times $'+units['d'+varkey+'dt']+']')        
+            # else:
+            #     ax.set_ylabel(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')        
+
+
+
+
+            # for j,artist in enumerate(ax.artists):
+            #     if np.mod(j,len(list(args.experiments.strip().split(' ')))+1) !=0:
+            #         # Set the linecolor on the artist to the facecolor, and set the facecolor to None
+            #         #print(j,artist)
+            #         col = artist.get_facecolor()
+            #         #print(j,artist)
+            #         artist.set_edgecolor(col)
+            #         #print(j,artist)
+            #         artist.set_facecolor('None')
+            # 
+            #         # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
+            #         # Loop over them here, and use the same colour as above
+            #         
+            #         for k in range(j*5,j*5+5):
+            #             line = ax.lines[k]
+            #             line.set_color(col)
+            #             line.set_mfc(col)
+            #             line.set_mec(col)
+            # 
+            # # Also fix the legend
+            # j = 0
+            # for legpatch in ax.get_legend().get_patches():
+            #     if j > 0:
+
+            #         col = legpatch.get_facecolor()
+            #         legpatch.set_edgecolor(col)
+            #         legpatch.set_facecolor('None')
+            #     j +=1
+
+
+
+
+
+            #ax.grid()
+            #sns.despine(offset=10, trim=True)
+        fig.tight_layout()
+        # fig.subplots_adjust(
+        #     bottom=0.12,left=0.15,top=0.99,right=0.99,wspace=0.10,hspace=0.05,)
+        if args.figure_filename_2 is not None:
+            fig.savefig(args.figure_filename_2,dpi=200); print("Image file written to:", args.figure_filename_2)
+
+            fig.savefig(args.figure_filename_2.replace('png','pdf')); print("Image file written to:", args.figure_filename_2)
+        fig.show()
+
+
+
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index c7ac908..4fe9218 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -1,3 +1,4 @@
+'''
 import numpy as np
 
 import pandas as pd
@@ -125,6 +126,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False
                     )
+'''
 
 if bool(args.make_figures):
     fig = plt.figure(figsize=(10,7))   #width,height
@@ -132,7 +134,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     axes = {}         
     axes_taylor = {}         
     
-    colors = ['r','g','b','m']
+    #colors = ['r','g','b','m']
+    colors = ['k']
     symbols = ['*','x','+']
     dias = {}
     
@@ -144,11 +147,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
         STD_OBS = obs.std()
         dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
-        if i == 2:
-            dias[varkey]._ax.axis["left"].label.set_text(\
-                "Standard deviation (model) / Standard deviation (observations)")
-            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+        dias[varkey]._ax.axis["left"].label.set_text(\
+            "Normalized standard deviation")
+        # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+        # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
         #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
         # Q95 = obs.quantile(0.95)
         # Q95 = obs.quantile(0.90)
@@ -176,13 +178,72 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             STD = mod.std()
             
             fit = np.polyfit(x,y,deg=1)
-            axes[varkey].plot(x, fit[0] * x + fit[1],\
-                              color=colors[ikey],alpha=0.8,lw=2,\
-                              label=key+", "+\
-                                          'R = '+str(round(PR,3))+', '+\
-                                          'RMSE = '+str(round(RMSE,5))+units['d'+varkey+'dt']+', '+\
-                                          'BIAS = '+str(round(BIAS,5))+units['d'+varkey+'dt'] )
-            axes[varkey].legend(fontsize=5)
+
+            if varkey == 'q':
+                axes[varkey].plot(x, fit[0] * x + fit[1],\
+                                  color=colors[ikey],alpha=0.8,lw=2,\
+                                  label=key+", "+\
+                           'RMSE = '+format((RMSE*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+ '\n'+\
+                           'Bias = '+format((BIAS*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+' \n'+\
+                           r'$R$ = '+format(PR,'0.2f') )
+
+
+            elif varkey == 'h':
+                axes[varkey].plot(x, fit[0] * x + fit[1],\
+                                  color=colors[ikey],alpha=0.8,lw=2,\
+                                  label=key+", "+\
+                            'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                            'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                            r'$R$ = '+format(PR,'0.2f'))
+            else: #theta
+                axes[varkey].plot(x, fit[0] * x + fit[1],\
+                                  color=colors[ikey],alpha=0.8,lw=2,\
+                                  label=key+", "+\
+                            'RMSE = '+format(RMSE,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                            'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                            r'$R$ = '+format(PR,'0.2f'))
+
+            if varkey == 'q':
+                annotate_text = \
+                               'RMSE = '+format((RMSE*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+ '\n'+\
+                               'Bias = '+format((BIAS*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+' \n'+\
+                               r'$R$ = '+format(PR,'0.2f')
+                ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9,
+       horizontalalignment='right', verticalalignment='bottom' ,
+        bbox={'edgecolor':'black',
+                          'fc':'white',  
+                              'boxstyle':'square',
+                              'alpha':0.8}
+                                       )
+            elif varkey == 'h':
+                annotate_text = \
+                                'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                                'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+                                r'$R$ = '+format(PR,'0.2f')
+                ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9,
+       horizontalalignment='right', verticalalignment='bottom' ,
+        bbox={'edgecolor':'black',
+                          'fc':'white',  
+                              'boxstyle':'square',
+                              'alpha':0.8}
+                                       )
+            else:
+                annotate_text = \
+                                'RMSE = '+format(RMSE,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                                'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+                                r'$R$ = '+format(PR,'0.2f')
+
+                ann = axes[varkey].annotate(annotate_text, xy=(0.05, .98 ), xycoords='axes fraction',fontsize=9,
+       horizontalalignment='left', verticalalignment='top' ,
+        bbox={'edgecolor':'black',
+                          'fc':'white',  
+                              'boxstyle':'square',
+                              'alpha':0.8}
+                                       )
+
+
+
+
             
             # print(STD)
             # print(PR)
@@ -223,16 +284,22 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
                 
                 dias[varkey].add_sample(station_mod.std()/station_obs.std(),
-                               pearsonr(station_mod,station_obs)[0],annotate=symbols[istation],
+                               pearsonr(station_mod,station_obs)[0],#annotate=symbols[istation],
                                marker=symbols[istation], ms=5, ls='',
-                               #mfc='k', mec='k', # B&W
-                               mfc=colors[ikey], mec=colors[ikey], # Colors
+                               mfc='k', mec='k', # B&W
+                               #mfc=colors[ikey], mec=colors[ikey], # Colors
                                label=key)
                 istation += 1
     
+            if varkey == 'q':
+                units_final = r'[$g\, kg^{-1}\, h^{-1}$]'
+            elif varkey == 'theta':
+                units_final = r'[$K\, h^{-1}$]'
+            elif varkey == 'h':
+                units_final = r'[$m\, h^{-1}$]'
     
             axes[varkey].set_xlabel('observations')     
-            axes[varkey].set_title(latex['d'+varkey+'dt']+' ['+units['d'+varkey+'dt']+']')                                     
+            axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12)                                     
         if i==0:                                    
             axes[varkey].set_ylabel('model')                                            
         abline(1,0,axis=axes[varkey])
@@ -240,19 +307,19 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     
     
-    # legend for different forcing simulations (colors)
-    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
-    leg = []
-    for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
-        leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
-        leg.append(leg1)
-    ax.axis('off')
-    #leg1 =
-    ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
+    # # legend for different forcing simulations (colors)
+    # ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    # leg = []
+    # for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+    #     leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
+    #     leg.append(leg1)
+    # ax.axis('off')
+    # #leg1 =
+    # ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
     
     
     # legend for different stations (symbols)
-    ax = fig.add_axes([0.25,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
     leg = []
     isymbol = 0
     for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
diff --git a/class4gl/interface/test.test b/class4gl/interface/test.test
new file mode 100644
index 0000000..969d1d3
--- /dev/null
+++ b/class4gl/interface/test.test
@@ -0,0 +1,276 @@
+---
+# CLASS4GL input; format version: 0.1
+index: 4
+pars:
+  Ammax298: [2.2, 1.7]
+  CO2: 400.8322760704748
+  CO22_h: null
+  CO2comp298: [68.5, 4.3]
+  CO2tend: 0.0
+  Cm: 0.014889170302298154
+  Cs: 0.012038840098238213
+  Cw: 0.0016
+  E0: 53300.0
+  G: 47.29774110009936
+  H: 190.5623007925413
+  Kx: [0.7, 0.7]
+  L: -6.853794630136477
+  LE: 214.88314861335857
+  LEliq: 0.0
+  LEpot: 319.34762471715567
+  LEref: 243.84639777941322
+  LEsoil: 43.588158263249724
+  LEveg: 171.29499035010883
+  Lwin: 307.8635861165938
+  Lwout: 426.33896646668103
+  M: 0.0
+  P_h: 82175.50598724303
+  Q: 452.7431905059985
+  Q10Am: [2.0, 2.0]
+  Q10CO2: [1.5, 1.5]
+  Q10gm: [2.0, 2.0]
+  R10: 0.23
+  RH_h: 1.674122332852863
+  Rib: -6.70652127177839
+  Swin: 739.103621663443
+  Swout: 167.8850508073574
+  T1Am: [281.0, 286.0]
+  T1gm: [278.0, 286.0]
+  T2: 284.68364969889325
+  T2Am: [311.0, 311.0]
+  T2gm: [301.0, 309.0]
+  T2m: 282.3123067089991
+  T_h: 274.8936229768578
+  Ts: 294.47523957923886
+  Tsoil: 286.4586732910864
+  ac: 0.0
+  ad: [0.07, 0.15]
+  advCO2: 0.0
+  advt: 1.0355662748843815e-05
+  alpha0: [0.017, 0.014]
+  c3c4: c3
+  c_beta: 0
+  dCO2: -22.832276070474784
+  dCO2tend: 0.0
+  dFz: 0.0
+  divU: 0.0
+  dq: -0.003052156485017018
+  dtcur: 60.0
+  dtheta: 0.1
+  dthetav: -0.4365906447159773
+  dtmax: 1598.8514968081563
+  du: -0.4229086217862608
+  dv: 0.28673977724043115
+  dz: 50.0
+  dz_h: 50
+  dztend: -0.1395839151513645
+  e: 1388.9934836777713
+  e2m: 952.6963178543036
+  esat: 1731.8968600367527
+  esat2m: 1160.2806224411495
+  f0: [0.89, 0.85]
+  firsttime: true
+  gammaCO2: 0.0
+  gammaq: -2.2753052840179227e-05
+  gammatheta: 0.002144498648676304
+  gammau: 0.028469046551934885
+  gammav: 0.017160662703889926
+  gm298: [7.0, 17.5]
+  gmin: [0.00025, 0.00025]
+  h: 1383.2363985770135
+  lcl: 428.232209487189
+  ls_type: js
+  mair: 28.9
+  mco2: 44.0
+  nuco2q: 1.6
+  q: 0.008771106059366231
+  q2_h: null
+  q2m: 0.006016011266044435
+  qsat: 0.010936445146628022
+  qsurf: 0.011939539970569558
+  ra: 38.47545242647453
+  rs: 49.409679791439665
+  sp: 99023.24348958333
+  substep: false
+  substeps: 0
+  sw_ac: [adv]
+  sw_ap: true
+  sw_cu: false
+  sw_fixft: false
+  sw_lit: false
+  sw_ml: true
+  test: 0.0
+  testing: 0.0
+  theta: 288.39566185252
+  thetasurf: 294.472849715022
+  thetav: 289.93868670419346
+  thetavsurf: 296.61753063426823
+  time: 12.116666666666667
+  tsteps: 360
+  u: 0.817076520336863
+  u2m: 0.8170765203368628
+  ustar: 0.26343099197090714
+  uw: -0.026264240506899815
+  v: 0.04516931051262895
+  v2m: 0.045169310512628943
+  vw: -0.0014519296605725784
+  wCO2: 0.0
+  wCO2A: 0
+  wCO2M: 0
+  wCO2R: 0
+  wCO2e: 0.0
+  wf: 0.0
+  wg: 0.35105639595370375
+  wmax: 0.55
+  wmin: 0.005
+  wq: 7.162771620445285e-05
+  wqM: 0.0
+  wqe: 0.0
+  wstar: 1.9991113910017524
+  wtheta: 0.15801185803693307
+  wthetae: -0.0
+  wthetav: 0.17070715833083436
+  wthetave: -0.03414143166616687
+  z0h: 0.0
+  z0m: 0.0
+  zeta: -20.182052034282645
+  zlcl: 428.232209487189
+  zslz0m: 480.2904085186834
+---
+# CLASS4GL input; format version: 0.1
+index: 2944
+pars:
+  Ammax298: [2.2, 1.7]
+  CO2: 400.8322760704748
+  CO22_h: null
+  CO2comp298: [68.5, 4.3]
+  CO2tend: 0.0
+  Cm: 0.014889170302298154
+  Cs: 0.012038840098238213
+  Cw: 0.0016
+  E0: 53300.0
+  G: 47.29774110009936
+  H: 190.5623007925413
+  Kx: [0.7, 0.7]
+  L: -6.853794630136477
+  LE: 214.88314861335857
+  LEliq: 0.0
+  LEpot: 319.34762471715567
+  LEref: 243.84639777941322
+  LEsoil: 43.588158263249724
+  LEveg: 171.29499035010883
+  Lwin: 307.8635861165938
+  Lwout: 426.33896646668103
+  M: 0.0
+  P_h: 82175.50598724303
+  Q: 452.7431905059985
+  Q10Am: [2.0, 2.0]
+  Q10CO2: [1.5, 1.5]
+  Q10gm: [2.0, 2.0]
+  R10: 0.23
+  RH_h: 1.674122332852863
+  Rib: -6.70652127177839
+  Swin: 739.103621663443
+  Swout: 167.8850508073574
+  T1Am: [281.0, 286.0]
+  T1gm: [278.0, 286.0]
+  T2: 284.68364969889325
+  T2Am: [311.0, 311.0]
+  T2gm: [301.0, 309.0]
+  T2m: 282.3123067089991
+  T_h: 274.8936229768578
+  Ts: 294.47523957923886
+  Tsoil: 286.4586732910864
+  ac: 0.0
+  ad: [0.07, 0.15]
+  advCO2: 0.0
+  advt: 1.0355662748843815e-05
+  alpha0: [0.017, 0.014]
+  c3c4: c3
+  c_beta: 0
+  dCO2: -22.832276070474784
+  dCO2tend: 0.0
+  dFz: 0.0
+  divU: 0.0
+  dq: -0.003052156485017018
+  dtcur: 60.0
+  dtheta: 0.1
+  dthetav: -0.4365906447159773
+  dtmax: 1598.8514968081563
+  du: -0.4229086217862608
+  dv: 0.28673977724043115
+  dz: 50.0
+  dz_h: 50
+  dztend: -0.1395839151513645
+  e: 1388.9934836777713
+  e2m: 952.6963178543036
+  esat: 1731.8968600367527
+  esat2m: 1160.2806224411495
+  f0: [0.89, 0.85]
+  firsttime: true
+  gammaCO2: 0.0
+  gammaq: -2.2753052840179227e-05
+  gammatheta: 0.002144498648676304
+  gammau: 0.028469046551934885
+  gammav: 0.017160662703889926
+  gm298: [7.0, 17.5]
+  gmin: [0.00025, 0.00025]
+  h: 1383.2363985770135
+  lcl: 428.232209487189
+  ls_type: js
+  mair: 28.9
+  mco2: 44.0
+  nuco2q: 1.6
+  q: 0.008771106059366231
+  q2_h: null
+  q2m: 0.006016011266044435
+  qsat: 0.010936445146628022
+  qsurf: 0.011939539970569558
+  ra: 38.47545242647453
+  rs: 49.409679791439665
+  sp: 99023.24348958333
+  substep: false
+  substeps: 0
+  sw_ac: [adv]
+  sw_ap: true
+  sw_cu: false
+  sw_fixft: false
+  sw_lit: false
+  sw_ml: true
+  test: 0.0
+  testing: 0.0
+  theta: 288.39566185252
+  thetasurf: 294.472849715022
+  thetav: 289.93868670419346
+  thetavsurf: 296.61753063426823
+  time: 12.116666666666667
+  tsteps: 360
+  u: 0.817076520336863
+  u2m: 0.8170765203368628
+  ustar: 0.26343099197090714
+  uw: -0.026264240506899815
+  v: 0.04516931051262895
+  v2m: 0.045169310512628943
+  vw: -0.0014519296605725784
+  wCO2: 0.0
+  wCO2A: 0
+  wCO2M: 0
+  wCO2R: 0
+  wCO2e: 0.0
+  wf: 0.0
+  wg: 0.35105639595370375
+  wmax: 0.55
+  wmin: 0.005
+  wq: 7.162771620445285e-05
+  wqM: 0.0
+  wqe: 0.0
+  wstar: 1.9991113910017524
+  wtheta: 0.15801185803693307
+  wthetae: -0.0
+  wthetav: 0.17070715833083436
+  wthetave: -0.03414143166616687
+  z0h: 0.0
+  z0m: 0.0
+  zeta: -20.182052034282645
+  zlcl: 428.232209487189
+  zslz0m: 480.2904085186834
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index a1a2ad8..fbc4db0 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -189,6 +189,7 @@ def __init__(self,path,suffix='ini',refetch_stations=True):
             self.table = self.get_stations(suffix=suffix)
             self.table.to_csv(self.file)
         
+        print(self.table.columns)
         self.table = self.table.set_index('STNID')
 
     def get_stations(self,suffix):
@@ -378,10 +379,11 @@ def __prev__(self):
 
 
 def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_records=False):
-
+    print(stations)
     records = pd.DataFrame()
     for STNID,station in stations.iterrows():
         dictfnchunks = []
+        pklchunks = []
         if getchunk is 'all':
 
             # we try the old single-chunk filename format first (usually for
@@ -390,6 +392,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
             if os.path.isfile(path_yaml+'/'+fn):
                 chunk = 0
                 dictfnchunks.append(dict(fn=fn,chunk=chunk))
+                pklchunks.append(fn.replace('.yaml','.pkl'))
 
             # otherwise, we use the new multi-chunk filename format
             else:
@@ -407,6 +410,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
                 for chunk in chunks:
                     fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
                     dictfnchunks.append(dict(fn=fn,chunk=chunk))
+                    pklchunks.append(fn.replace('.yaml','.pkl'))
 
                 # while not end_of_chunks:
                 #     fn = format(STNID,'05d')+'_'+str(chunk)+'_'+subset+'.yaml'
@@ -422,113 +426,146 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor
         else:
             fn = format(STNID,'05d')+'_'+str(getchunk)+'_'+subset+'.yaml'
             dictfnchunks.append(dict(fn=fn,chunk=getchunk))
-            
-        if len(dictfnchunks) > 0:
-            for dictfnchunk in dictfnchunks:
-                yamlfilename = dictfnchunk['fn']
-                chunk = dictfnchunk['chunk']
-
-                #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
-                pklfilename = yamlfilename.replace('.yaml','.pkl')
-
-                #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
-                #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
-                generate_pkl = False
-                if not os.path.isfile(path_yaml+'/'+pklfilename): 
-                    print('pkl file does not exist. I generate "'+\
-                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
-                    generate_pkl = True
-                elif not (os.path.getmtime(path_yaml+'/'+yamlfilename) <  \
-                    os.path.getmtime(path_yaml+'/'+pklfilename)):
-                    print('pkl file older than yaml file, so I regenerate "'+\
-                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
-                    generate_pkl = True
-
-                if refetch_records:
-                    print('refetch_records flag is True. I regenerate "'+\
-                          path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
-                    generate_pkl = True
-                if not generate_pkl:
-                    records = pd.concat([records,pd.read_pickle(path_yaml+'/'+pklfilename)])
-                   # irecord = 0
-                else:
-                    with open(path_yaml+'/'+yamlfilename) as yaml_file:
-
-                        dictout = {}
-
-                        next_record_found = False
-                        end_of_file = False
-                        while (not next_record_found) and (not end_of_file):
-                            linebuffer = yaml_file.readline()
-                            next_record_found = (linebuffer == '---\n')
-                            end_of_file = (linebuffer == '')
-                        next_tell = yaml_file.tell()
-                        
-                        while not end_of_file:
-
-                            print(' next record:',next_tell)
-                            current_tell = next_tell
+
+        if (len(dictfnchunks) > 0):
+            load_from_unified_pkl = False    
+            pklfilename_unified = format(STNID,'05d')+'_'+subset+'.pkl'
+            if (getchunk is 'all') and (os.path.isfile(path_yaml+'/'+pklfilename_unified)):
+                load_from_unified_pkl = True
+                for dictfnchunk in dictfnchunks:
+                    yamlfilename = dictfnchunk['fn']
+                    chunk = dictfnchunk['chunk']
+                    pklfilename = yamlfilename.replace('.yaml','.pkl')
+
+
+                    if \
+                       (pklfilename_unified in pklchunks) or \
+                       (not os.path.isfile(path_yaml+'/'+pklfilename)) or \
+                       (os.path.getmtime(path_yaml+'/'+yamlfilename) > os.path.getmtime(path_yaml+'/'+pklfilename_unified)) or\
+                       (os.path.getmtime(path_yaml+'/'+pklfilename) > os.path.getmtime(path_yaml+'/'+pklfilename_unified)):
+                        load_from_unified_pkl = False
+
+            if load_from_unified_pkl:
+                pklfilename_unified = format(STNID,'05d')+'_'+subset+'.pkl'
+                print('reading unified table file ('+path_yaml+'/'+pklfilename_unified+') for station '\
+                              +str(STNID))
+
+                records_station = pd.read_pickle(path_yaml+'/'+pklfilename_unified)
+            else:
+                records_station = pd.DataFrame()
+                for dictfnchunk in dictfnchunks:
+                    yamlfilename = dictfnchunk['fn']
+                    chunk = dictfnchunk['chunk']
+
+                    #pklfilename = path_yaml+'/'+format(STNID,'05d')+'_'+subset+'.pkl'
+                    pklfilename = yamlfilename.replace('.yaml','.pkl')
+
+                    #print(yamlfilename+": "+str(os.path.getmtime(yamlfilename)))
+                    #print(pklfilename+": "+str(os.path.getmtime(pklfilename)))
+                    generate_pkl = False
+                    if not os.path.isfile(path_yaml+'/'+pklfilename): 
+                        print('pkl file does not exist. I generate "'+\
+                              path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
+                        generate_pkl = True
+                    elif not (os.path.getmtime(path_yaml+'/'+yamlfilename) <  \
+                        os.path.getmtime(path_yaml+'/'+pklfilename)):
+                        print('pkl file older than yaml file, so I regenerate "'+\
+                              path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
+                        generate_pkl = True
+
+                    if refetch_records:
+                        print('refetch_records flag is True. I regenerate "'+\
+                              path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...')
+                        generate_pkl = True
+                    if not generate_pkl:
+                        records_station_chunk = pd.read_pickle(path_yaml+'/'+pklfilename)
+                        records_station = pd.concat([records_station,records_station_chunk])
+                       # irecord = 0
+                    else:
+                        with open(path_yaml+'/'+yamlfilename) as yaml_file:
+
+                            dictout = {}
+
                             next_record_found = False
-                            yaml_file.seek(current_tell)
-                            os.system('mkdir -p '+TEMPDIR)
-                            filebuffer = open(TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
-                            linebuffer = ''
-                            while ( (not next_record_found) and (not end_of_file)):
-                                filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
+                            end_of_file = False
+                            while (not next_record_found) and (not end_of_file):
                                 linebuffer = yaml_file.readline()
                                 next_record_found = (linebuffer == '---\n')
                                 end_of_file = (linebuffer == '')
-                            filebuffer.close()
-                            
                             next_tell = yaml_file.tell()
-                            index_start = current_tell
-                            index_end = next_tell
-
-                            
-                            if which('ruby') is None:
-                                raise RuntimeError ('ruby is not found. Aborting...')
-                            #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
-                            command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
-                            print(command)
                             
-                            os.system(command)
-                            #jsonoutput = subprocess.check_output(command,shell=True) 
-                            #print(jsonoutput)
-                            #jsonstream = io.StringIO(jsonoutput)
-                            jsonstream = open(TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
-                            record = json.load(jsonstream)
-                            dictouttemp = {}
-                            for key,value in record['pars'].items():
-                                # we don't want the key with columns that have none values
-                                if value is not None: 
-                                   regular_numeric_types =[ type(x) for x in[0,False,0.0]]
-                                   if (type(value) in regular_numeric_types):
-                                        dictouttemp[key] = value
-                                   elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
-                                       #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S")
-                                       dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
-                                       # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
-                                       dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
-                            recordindex = record['index']
-                            dictouttemp['chunk'] = chunk
-                            dictouttemp['index_start'] = index_start
-                            dictouttemp['index_end'] = index_end
-                            os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
-                            for key,value in dictouttemp.items():
-                                if key not in dictout.keys():
-                                    dictout[key] = {}
-                                dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
-                            print(' obs record registered')
-                            jsonstream.close()
-                            os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell))
-                    records_station = pd.DataFrame.from_dict(dictout)
-                    records_station.index.set_names(('STNID','chunk','index'),inplace=True)
-                    print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\
-                          +str(STNID))
-                    records_station.to_pickle(path_yaml+'/'+pklfilename)
-                    # else:
-                    #     os.system('rm '+pklfilename)
-                    records = pd.concat([records,records_station])
+                            while not end_of_file:
+
+                                print(' next record:',next_tell)
+                                current_tell = next_tell
+                                next_record_found = False
+                                yaml_file.seek(current_tell)
+                                os.system('mkdir -p '+TEMPDIR)
+                                filebuffer = open(TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w')
+                                linebuffer = ''
+                                while ( (not next_record_found) and (not end_of_file)):
+                                    filebuffer.write(linebuffer.replace('inf','0').replace('nan','0'))
+                                    linebuffer = yaml_file.readline()
+                                    next_record_found = (linebuffer == '---\n')
+                                    end_of_file = (linebuffer == '')
+                                filebuffer.close()
+                                
+                                next_tell = yaml_file.tell()
+                                index_start = current_tell
+                                index_end = next_tell
+
+                                
+                                if which('ruby') is None:
+                                    raise RuntimeError ('ruby is not found. Aborting...')
+                                #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) :
+                                command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' 
+                                print(command)
+                                
+                                os.system(command)
+                                #jsonoutput = subprocess.check_output(command,shell=True) 
+                                #print(jsonoutput)
+                                #jsonstream = io.StringIO(jsonoutput)
+                                jsonstream = open(TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
+                                record = json.load(jsonstream)
+                                dictouttemp = {}
+                                for key,value in record['pars'].items():
+                                    # we don't want the key with columns that have none values
+                                    if value is not None: 
+                                       regular_numeric_types =[ type(x) for x in[0,False,0.0]]
+                                       if (type(value) in regular_numeric_types):
+                                            dictouttemp[key] = value
+                                       elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str):
+                                           #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S")
+                                           dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z")
+                                           # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!!
+                                           dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC)
+                                recordindex = record['index']
+                                dictouttemp['chunk'] = chunk
+                                dictouttemp['index_start'] = index_start
+                                dictouttemp['index_end'] = index_end
+                                os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell))
+                                for key,value in dictouttemp.items():
+                                    if key not in dictout.keys():
+                                        dictout[key] = {}
+                                    dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key]
+                                print(' obs record registered')
+                                jsonstream.close()
+                                os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell))
+                            records_station_chunk = pd.DataFrame.from_dict(dictout)
+                            records_station_chunk.index.set_names(('STNID','chunk','index'),inplace=True)
+                            print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\
+                                  +str(STNID)+', chunk number '+str(chunk))
+                            records_station_chunk.to_pickle(path_yaml+'/'+pklfilename)
+                            records_station = pd.concat([records_station,records_station_chunk])
+                        # else:
+                        #     os.system('rm '+pklfilename)
+                if (getchunk == 'all') and (pklfilename_unified not in pklchunks):
+                    pklfilename_unified = format(STNID,'05d')+'_'+subset+'.pkl'
+                    print('writing unified table file ('+path_yaml+'/'+pklfilename_unified+') for station '\
+                                  +str(STNID))
+                    records_station.to_pickle(path_yaml+'/'+pklfilename_unified)
+
+            records = pd.concat([records,records_station])
     return records
 
 def stdrel(mod,obs,columns):
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index b9da41c..0409509 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -220,15 +220,37 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
             # investigated. In the meantime, we filter them
 
             if self.path_obs is not None:
-                valid = ((self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt > - 0.0020) & 
-                        ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                        ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
+                print('exclude exceptional observations')
+                print('exclude unrealistic model output -> should be investigated!')
+                valid = (\
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt >  0.250) & 
+                         #(self.frames['stats']['records_all_stations_mod_stats'].dthetadt >  0.25000) & 
+                         #(self.frames['stats']['records_all_stations_mod_stats'].dthetadt <  1.8000) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt <  1.8000) & 
+                         #(self.frames['stats']['records_all_stations_mod_stats'].dhdt >  50.0000) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt >  40.0000) & 
+                         #(self.frames['stats']['records_all_stations_mod_stats'].dhdt <  350.) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt <  350.) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt >  -.00055) & 
+                         #(self.frames['stats']['records_all_stations_mod_stats'].dqdt >  -.00055) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt <  .0003) & 
+
+                         # filter 'extreme' model output -> should be investigated!
+                         (self.frames['stats']['records_all_stations_mod_stats'].dqdt <  .0006) & 
+                         (self.frames['stats']['records_all_stations_mod_stats'].dqdt >  -.0006) & 
+                         (self.frames['stats']['records_all_stations_mod_stats'].dthetadt >  .2) & 
+                         (self.frames['stats']['records_all_stations_mod_stats'].dthetadt <  2.) & 
+                         # (self.frames['stats']['records_all_stations_mod_stats'].dqdt <  .0003) & 
+                         # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & 
+                         # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & 
+                         ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
+                         ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
 
                 for key in self.frames['stats'].keys():
                     if (type(self.frames['stats'][key]) == pd.DataFrame) and \
                        (self.frames['stats'][key].index.names == indextype):
                         self.frames['stats'][key] = self.frames['stats'][key][valid]
-                print(str(len(valid) - np.sum(valid))+' soundings are filtered')
+                print("WARNING WARNING!: "+ str(len(valid) - np.sum(valid))+' soundings are filtered')
 
         self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
 
@@ -404,6 +426,12 @@ def next_record(self,event=None,jump=1):
             self.frames['profiles']['current_station_file_mod'] = \
                 open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
 
+            if self.path_obs is not None:
+                if 'current_station_file_afternoon' in self.frames['profiles'].keys():
+                    self.frames['profiles']['current_station_file_afternoon'].close()
+                self.frames['profiles']['current_station_file_afternoon'] = \
+                    open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_afternoon.yaml','r')
+
         self.update_record()
 
     def prev_record(self,event=None):
@@ -572,7 +600,7 @@ def plot(self):
 
         label = 'times'
                
-        axes[label] = fig.add_axes([0.30,0.90,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
+        axes[label] = fig.add_axes([0.30,0.87,0.30,0.10]) #[*left*, *bottom*, *width*,    *height*]
         # add pointers to the data of the axes
         axes[label].data = {}
         # add pointers to color fields (for maps and colorbars) in the axes
@@ -697,13 +725,13 @@ def plot(self):
         self.axes[label] = fig.add_axes([0.86,0.44,0.12,0.50], label=label)
 
         label = 'out:h'
-        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.10], label=label)
+        self.axes[label] = fig.add_axes([0.50,0.27,0.22,0.09], label=label)
 
         label = 'out:theta'
-        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.10], label=label)
+        self.axes[label] = fig.add_axes([0.50,0.17,0.22,0.09], label=label)
 
         label = 'out:q'
-        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.10], label=label)
+        self.axes[label] = fig.add_axes([0.50,0.07,0.22,0.09], label=label)
 
         label = 'SEB'
         self.axes[label] = fig.add_axes([0.77,0.07,0.22,0.30], label=label)
@@ -1142,9 +1170,9 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                                                xy=(x,y),\
                                                xytext=(0.05,0.05),\
                                                textcoords='axes fraction',\
-                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z)),\
+                                               bbox=dict(boxstyle="round",fc=self.statsviewcmap(z),edgecolor='black'),\
                                                color='white',\
-                                               arrowprops=dict(arrowstyle="->",linewidth=1.1))
+                                               arrowprops=dict(arrowstyle="->",linewidth=1.1,color='black'))
                 # self.axes['stats_'+key].data[key+'_current_record'] = \
                 #        self.axes['stats_'+key].scatter(x,y, c=z,\
                 #                 cmap=self.statsviewcmap,\
@@ -1273,9 +1301,9 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                                          xytext=(0.05,0.05),
                                          textcoords='axes fraction', 
                                          bbox=dict(boxstyle="round",
-                                         fc = cm.viridis(colorstation)),
+                                         fc = cm.viridis(colorstation),edgecolor='black'),
                                          arrowprops=dict(arrowstyle="->",
-                                                         linewidth=1.1),
+                                                         linewidth=1.1,color='black'),
                                          color='white' if colorstation < 0.5 else 'black')
                     #print('r9')
 
diff --git a/class4gl/model.py b/class4gl/model.py
index 3993200..2ae873f 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -933,7 +933,7 @@ def run_mixed_layer(self):
         htend_pre       = self.we + self.ws + self.wf - self.M
         
         #self.thetatend   = (self.wtheta - self.wthetae             ) / self.h + self.advtheta 
-        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h - self.advtheta
+        thetatend_pre = (self.wtheta - self.wthetae             ) / self.h + self.advtheta
         
  
         #print('thetatend_pre',thetatend_pre)
@@ -972,15 +972,15 @@ def run_mixed_layer(self):
         self.dthetatend = l_entrainment*dthetatend_pre + \
                         (1.-l_entrainment)*0.
         self.thetatend = l_entrainment*thetatend_pre + \
-                        (1.-l_entrainment)*((self.wtheta  ) / self.h - self.advtheta)
+                        (1.-l_entrainment)*((self.wtheta  ) / self.h + self.advtheta)
         self.htend = l_entrainment*htend_pre + \
                      (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta)
         #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta)
         #stop
 
 
-        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h - self.advq
-        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h - self.advCO2
+        self.qtend       = (self.wq     - l_entrainment*self.wqe     - self.wqM  ) / self.h + self.advq
+        self.CO2tend     = (self.wCO2   - l_entrainment*self.wCO2e   - self.wCO2M) / self.h + self.advCO2
 
 
         # self.qtend = l_entrainment*qtend_pre + \
@@ -1017,8 +1017,8 @@ def run_mixed_layer(self):
      
         # assume u + du = ug, so ug - u = du
         if(self.sw_wind):
-            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h - self.advu
-            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h - self.advv
+            self.utend       = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du)  / self.h + self.advu
+            self.vtend       =  self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv)  / self.h + self.advv
   
             self.dutend      = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend
             self.dvtend      = self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend
@@ -1083,7 +1083,7 @@ def integrate_mixed_layer(self):
 
             # take into account advection for the whole profile
                 
-                self.air_ap[var] = self.air_ap[var] - self.dtcur * self.air_ap['adv'+var]
+                self.air_ap[var] = self.air_ap[var] + self.dtcur * self.air_ap['adv'+var]
 
             var = 'z'
             #print(self.air_ap[var])
diff --git a/class4gl/setup/batch_setup_global.py b/class4gl/setup/batch_setup_global_old.py
similarity index 100%
rename from class4gl/setup/batch_setup_global.py
rename to class4gl/setup/batch_setup_global_old.py
diff --git a/class4gl/setup/batch_setup_igra.pbs b/class4gl/setup/batch_setup_igra.pbs
new file mode 100644
index 0000000..ecf97f8
--- /dev/null
+++ b/class4gl/setup/batch_setup_igra.pbs
@@ -0,0 +1,30 @@
+#!/bin/bash 
+#
+#PBS -j oe
+#PBS -M hendrik.wouters@ugent.be
+#PBS -m b
+#PBS -m e
+#PBS -m a
+#PBS -N c4gl_setup
+
+module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+
+EXEC_ALL="python $C4GLJOB_exec --first_station_row $PBS_ARRAYID --last_station_row $PBS_ARRAYID"
+
+for var in $(compgen -v | grep C4GLJOB_ ); do
+    echo $var
+    if [ "$var" != "C4GLJOB_exec" ]
+    then
+    EXEC_ALL=$EXEC_ALL" --"`echo $var | cut -c9-`"="${!var}
+    fi
+done
+
+
+# EXEC_ALL="python $exec --global-chunk-number $PBS_ARRAYID \
+#                        --split-by $split_by \
+#                        --dataset $dataset \
+#                        --experiments $experiments"
+#                  #      --path-soundings $path_soundings \
+echo Executing: $EXEC_ALL
+$EXEC_ALL
+
diff --git a/class4gl/setup/batch_setup_igra.py b/class4gl/setup/batch_setup_igra.py
new file mode 100644
index 0000000..5a33bb9
--- /dev/null
+++ b/class4gl/setup/batch_setup_igra.py
@@ -0,0 +1,187 @@
+
+# -*- coding: utf-8 -*-
+
+import logging
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import importlib
+spam_loader = importlib.find_loader('Pysolar')
+found = spam_loader is not None
+if found:
+    import Pysolar
+    import Pysolar.util.GetSunriseSunset
+else:
+    import pysolar as Pysolar
+    GetSunriseSunset =  Pysolar.util.get_sunrise_sunset
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--pbs_string',default=' -l walltime=30:0:0')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+# parser.add_argument('--first_YYYYMMDD',default="19810101")
+# parser.add_argument('--last_YYYYMMDD',default="20180101")
+# parser.add_argument('--first_station_row')
+# parser.add_argument('--last_station_row')
+# parser.add_argument('--station_id') # run a specific station id
+# parser.add_argument('--latitude') # run a specific station id
+# parser.add_argument('--longitude') # run a specific station id
+# parser.add_argument('--error_handling',default='dump_on_success')
+# parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+# arser.add_argument('--runtime',default='from_afternoon_profile')
+
+# parser.add_argument('--split_by',default="-1")# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+EXP_DEFS  =\
+{
+  'ERA-INTERIM_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'ERA-INTERIM_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+# initialize global data
+# ===============================
+print("Initializing global data")
+# ===============================
+globaldata = data_global()
+globaldata.sources = {**globaldata.sources,**{
+    
+        "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_19830609-19830808_6hourly.nc",
+        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830609-19830808_6hourly.nc",
+        "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_19830609-19830808_6hourly.nc",
+        "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_19830609-19830808_6hourly.nc",
+    
+#        "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_19830209-19830410_6hourly.nc",
+ #       "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q*_6hourly.nc",
+ #       "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u*_6hourly.nc",
+ #       "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v*_6hourly.nc",
+        }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_input,refetch_stations=False)
+
+
+# # ===============================
+# print("Selecting station by ID")
+# # ===============================
+# stations_iter = stations_iterator(all_stations)
+# STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+# all_stations_select = pd.DataFrame([run_station])
+# print(run_station)
+
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+#  # these are all the stations that are supposed to run by the whole batch (all
+#  # chunks). We narrow it down according to the station(s) specified.
+#  if (args.latitude is not None) or (args.longitude is not None):
+#      print('custom coordinates not implemented yet, please ask developer.')
+#  elif args.station_id is not None:
+#      print("Selecting station by ID")
+#      stations_iter = stations_iterator(all_stations)
+#      STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+#      all_stations_select = pd.DataFrame([run_station])
+#  #     print("making a custom station according to the coordinates")
+#  # 
+#  #     STNID = 43.23
+#  else:
+all_stations_select = pd.DataFrame(all_stations.table)
+#      if args.last_station_row is not None:
+#          all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+#      if args.first_station_row is not None:
+#          all_stations_select = all_station_select.iloc[int(args.first_station):]
+
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+# dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d",)
+# dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d",)
+# # ===============================
+# print("Creating daily timeseries from", dtfirst," to ", dtlast)
+# # ===============================
+# DTS = [dtfirst + dt.timedelta(days=iday) for iday in \
+#        range(int((dtlast + dt.timedelta(days=1) -
+#                   dtfirst).total_seconds()/3600./24.))]
+# 
+# if int(args.split_by) != -1:
+#     totalchunks = len(all_stations_select)*math.ceil(len(DTS)/int(args.split_by))
+# else:
+totalchunks = len(all_stations_select)
+# 
+# print(totalchunks)
+
+#if args.cleanup_experiments:
+#    os.system("rm -R "+args.path_experiments+'/')
+
+# C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/setup/batch_setup_igra.pbs -t 0-'+\
+            str(totalchunks-1)+" -v "
+# propagate arguments towards the job script
+lfirst = True
+for argkey in args.__dict__.keys():
+    if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
+        # default values are specified in the simulation script, so
+        # excluded here
+        (args.__dict__[argkey] is not None)
+       ):
+        print(argkey)
+        print(args.__dict__[argkey])
+        if lfirst:
+            command +=' C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        else:
+            command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        lfirst=False
+
+print('Submitting array job: '+command)
+os.system(command)
diff --git a/class4gl/simulations/batch_update.py b/class4gl/setup/batch_update.py
similarity index 71%
rename from class4gl/simulations/batch_update.py
rename to class4gl/setup/batch_update.py
index d9ff78a..ab51951 100644
--- a/class4gl/simulations/batch_update.py
+++ b/class4gl/setup/batch_update.py
@@ -3,9 +3,9 @@
 """
 Usage:
 python batch_update.py --exec $CLASS4GL/simulations/update_yaml_old.py
---path_experiments $VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC/ --path_forcing
+--path_experiments $VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC/ --path_input
 $VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC_BACKUP_20180904/ --c4gl_path_lib
-$CLASS4GL --split_by 50 --global_keys "KGC" --subset_forcing ini --experiments
+$CLASS4GL --split_by 50 --global_keys "KGC" --subset_input morning --experiments
 "GLOBAL_NOAC"
 """
 
@@ -26,32 +26,32 @@
 parser.add_argument('--exec') # chunk simulation script
 parser.add_argument('--first_station_row')
 parser.add_argument('--last_station_row')
-parser.add_argument('--pbs_string',default=' -l walltime=:2:0:0')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling')
-parser.add_argument('--subset_forcing',default='morning') 
+parser.add_argument('--subset_input',default='morning') 
                                         # this tells which yaml subset
                                         # to initialize with.
                                         # Most common options are
                                         # 'morning' and 'ini'.
+parser.add_argument('--subset_output',default='morning') 
 
 # Tuntime is usually specified from the afternoon profile. You can also just
 # specify the simulation length in seconds
 parser.add_argument('--runtime')
 # delete folders of experiments before running them
-parser.add_argument('--cleanup_experiments',default=False)
-parser.add_argument('--experiments')
 parser.add_argument('--split_by',default=50)# station soundings are split
                                             # up in chunks
 
 parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--path_forcing') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--path_experiments') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_input') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 
 
 #arguments only used for update_yaml.py
 parser.add_argument('--path_dataset') 
 parser.add_argument('--global_keys') 
+parser.add_argument('--updates') 
 args = parser.parse_args()
 
 sys.path.insert(0, args.c4gl_path_lib)
@@ -71,16 +71,9 @@
 #  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 #  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
 
-
-
-# #SET = 'GLOBAL'
-# SET = args.dataset
-
-# path_forcingSET = args.path_forcing+'/'+SET+'/'
-
-print("getting all stations from --path_forcing")
+print("getting all stations from --path_input")
 # these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
 
 print('defining all_stations_select')
 # these are all the stations that are supposed to run by the whole batch (all
@@ -102,8 +95,8 @@
 
 print("getting all records of the whole batch")
 all_records_morning_select = get_records(all_stations_select,\
-                                         args.path_forcing,\
-                                         subset=args.subset_forcing,\
+                                         args.path_input,\
+                                         subset=args.subset_input,\
                                          refetch_records=False,\
                                         )
 
@@ -119,26 +112,28 @@
 #if sys.argv[1] == 'qsub':
 # with qsub
 
-print(args.experiments.strip().split(" "))
-
-for EXP in args.experiments.strip().split(" "):
-    if args.cleanup_experiments:
-        os.system("rm -R "+args.path_experiments+'/'+EXP)
-
-    #C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
-    command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
-                str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
-    # propagate arguments towards the job script
-    for argkey in args.__dict__.keys():
-        if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
-            # default values are specified in the simulation script, so
-            # excluded here
-            (args.__dict__[argkey] is not None)
-           ):
-                command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
-
-    print('Submitting array job for experiment '+EXP+': '+command)
-    os.system(command)
+
+
+#C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+            str(totalchunks-1)+" -v '"
+# propagate arguments towards the job script
+first = True
+for argkey in args.__dict__.keys():
+    if ((argkey not in ['pbs_string']) and \
+        # default values are specified in the simulation script, so
+        # excluded here
+        (args.__dict__[argkey] is not None)
+       ):
+        if first:
+            command +='C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        else:
+            command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+    first = False
+
+command = command+"'"
+print('Submitting array job: '+command)
+os.system(command)
 
 
     #os.system(command)
diff --git a/class4gl/setup/setup_bllast.py b/class4gl/setup/setup_bllast.py
index af8c8bb..0de6fc4 100644
--- a/class4gl/setup/setup_bllast.py
+++ b/class4gl/setup/setup_bllast.py
@@ -12,9 +12,9 @@
 import Pysolar
 import sys
 import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 
 
 globaldata = data_global()
diff --git a/class4gl/setup/setup_global_afternoon.py b/class4gl/setup/setup_global_afternoon.py
index 4a49a97..e8745dd 100644
--- a/class4gl/setup/setup_global_afternoon.py
+++ b/class4gl/setup/setup_global_afternoon.py
@@ -131,10 +131,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 if args.station_id is not None:
     df_stations = df_stations[df_stations.ID == int(args.station_id)]
 else:
-    if args.first_station_row is not None:
-        df_stations = df_stations[int(args.first_station_row):]
     if args.last_station_row is not None:
         df_stations = df_stations[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
 
 STNlist = list(df_stations.iterrows())
 
diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py
index f9efe2c..3cc3c74 100644
--- a/class4gl/setup/setup_goamazon.py
+++ b/class4gl/setup/setup_goamazon.py
@@ -10,9 +10,9 @@
 import sys
 import pytz
 import glob
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 
 
 globaldata = data_global()
diff --git a/class4gl/setup/setup_humppa.py b/class4gl/setup/setup_humppa.py
index ff37628..b7bcc7c 100644
--- a/class4gl/setup/setup_humppa.py
+++ b/class4gl/setup/setup_humppa.py
@@ -8,9 +8,9 @@
 import Pysolar
 import sys
 import pytz
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
 from class4gl import class4gl_input, data_global,class4gl
-from interface_humppa import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 
 
 globaldata = data_global()
diff --git a/class4gl/setup/setup_global.py b/class4gl/setup/setup_igra.py
similarity index 91%
rename from class4gl/setup/setup_global.py
rename to class4gl/setup/setup_igra.py
index b5396a5..dd71324 100644
--- a/class4gl/setup/setup_global.py
+++ b/class4gl/setup/setup_igra.py
@@ -44,6 +44,7 @@
 parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 # parser.add_argument('--first_YYYYMMDD',default="19810101")
 # parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--startyear',default="1981")
 parser.add_argument('--first_station_row')
 parser.add_argument('--last_station_row')
 parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
@@ -119,22 +120,31 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 # get_valid_stations.py)
 # args.path_input = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
 
-df_stations = pd.read_fwf(fn_stations,names=['Country code',\
-                                               'ID',\
-                                               'Name',\
-                                               'latitude',\
-                                               'longitude',\
-                                               'height',\
-                                               'unknown',\
-                                               'startyear',\
-                                               'endyear'])
+# df_stations = pd.read_fwf(fn_stations,names=['Country code',\
+#                                                'ID',\
+#                                                'Name',\
+#                                                'latitude',\
+#                                                'longitude',\
+#                                                'height',\
+#                                                'unknown',\
+#                                                'startyear',\
+#                                                'endyear'])
+# 
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_input,refetch_stations=False)
+df_stations = all_stations.table
+df_stations.columns
+
 if args.station_id is not None:
-    df_stations = df_stations[df_stations.ID == int(args.station_id)]
+    df_stations = df_stations.query('STNID == '+args.station_id)
 else:
-    if args.first_station_row is not None:
-        df_stations = df_stations[int(args.first_station_row):]
     if args.last_station_row is not None:
         df_stations = df_stations[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
 
 STNlist = list(df_stations.iterrows())
 
@@ -143,8 +153,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     one_run = False
 # for iSTN,STN in STNlist[5:]:  
     
-    fnout = args.path_output+"/"+format(STN['ID'],'05d')+"_morning.yaml"
-    fnout_afternoon = args.path_output+"/"+format(STN['ID'],'05d')+"_afternoon.yaml"
+    fnout = args.path_output+"/"+format(STN.name,'05d')+"_morning.yaml"
+    fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_afternoon.yaml"
     
 
     # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
@@ -152,11 +162,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         
     with open(fnout,'w') as fileout, \
          open(fnout_afternoon,'w') as fileout_afternoon:
-        wy_strm = wyoming(PATH=args.path_input, STNM=STN['ID'])
-        wy_strm.set_STNM(int(STN['ID']))
+        wy_strm = wyoming(PATH=args.path_input, STNM=STN.name)
+        wy_strm.set_STNM(int(STN.name))
 
         # we consider all soundings from 1981 onwards
-        wy_strm.find_first(year=1981)
+        wy_strm.find_first(year=int(args.startyear))
         #wy_strm.find(dt.datetime(2004,10,19,6))
         
         c4gli = class4gl_input(debug_level=logging.INFO)
@@ -258,7 +268,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                         logic_afternoon['daylight'] = \
                           ((c4gli_afternoon.pars.ldatetime - \
                             c4gli_afternoon.pars.lSunset \
-                           ).total_seconds()/3600. <= -2.)
+                           ).total_seconds()/3600. <= 1.)
 
 
                         le3000_afternoon = \
@@ -330,7 +340,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                     print('get profile failed')
                 
     if one_run:
-        STN.name = STN['ID']
+        #STN.name = STN.name
         all_records_morning = get_records(pd.DataFrame([STN]),\
                                       args.path_output,\
                                       subset='morning',
diff --git a/class4gl/setup/setup_igra_pkl.py b/class4gl/setup/setup_igra_pkl.py
new file mode 100644
index 0000000..8e795fe
--- /dev/null
+++ b/class4gl/setup/setup_igra_pkl.py
@@ -0,0 +1,359 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py <i>
+    where <i> is an integer indicating the row index of the station list
+    under args.path_output+'/'+fn_stations (see below)
+
+this script should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+#from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+# parser.add_argument('--first_YYYYMMDD',default="19810101")
+# parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--station_id') # run a specific station id
+# parser.add_argument('--error_handling',default='dump_on_success')
+# parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+
+
+# args.path_output = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+fn_stations = args.path_input+'/igra-stations.txt'
+
+
+#calculate the root mean square error
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# initialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+# args.path_input = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+# df_stations = pd.read_fwf(fn_stations,names=['Country code',\
+#                                                'ID',\
+#                                                'Name',\
+#                                                'latitude',\
+#                                                'longitude',\
+#                                                'height',\
+#                                                'unknown',\
+#                                                'startyear',\
+#                                                'endyear'])
+# 
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_input,refetch_stations=False)
+df_stations = all_stations.table
+df_stations.columns
+
+if args.station_id is not None:
+    df_stations = df_stations.query('STNID == '+args.station_id)
+else:
+    if args.last_station_row is not None:
+        df_stations = df_stations[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
+
+STNlist = list(df_stations.iterrows())
+
+os.system('mkdir -p '+args.path_output)
+for iSTN,STN in STNlist:  
+    one_run = True
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = args.path_output+"/"+format(STN.name,'05d')+"_morning.yaml"
+    fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_afternoon.yaml"
+    
+
+    # # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    # #                   for EXP in experiments.keys()])
+    #     
+    # with open(fnout,'w') as fileout, \
+    #      open(fnout_afternoon,'w') as fileout_afternoon:
+    #     wy_strm = wyoming(PATH=args.path_input, STNM=STN.name)
+    #     wy_strm.set_STNM(int(STN.name))
+
+    #     # we consider all soundings from 1981 onwards
+    #     wy_strm.find_first(year=1981)
+    #     #wy_strm.find(dt.datetime(2004,10,19,6))
+    #     
+    #     c4gli = class4gl_input(debug_level=logging.INFO)
+    #     c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+    #     # so we continue as long as we can find a new sounding
+    #             
+    #     while wy_strm.current is not None:
+    #         
+    #         c4gli.clear()
+    #         try: 
+    #             c4gli.get_profile_wyoming(wy_strm)
+    #             #print(STN['ID'],c4gli.pars.datetime)
+    #             #c4gli.get_global_input(globaldata)
+
+    #             print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+    #             logic = dict()
+    #             logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+
+    #             # Sounding should have taken place after 3 hours before sunrise.
+    #             # Note that the actual simulation only start at sunrise
+    #             # (specified by ldatetime_daylight), so the ABL cooling af the time
+    #             # before sunrise is ignored by the simulation.
+    #             logic['daylight'] = \
+    #                 ((c4gli.pars.ldatetime - 
+    #                   c4gli.pars.lSunrise).total_seconds()/3600. >= -3.)
+    #             
+    #             logic['springsummer'] = (c4gli.pars.theta > 278.)
+    #             
+    #             # we take 3000 because previous analysis (ie., HUMPPA) has
+    #             # focussed towards such altitude
+    #             le3000 = (c4gli.air_balloon.z <= 3000.)
+    #             logic['10measurements'] = (np.sum(le3000) >= 10) 
+
+    #             leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+    #             logic['mlerrlow'] = (\
+    #                     (len(np.where(leh)[0]) > 0) and \
+    #                     # in cases where humidity is not defined, the mixed-layer
+    #                     # values get corr
+    #                     (not np.isnan(c4gli.pars.theta)) and \
+    #                     (rmse(c4gli.air_balloon.theta[leh] , \
+    #                           c4gli.pars.theta,filternan_actual=True) < 1.)\
+    #                           )
+    # 
+
+    #             logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+    #             
+    #             print('logic:', logic)
+    #             # the result
+    #             morning_ok = np.mean(list(logic.values()))
+    #             print(morning_ok,c4gli.pars.ldatetime)
+
+    #         except:
+    #             morning_ok =False
+    #             print('obtain morning not good')
+
+    #         # the next sounding will be used either for an afternoon sounding
+    #         # or for the morning sounding of the next day.
+    #         wy_strm.find_next()
+    #         # If the morning is ok, then we try to find a decent afternoon
+    #         # sounding
+    #         if morning_ok == 1.:
+    #             print('MORNING OK!')
+    #             # we get the current date
+    #             current_date = dt.date(c4gli.pars.ldatetime.year, \
+    #                                    c4gli.pars.ldatetime.month, \
+    #                                    c4gli.pars.ldatetime.day)
+    #             c4gli_afternoon.clear()
+    #             print('AFTERNOON PROFILE CLEARED')
+    #             try:
+    #                 c4gli_afternoon.get_profile_wyoming(wy_strm)
+    #                 print('AFTERNOON PROFILE OK')
+
+    #                 if wy_strm.current is not None:
+    #                     current_date_afternoon = \
+    #                                dt.date(c4gli_afternoon.pars.ldatetime.year, \
+    #                                        c4gli_afternoon.pars.ldatetime.month, \
+    #                                        c4gli_afternoon.pars.ldatetime.day)
+    #                 else:
+    #                     # a dummy date: this will be ignored anyway
+    #                     current_date_afternoon = dt.date(1900,1,1)
+
+    #                 # we will dump the latest afternoon sounding that fits the
+    #                 # minimum criteria specified by logic_afternoon
+    #                 print(current_date,current_date_afternoon)
+    #                 c4gli_afternoon_for_dump = None
+    #                 while ((current_date_afternoon == current_date) and \
+    #                        (wy_strm.current is not None)):
+    #                     logic_afternoon =dict()
+
+    #                     logic_afternoon['afternoon'] = \
+    #                         (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+    #                     # the sounding should have taken place before 2 hours
+    #                     # before sunset. This is to minimize the change that a
+    #                     # stable boundary layer (yielding very low mixed layer
+    #                     # heights) is formed which can not be represented by
+    #                     # class.
+    #                     logic_afternoon['daylight'] = \
+    #                       ((c4gli_afternoon.pars.ldatetime - \
+    #                         c4gli_afternoon.pars.lSunset \
+    #                        ).total_seconds()/3600. <= -2.)
+
+
+    #                     le3000_afternoon = \
+    #                         (c4gli_afternoon.air_balloon.z <= 3000.)
+    #                     logic_afternoon['5measurements'] = \
+    #                         (np.sum(le3000_afternoon) >= 5) 
+
+    #                     # we only store the last afternoon sounding that fits these
+    #                     # minimum criteria
+
+    #                     afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+    #                     print('logic_afternoon: ',logic_afternoon)
+    #                     print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+    #                     if afternoon_ok == 1.:
+    #                         # # doesn't work :(
+    #                         # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+    #                         
+    #                         # so we just create a new one from the same wyoming profile
+    #                         c4gli_afternoon_for_dump = class4gl_input()
+    #                         c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+    #                     wy_strm.find_next()
+    #                     c4gli_afternoon.clear()
+    #                     c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+    #                     if wy_strm.current is not None:
+    #                         current_date_afternoon = \
+    #                                dt.date(c4gli_afternoon.pars.ldatetime.year, \
+    #                                        c4gli_afternoon.pars.ldatetime.month, \
+    #                                        c4gli_afternoon.pars.ldatetime.day)
+    #                     else:
+    #                         # a dummy date: this will be ignored anyway
+    #                         current_date_afternoon = dt.date(1900,1,1)
+
+    #                     # Only in the case we have a good pair of soundings, we
+    #                     # dump them to disk
+    #                 if c4gli_afternoon_for_dump is not None:
+    #                     c4gli.update(source='pairs',pars={'runtime' : \
+    #                         int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+    #                              c4gli.pars.datetime_daylight).total_seconds())})
+    # 
+    # 
+    #                     print('ALMOST...')
+    #                     if c4gli.pars.runtime > 18000.: # more than 5 hours simulation
+    #                             
+    #     
+    #                         c4gli.get_global_input(globaldata)
+    #                         print('VERY CLOSE...')
+    #                         if c4gli.check_source_globaldata() and \
+    #                             (c4gli.check_source(source='wyoming',\
+    #                                                check_only_sections='pars')):
+    #                             c4gli.dump(fileout)
+    #                             
+    #                             c4gli_afternoon_for_dump.dump(fileout_afternoon)
+    #                             
+    #                             
+    #                             # for keyEXP,dictEXP in experiments.items():
+    #                             #     
+    #                             #     c4gli.update(source=keyEXP,pars = dictEXP)
+    #                             #     c4gl = class4gl(c4gli)
+    #                             #     # c4gl.run()
+    #                             #     
+    #                             #     c4gl.dump(c4glfiles[key])
+    #                             
+    #                             print('HIT!!!')
+    #                             one_run = True
+    #             except:
+    #                 print('get profile failed')
+    #             
+    if one_run:
+        #STN.name = STN.name
+        all_records_morning = get_records(pd.DataFrame([STN]),\
+                                      args.path_output,\
+                                      subset='morning',
+                                      refetch_records=True,
+                                      )
+        all_records_afternoon = get_records(pd.DataFrame([STN]),\
+                                      args.path_output,\
+                                      subset='afternoon',
+                                      refetch_records=True,
+                                      )
+    # else:
+    #     os.system('rm '+fnout)
+    #     os.system('rm '+fnout_afternoon)
+
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
diff --git a/class4gl/setup/update_setup.py b/class4gl/setup/update_setup.py
new file mode 100644
index 0000000..a36b60a
--- /dev/null
+++ b/class4gl/setup/update_setup.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+
+""" 
+Purpose:
+    update variables in class4gl yaml files, eg., when you need new categorical
+    values in the table.
+
+
+"""
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import dateutil.parser
+
+import argparse
+
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--path_output')
+parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv'])
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--mode',default='ini') # run a specific station id
+# this is the type of the yaml that needs to be updated. Can be 'ini' or 'mod'
+parser.add_argument('--updates')
+parser.add_argument('--subset_input',default='morning') # this tells which yaml subset
+parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+                                                      # to update in the yaml
+                                                      # dataset.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+parser.add_argument('--split_by',default=-1)# station soundings are split
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--global_keys') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# initialize global data
+globaldata = data_global()
+if 'era_profiles' in args.updates.strip().split(" "):
+    globaldata.sources = {**globaldata.sources,**{
+            "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc",
+            "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
+            "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc",
+            "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc",
+            }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:  # fix: 'all_station_select' and 'args.last_station' were undefined names
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:  # fix: same undefined names as above ('..._row' is the declared option)
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunck from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    # if not (int(args.split_by) > 0) :
+    #         raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            #chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+
+            chunks_current_station = len(all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
+            print('chunks_current_station',chunks_current_station)
+
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk =all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique()[int(args.global_chunk_number) - totalchunks ]
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching current records')
+records_input = get_records(run_stations,\
+                              args.path_input,\
+                              subset=args.subset_input,
+                              refetch_records=False,
+                              )
+
+# if args.timestamp is None:
+#     backupdir = args.path_input+'/'+dt.datetime.now().isoformat()+'/'
+# else: 
+#     backupdir = args.path_input+'/'+args.timestamp+'/'
+# print('creating backup dir: '+backupdir)
+# os.system('mkdir -p "'+backupdir+'"')
+
+
+os.system('mkdir -p '+args.path_output)
+
+for istation,current_station in run_stations.iterrows():
+    records_input_station = records_input.query('STNID == ' +\
+                                                    str(current_station.name))
+
+    records_input_station_chunk = records_input_station.query('STNID == ' +\
+                                                    str(current_station.name)+\
+                                                   '& chunk == '+str(run_station_chunk))
+    print('lenrecords_input_station_chunk: ',len(records_input_station_chunk))
+    print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
+    print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
+    
+    # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
+    #     print("warning: outside of profile number range for station "+\
+    #           str(current_station)+". Skipping chunk number for this station.")
+    if len(records_input_station_chunk) == 0:
+        print("warning: outside of profile number range for station "+\
+              str(current_station)+". Skipping chunk number for this station.")
+    else:
+        # normal case
+        if ((int(args.split_by) > 0) or \
+            (os.path.isfile(args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                 str(run_station_chunk)+'_'+args.subset_input+'.yaml'))):
+            fn_input = \
+                    args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                    str(run_station_chunk)+'_'+args.subset_input+'.yaml'
+            file_input = \
+                open(fn_input,'r')
+            fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_output+'.yaml'
+            file_output = \
+                open(fn_output,'w')
+            # fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+        else:
+            print("\
+Warning. We are choosing chunk 0 without specifying it in filename.    \
+ No-chunk naming will be removed in the future."\
+                 )
+
+            fn_input = \
+                    args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                    args.subset_input+'.yaml'
+            file_input = \
+                open(fn_input,'r')
+            fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_output+'.yaml'
+            file_output = \
+                open(fn_output,'w')
+            # fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          args.subset_forcing+'.pkl'
+
+        onerun = False
+        print('starting station chunk number: '\
+              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+        #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+        # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+        #                                                 str(current_station.name)+\
+        #                                                '& chunk == '+str(run_station_chunk))
+        isim = 0
+        for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows():
+                print('starting '+str(isim+1)+' out of '+\
+                  str(len(records_input_station_chunk) )+\
+                  ' (station total: ',str(len(records_input_station)),')')  
+            
+                c4gli_output = get_record_yaml(file_input, 
+                                                record_input.index_start, 
+                                                record_input.index_end,
+                                                mode=args.mode)
+                if args.diag_tropo is not None:
+                    seltropo = (c4gli_output.air_ac.p > c4gli_output.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))  # fix: 'c4gli_input' was never defined; record was read into 'c4gli_output'
+                    profile_tropo = c4gli_output.air_ac[seltropo]
+                    for var in args.diag_tropo.strip().split(" "):  # fix: argparse yields one string; split it as done for args.updates above
+                        if var[:3] == 'adv':
+                            mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                            c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                        else:
+                            print("warning: tropospheric variable "+var+" not recognized")
+                if 'era_profiles' in args.updates.strip().split(" "):
+                    c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp'])
+
+                c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp})
+
+                cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+                Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+                Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+                R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q)
+                rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t
+                dz = c4gli_output.air_ac.delpdgrav/rho
+                z = [dz.iloc[-1]/2.]
+                for idz in list(reversed(range(0,len(dz)-1,1))):
+                    z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.)
+                z = list(reversed(z))
+
+                theta = c4gli_output.air_ac.t * \
+                           (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp)
+                thetav   = theta*(1. + 0.61 * c4gli_output.air_ac.q)
+
+                
+                c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z),
+                                                                       'theta':list(theta),
+                                                                       'thetav':list(thetav),
+                                                                      }))
+                air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1)
+                air_ap_mode = 'b'
+                air_ap_input_source = c4gli_output.query_source('air_ac:theta')
+
+
+                c4gli_output.mixed_layer_fit(air_ap=air_ap_input,
+                                     source=air_ap_input_source,
+                                     mode=air_ap_mode)
+
+                if not c4gli_output.check_source_globaldata():
+                    print('Warning: some input sources appear invalid')
+
+
+                
+                #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
+                
+                # if args.global_keys is not None:
+                #     print(args.global_keys.strip(' ').split(' '))
+                #     c4gli_forcing.get_global_input(
+                #         globaldata, 
+                #         only_keys=args.global_keys.strip(' ').split(' ')
+                #     )
+
+                c4gli_output.dump(file_output)
+                    
+                    
+                onerun = True
+                isim += 1
+
+
+        file_input.close()
+        file_output.close()
+
+        if onerun:
+            # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
+            # if os.path.isfile(fn_forcing_pkl):
+            #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
+            # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
+                                                       args.path_output+'/'+'/',\
+                                                       getchunk = int(run_station_chunk),\
+                                                       subset=args.subset_output,
+                                                       refetch_records=True,
+                                                       )
+
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index 9b6398e..5260a95 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -16,7 +16,7 @@
 parser.add_argument('--exec') # chunk simulation script
 parser.add_argument('--first_station_row')
 parser.add_argument('--last_station_row')
-parser.add_argument('--pbs_string',default=' -l walltime=:2:0:0')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling')
 parser.add_argument('--subset_forcing',default='morning') 
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 3746cd4..eb2c3b4 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -20,7 +20,7 @@
 parser.add_argument('--last_station_row')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling',default='dump_on_success')
-parser.add_argument('--diag_tropo',default=None)
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
 parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
                                                       # to initialize with.
                                                       # Most common options are
@@ -66,6 +66,7 @@
   'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
@@ -74,10 +75,6 @@
   'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
-
-# #SET = 'GLOBAL'
-# SET = args.dataset
-
 # ========================
 print("getting a list of stations")
 # ========================
@@ -206,6 +203,7 @@
 
     os.system('mkdir -p '+path_exp)
     for istation,current_station in run_stations.iterrows():
+        print(istation,current_station)
         records_morning_station = records_morning.query('STNID == '+str(current_station.name))
         if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
             print("warning: outside of profile number range for station "+\
@@ -234,7 +232,7 @@
             print('starting station chunk number: '\
                   +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
 
-            records_morning_station_chunk = records_morning_station.query('STNID == '+str(current_station.name)+' and chunk == '+str(run_station_chunk)) #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
@@ -247,14 +245,11 @@
                                                     record_morning.index_start, 
                                                     record_morning.index_end,
                                                     mode='ini')
-
-                    # add tropospheric parameters on advection and subsidence
-                    # (for diagnosis)
-
                     if args.diag_tropo is not None:
+                        print('add tropospheric parameters on advection and subsidence (for diagnosis)')
                         seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
                         profile_tropo = c4gli_morning.air_ac[seltropo]
-                        for var in diag_tropo:#['t','q','u','v',]:
+                        for var in args.diag_tropo:#['t','q','u','v',]:
                             if var[:3] == 'adv':
                                 mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
                                 c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
@@ -288,9 +283,9 @@
                             if not c4gli_morning.check_source_globaldata():
                                 print('Warning: some input sources appear invalid')
                             c4gl.run()
-                            print('run succesfull')
+                            print('run successful')
                         except:
-                            print('run not succesfull')
+                            print('run not successful')
                         onerun = True
 
                         c4gli_morning.dump(file_ini)
diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py
index cdc8923..08d28b8 100644
--- a/class4gl/simulations/simulations_iter.py
+++ b/class4gl/simulations/simulations_iter.py
@@ -8,12 +8,43 @@
 import sys
 import pytz
 import math
-sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
 from class4gl import class4gl_input, data_global,class4gl
 from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 from class4gl import blh,class4gl_input
 
-
 # this is a variant of global run in which the output of runs are still written
 # out even when the run crashes.
 
@@ -26,100 +57,152 @@
 #  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
 
 
-
 EXP_DEFS  =\
 {
-  'GLOBAL_ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-  'ITER_NOAC':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'ITER_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'ITER_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'ERA_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
-import argparse
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    #parser.add_argument('--timestamp')
-    parser.add_argument('--global-chunk')
-    parser.add_argument('--first-station')
-    parser.add_argument('--last-station')
-    parser.add_argument('--dataset')
-    parser.add_argument('--path-soundings')
-    parser.add_argument('--experiments')
-    parser.add_argument('--split-by',default=-1)# station soundings are split
-                                                # up in chunks
-    parser.add_argument('--station-chunk',default=0)
-    args = parser.parse_args()
-
-
-#SET = 'GLOBAL'
-SET = args.dataset
-
-if 'path-soundings' in args.__dict__.keys():
-    path_soundingsSET = args.__dict__['path-soundings']+'/'+SET+'/'
-else:
-    path_soundingsSET = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/'+SET+'/'
+# #SET = 'GLOBAL'
+# SET = args.dataset
 
-all_stations = stations(path_soundingsSET,suffix='morning',refetch_stations=True).table
+# ========================
+print("getting a list of stations")
+# ========================
 
-all_records_morning = get_records(all_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
-                              refetch_records=False,
-                              )
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
-if 'global_chunk' in args.__dict__.keys():
-    totalchunks = 0
-    stations_iterator = all_stations.iterrows()
-    in_current_chunk = False
-    while not in_current_chunk:
-        istation,current_station = stations_iterator.__next__()
-        all_records_morning_station = all_records_morning.query('STNID == '+str(current_station.name))
-        chunks_current_station = math.ceil(float(len(all_records_morning_station))/float(args.split_by))
-        in_current_chunk = (int(args.global_chunk) < (totalchunks+chunks_current_station))
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
 
-        if in_current_chunk:
-            run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-            run_station_chunk = int(args.global_chunk) - totalchunks 
 
-        totalchunks +=chunks_current_station
 
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
 else:
-    if 'last_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[:(int(args.__dict__['last_station'])+1)]
-    
-    if 'first_station' in args.__dict__.keys():
-        run_stations = run_stations.iloc[int(args.__dict__['first_station']):]
-    if 'station_chunk' in args.__dict__.keys():
-        run_station_chunk = args.station_chunk
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print(all_stations_select)
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if int(args.split_by) != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
 #print(all_stations)
-print(run_stations)
-print(args.__dict__.keys())
+print('Fetching initial/forcing records')
 records_morning = get_records(run_stations,\
-                              path_soundingsSET,\
-                              subset='morning',
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
                               refetch_records=False,
                               )
-records_afternoon = get_records(run_stations,\
-                                path_soundingsSET,\
-                                subset='afternoon',
-                                refetch_records=False,
-                                )
-
-# align afternoon records with the noon records, and set same index
-records_afternoon.index = records_afternoon.ldatetime.dt.date
-records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
-records_afternoon.index = records_morning.index
-
-experiments = args.experiments.split(';')
 
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning['ldatetime'].dt.date
+    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
+
+experiments = args.experiments.strip(' ').split(' ')
 for expname in experiments:
     exp = EXP_DEFS[expname]
-    path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/'+SET+'_'+expname+'/'
+    path_exp = args.path_experiments+'/'+expname+'/'
 
     os.system('mkdir -p '+path_exp)
     records_morning_station = records_morning.query('STNID == '+str(current_station.name))
@@ -128,8 +211,18 @@
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(path_soundingsSET+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+
+            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+            if os.path.isfile(fn_morning):
+                file_morning = open(fn_morning)
+            else:
+                fn_morning = \
+                     args.path_forcing+'/'+format(current_station.name,'05d')+\
+                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_morning = open(fn_morning)
+
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -139,8 +232,12 @@
 
             #iexp = 0
             onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
 
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            isim = 0
+            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
                 #if iexp == 11:
                 
@@ -149,6 +246,15 @@
                                                     record_morning.index_start, 
                                                     record_morning.index_end,
                                                     mode='ini')
+                    if args.diag_tropo is not None:
+                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                        profile_tropo = c4gli_morning.air_ac[seltropo]
+                        for var in args.diag_tropo:#['t','q','u','v',]:
+                            if var[:3] == 'adv':
+                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                            else:
+                                print("warning: tropospheric variable "+var+" not recognized")
                     
                     #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
                     
diff --git a/class4gl/simulations/simulations_iter_bowen.py b/class4gl/simulations/simulations_iter_bowen.py
new file mode 100644
index 0000000..9417534
--- /dev/null
+++ b/class4gl/simulations/simulations_iter_bowen.py
@@ -0,0 +1,475 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
+parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'ERA_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_ITER_BOWEN':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+# ========================
+print("getting a list of stations")
+# ========================
+
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+
+
+
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print(all_stations_select)
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_forcing,\
+                                         subset=args.subset_forcing,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if int(args.split_by) != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching initial/forcing records')
+records_morning = get_records(run_stations,\
+                              args.path_forcing,\
+                              subset=args.subset_forcing,
+                              refetch_records=False,
+                              )
+
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning['ldatetime'].dt.date
+    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
+
+experiments = args.experiments.strip(' ').split(' ')
+for expname in experiments:
+    exp = EXP_DEFS[expname]
+    path_exp = args.path_experiments+'/'+expname+'/'
+
+    os.system('mkdir -p '+path_exp)
+    records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+    for istation,current_station in run_stations.iterrows():
+        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
+            print("warning: outside of profile number range for station "+\
+                  str(current_station)+". Skipping chunk number for this station.")
+        else:
+
+            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+            if os.path.isfile(fn_morning):
+                file_morning = open(fn_morning)
+            else:
+                fn_morning = \
+                     args.path_forcing+'/'+format(current_station.name,'05d')+\
+                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_morning = open(fn_morning)
+
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
+            file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
+
+            #iexp = 0
+            onerun = False
+            print('starting station chunk number: '\
+                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+
+            isim = 0
+            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                #if iexp == 11:
+                
+            
+                    c4gli_morning = get_record_yaml(file_morning, 
+                                                    record_morning.index_start, 
+                                                    record_morning.index_end,
+                                                    mode='ini')
+                    if args.diag_tropo is not None:
+                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                        profile_tropo = c4gli_morning.air_ac[seltropo]
+                        for var in args.diag_tropo:#['t','q','u','v',]:
+                            if var[:3] == 'adv':
+                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                            else:
+                                print("warning: tropospheric variable "+var+" not recognized")
+                    
+                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    
+                    
+                    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                      record_afternoon.index_start, 
+                                                      record_afternoon.index_end,
+                                                    mode='ini')
+            
+                    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                        int((c4gli_afternoon.pars.datetime_daylight - 
+                                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+                    c4gli_morning.update(source=expname, pars=exp)
+
+                    c4gl = class4gl(c4gli_morning)
+                    
+                    #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.)
+                    EFobs = (1.-c4gli_morning.pars.EF)/c4gli_morning.pars.EF
+                    
+                    b = c4gli_morning.pars.wwilt
+                    c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01)
+                    
+                    
+                    try:
+                        #fb = f(b)
+                        c4gli_morning.pars.wg = b
+                        c4gli_morning.pars.w2 = b
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.H.sum()/(c4gl.out.LE.sum())
+                        fb = EFmod - EFobs
+                        EFmodb = EFmod
+                        c4glb = c4gl
+                        c4gli_morningb = c4gli_morning
+                        
+                        #fc = f(c)
+                        c4gli_morning.pars.wg = c
+                        c4gli_morning.pars.w2 = c
+                        c4gl = class4gl(c4gli_morning)
+                        c4gl.run()
+                        EFmod = c4gl.out.H.sum()/(c4gl.out.LE.sum())
+                        fc = EFmod - EFobs
+                        print (EFmodb,EFobs,fb)
+                        print (EFmod,EFobs,fc)
+                        c4glc = c4gl
+                        c4gli_morningc = c4gli_morning
+                        i=0
+                        
+
+                        if fc*fb > 0.:
+                            if abs(fb) < abs(fc):
+                                c4gl = c4glb
+                                c4gli_morning = c4gli_morningb
+                            else:
+                                c4gl = c4glc
+                                c4gli_morning = c4gli_morningc
+                            print("Warning!!! function value of the boundaries have the same sign, so I will not able to find a root")
+                        
+                        else:
+                            print('starting ITERATION!!!')
+                            cn  = c - fc/(fc-fb)*(c-b)
+                            
+                            
+                            #fcn = f(cn)
+                            c4gli_morning.pars.wg = np.asscalar(cn)
+                            c4gli_morning.pars.w2 = np.asscalar(cn)
+                            c4gl = class4gl(c4gli_morning)
+                            c4gl.run()
+                            fcn = c4gl.out.H.sum()/c4gl.out.LE.sum() - EFobs
+                            
+                            tol = 0.02
+                            ftol = 10.
+                            maxiter = 10
+                            
+                            is1=0
+                            is1max=1
+                            while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter):
+                                if fc * fcn > 0:
+                                    temp = c
+                                    c = b
+                                    b = temp
+                                
+                                a = b
+                                fa = fb
+                                b = c
+                                fb = fc
+                                c = cn
+                                fc = fcn
+                                              
+                                print(i,a,b,c,fcn)
+                                
+                                s1 = c - fc/(fc-fb)*(c-b) 
+                                s2 = c - fc/(fc-fa)*(c-a)
+                                
+                                
+                                # take the one that is closest to the border  (opposite to the previous border), making the chance that the border is eliminated is bigger
+                                
+                                
+                                if (abs(s1-b) < abs(s2-b)):
+                                    is1 = 0
+                                else:
+                                    is1 +=1
+                                    
+                                # we prefer s1, but only allow it a few times to not provide the opposite boundary
+                                if is1 < is1max:           
+                                    s = s1
+                                    print('s1')
+                                else:
+                                    is1 = 0
+                                    s = s2
+                                    print('s2')
+                                
+                                if c > b:
+                                    l = b
+                                    r = c
+                                else:
+                                    l = c
+                                    r = b
+                                
+                                m = (b+c)/2.
+                                     
+                                if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)):
+                                    cn = s
+                                    print('midpoint')
+                                else:
+                                    cn = m
+                                    print('bissection')
+                                    
+                                
+                                #fcn = f(cn)
+                                c4gli_morning.pars.wg = np.asscalar(cn)
+                                c4gli_morning.pars.w2 = np.asscalar(cn)
+                                c4gl = class4gl(c4gli_morning)
+                                c4gl.run()
+                                fcn = c4gl.out.H.sum()/c4gl.out.LE.sum() - EFobs
+                                
+                            
+                                i+=1
+                                
+                            if i == maxiter:
+                                raise StopIteration('did not converge')
+
+
+
+
+                        #c4gl = class4gl(c4gli_morning)
+                        #c4gl.run()
+
+                        c4gli_morning.pars.itersteps = i
+                        c4gli_morning.dump(file_ini)
+                        
+                        
+                        c4gl.dump(file_mod,\
+                                      include_input=False,\
+                                   #   timeseries_only=timeseries_only,\
+                                 )
+                        onerun = True
+                    except:
+                        print('run not succesfull')
+
+                #iexp = iexp +1
+            file_ini.close()
+            file_mod.close()
+            file_morning.close()
+            file_afternoon.close()
+    
+            if onerun:
+                records_ini = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='ini',
+                                                           refetch_records=True,
+                                                           )
+                records_mod = get_records(pd.DataFrame([current_station]),\
+                                                           path_exp,\
+                                                           getchunk = int(run_station_chunk),\
+                                                           subset='mod',\
+                                                           refetch_records=True,\
+                                                           )
+            else:
+                # remove empty files
+                os.system('rm '+fn_ini)
+                os.system('rm '+fn_mod)
+    
+    # # align afternoon records with initial records, and set same index
+    # records_afternoon.index = records_afternoon.ldatetime.dt.date
+    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+    # records_afternoon.index = records_ini.index
+    
+    # stations_for_iter = stations(path_exp)
+    # for STNID,station in stations_iterator(stations_for_iter):
+    #     records_current_station_index = \
+    #             (records_ini.index.get_level_values('STNID') == STNID)
+    #     file_current_station_mod = STNID
+    # 
+    #     with \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    #     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+    #         for (STNID,index),record_ini in records_iterator(records_ini):
+    #             c4gli_ini = get_record_yaml(file_station_ini, 
+    #                                         record_ini.index_start, 
+    #                                         record_ini.index_end,
+    #                                         mode='ini')
+    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+    # 
+    #             record_mod = records_mod.loc[(STNID,index)]
+    #             c4gl_mod = get_record_yaml(file_station_mod, 
+    #                                         record_mod.index_start, 
+    #                                         record_mod.index_end,
+    #                                         mode='mod')
+    #             record_afternoon = records_afternoon.loc[(STNID,index)]
+    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+    #                                         record_afternoon.index_start, 
+    #                                         record_afternoon.index_end,
+    #                                         mode='ini')
+
diff --git a/class4gl/simulations/update_yaml.py b/class4gl/simulations/simulations_veg.py
similarity index 64%
rename from class4gl/simulations/update_yaml.py
rename to class4gl/simulations/simulations_veg.py
index 1681e88..b2f7ad8 100644
--- a/class4gl/simulations/update_yaml.py
+++ b/class4gl/simulations/simulations_veg.py
@@ -13,12 +13,14 @@
 
 #if __name__ == '__main__':
 parser = argparse.ArgumentParser()
-parser.add_argument('--global_keys') 
+#parser.add_argument('--timestamp')
 parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
 parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 parser.add_argument('--first_station_row')
 parser.add_argument('--last_station_row')
 parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
 parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
                                                       # to initialize with.
                                                       # Most common options are
@@ -26,6 +28,8 @@
 
 # Tuntime is usually specified from the afternoon profile. You can also just
 # specify the simulation length in seconds
+parser.add_argument('--runtime',default='from_afternoon_profile')
+
 parser.add_argument('--experiments')
 parser.add_argument('--split_by',default=-1)# station soundings are split
                                             # up in chunks
@@ -55,24 +59,36 @@
 
 EXP_DEFS  =\
 {
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_WILT':   {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC_FC':     {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':         {'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_VMIN':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_VMAX':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_V0':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_L025':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_L100':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_L600':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
+# ========================
+print("getting a list of stations")
+# ========================
 
-# #SET = 'GLOBAL'
-# SET = args.dataset
-
-
-print("getting stations")
 # these are all the stations that are found in the input dataset
 all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
+# ====================================
 print('defining all_stations_select')
+# ====================================
+
 # these are all the stations that are supposed to run by the whole batch (all
 # chunks). We narrow it down according to the station(s) specified.
+
+
+
 if args.station_id is not None:
     print("Selecting station by ID")
     stations_iter = stations_iterator(all_stations)
@@ -88,6 +104,7 @@
 print("station numbers included in the whole batch "+\
       "(all chunks):",list(all_stations_select.index))
 
+print(all_stations_select)
 print("getting all records of the whole batch")
 all_records_morning_select = get_records(all_stations_select,\
                                          args.path_forcing,\
@@ -152,30 +169,30 @@
                               refetch_records=False,
                               )
 
-# # note that if runtime is an integer number, we don't need to get the afternoon
-# # profiles. 
-# if args.runtime == 'from_afternoon_profile':
-#     print('Fetching afternoon records for determining the simulation runtimes')
-#     records_afternoon = get_records(run_stations,\
-#                                     args.path_forcing,\
-#                                     subset='afternoon',
-#                                     refetch_records=False,
-#                                     )
-#     
-#     # print(records_morning.index)
-#     # print(records_afternoon.index)
-#     # align afternoon records with the noon records, and set same index
-#     print('hello')
-#     print(len(records_afternoon))
-#     print(len(records_morning))
-# 
-#     print("aligning morning and afternoon records")
-#     records_morning['dates'] = records_morning.ldatetime.dt.date
-#     records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
-#     records_afternoon.set_index(['STNID','dates'],inplace=True)
-#     ini_index_dates = records_morning.set_index(['STNID','dates']).index
-#     records_afternoon = records_afternoon.loc[ini_index_dates]
-#     records_afternoon.index = records_morning.index
+# note that if runtime is an integer number, we don't need to get the afternoon
+# profiles. 
+if args.runtime == 'from_afternoon_profile':
+    print('Fetching afternoon records for determining the simulation runtimes')
+    records_afternoon = get_records(run_stations,\
+                                    args.path_forcing,\
+                                    subset='afternoon',
+                                    refetch_records=False,
+                                    )
+    
+    # print(records_morning.index)
+    # print(records_afternoon.index)
+    # align afternoon records with the noon records, and set same index
+    print('hello')
+    print(len(records_afternoon))
+    print(len(records_morning))
+
+    print("aligning morning and afternoon records")
+    records_morning['dates'] = records_morning['ldatetime'].dt.date
+    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
+    records_afternoon.set_index(['STNID','dates'],inplace=True)
+    ini_index_dates = records_morning.set_index(['STNID','dates']).index
+    records_afternoon = records_afternoon.loc[ini_index_dates]
+    records_afternoon.index = records_morning.index
 
 experiments = args.experiments.strip(' ').split(' ')
 for expname in experiments:
@@ -184,26 +201,36 @@
 
     os.system('mkdir -p '+path_exp)
     for istation,current_station in run_stations.iterrows():
+        print(istation,current_station)
         records_morning_station = records_morning.query('STNID == '+str(current_station.name))
         if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            fn_forcing = \
-                    args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                    str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+            if os.path.isfile(fn_morning):
+                file_morning = open(fn_morning)
+            else:
+                fn_morning = \
+                     args.path_forcing+'/'+format(current_station.name,'05d')+\
+                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_morning = open(fn_morning)
 
-            #file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
+            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                     str(int(run_station_chunk))+'_mod.yaml'
             file_ini = open(fn_ini,'w')
+            file_mod = open(fn_mod,'w')
 
             #iexp = 0
             onerun = False
             print('starting station chunk number: '\
                   +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
 
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
@@ -216,8 +243,16 @@
                                                     record_morning.index_start, 
                                                     record_morning.index_end,
                                                     mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    if args.diag_tropo is not None:
+                        print('add tropospheric parameters on advection and subsidence (for diagnosis)')
+                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                        profile_tropo = c4gli_morning.air_ac[seltropo]
+                        for var in args.diag_tropo:#['t','q','u','v',]:
+                            if var[:3] == 'adv':
+                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                            else:
+                                print("warning: tropospheric variable "+var+" not recognized")
                     
                     
                     if args.runtime == 'from_afternoon_profile':
@@ -228,6 +263,8 @@
                                                         mode='ini')
                         runtime = int((c4gli_afternoon.pars.datetime_daylight - 
                                              c4gli_morning.pars.datetime_daylight).total_seconds())
+                    elif args.runtime == 'from_input':
+                        runtime = c4gli_morning.pars.runtime
                     else:
                         runtime = int(args.runtime)
 
@@ -235,15 +272,57 @@
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
+                    if expname[-5:] == '_VMAX':
+                        # ini_sel = ini[ini.cveg > ini.cveg.quantile(0.99)]
+                        # ini = c4gldata['GLOBAL_ADV'].frames['profiles']['records_all_stations_ini']
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'alpha' : 0.19835934815210562,
+                                              'cveg' : 0.9324493037539419,
+                                              'z0m' : 1.287697822716918,
+                                              'z0h' : 0.1287697822716918,
+                                              'LAI' : 2.4429540235782645,
+                                              }\
+                                            )
+                    if expname[-3:] == '_V0':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'cveg': 0.0,
+                                             'z0m': 0.01,
+                                             'z0h': 0.001,
+                                             }\
+                                            )
+                    if expname[-3:] == '_VMIN':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'alpha': 0.2868081648656875,
+                                             'cveg': 0.08275966502449594,
+                                             'z0m': 0.05760000169277192,
+                                             'z0h': 0.005760000169277192,
+                                             'LAI': 2.0,
+                                             }\
+                                            )
+                    if expname[-3:] == '_L025':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'lai':.25}\
+                                            )
+                    if expname[-3:] == '_L100':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'lai':1.0}\
+                                            )
+                    if expname[-3:] == '_L600':
+                        c4gli_morning.update(source=expname, pars=\
+                                             {'lai':6.0}\
+                                            )
 
                     c4gl = class4gl(c4gli_morning)
 
                     if args.error_handling == 'dump_always':
                         try:
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
                             c4gl.run()
-                            print('run succesfull')
+                            print('run succesful')
                         except:
-                            print('run not succesfull')
+                            print('run not succesful')
                         onerun = True
 
                         c4gli_morning.dump(file_ini)
@@ -257,7 +336,10 @@
                     # in this case, only the file will dumped if the runs were
                     # successful
                     elif args.error_handling == 'dump_on_success':
-                        try:
+                       try:
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
                             c4gl.run()
                             print('run succesfull')
                             c4gli_morning.dump(file_ini)
@@ -268,15 +350,16 @@
                                       #timeseries_only=timeseries_only,\
                                      )
                             onerun = True
-                        except:
-                            print('run not succesfull')
+                       except:
+                           print('run not succesfull')
                     isim += 1
 
 
             file_ini.close()
             file_mod.close()
             file_morning.close()
-            file_afternoon.close()
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon.close()
     
             if onerun:
                 records_ini = get_records(pd.DataFrame([current_station]),\
diff --git a/class4gl/simulations/simulations_wwilt_wfc.py b/class4gl/simulations/simulations_wwilt_wfc.py
index 1e5450c..27a6d6a 100644
--- a/class4gl/simulations/simulations_wwilt_wfc.py
+++ b/class4gl/simulations/simulations_wwilt_wfc.py
@@ -20,6 +20,7 @@
 parser.add_argument('--last_station_row')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
 parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
                                                       # to initialize with.
                                                       # Most common options are
@@ -62,22 +63,28 @@
   'GLOBAL_NOAC_WILT':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC_FC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_WILT':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_FC':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
 
+# ========================
+print("getting a list of stations")
+# ========================
 
-# #SET = 'GLOBAL'
-# SET = args.dataset
-
-
-print("getting stations")
 # these are all the stations that are found in the input dataset
 all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
 
+# ====================================
 print('defining all_stations_select')
+# ====================================
+
 # these are all the stations that are supposed to run by the whole batch (all
 # chunks). We narrow it down according to the station(s) specified.
+
+
+
 if args.station_id is not None:
     print("Selecting station by ID")
     stations_iter = stations_iterator(all_stations)
@@ -93,6 +100,7 @@
 print("station numbers included in the whole batch "+\
       "(all chunks):",list(all_stations_select.index))
 
+print(all_stations_select)
 print("getting all records of the whole batch")
 all_records_morning_select = get_records(all_stations_select,\
                                          args.path_forcing,\
@@ -175,8 +183,8 @@
     print(len(records_morning))
 
     print("aligning morning and afternoon records")
-    records_morning['dates'] = records_morning.ldatetime.dt.date
-    records_afternoon['dates'] = records_afternoon.ldatetime.dt.date
+    records_morning['dates'] = records_morning['ldatetime'].dt.date
+    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
     records_afternoon.set_index(['STNID','dates'],inplace=True)
     ini_index_dates = records_morning.set_index(['STNID','dates']).index
     records_afternoon = records_afternoon.loc[ini_index_dates]
@@ -189,13 +197,23 @@
 
     os.system('mkdir -p '+path_exp)
     for istation,current_station in run_stations.iterrows():
+        print(istation,current_station)
         records_morning_station = records_morning.query('STNID == '+str(current_station.name))
         if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
             print("warning: outside of profile number range for station "+\
                   str(current_station)+". Skipping chunk number for this station.")
         else:
-            file_morning = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_morning.yaml')
-            file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+            if os.path.isfile(fn_morning):
+                file_morning = open(fn_morning)
+            else:
+                fn_morning = \
+                     args.path_forcing+'/'+format(current_station.name,'05d')+\
+                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                file_morning = open(fn_morning)
+
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
             fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
                      str(int(run_station_chunk))+'_ini.yaml'
             fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
@@ -208,7 +226,7 @@
             print('starting station chunk number: '\
                   +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
 
-            records_morning_station_chunk = records_morning_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
 
             isim = 0
             for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
@@ -221,8 +239,16 @@
                                                     record_morning.index_start, 
                                                     record_morning.index_end,
                                                     mode='ini')
-                    
-                    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+                    if args.diag_tropo is not None:
+                        print('add tropospheric parameters on advection and subsidence (for diagnosis)')
+                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                        profile_tropo = c4gli_morning.air_ac[seltropo]
+                        for var in args.diag_tropo:#['t','q','u','v',]:
+                            if var[:3] == 'adv':
+                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                            else:
+                                print("warning: tropospheric variable "+var+" not recognized")
                     
                     
                     if args.runtime == 'from_afternoon_profile':
@@ -233,6 +259,8 @@
                                                         mode='ini')
                         runtime = int((c4gli_afternoon.pars.datetime_daylight - 
                                              c4gli_morning.pars.datetime_daylight).total_seconds())
+                    elif args.runtime == 'from_input':
+                        runtime = c4gli_morning.pars.runtime
                     else:
                         runtime = int(args.runtime)
 
@@ -240,12 +268,12 @@
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
-                    if expname == 'GLOBAL_NOAC_WILT':
+                    if expname[-5:] == '_WILT':
                         c4gli_morning.update(source=expname, pars=\
                                              {'wg':c4gli_morning.pars.wwilt,\
                                               'w2':c4gli_morning.pars.wwilt}\
                                             )
-                    if expname == 'GLOBAL_NOAC_FC':
+                    if expname[-3:] == '_FC':
                         c4gli_morning.update(source=expname, pars=\
                                              {'wg':c4gli_morning.pars.wfc,\
                                               'w2':c4gli_morning.pars.wfc}\
@@ -255,10 +283,13 @@
 
                     if args.error_handling == 'dump_always':
                         try:
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
                             c4gl.run()
-                            print('run successful')
+                            print('run succesful')
                         except:
-                            print('run not successful')
+                            print('run not succesful')
                         onerun = True
 
                         c4gli_morning.dump(file_ini)
@@ -272,7 +303,10 @@
                     # in this case, only the file will dumped if the runs were
                     # successful
                     elif args.error_handling == 'dump_on_success':
-                        try:
+                       try:
+                            print('checking data sources')
+                            if not c4gli_morning.check_source_globaldata():
+                                print('Warning: some input sources appear invalid')
                             c4gl.run()
                             print('run succesfull')
                             c4gli_morning.dump(file_ini)
@@ -283,15 +317,16 @@
                                       #timeseries_only=timeseries_only,\
                                      )
                             onerun = True
-                        except:
-                            print('run not succesfull')
+                       except:
+                           print('run not succesfull')
                     isim += 1
 
 
             file_ini.close()
             file_mod.close()
             file_morning.close()
-            file_afternoon.close()
+            if args.runtime == 'from_afternoon_profile':
+                file_afternoon.close()
     
             if onerun:
                 records_ini = get_records(pd.DataFrame([current_station]),\
diff --git a/class4gl/simulations/update_yaml_old.py b/class4gl/simulations/update_yaml_old.py
deleted file mode 100644
index aafed46..0000000
--- a/class4gl/simulations/update_yaml_old.py
+++ /dev/null
@@ -1,276 +0,0 @@
-# -*- coding: utf-8 -*-
-
-""" 
-Purpose:
-    update variables in class4gl yaml files, eg., when you need new categorical
-    values in the table.
-
-
-"""
-
-
-
-import pandas as pd
-import io
-import os
-import numpy as np
-import datetime as dt
-import sys
-import pytz
-import math
-import dateutil.parser
-
-import argparse
-
-
-#if __name__ == '__main__':
-parser = argparse.ArgumentParser()
-parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--first_station_row')
-parser.add_argument('--last_station_row')
-parser.add_argument('--path_experiments')
-parser.add_argument('--experiments')
-parser.add_argument('--station_id') # run a specific station id
-parser.add_argument('--mode',default='ini') # this tells which yaml subset
-parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
-                                                      # to update in the yaml
-                                                      # dataset.
-                                                      # Most common options are
-                                                      # 'morning' and 'ini'.
-
-parser.add_argument('--split_by',default=-1)# station soundings are split
-
-#parser.add_argument('--station-chunk',default=0)
-parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--global_keys') 
-args = parser.parse_args()
-
-sys.path.insert(0, args.c4gl_path_lib)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# iniitialize global data
-globaldata = data_global()
-# ...  and load initial data pages
-globaldata.load_datasets(recalc=0)
-
-
-print("getting stations")
-# these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
-
-print('defining all_stations_select')
-# these are all the stations that are supposed to run by the whole batch (all
-# chunks). We narrow it down according to the station(s) specified.
-if args.station_id is not None:
-    print("Selecting station by ID")
-    stations_iter = stations_iterator(all_stations)
-    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
-    all_stations_select = pd.DataFrame([run_station])
-else:
-    print("Selecting stations from a row range in the table")
-    all_stations_select = pd.DataFrame(all_stations.table)
-    if args.last_station_row is not None:
-        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
-    if args.first_station_row is not None:
-        all_stations_select = all_station_select.iloc[int(args.first_station):]
-print("station numbers included in the whole batch "+\
-      "(all chunks):",list(all_stations_select.index))
-
-print("getting all records of the whole batch")
-all_records_morning_select = get_records(all_stations_select,\
-                                         args.path_forcing,\
-                                         subset=args.subset_forcing,
-                                         refetch_records=False,
-                                         )
-
-# only run a specific chunck from the selection
-if args.global_chunk_number is not None:
-    if args.station_chunk_number is not None:
-        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
-
-
-    # if not (int(args.split_by) > 0) :
-    #         raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
-
-    run_station_chunk = None
-    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
-    totalchunks = 0
-    stations_iter = all_stations_select.iterrows()
-    in_current_chunk = False
-    try:
-        while not in_current_chunk:
-            istation,current_station = stations_iter.__next__()
-            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
-            #chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
-
-            chunks_current_station = len(all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
-            print('chunks_current_station',chunks_current_station)
-
-            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
-        
-            if in_current_chunk:
-                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-                run_station_chunk =all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique()[int(args.global_chunk_number) - totalchunks ]
-        
-            totalchunks +=chunks_current_station
-        
-
-    except StopIteration:
-       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
-    print("station = ",list(run_stations.index))
-    print("station chunk number:",run_station_chunk)
-
-# if no global chunk is specified, then run the whole station selection in one run, or
-# a specific chunk for each selected station according to # args.station_chunk_number
-else:
-    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
-    if args.station_chunk_number is not None:
-        run_station_chunk = int(args.station_chunk_number)
-        print("station(s) that is processed.",list(run_stations.index))
-        print("chunk number: ",run_station_chunk)
-    else:
-        if args.split_by != -1:
-            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
-        run_station_chunk = 0
-        print("stations that are processed.",list(run_stations.index))
-        
-
-#print(all_stations)
-print('Fetching current records')
-records_forcing = get_records(run_stations,\
-                              args.path_forcing,\
-                              subset=args.subset_forcing,
-                              refetch_records=False,
-                              )
-
-# if args.timestamp is None:
-#     backupdir = args.path_forcing+'/'+dt.datetime.now().isoformat()+'/'
-# else: 
-#     backupdir = args.path_forcing+'/'+args.timestamp+'/'
-# print('creating backup dir: '+backupdir)
-# os.system('mkdir -p "'+backupdir+'"')
-
-
-for EXP in args.experiments.strip().split(" "):
-    os.system('mkdir -p '+args.path_experiments+'/'+EXP+'/')
-    for istation,current_station in run_stations.iterrows():
-        records_forcing_station = records_forcing.query('STNID == ' +\
-                                                        str(current_station.name))
-    
-        records_forcing_station_chunk = records_forcing.query('STNID == ' +\
-                                                        str(current_station.name)+\
-                                                       '& chunk == '+str(run_station_chunk))
-        print('lenrecords_forcing_station: ',len(records_forcing_station))
-        print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
-        print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
-        
-        # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
-        #     print("warning: outside of profile number range for station "+\
-        #           str(current_station)+". Skipping chunk number for this station.")
-        if len(records_forcing_station_chunk) == 0:
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
-        else:
-            # normal case
-            if ((int(args.split_by) > 0) or \
-                (os.path.isfile(args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                     str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'))):
-                fn_forcing = \
-                        args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                        str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                file_forcing = \
-                    open(fn_forcing,'r')
-                fn_experiment = args.path_experiments+'/'+EXP+'/'+format(current_station.name,'05d')+'_'+\
-                         str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                file_experiment = \
-                    open(fn_experiment,'w')
-                fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                         str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
-    
-                # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
-                #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
-                #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
-            else:
-                print("\
-    Warning. We are choosing chunk 0 without specifying it in filename.    \
-     No-chunk naming will be removed in the future."\
-                     )
-    
-                fn_forcing = \
-                        args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
-                        args.subset_forcing+'.yaml'
-                file_forcing = \
-                    open(fn_forcing,'r')
-                fn_experiment = args.path_experiments+'/'+EXP+'/'+format(current_station.name,'05d')+'_'+\
-                         str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                file_experiment = \
-                    open(fn_experiment,'w')
-                fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
-                         str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
-    
-                # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
-                #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
-                #          args.subset_forcing+'.pkl'
-    
-            onerun = False
-            print('starting station chunk number: '\
-                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
-    
-            #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-    
-            # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
-            #                                                 str(current_station.name)+\
-            #                                                '& chunk == '+str(run_station_chunk))
-            isim = 0
-            for (STNID,chunk,index),record_forcing in records_forcing_station_chunk.iterrows():
-                    print('starting '+str(isim+1)+' out of '+\
-                      str(len(records_forcing_station_chunk) )+\
-                      ' (station total: ',str(len(records_forcing_station)),')')  
-                
-                    c4gli_forcing = get_record_yaml(file_forcing, 
-                                                    record_forcing.index_start, 
-                                                    record_forcing.index_end,
-                                                    mode=args.mode)
-                    seltropo = (c4gli_forcing.air_ac.p > c4gli_forcing.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
-                    profile_tropo = c4gli_forcing.air_ac[seltropo]
-                    mean_advt_tropo = np.mean(profile_tropo.advt_x +profile_tropo.advt_y )
-                    c4gli_forcing.update(source='era-interim',pars={'advt_tropo':mean_advt_tropo})
-                    
-                    #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
-                    
-                    if args.global_keys is not None:
-                        print(args.global_keys.strip(' ').split(' '))
-                        c4gli_forcing.get_global_input(
-                            globaldata, 
-                            only_keys=args.global_keys.strip(' ').split(' ')
-                        )
-    
-                    c4gli_forcing.dump(file_experiment)
-                        
-                        
-                    onerun = True
-                    isim += 1
-    
-    
-            file_forcing.close()
-            file_experiment.close()
-    
-            if onerun:
-                # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
-                # if os.path.isfile(fn_forcing_pkl):
-                #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
-                # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
-                # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
-                records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
-                                                           args.path_experiments+'/'+EXP+'/',\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset=args.subset_forcing,
-                                                           refetch_records=True,
-                                                           )
-    

From 4e2728fad73f40792f1eab8e786b24cd36d2c297 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Fri, 26 Oct 2018 15:19:13 +0200
Subject: [PATCH 095/129] new procedure update_input.py and update_output.py

---
 class4gl/setup/batch_update_input.py | 149 ++++++++++++
 class4gl/setup/update_input.py       | 331 +++++++++++++++++++++++++++
 2 files changed, 480 insertions(+)
 create mode 100644 class4gl/setup/batch_update_input.py
 create mode 100644 class4gl/setup/update_input.py

diff --git a/class4gl/setup/batch_update_input.py b/class4gl/setup/batch_update_input.py
new file mode 100644
index 0000000..d2b06b0
--- /dev/null
+++ b/class4gl/setup/batch_update_input.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+parser = argparse.ArgumentParser()
+#if __name__ == '__main__':
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling')
+parser.add_argument('--updates')
+parser.add_argument('--subset_input',default='morning') 
+                                        # this tells which yaml subset
+                                        # to initialize with.
+                                        # Most common options are
+                                        # 'morning' and 'ini'.
+parser.add_argument('--subset_output',default='morning') 
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime')
+# delete folders of experiments before running them
+parser.add_argument('--cleanup_output',default=False)
+parser.add_argument('--split_by',default=50)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--path_input') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+
+
+#arguments only used for update_yaml.py
+parser.add_argument('--path_dataset') 
+parser.add_argument('--global_keys') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+
+# #SET = 'GLOBAL'
+# SET = args.dataset
+
+# path_inputSET = args.path_input+'/'+SET+'/'
+
+print("getting all stations from --path_input")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting stations by --station_id")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table [--first_station_row,--last_station_row]")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_station_select.iloc[int(args.first_station):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,\
+                                         refetch_records=False,\
+                                        )
+
+print('splitting batch in --split_by='+args.split_by+' jobs.')
+totalchunks = 0
+for istation,current_station in all_stations_select.iterrows():
+    records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+    chunks_current_station = math.ceil(float(len(records_morning_station_select))/float(args.split_by))
+    totalchunks +=chunks_current_station
+
+print('total chunks (= size of array-job): ' + str(totalchunks))
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+
+
+if args.cleanup_output:
+    os.system("rm -R "+args.path_output+'/')
+
+# C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+            str(totalchunks-1)+" -v '"
+# propagate arguments towards the job script
+first = True
+for argkey in args.__dict__.keys():
+    if ((argkey not in ['pbs_string','cleanup_output']) and \
+        # default values are specified in the simulation script, so
+        # excluded here
+        (args.__dict__[argkey] is not None)
+       ):
+        if first:
+            command +='C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        else:
+            command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+    first = False
+
+command = command+"'"
+print('Submitting array job: '+command)
+os.system(command)
+
+
+    #os.system(command)
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/class4gl/setup/update_input.py b/class4gl/setup/update_input.py
new file mode 100644
index 0000000..07000f2
--- /dev/null
+++ b/class4gl/setup/update_input.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--updates')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv'])
+parser.add_argument('--subset_input',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+parser.add_argument('--subset_output',default='morning')
+
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+
+# initialize global data
+globaldata = data_global()
+if 'era_profiles' in args.updates.strip().split(","):
+    globaldata.sources = {**globaldata.sources,**{
+            "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc",
+            "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
+            "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc",
+            "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc",
+            }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'ERA_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+# ========================
+print("getting a list of stations")
+# ========================
+
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+
+
+
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_station_select.iloc[int(args.first_station):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print(all_stations_select)
+print("getting all records of the whole batch")
+all_records_input_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_input_station_select = all_records_input_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_input_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching initial/forcing records')
+records_input = get_records(run_stations,\
+                              args.path_input,\
+                              subset=args.subset_input,
+                              refetch_records=False,
+                              )
+
+
+
+os.system('mkdir -p '+args.path_output)
+for istation,current_station in run_stations.iterrows():
+    print(istation,current_station)
+    records_input_station = records_input.query('STNID == '+str(current_station.name))
+    if (int(args.split_by) * int(run_station_chunk)) >= (len(records_input_station)):
+        print("warning: outside of profile number range for station "+\
+              str(current_station)+". Skipping chunk number for this station.")
+    else:
+        fn_input = args.path_input+'/'+format(current_station.name,'05d')+'_'+args.subset_input+'.yaml'
+        if os.path.isfile(fn_input):
+            file_input = open(fn_input)
+        else:
+            fn_input = \
+                 args.path_input+'/'+format(current_station.name,'05d')+\
+                 '_'+str(run_station_chunk)+'_'+args.subset_input+'.yaml'
+            file_input = open(fn_input)
+
+        fn_output = args.path_output+'/'+format(current_station.name,'05d')+'_'+\
+                 str(int(run_station_chunk))+'_'+args.subset_output+'.yaml'
+        file_output = open(fn_output,'w')
+
+        #iexp = 0
+        onerun = False
+        print('starting station chunk number: '\
+              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+        records_input_station_chunk = records_input_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+        isim = 0
+        for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows():
+                print('starting '+str(isim+1)+' out of '+\
+                  str(len(records_input_station_chunk) )+\
+                  ' (station total: ',str(len(records_input_station)),')')  
+            
+        
+                c4gli_output = get_record_yaml(file_input, 
+                                                record_input.index_start, 
+                                                record_input.index_end,
+                                                mode='ini')
+                if args.diag_tropo is not None:
+                    print('add tropospheric parameters on advection and subsidence (for diagnosis)')
+                    seltropo = (c4gli_output.air_ac.p > c4gli_output.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                    profile_tropo = c4gli_output.air_ac[seltropo]
+                    for var in args.diag_tropo:#['t','q','u','v',]:
+                        if var[:3] == 'adv':
+                            mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                            c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                        else:
+                            print("warning: tropospheric variable "+var+" not recognized")
+
+
+                if 'era_profiles' in args.updates.strip().split(","):
+                    c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp'])
+
+                    c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp})
+
+                    cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+                    Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+                    Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+                    R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q)
+                    rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t
+                    dz = c4gli_output.air_ac.delpdgrav/rho
+                    z = [dz.iloc[-1]/2.]
+                    for idz in list(reversed(range(0,len(dz)-1,1))):
+                        z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.)
+                    z = list(reversed(z))
+
+                    theta = c4gli_output.air_ac.t * \
+                               (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp)
+                    thetav   = theta*(1. + 0.61 * c4gli_output.air_ac.q)
+
+                    
+                    c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z),
+                                                                           'theta':list(theta),
+                                                                           'thetav':list(thetav),
+                                                                          }))
+                    air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1)
+                    air_ap_mode = 'b'
+                    air_ap_input_source = c4gli_output.query_source('air_ac:theta')
+
+
+                    c4gli_output.mixed_layer_fit(air_ap=air_ap_input,
+                                         source=air_ap_input_source,
+                                         mode=air_ap_mode)
+
+
+                onerun = True
+                isim += 1
+                c4gli_output.dump(file_output)
+                    
+                    
+        file_output.close()
+        file_input.close()
+
+        if onerun:
+            records_output = get_records(pd.DataFrame([current_station]),\
+                                                       args.path_output,\
+                                                       getchunk = int(run_station_chunk),\
+                                                       subset=args.subset_output,
+                                                       refetch_records=True,
+                                                       )
+        else:
+            # remove empty files
+            os.system('rm '+fn_output)
+
+# # align afternoon records with initial records, and set same index
+# records_afternoon.index = records_afternoon.ldatetime.dt.date
+# records_afternoon = records_afternoon.loc[records_output.ldatetime.dt.date]
+# records_afternoon.index = records_output.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_output.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_output.yaml','r') as file_station_output, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_input+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_output in records_iterator(records_output):
+#             c4gli_output = get_record_yaml(file_station_output, 
+#                                         record_output.index_start, 
+#                                         record_output.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_output.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+

From facdd8186aaf95c55192f7206ef0af3bd2d62dbf Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 29 Oct 2018 13:01:35 +0100
Subject: [PATCH 096/129] new procedure update_input.py and update_output.py

---
 class4gl/model.py                          |  10 +-
 class4gl/processing/batch_update_output.py | 149 ++++++++++
 class4gl/processing/update_output.py       | 327 +++++++++++++++++++++
 class4gl/simulations/copy_update.py        | 316 ++++++++++++++++++++
 class4gl/simulations/simulations.py        |   1 +
 5 files changed, 799 insertions(+), 4 deletions(-)
 create mode 100644 class4gl/processing/batch_update_output.py
 create mode 100644 class4gl/processing/update_output.py
 create mode 100644 class4gl/simulations/copy_update.py

diff --git a/class4gl/model.py b/class4gl/model.py
index 2ae873f..28fe5d0 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -151,6 +151,7 @@ def __init__(self, model_input = None,debug_level=None):
                 self.input = cp.deepcopy(model_input)
 
     def load_yaml_dict(self,yaml_dict):
+        dictouttemp = pd.DataFrame()
         for key,data in yaml_dict.items():
             if key == 'pars':
                 for keydata,value in data.items():
@@ -167,10 +168,11 @@ def load_yaml_dict(self,yaml_dict):
             #     self.__dict__[key] = data
 
 
-        self.tsteps = len(dictouttemp['h'])
-        self.out = model_output(self.tsteps)
-        for keydictouttemp in dictouttemp.keys():
-            self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
+        if len(dictouttemp) > 0:
+            self.tsteps = len(dictouttemp['h'])
+            self.out = model_output(self.tsteps)
+            for keydictouttemp in dictouttemp.keys():
+                self.out.__dict__[keydictouttemp] = np.array(dictouttemp[keydictouttemp])
 
 
   
diff --git a/class4gl/processing/batch_update_output.py b/class4gl/processing/batch_update_output.py
new file mode 100644
index 0000000..ab51951
--- /dev/null
+++ b/class4gl/processing/batch_update_output.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+"""
+Usage:
+python batch_update.py --exec $CLASS4GL/simulations/update_yaml_old.py
+--path_experiments $VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC/ --path_input
+$VSC_DATA_VO/D2D/data/C4GL/GLOBAL_NOAC_BACKUP_20180904/ --c4gl_path_lib
+$CLASS4GL --split_by 50 --global_keys "KGC" --subset_input morning --experiments
+"GLOBAL_NOAC"
+"""
+
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+parser = argparse.ArgumentParser()
+#if __name__ == '__main__':
+parser.add_argument('--exec') # chunk simulation script
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling')
+parser.add_argument('--subset_input',default='morning') 
+                                        # this tells which yaml subset
+                                        # to initialize with.
+                                        # Most common options are
+                                        # 'morning' and 'ini'.
+parser.add_argument('--subset_output',default='morning') 
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+parser.add_argument('--runtime')
+# delete folders of experiments before running them
+parser.add_argument('--split_by',default=50)# station soundings are split
+                                            # up in chunks
+
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--path_input') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+
+
+#arguments only used for update_yaml.py
+parser.add_argument('--path_dataset') 
+parser.add_argument('--global_keys') 
+parser.add_argument('--updates') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+print("getting all stations from --path_input")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting stations by --station_id")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table [--first_station_row,--last_station_row]")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,\
+                                         refetch_records=False,\
+                                        )
+
+print('splitting batch in --split_by='+args.split_by+' jobs.')
+totalchunks = 0
+for istation,current_station in all_stations_select.iterrows():
+    records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+    chunks_current_station = len(records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
+    totalchunks +=chunks_current_station
+
+print('total chunks (= size of array-job) per experiment: ' + str(totalchunks))
+
+#if sys.argv[1] == 'qsub':
+# with qsub
+
+
+
+#C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+            str(totalchunks-1)+" -v '"
+# propagate arguments towards the job script
+first = True
+for argkey in args.__dict__.keys():
+    if ((argkey not in ['pbs_string']) and \
+        # default values are specified in the simulation script, so
+        # excluded here
+        (args.__dict__[argkey] is not None)
+       ):
+        if first:
+            command +='C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        else:
+            command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+        first = False
+
+command = command+"'"
+print('Submitting array job: '+command)
+os.system(command)
+
+
+    #os.system(command)
+# elif sys.argv[1] == 'wsub':
+#     
+#     # with wsub
+#     STNlist = list(df_stations.iterrows())
+#     NUMSTNS = len(STNlist)
+#     PROCS = NUMSTNS 
+#     BATCHSIZE = 1 #math.ceil(np.float(NUMSTNS)/np.float(PROCS))
+# 
+#     os.system('wsub -batch /user/data/gent/gvo000/gvo00090/D2D/scripts/C4GL/global_run.pbs -t 0-'+str(PROCS-1))
+
diff --git a/class4gl/processing/update_output.py b/class4gl/processing/update_output.py
new file mode 100644
index 0000000..8fab56b
--- /dev/null
+++ b/class4gl/processing/update_output.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+
+""" 
+Purpose:
+    update variables in class4gl yaml files, eg., when you need new categorical
+    values in the table.
+
+
+"""
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+import dateutil.parser
+
+import argparse
+
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--path_output')
+parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv'])
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--mode',default='ini') # run a specific station id
+# this is the type of the yaml that needs to be updated. Can be 'ini' or 'mod'
+parser.add_argument('--updates')
+parser.add_argument('--subset_input',default='morning') # this tells which yaml subset
+parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+                                                      # to update in the yaml
+                                                      # dataset.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+
+parser.add_argument('--split_by',default=-1)# station soundings are split
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--global_keys') 
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# iniitialize global data
+globaldata = data_global()
+if 'era_profiles' in args.updates.strip().split(","):
+    globaldata.sources = {**globaldata.sources,**{
+            "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc",
+            "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
+            "ERAINT:u"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc",
+            "ERAINT:v"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc",
+            }}
+
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+
+print("getting stations")
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+print('defining all_stations_select')
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print("getting all records of the whole batch")
+all_records_morning_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunck from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    # if not (int(args.split_by) > 0) :
+    #         raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+            #chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+
+            chunks_current_station = len(all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique())
+            print('chunks_current_station',chunks_current_station)
+
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk =all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique()[int(args.global_chunk_number) - totalchunks ]
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if int(args.split_by) != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching current records')
+records_input = get_records(run_stations,\
+                              args.path_input,\
+                              subset=args.subset_input,
+                              refetch_records=False,
+                              )
+
+# if args.timestamp is None:
+#     backupdir = args.path_input+'/'+dt.datetime.now().isoformat()+'/'
+# else: 
+#     backupdir = args.path_input+'/'+args.timestamp+'/'
+# print('creating backup dir: '+backupdir)
+# os.system('mkdir -p "'+backupdir+'"')
+
+
+os.system('mkdir -p '+args.path_output)
+
+for istation,current_station in run_stations.iterrows():
+    records_input_station = records_input.query('STNID == ' +\
+                                                    str(current_station.name))
+
+    records_input_station_chunk = records_input_station.query('STNID == ' +\
+                                                    str(current_station.name)+\
+                                                   '& chunk == '+str(run_station_chunk))
+    print('lenrecords_input_station_chunk: ',len(records_input_station_chunk))
+    print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk))
+    print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1))
+    
+    # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)):
+    #     print("warning: outside of profile number range for station "+\
+    #           str(current_station)+". Skipping chunk number for this station.")
+    if len(records_input_station_chunk) == 0:
+        print("warning: outside of profile number range for station "+\
+              str(current_station)+". Skipping chunk number for this station.")
+    else:
+        # normal case
+        if ((int(args.split_by) > 0) or \
+            (os.path.isfile(args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                 str(run_station_chunk)+'_'+args.subset_input+'.yaml'))):
+            fn_input = \
+                    args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                    str(run_station_chunk)+'_'+args.subset_input+'.yaml'
+            file_input = \
+                open(fn_input,'r')
+            fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_output+'.yaml'
+            file_output = \
+                open(fn_output,'w')
+            # fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+        else:
+            print("\
+Warning. We are choosing chunk 0 without specifying it in filename.    \
+ No-chunk naming will be removed in the future."\
+                 )
+
+            fn_input = \
+                    args.path_input+'/'+format(current_station.name,'05d')+'_'+\
+                    args.subset_input+'.yaml'
+            file_input = \
+                open(fn_input,'r')
+            fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\
+                     str(run_station_chunk)+'_'+args.subset_output+'.yaml'
+            file_output = \
+                open(fn_output,'w')
+            # fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.pkl'
+
+            # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\
+            #          str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+            # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\
+            #          args.subset_forcing+'.pkl'
+
+        onerun = False
+        print('starting station chunk number: '\
+              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+        #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+        # records_forcing_station_chunk = records_forcing.query('STNID == ' +\
+        #                                                 str(current_station.name)+\
+        #                                                '& chunk == '+str(run_station_chunk))
+        isim = 0
+        for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows():
+                print('starting '+str(isim+1)+' out of '+\
+                  str(len(records_input_station_chunk) )+\
+                  ' (station total: ',str(len(records_input_station)),')')  
+            
+                c4gli_output = get_record_yaml(file_input, 
+                                                record_input.index_start, 
+                                                record_input.index_end,
+                                                mode=args.mode)
+                if args.diag_tropo is not None:
+                    seltropo = (c4gli_output.air_ac.p > c4gli_output.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                    profile_tropo = c4gli_output.air_ac[seltropo]
+                    for var in args.diag_tropo:
+                        if var[:3] == 'adv':
+                            mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                            c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                        else:
+                            print("warning: tropospheric variable "+var+" not recognized")
+                if 'era_profiles' in args.updates.strip().split(","):
+                    c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp'])
+
+                    c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp})
+
+                    cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+                    Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+                    Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+                    R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q)
+                    rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t
+                    dz = c4gli_output.air_ac.delpdgrav/rho
+                    z = [dz.iloc[-1]/2.]
+                    for idz in list(reversed(range(0,len(dz)-1,1))):
+                        z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.)
+                    z = list(reversed(z))
+
+                    theta = c4gli_output.air_ac.t * \
+                               (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp)
+                    thetav   = theta*(1. + 0.61 * c4gli_output.air_ac.q)
+
+                    
+                    c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z),
+                                                                           'theta':list(theta),
+                                                                           'thetav':list(thetav),
+                                                                          }))
+                    air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1)
+                    air_ap_mode = 'b'
+                    air_ap_input_source = c4gli_output.query_source('air_ac:theta')
+
+
+                    c4gli_output.mixed_layer_fit(air_ap=air_ap_input,
+                                         source=air_ap_input_source,
+                                         mode=air_ap_mode)
+
+                if not c4gli_output.check_source_globaldata():
+                    print('Warning: some input sources appear invalid')
+
+
+                
+                #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime)
+                
+                # if args.global_keys is not None:
+                #     print(args.global_keys.strip(' ').split(' '))
+                #     c4gli_forcing.get_global_input(
+                #         globaldata, 
+                #         only_keys=args.global_keys.strip(' ').split(' ')
+                #     )
+
+                c4gli_output.dump(file_output)
+                    
+                    
+                onerun = True
+                isim += 1
+
+
+        file_input.close()
+        file_output.close()
+
+        if onerun:
+            # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"')
+            # if os.path.isfile(fn_forcing_pkl):
+            #     os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"')
+            # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            # print('mv "'+fn_experiment+'" "'+fn_forcing+'"')
+            records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\
+                                                       args.path_output+'/'+'/',\
+                                                       getchunk = int(run_station_chunk),\
+                                                       subset=args.subset_output,
+                                                       refetch_records=True,
+                                                       )
+
diff --git a/class4gl/simulations/copy_update.py b/class4gl/simulations/copy_update.py
new file mode 100644
index 0000000..2410d2c
--- /dev/null
+++ b/class4gl/simulations/copy_update.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import sys
+import pytz
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--updates')
+parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--error_handling',default='dump_on_success')
+parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
+parser.add_argument('--subset_input',default='morning') # this tells which yaml subset
+                                                      # to initialize with.
+                                                      # Most common options are
+                                                      # 'morning' and 'ini'.
+parser.add_argument('--subset_output',default='morning')
+
+
+# Runtime is usually specified from the afternoon profile. You can also just
+# specify the simulation length in seconds
+
+parser.add_argument('--experiments')
+parser.add_argument('--split_by',default=-1)# station soundings are split
+                                            # up in chunks
+
+#parser.add_argument('--station-chunk',default=0)
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
+parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+from class4gl import blh,class4gl_input
+
+# this is a variant of global run in which the output of runs are still written
+# out even when the run crashes.
+
+# #only include the following timeseries in the model output
+# timeseries_only = \
+# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+
+EXP_DEFS  =\
+{
+  'ERA_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
+  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+}
+
+# ========================
+print("getting a list of stations")
+# ========================
+
+# these are all the stations that are found in the input dataset
+all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False)
+
+# ====================================
+print('defining all_stations_select')
+# ====================================
+
+# these are all the stations that are supposed to run by the whole batch (all
+# chunks). We narrow it down according to the station(s) specified.
+
+
+
+if args.station_id is not None:
+    print("Selecting station by ID")
+    stations_iter = stations_iterator(all_stations)
+    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    all_stations_select = pd.DataFrame([run_station])
+else:
+    print("Selecting stations from a row range in the table")
+    all_stations_select = pd.DataFrame(all_stations.table)
+    if args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+print("station numbers included in the whole batch "+\
+      "(all chunks):",list(all_stations_select.index))
+
+print(all_stations_select)
+print("getting all records of the whole batch")
+all_records_input_select = get_records(all_stations_select,\
+                                         args.path_input,\
+                                         subset=args.subset_input,
+                                         refetch_records=False,
+                                         )
+
+# only run a specific chunk from the selection
+if args.global_chunk_number is not None:
+    if args.station_chunk_number is not None:
+        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+
+
+    if not (int(args.split_by) > 0) :
+            raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.")
+
+    run_station_chunk = None
+    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+    totalchunks = 0
+    stations_iter = all_stations_select.iterrows()
+    in_current_chunk = False
+    try:
+        while not in_current_chunk:
+            istation,current_station = stations_iter.__next__()
+            all_records_input_station_select = all_records_input_select.query('STNID == '+str(current_station.name))
+            chunks_current_station = math.ceil(float(len(all_records_input_station_select))/float(args.split_by))
+            print('chunks_current_station',chunks_current_station)
+            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+        
+            if in_current_chunk:
+                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                run_station_chunk = int(args.global_chunk_number) - totalchunks 
+        
+            totalchunks +=chunks_current_station
+        
+
+    except StopIteration:
+       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+    print("station = ",list(run_stations.index))
+    print("station chunk number:",run_station_chunk)
+
+# if no global chunk is specified, then run the whole station selection in one run, or
+# a specific chunk for each selected station according to # args.station_chunk_number
+else:
+    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+    if args.station_chunk_number is not None:
+        run_station_chunk = int(args.station_chunk_number)
+        print("station(s) that is processed.",list(run_stations.index))
+        print("chunk number: ",run_station_chunk)
+    else:
+        if args.split_by != -1:
+            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
+        run_station_chunk = 0
+        print("stations that are processed.",list(run_stations.index))
+        
+
+#print(all_stations)
+print('Fetching initial/forcing records')
+records_input = get_records(run_stations,\
+                              args.path_input,\
+                              subset=args.subset_input,
+                              refetch_records=False,
+                              )
+
+
+
+os.system('mkdir -p '+args.path_output)
+for istation,current_station in run_stations.iterrows():
+    print(istation,current_station)
+    records_input_station = records_input.query('STNID == '+str(current_station.name))
+    if (int(args.split_by) * int(run_station_chunk)) >= (len(records_input_station)):
+        print("warning: outside of profile number range for station "+\
+              str(current_station)+". Skipping chunk number for this station.")
+    else:
+        fn_input = args.path_input+'/'+format(current_station.name,'05d')+'_'+args.subset_input+'.yaml'
+        if os.path.isfile(fn_input):
+            file_input = open(fn_input)
+        else:
+            fn_input = \
+                 args.path_input+'/'+format(current_station.name,'05d')+\
+                 '_'+str(run_station_chunk)+'_'+args.subset_input+'.yaml'
+            file_input = open(fn_input)
+
+        fn_output = args.path_output+'/'+format(current_station.name,'05d')+'_'+\
+                 str(int(run_station_chunk))+'_'+args.subset_output+'.yaml'
+        file_output = open(fn_output,'w')
+
+        #iexp = 0
+        onerun = False
+        print('starting station chunk number: '\
+              +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
+
+        records_input_station_chunk = records_input_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+
+        isim = 0
+        for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows():
+                print('starting '+str(isim+1)+' out of '+\
+                  str(len(records_input_station_chunk) )+\
+                  ' (station total: ',str(len(records_input_station)),')')  
+            
+        
+                c4gli_output = get_record_yaml(file_input, 
+                                                record_input.index_start, 
+                                                record_input.index_end,
+                                                mode='ini')
+                if args.diag_tropo is not None:
+                    print('add tropospheric parameters on advection and subsidence (for diagnosis)')
+                    seltropo = (c4gli_output.air_ac.p > c4gli_output.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                    profile_tropo = c4gli_output.air_ac[seltropo]
+                    for var in args.diag_tropo:#['t','q','u','v',]:
+                        if var[:3] == 'adv':
+                            mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                            c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                        else:
+                            print("warning: tropospheric variable "+var+" not recognized")
+
+
+                if 'era_profiles' in args.updates.strip().split(" "):
+                    c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp'])
+
+                    c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp})
+
+                    cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+                    Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+                    Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+                    R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q)
+                    rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t
+                    dz = c4gli_output.air_ac.delpdgrav/rho
+                    z = [dz.iloc[-1]/2.]
+                    for idz in list(reversed(range(0,len(dz)-1,1))):
+                        z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.)
+                    z = list(reversed(z))
+
+                    theta = c4gli_output.air_ac.t * \
+                               (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp)
+                    thetav   = theta*(1. + 0.61 * c4gli_output.air_ac.q)
+
+                    
+                    c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z),
+                                                                           'theta':list(theta),
+                                                                           'thetav':list(thetav),
+                                                                          }))
+                    air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1)
+                    air_ap_mode = 'b'
+                    air_ap_input_source = c4gli_output.query_source('air_ac:theta')
+
+
+                    c4gli_output.mixed_layer_fit(air_ap=air_ap_input,
+                                         source=air_ap_input_source,
+                                         mode=air_ap_mode)
+
+
+
+                
+                c4gli_output.dump(file_output)
+                    
+                    
+        file_output.close()
+        file_input.close()
+
+        if onerun:
+            records_output = get_records(pd.DataFrame([current_station]),\
+                                                       args.path_output,\
+                                                       getchunk = int(run_station_chunk),\
+                                                       subset=args.subset_output,
+                                                       refetch_records=True,
+                                                       )
+        else:
+            # remove empty files
+            os.system('rm '+fn_output)
+
+# # align afternoon records with initial records, and set same index
+# records_afternoon.index = records_afternoon.ldatetime.dt.date
+# records_afternoon = records_afternoon.loc[records_output.ldatetime.dt.date]
+# records_afternoon.index = records_output.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_output.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_output.yaml','r') as file_station_output, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_input+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_output in records_iterator(records_output):
+#             c4gli_output = get_record_yaml(file_station_output, 
+#                                         record_output.index_start, 
+#                                         record_output.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_output.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index eb2c3b4..0303d5a 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -66,6 +66,7 @@
   'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
     'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},

From eb902e6e0ac926ebe5190c57ab599c2ef50f25b0 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 29 Oct 2018 13:30:05 +0100
Subject: [PATCH 097/129] new procedure update_input.py and update_output.py

---
 class4gl/interface/interface_new_koeppen.py | 4 +++-
 class4gl/interface/interface_stations.py    | 6 +++---
 class4gl/interface_multi.py                 | 7 +++++--
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py
index a7bd245..3936756 100644
--- a/class4gl/interface/interface_new_koeppen.py
+++ b/class4gl/interface/interface_new_koeppen.py
@@ -116,7 +116,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_experiments+'/'+key+'/',\
                       args.path_forcing+'/',\
                       globaldata,\
-                      refetch_records=False
+                      refetch_records=False,
+                      obs_filter = True
+                                            
                     )
 
 sns.reset_orig()
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index 4fe9218..b290c35 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -1,4 +1,3 @@
-'''
 import numpy as np
 
 import pandas as pd
@@ -124,9 +123,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_experiments+'/'+key+'/',\
                       args.path_forcing+'/',\
                       globaldata,\
-                      refetch_records=False
+                      refetch_records=False,
+                      obs_filter = True
+
                     )
-'''
 
 if bool(args.make_figures):
     fig = plt.figure(figsize=(10,7))   #width,height
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 0409509..ff8c32e 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -51,13 +51,15 @@
 os.system('module load Ruby')
 
 class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',]):
+    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False):
         """ creates an interactive interface for analysing class4gl experiments
 
         INPUT:
             path_exp : path of the experiment output
             path_obs : path of the observations 
             globaldata: global data that is being shown on the map
+            obs_filter: extra data filter considering observation tendencies
+                           beyond what the model can capture
             refetch_stations: do we need to build the list of the stations again?
         OUTPUT:
             the procedure returns an interface object with interactive plots
@@ -68,6 +70,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
         self.globaldata = globaldata
 
  
+        self.obs_filter= obs_filter
         self.path_exp = path_exp
         self.path_obs = path_obs
         self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
@@ -219,7 +222,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
             # some observational sounding still seem problematic, which needs to be
             # investigated. In the meantime, we filter them
 
-            if self.path_obs is not None:
+            if ((self.path_obs is not None) and (self.obs_filter)):
                 print('exclude exceptional observations')
                 print('exclude unrealistic model output -> should be investigated!')
                 valid = (\

From 18c2a794819906842549bb0b81f93b581a21b439 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 29 Oct 2018 13:39:42 +0100
Subject: [PATCH 098/129] new procedure update_input.py and update_output.py

---
 class4gl/interface_functions.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index fbc4db0..0d9af4e 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -596,4 +596,11 @@ def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
                               (obs_afternoon.ldatetime - \
                                obs_morning.ldatetime).dt.seconds*3600.
     return stats
+def tendencies_new(mod_afternoon,mod_ini,keys):
+    stats = pd.DataFrame()
+    for key in keys: 
+        stats['d'+key+'dt'] = ""
+        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
+                              (mod_ini.runtime)
+    return stats
 

From eb6769cfcc8714f1cf502a2562327538b946cf46 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Mon, 29 Oct 2018 14:02:25 +0100
Subject: [PATCH 099/129] new procedure update_input.py and update_output.py

---
 class4gl/interface/interface.py          |  4 ++-
 class4gl/interface/interface_koeppen.py  |  4 ++-
 class4gl/interface/interface_stations.py |  5 +--
 class4gl/interface_functions.py          |  4 +--
 class4gl/interface_multi.py              | 41 ++++++++++++++++--------
 5 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index b895ea4..4531b5b 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -19,6 +19,7 @@
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--figure_filename_2',default=None)
 parser.add_argument('--experiments_labels',default=None)
+parser.add_argument('--tendencies_revised',default=False)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -125,7 +126,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_experiments+'/'+key+'/',\
                       args.path_forcing,\
                       globaldata,\
-                      refetch_records=False
+                      refetch_records=False,
+                      tendencies_revised = args.tendencies_revised
                     )
 
 if args.make_figures:
diff --git a/class4gl/interface/interface_koeppen.py b/class4gl/interface/interface_koeppen.py
index ec77bb8..ada9a44 100644
--- a/class4gl/interface/interface_koeppen.py
+++ b/class4gl/interface/interface_koeppen.py
@@ -18,6 +18,7 @@
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--figure_filename_2',default=None)
 parser.add_argument('--experiments_labels',default=None)
+parser.add_argument('--tendencies_revised',default=False)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -124,7 +125,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_experiments+'/'+key+'/',\
                       args.path_forcing+'/',\
                       globaldata,\
-                      refetch_records=False
+                      refetch_records=False,
+                      tendencies_revised = args.tendencies_revised
                     )
 
 key = args.experiments.strip(' ').split(' ')[0]
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index b290c35..f452269 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -15,6 +15,7 @@
 parser.add_argument('--load_globaldata',default=False) # load the data needed for the interface
 parser.add_argument('--make_figures',default=None)
 parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--tendencies_revised',default=False)
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -124,8 +125,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_forcing+'/',\
                       globaldata,\
                       refetch_records=False,
-                      obs_filter = True
-
+                      obs_filter = True,
+                      tendencies_revised = args.tendencies_revised
                     )
 
 if bool(args.make_figures):
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index 0d9af4e..b880ae9 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -596,11 +596,11 @@ def tendencies(mod_afternoon,obs_afternoon,obs_morning,keys):
                               (obs_afternoon.ldatetime - \
                                obs_morning.ldatetime).dt.seconds*3600.
     return stats
-def tendencies_new(mod_afternoon,mod_ini,keys):
+def tendencies_rev(mod_afternoon,mod_ini,keys):
     stats = pd.DataFrame()
     for key in keys: 
         stats['d'+key+'dt'] = ""
-        stats['d'+key+'dt'] = (mod_afternoon[key] - obs_morning[key])/ \
+        stats['d'+key+'dt'] = (mod_afternoon[key] - mod_ini[key])/ \
                               (mod_ini.runtime)
     return stats
 
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index ff8c32e..2e83af9 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -51,7 +51,7 @@
 os.system('module load Ruby')
 
 class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False):
+    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False,tendencies_revised=False):
         """ creates an interactive interface for analysing class4gl experiments
 
         INPUT:
@@ -71,6 +71,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
 
  
         self.obs_filter= obs_filter
+        self.tendencies_revised = tendencies_revised
         self.path_exp = path_exp
         self.path_obs = path_obs
         self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
@@ -156,18 +157,32 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
 
             self.frames['stats']['viewkeys'] = ['h','theta','q']
             print('Calculating table statistics')
-            self.frames['stats']['records_all_stations_mod_stats'] = \
-                    tendencies(self.frames['stats']['records_all_stations_mod'],\
-                               self.frames['stats']['records_all_stations_obs_afternoon'],\
-                               self.frames['stats']['records_all_stations_ini'],\
-                               self.frames['stats']['viewkeys']\
-                              )
-            self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                    tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                               self.frames['stats']['records_all_stations_obs_afternoon'],\
-                               self.frames['stats']['records_all_stations_ini'],\
-                               self.frames['stats']['viewkeys']\
-                              )
+
+            if self.tendencies_revised:
+                self.frames['stats']['records_all_stations_mod_stats'] = \
+                        tendencies_rev(self.frames['stats']['records_all_stations_mod'],\
+                                           self.frames['stats']['records_all_stations_ini'],\
+                                           self.frames['stats']['viewkeys']\
+                                  )
+                self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
+                        tendencies_rev(self.frames['stats']['records_all_stations_obs_afternoon'],\
+                                           self.frames['stats']['records_all_stations_ini'],\
+                                           self.frames['stats']['viewkeys']\
+                                  )
+
+            else:
+                self.frames['stats']['records_all_stations_mod_stats'] = \
+                        tendencies(self.frames['stats']['records_all_stations_mod'],\
+                                   self.frames['stats']['records_all_stations_obs_afternoon'],\
+                                   self.frames['stats']['records_all_stations_ini'],\
+                                   self.frames['stats']['viewkeys']\
+                                  )
+                self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
+                        tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
+                                   self.frames['stats']['records_all_stations_obs_afternoon'],\
+                                   self.frames['stats']['records_all_stations_ini'],\
+                                   self.frames['stats']['viewkeys']\
+                                  )
 
         self.frames['stats']['inputkeys'] = inputkeys
         

From 9ebd8c6b7a6140b30af8055f9d822632c4ae0af5 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 6 Nov 2018 15:34:10 +0100
Subject: [PATCH 100/129] full update

---
 class4gl/interface/interface_new_koeppen.py | 107 ++++++-------
 class4gl/interface/interface_stations.py    |  65 ++++++--
 class4gl/interface/taylorDiagram.py         |   7 +
 class4gl/interface/test.png                 | Bin 0 -> 17020 bytes
 class4gl/interface/test_histogram.py        |  25 +++
 class4gl/interface/world_histogram.py       | 164 ++++++++++++++++++++
 class4gl/interface_multi.py                 |   9 +-
 class4gl/setup/setup_goamazon.py            |  63 ++++----
 class4gl/setup/setup_igra.py                |   4 +-
 class4gl/simulations/simulations.py         |   5 +
 10 files changed, 351 insertions(+), 98 deletions(-)
 create mode 100644 class4gl/interface/test.png
 create mode 100644 class4gl/interface/test_histogram.py
 create mode 100644 class4gl/interface/world_histogram.py

diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py
index 3936756..27bc478 100644
--- a/class4gl/interface/interface_new_koeppen.py
+++ b/class4gl/interface/interface_new_koeppen.py
@@ -16,6 +16,7 @@
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--figure_filename_2',default=None)
 parser.add_argument('--experiments_labels',default=None)
+parser.add_argument('--obs_filter',default='True')
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -117,10 +118,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_forcing+'/',\
                       globaldata,\
                       refetch_records=False,
-                      obs_filter = True
+                      obs_filter = (args.obs_filter == 'True')
                                             
                     )
-
 sns.reset_orig()
 key = args.experiments.strip(' ').split(' ')[0]
 xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
@@ -217,7 +217,7 @@ def brightness(rrggbb):
     print(np.sum(kgc_select))
     koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
 
-koeppenlookuptable = koeppenlookuptable[koeppenlookuptable.amount >= 200]
+#koeppenlookuptable = koeppenlookuptable[koeppenlookuptable.amount >= 200]
 koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
 # koeppenlookuptable = koeppenlookuptable[:9]
 include_koeppen = list(koeppenlookuptable.KGCID)
@@ -241,9 +241,8 @@ def brightness(rrggbb):
         #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
     
         #print(obs.std())
-        #if i == 2:
         dias[varkey]._ax.axis["left"].label.set_text(\
-            "Normalized standard deviation")
+             "Normalized root mean square error")
         if i == 1:
             axes[varkey].annotate('Normalized standard deviation',\
                         xy= (0.05,0.27),
@@ -261,9 +260,11 @@ def brightness(rrggbb):
                        )
 
 
+            # ticks = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x-1.))
+            # #ax.axis["left"].axis.set_major_formatter(ticks)
 
-            # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
-            # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+            # # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+            # dias[varkey]._ax.axis["left"].axis.set_major_formatter(ticks)
         #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
         # Q95 = obs.quantile(0.95)
         # Q95 = obs.quantile(0.90)
@@ -317,7 +318,7 @@ def brightness(rrggbb):
             dias[varkey].add_sample(STD/STD_OBS, PR,\
                            marker='o',ls='', mfc='white',mec='black',
                            zorder=-100,
-                           ms=10.*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
+                           ms=3.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
                                 np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
                            # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
                            # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
@@ -325,7 +326,7 @@ def brightness(rrggbb):
             dias[varkey].add_sample(STD/STD_OBS, PR,\
                            marker='o',ls='', mfc='none',mec='black',
                            zorder=700,
-                           ms=10.*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
+                           ms=3.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\
                                 np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
                            # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
                            # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
@@ -357,53 +358,54 @@ def brightness(rrggbb):
         for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]):
             icolor = 0
             for ikoeppen,koeppen in koeppenlookuptable.iterrows():
-                print(ikoeppen,':',koeppen)
-                kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
-                koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select]
-                koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select]
+                if koeppen.amount >= 200:
+                    print(ikoeppen,':',koeppen)
+                    kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID'])
+                    koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select]
+                    koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select]
     
-                #axes[varkey].scatter(koeppen_obs,koeppen_mod,marker=symbols[ikoeppen],color=colors[ikey])
-                         #  label=key+", "+\
-                         #                    'R = '+str(round(PR[0],3))+', '+\
-                         #                    'RMSE = '+str(round(RMSE,5))+', '+\
-                         #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+                    #axes[varkey].scatter(koeppen_obs,koeppen_mod,marker=symbols[ikoeppen],color=colors[ikey])
+                             #  label=key+", "+\
+                             #                    'R = '+str(round(PR[0],3))+', '+\
+                             #                    'RMSE = '+str(round(RMSE,5))+', '+\
+                             #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
     
     
     
-            # # pl.scatter(obs,mod,label=key+", "+\
-            # #                              'R = '+str(round(PR[0],3))+', '+\
-            # #                              'RMSE = '+str(round(RMSE,5))+', '+\
-            # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
-                
-                dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
-                               pearsonr(koeppen_mod,koeppen_obs)[0],
-                               marker='o',linewidth=0.5,
-                                        mfc=koeppen.color,mec='black',#koeppen.color,
-                                        zorder=300+icolor,
-                               ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
-                               # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
-                               # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
-                               )
-                dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
-                               pearsonr(koeppen_mod,koeppen_obs)[0],
-                               marker='o',linewidth=0.5,
-                                        mfc=koeppen.color,mec='black',#koeppen.color,
-                                        zorder=301+icolor, ms=1
-                               # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
-                               # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
-                               )
-
-
-                # dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
-                #                pearsonr(koeppen_mod,koeppen_obs)[0],
-                #                         marker='o',linewidth=0.5, mfc='none',mec=str(koeppen.color),
-                #                         zorder=600+icolor,
-                #                ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
-                #                # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
-                #                # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
-                #                )
-
-                icolor += 1
+                # # pl.scatter(obs,mod,label=key+", "+\
+                # #                              'R = '+str(round(PR[0],3))+', '+\
+                # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+                # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+                    
+                    dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                                   pearsonr(koeppen_mod,koeppen_obs)[0],
+                                   marker='o',linewidth=0.5,
+                                            mfc=koeppen.color,mec='black',#koeppen.color,
+                                            zorder=300+icolor,
+                                   ms=3.5*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                                   # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                                   # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                                   )
+                    dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                                   pearsonr(koeppen_mod,koeppen_obs)[0],
+                                   marker='o',linewidth=0.5,
+                                            mfc=koeppen.color,mec='black',#koeppen.color,
+                                            zorder=301+icolor, ms=1
+                                   # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                                   # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                                   )
+
+
+                    # dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(),
+                    #                pearsonr(koeppen_mod,koeppen_obs)[0],
+                    #                         marker='o',linewidth=0.5, mfc='none',mec=str(koeppen.color),
+                    #                         zorder=600+icolor,
+                    #                ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float)))
+                    #                # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\
+                    #                # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7}
+                    #                )
+
+                    icolor += 1
     
             latex = {}
             latex['dthetadt'] =  r'$d \theta / dt $'
@@ -608,6 +610,7 @@ def brightness(rrggbb):
     
     if args.figure_filename is not None:
         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+        fig.savefig(args.figure_filename.replace('png','pdf')); print("Image file written to:", args.figure_filename)
     fig.show()  
 
     koeppenlookuptable = koeppenlookuptable.sort_index()
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py
index f452269..a7dbd21 100644
--- a/class4gl/interface/interface_stations.py
+++ b/class4gl/interface/interface_stations.py
@@ -16,6 +16,7 @@
 parser.add_argument('--make_figures',default=None)
 parser.add_argument('--figure_filename',default=None)
 parser.add_argument('--tendencies_revised',default=False)
+parser.add_argument('--obs_filter',default="True")
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -125,7 +126,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_forcing+'/',\
                       globaldata,\
                       refetch_records=False,
-                      obs_filter = True,
+                      obs_filter = (args.obs_filter == 'True'),
                       tendencies_revised = args.tendencies_revised
                     )
 
@@ -137,7 +138,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     #colors = ['r','g','b','m']
     colors = ['k']
-    symbols = ['*','x','+']
+    symbols = ['^','x','+']
     dias = {}
     
     for varkey in ['h','theta','q']:                                                    
@@ -150,6 +151,21 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
         dias[varkey]._ax.axis["left"].label.set_text(\
             "Normalized standard deviation")
+        if i == 1:
+            axes[varkey].annotate('Normalized standard deviation',\
+                        xy= (0.05,0.36),
+                        color='black',
+                        rotation=90.,
+                        xycoords='figure fraction',
+                        weight='normal',
+                        fontsize=10.,
+                        horizontalalignment='center',
+                        verticalalignment='center' ,
+                        #bbox={'edgecolor':'black',
+                        #      'boxstyle':'circle',
+                        #      'fc':koeppen.color,
+                        #      'alpha':1.0}
+                       )
         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
         #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
@@ -221,8 +237,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                                 'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
                                 'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
                                 r'$R$ = '+format(PR,'0.2f')
-                ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9,
-       horizontalalignment='right', verticalalignment='bottom' ,
+                ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9,
+       horizontalalignment='left', verticalalignment='top' ,
         bbox={'edgecolor':'black',
                           'fc':'white',  
                               'boxstyle':'square',
@@ -234,7 +250,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                                 'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
                                 r'$R$ = '+format(PR,'0.2f')
 
-                ann = axes[varkey].annotate(annotate_text, xy=(0.05, .98 ), xycoords='axes fraction',fontsize=9,
+                ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9,
        horizontalalignment='left', verticalalignment='top' ,
         bbox={'edgecolor':'black',
                           'fc':'white',  
@@ -290,6 +306,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                                mfc='k', mec='k', # B&W
                                #mfc=colors[ikey], mec=colors[ikey], # Colors
                                label=key)
+
                 istation += 1
     
             if varkey == 'q':
@@ -299,13 +316,38 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
             elif varkey == 'h':
                 units_final = r'[$m\, h^{-1}$]'
     
-            axes[varkey].set_xlabel('observations')     
+            axes[varkey].set_xlabel('Observed')     
             axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12)                                     
+
+
+        # if varkey == 'q':
+        #     print('get_xlim not working well...STRANGE')
+        #     limits =  [np.percentile(nani,1),np.percentile(nani,99)]
+        # else:
+        #     limits =  [np.percentile(nani,1.0),np.percentile(nani,99.0)]
+
+
         if i==0:                                    
-            axes[varkey].set_ylabel('model')                                            
-        abline(1,0,axis=axes[varkey])
+            axes[varkey].set_ylabel('Modelled')                                            
         i +=1
-    
+          
+        axes[varkey].set_aspect('equal')
+        low  = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].min()
+        high  = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].max()
+
+        low  = np.min([low,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].min()])
+        high  = np.max([high,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].max()])
+
+        low = low - (high - low)*0.1
+        high = high + (high - low)*0.1
+        axes[varkey].set_xlim([low,high])
+        axes[varkey].set_ylim([low,high])
+        abline(1,0,axis=axes[varkey])
+        if varkey == 'q':
+            ticks = ticker.FuncFormatter(lambda x, pos:
+                                         '{0:g}'.format(x*1000.))
+            axes[varkey].xaxis.set_major_formatter(ticks)
+            axes[varkey].yaxis.set_major_formatter(ticks)
     
     
     # # legend for different forcing simulations (colors)
@@ -320,7 +362,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     
     # legend for different stations (symbols)
-    ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+    ax = fig.add_axes([0.08,-0.02,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
     leg = []
     isymbol = 0
     for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
@@ -334,7 +376,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     
     ax.axis('off')
-    ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
+    ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10,ncol=4)
     
     
     fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
@@ -345,6 +387,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     
     if args.figure_filename is not None:
         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+        fig.savefig(args.figure_filename.replace('png','pdf')); print("Image file written to:", args.figure_filename)
     fig.show()  
 
 
diff --git a/class4gl/interface/taylorDiagram.py b/class4gl/interface/taylorDiagram.py
index d9e9e26..5ece652 100644
--- a/class4gl/interface/taylorDiagram.py
+++ b/class4gl/interface/taylorDiagram.py
@@ -10,6 +10,7 @@
 
 import numpy as NP
 import matplotlib.pyplot as PLT
+from matplotlib import ticker
 
 
 class TaylorDiagram(object):
@@ -83,6 +84,12 @@ def __init__(self, refstd,
         ax.axis["left"].set_axis_direction("bottom")  # "X axis"
         #ax.axis["left"].label.set_text("Standard deviation (model)/ Observed (observations)")
 
+
+        # #ax.axis["left"].axis.set_ticklabels(["1.0","0.8","0.6","0.4","0.2","0.0","0.2","0.4","0.6","0.8","1.0"])
+        # ticks = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x-1.))
+        # ax.axis["left"].axis.set_major_formatter(ticks)
+
+        ax.axis["left"].toggle(ticklabels=False, label=False)
         ax.axis["right"].set_axis_direction("top")    # "Y-axis"
         ax.axis["right"].toggle(ticklabels=True)
         ax.axis["right"].major_ticklabels.set_axis_direction(
diff --git a/class4gl/interface/test.png b/class4gl/interface/test.png
new file mode 100644
index 0000000000000000000000000000000000000000..cdf991517e5da6823c279fbf801c610d55b5e0e2
GIT binary patch
literal 17020
zcmch<1yq%57cKmPg^2}NfCv~gD0)!JK-{3Bbb}&F2&jb8cxNd)#Ng_ZlrH%kZ(E_tgeU_p*-;@J)<2OdG#Y
z3(}q(8x)O+IcHpVb1E-9-}xo)r`7A{XgRSY?C<%OQyFKJr4q}xV~5VNWy|~qq-V{V
z#j$GD1)9lcUgNbCWg@})i;C8cr584rUtPTRW|u+sg+xO!zub`@3+?Nk?Y*|`+|L6_
z%F4pqwrLhW*-@=3p7V1sJH*x8sjD`;VY2&tlyOdGrbAoEaD%6bn4hX0W6~+HT0iml
z6&z%{O>G$Nto@s7kFS8BfPlYWObO%sMqy#qH`my8QmiU}I%P>8f6ErI)3G4B*yNkN
zhGzA*99QuXL)uv5CH9Iqqg_|7Txly1j<5Lf?pC22<4Jt{WZ_iy5zhxHY6a(}Jc{3Y
zudU2=ca4|d@^PrEt}^SqosyE$7AYxpIoBbP!H1XjURz>W5pne9ty>xv77@#rFLxNO
zKOY?&tVrMHHgZPVqBQuehs?C$%E^i19Ss&A?(&_>s*f_tO676u-M#wivw;DJ!LB-;
zqeqY4ym6zqtsBdcN!HocLk$ac1u8>%sT=sxtI^4(59^U+SBl2K9X0CRVnkHaM!3=MBn
z#v7AfzPweR-gz!V^X17y^=|#eH*eprXqcWlZ*;!i(KtW5@BLaUM45iF1s|=Ts7Uk0
z;eem#GPc=%{R3fP(39PN2VoWd^y#`okGFm77)`5pda`DRZH(T_lOH7(Ire-r>g(&%
z&vnZf8|-Lx?EKwa=-BM5BP1z#6tOhc^2BbS);iwuTefqQR`f%GNQ6X7Us{7l7S?&2
z>(FVNy3|9{J=2p4m600CJ_=JKbH+nOO6qgnvs?z-!)t1EnG>CV7&CuBK?Lw$kbzCe7qs}icA4{5D4G9g+xikK|P`on5S_hB2
zEqA(#C95|3MrbC~*cum;zLr<(eJ(U?7W=zK_^SWzFTUC@I;3`9v+2y82n#c-tXSDL9
z*fki}XF5ionV!%O@*B(!h!>emG%w_I|NU{(8rcg8?NzbSJ-OqD7jlVd4tLk5SXYZ_
zMXHAvN$WSqWp$Thk;uwRTUQ-anCQK<-S%rleD0WvO>J`DXpT-;2tolD@;sl%^&rxt
zI^t;PK+)C`i(jSBT)to7a_DW`?Y(x#;ql=fMnsSt!$I;>^ShBwA!bBGe`8O-zNn^_
zE~vVQCMb_kD{k@_uWMN2*j^#|DNxSZcGtO|*Ox6_YAQR?`=%xJXz2dciHV8tk+eO7
zJ1@;+-~QD$?QDC+7T~AZ``fFp3N)oLCT$WQKR+B`b@}SmHF|n_Evcc(K5KUG-i`OS
z7TKuD_Fd-tSd(TKRNa58mPh7XSoM&{Z;`F8i`O`K={-*
z#)LDTcT4KaQzdI0JF29CM3SdIt+OBRYs&ZT?LA{wlW?ph_4k09-uOiICj(kRIj>;A
zi$hPgucqHNFMQ~m^}hT;O2gte5?B;Tr_LJny5EC2tsNPYy>HkT)80=GH`v)4q*(d#
zeY7YGxl-*UEbprFyRc;ToAf_glVXC5nq?T~u`grdyE?{MxOn}%#8;Ha4L_}lGwK+;
z&>C>QX!6qB#pPw8%7d@w@%qH^faU*q*Sa`^C_
zk`hrQR;FXe13Ene_dHGpfIRmEm|S73WZ7;T07cvl)-XlUwTj9id;mk-w7z7Cl@fP_bQ?~l@styPLo
z+=jc+(AR&wgje41bWnUxPQTXAuPWtROJqaaE2FJzlPz0PLkfdjhr26(e!O2ek){77
zNIR(T*5DVdhdT?$YPEBD&-9&{KO$9zdSwzYyF6M)Jfq$=2k6)S<>iYo`)4r-gRx>e
z4}RKCr%wN*q@;m~{*sQ-)*$i307SLRbS%5wSTa&~@O~(Exe(%(pIX0Ww{x^|{FbettUL{pWMd^6#__
zw0*YvntnFX9k^QNIn~eGpN=C%qD;KIgZqg@$d-TpTnUWl-sN;qMnsCHjeNCJj|j3oi+OO%sg#kRdu2L`OtzmXb=+eZs&0_GYKGd!4O%!xduX{kS?_
zMvFK;988IIQ)v9bNA28)GKqv77^<_ge)Z=Z6pe~}UQbdH9_3$<<)M`$*tVUnNmyQI
z`6Vg5x}k(^zPeOUQc~Sg{A956Wy!gxIr
zb=Mo5Hlfvt%`q8&Q{AtVJxPgPfur0Z*{%VDR~p;PX>D9W=M=R+fVyfAFK{pW-RQR)?j94
zCVjGKmG|nc`;~oow2-g^HD*4Rc-UOwc7;)!e@&iya0YQAXv=sIB!?vV?dIg$I`J_4
zQ&R5GyRodf)@Rv+F9`?=Mgk68n!I-mykBby;)FD
zDOxAK>Qs=scBk~M{nBySzg5gno$BzM9uLas`EZY4P;8UtQSDqtiB4d@cGVQyqD3Ox
zfAphL#a1jUyD4PN;Ns-u%revi&3gCl-4+Ff>5xGiJwO5Nw@Vj?u=X$H)B^{x4^Mm4
zJ%)7M#R~hsXazEK%p(ETMqhg7Jy^`|<#*%!H)mxN&q>F`u4-e~Sm#g=aEr0wp7?4L
zPos>C3>Q@UTigmLC^^$pG=*bxl+@#PcRe
zSMNDJ*_;2L>Fhx1NG4+)D-n78HCCH?XTklHhR)FwVLzi&ws4onH!}U
z8RF$d6+halObF!E)G))krRdRPDf63gxnou|zoD0{$DbA^c_5m5wDdYeI;?eC##ziA
z=AT^Cv|%j8+*Dw->O+yfogTRPs{(JD&#i82YZFX;^yrc0ZSGxF*L{3;{CVk;x4-|Y
zP?XH~{Ur)^JQ~Tj^MDR2v7!nJ4H4*LzB^@k18Ds&6!H7W9ydJuj$^-~qF0979!nd)
ze*eDrhYtrrReXis0}$-o5L1Gw8J^P~
zW}tifaSz_5rAI>6wx+ABV?EnX}g
zr4w&EfmoJAF_T1-$iI2>2i;7E$m)Lg$<8e_TE0m1%q9hvJp`sH0~OY_BLin?N=qKBr@9S?lq
z_lW?%^2ljCb^3J7Rd(Ju^te_an)>Nyp8f9cPjMd~T3-DI=R3kYpD~aBY}P_i!}KFy
z=27}-wz1)=e%ekl4BV`-OIJ$ugM0U$;mHVY+=!UhbGy#cYyHo8iPG?$N0I#aKPfo;
zFK870!3AP@pYUKnsbY|P1kGs})t6H4M-T_J)^Ca^RKA9bDQfdA^oscKlj%FVma&x1Q}S~U4GAHF
zO3m&2b`9rRuGr!3=kfplBDbv2{4&STb|WKk@2iVq_4mK-bBwymybJ1`1giy~PkyO_
z)4K8og6+3a2l#e7h*DIq
zTG+~k+zO6jRMmCU0d|TS5oZm);!z|>E_+&;O1`_A%9tjvOXBraED(yE6shE;L_Vrv
zVJY2{KOc9TH
zItcQH4I3UjeHxRUE!*$^@%|?3!S+i1jC0}JPuB#aV~qBko|3OLo11rlh+4fSsf)cf
zsRdSWpa1%d%lN`>)b1T=Yp*_g5#*r{*w<*tb+1e^SH*b_USsFgwz7&6*s^Pvfq3pn
zNT5smmcd_Tdr_|{(NYe4+!SO1RN;ubu}qPoOy}XqV7Zl$Ok%>q1me5Khq^TO@4s5@
zlvWpehkKV54o?q@HArhOdbHIz-_WsA3;7f1e9*Z$U`s{hpL=MxnJdkS2dw$FBc2h5
z6Im8M6n!^je&WOdI%)7zm6esBj7v#Ltz5o51XRq|l+kgH}&-XeX}W<@qyJe
zbuZPZHCBbTb0Fv)%UYHPz2?xO*&&bFHdy{ARcLnCm
znfsynoKblu;`F1qU(#O&
z5v%@eK6h=R*)?#v2vne06UH~U!)VgV%gZ15`){UM)uqOwI_2E?lH;1TWZfU8JQ{SM
z473erxtkLYrhhJB
z_m|ah6bNtHq->jJfJGUTS5TCj%G|B>=FepNljqF+pd725onkb!HhgLpNX?p8YQhoz
zi`oy>)hmdf(tj%<&Z2XU508WkdG?{?zTyEdBp72ECHmtSrvybM%x4W&l5H_uvwS_4
z*lxDst%N1`Tf#!}GBb6y@1~O3?knPQjFwWO$0w4PuP5m`vWtAeEeK*Z^B6a~Sr!E=
zGfx|*d)bv-0Q2)69EO6`$iOpvIQGhV)(>ptMCb9#1=*NckcB%hQWp0@^zy`*S747f
zCbE~93UUE4&aja06tg->E(TJJ$l#14Vy4IwF1-HFbdwQ>*UVulJDvWKV&%rK5T{VHzXv4ocnuOg1!$E9>4Oa{^!|TjyrO!Gxaiq=rabpuv@zD~s5t2%zOq#lrQz~#a
z#i31oF^{Y+l9tHc+A+G%K-X31xl?1-gr1|pREJnvFz^^61(<%pr%zQ4@;f8Ad2ghZL+y5KDOw$G7MIyBgc4OE`&UFW+sPn-yYu<2NB
zbiNHLfHqngurWT`%{zC7cFOPES^L!wtDvpYuDXBlZW%`
z=H_iY^w(+bmoBOa4#!*n7Xg=~|5k$|w
ze0jFqNaH0k1p&zpv%kNG$o^Y2o=4#4&6_vNNBWw4{rz=YR_8I^?(`zafVlMGk&A+$
z){e%T^mqt(a%eKYpofijXQ`nVUL)s{3^lDHT$L+U@8$ku$Bvy`3!EOczJfFP`ujd
z@8Q3p&kLRdm+21jM;x9>kPRf
z2zZT&dj!+AWX`+G{G|c-rS34N=G{N&a-6Jj%`7G+XG}}60$yeint5twsreU%U!KWh
zazy=krGoz|P`yTZe0BCzy$-R05x0r___2lOZ;XG!wn&82;E2t4p$*h*ElnV1oLvcMN%XCCu?;UQP^JG${?Nnk8SxY6MHQ5rAqK!Z+p9kz`(L&FL+on!6VXiN|A
z>@3mv8|VLQZno^KNsQ8a`FsOEKanlXPo8|36=?70RWkW#87>Q~b~UvFoe{b%D`1j-
zmQyTde4)6~qd=|}N&D7Ml@p`=Ak!Pd!^5|1*s$m0$BzYtgaj&aqBLq}fjLIL#tPy_nVQgn%TUZJV-0qo^ZU0ZOOS@n
zM#I4&SzSYe=*UFTe8*){ag#%slzLh@I;@IhOAVWE*?QpRLiTd+o%y}Qv`Knj?0Ie3YN!((SW097QQ8=0CjzJg`=i@ff3ZUpKm`m#dlId5i<9nQ93+9-*-`WBmxa9vnz%iC_Gn^6s_ruy
z4*15b>4`$?&z}eA3*;1d-^h94I1Qt2?2Tw`amQQwTPMedJKA0Z5mCO?`m?!|Q>U(b
z-#ads$-!#zG3(+$IRo3YI=8p^`KrH-mt30w*O~|@VM(xS$kk6h{YYR7%mA2yRA9EQ
zLs$=Hrvn*zCR;Hmo$N4~E+6k<#6vZFTxWLtxIkph?Qc+x?M+ZF7c5xtz}Hu+U6QfQ
zv0W33`oss_A5HyP>jZUx35~_ARJu*wcE_$-Eqbz5r4U>U)N-o;l
z^xpe(ogE`~8j2$dac^zV-Mhhm^=Mxo-oGCT)&>5eLMO8oBKw;;^?Zw+HE)T3{)9Sa
zCE$SBJoNL_Pon!Y_PFKF>#c~?h|*7q5@^dA{-%Kwax7gMj9P7pzshsz(n&nNyU8(@
zuQ2rt(#+%GPOucr3JROgS;8vdGEk;mnSRy`c8On@4{913K1D7RnJNF*iXFE)kM8i0
z#JefK{JcDrbLjC}Fob-DjvNh*03$0NIjcK&^5
zI5Ae=pg>=Up_ybkObp$ANhXG1w&4eEfu`YKYxDca_YF6-%z$gffJzj%kRVKDbfW^F$mwuj#<%c-l}%t!9|THEy9;4!2n|MBC;N>0w_
zP5D07V2ESHaq#@m+G5PxcGNrEuXG&uwKth_UTk!xGzF;LS0u9`Oe-2{)6a$yZwSO>
zkWu~2mdDl*Zcckh*EL7|+E~71>r`Y>w*95Z@ska2)g{ofNp8G2^2dAu^bH8D3fyFD
zRQ|(}ji4c>o8&!LunCl?U5YrNfM4g$R$RE!n>shmiw0#DYw(;;F0%BS&q+Hof>vkF
zP1~6+iwIS$r*180wuCiaf-JqEKJsLG2gQL5d}ws|ia2b(rkCApnf#`vA(N<(k0M$Lg68zTUbtY%lI>~d
zyT9r|;U-<
zSsnx
z6yQ!CwHfv=V3v>(PPf=rq*s`CmvGPNe&^Nx(2Rxn_;!<-qpy(GK(n7oJ6MG#l4bxP
zrF$gA{Vww!Mr2vj(brAFNuz}{UJ2;)@}h{C1i`=R^86J=UtS+poO!(cY^KMKJp&05
zq5%gVLaG*_5e5fp3QXq<45Qf)tBGDG?
z&U=;Qo+mr(sx;K(!s~3a4#E1OUDZC!`q2}iH&!&0{3`8`b;7o`L3EbkDW&oi65=Pc`p9Q&Q
zoZ|oeW6VzPqUfwfgi8c*Nnj`i1wS$}(s+FF|2p*?)8tyX8xgoG!TiL>;$k8}41BdO
z@lE&qp9wfElK)-6agVyCnn!TwPoXTYo4@L_(p-kSjef|3?GJobBW9TPy0mx!xG&71
zd=S}wE;~;Qbc>vkAGe6_(q-TmAC34#9>dLpKqMX(j=v9jT84GR8Z`R_C_KpYqdHie
zV2O!97eu3{+SW&HJ@x4Vw|%9SPNZ>;ON*r7$C45qDAmFfpkIBjLhyjmcEzBuqaUbiD>&z@DG(e63es(*k(X`6Y$%F2kN
zdoll7qu`MZSw>iPmhMalvZpTc3UVKM6vvoy>2DpkchM0Kk6g@wQ!ri)YCq0IiCKQfR_!kJ}2pJe(P
zaiYX?%+MyTte}Q1=@yoJ%-)~m+4Y&IAsNDqX)>q!R}x-lGDMOMiA>#m)`nYG(uq7m
zQjbrJU?=k!GXew>=?KNlq`5fUkL9F{gK1ou0Tc3#nTkg;S%6R!a^N|oPY7gP7x}j@
z{Bw#K&E`Kc^amKTm&VVQ77Qm*+_&u5k(N}rTLx;(Kk)ER?#@J?*N5Z(?c9>{0Wr+U
z+&@V0FR=Nqa6)u)<`=}6H@|j0(MDZ2=V%#
zTG1PK_~$uU&W4}c@6vp^wQUcbbhOE=mNJLdp7+aEM@UF0Dya}JY+iwz$#YJhn4LGD
z%?mI5ZF0|_m)9ETdU)}gZATD*VJD^xpBU^uZR5WH33$&YGj-
zT@spTz+v5eQPh>}-xr5aG<>Ua@pu2qpN~9k#~RnnxqMmcxb)C&28+3U;#Azz1~D<7
znE*bjN6rD5>G*Og$X5WI3^QZviIF~+#*6dBMkX*sM$dzuZR^5S9-ClIzjyB*Jq&m#
z1Q!EcIBDM4;>}NP4W4gSpWy!4U^mZ);#|3s=;Mzux5^Mj{t$H?J#$8F)CacU7wgs9
z`sb?r;Kiz(v6R_PdB8bIyjy6xtA_wo?r25@>8|xJ2hsi>O5KXJIk^j-uB%CDxQg+V%G!+AK!a8e~FXpr=UF>uIhv(Bo9bXga))8h8bAEI@;y&)0B1k}w{gf3&^uQ~16%|3hYK>;nP;oi{Oz
zB)TJ9P}8C=M*%G3p+9Uo{PYM~d{)+{&O6qzlqw@O1*aPJIyr4;jxPmIy*I|q{_Uu^
zujocv8U}nfM6k>d-Msk#Ld#c|gQ67M(#iZR?-&zx|Lbaqxlrrm-O@qv;jL;)k%LqK
zu0tmjJ_3TwO>b{4OqGf41>RRKYvM^J3ft>9Z!p5wv4rMnYy<1jJ~6*SH*C%@APfez
zRgfmE6E{%H3$jm!D{uqQ!+7LeE_4kKF2f`mQj=e1JPb70Lor&py*sl*4BldY_q&fD
zTMZQ^9D76dI*|UoMc%J498U1A21bKy_a%1uBQWj28K!=Gno|!Re8Mo$J#B`if?xSg
z1@^|6b_dDWpS&R*XOQ-(It{Czep&1zn<+q!$8+K=H%j)~$so7iH=y}MLhRKbrl4)u
zaI%^jDiXDBzE?dx$F)Cr>%sv_IY*4-~4yaQhS3l(B;&r($Yyq{&AbzHwLp
zRLD?K8!ZJ8UK2jY7dmxdh}HM_m9u%Uvq3mzopqM{CB20S$SF{Lm)TH^Ba
zp0WcBQ0-(h&f6O+Fsc(L?Lmtq*`Yzm6#5_Rr%~K{bO-F%}E4jHvX;B7ganRP%&mY<(P
zj6NJWnT4j7+}(^!X=$
zi9~jTl#@L`V}1F&j``f@ku#|^l8_auMAN>8%;UKb9Aq5CL+nB)*<1uUO^Nu(tFB_z@l3IoXxCtO
z{kTru0Sr~4D!yQ5zX23?BBK+L1&SJORQ=i#?xINoc|r2-$+q=7$>6#?L-nOn7bjfD
zc4W%{%I6_^GPcfuH3Z1HYGX!<%N|<8?TRrr3@GB*D}foo79bz?+QjMDK-;OtrX2wq
zS(2Oe(L10QajaY!0dJ-8_uqbGt^jkczs+QloyQjjSpwMm@hsOPbvMJ(0~zzPeTiJ_
z;8&u$NJ*8uJ#DZ9j`E1Li8MkLFcfD@B;}5^N2~idgIAFKYDDd75v=Pc-YNPvv-gVt
zADJ~%`%uQW~
zgXo&#(tj*)g#1P_IyUWFyVf{)*7aHEJ0SdEBgCo5$r^}Uz`o>~&8C)o;
zKP4MH>r}B&p25365oKEU|>EpMHX8tKBzQ?mx~b5`ZN86$d-2T`Q-^wn?b0npiSU5#1yD
zI;wOYmtTCiX!x+7-n)0yY8qRn42b!=J7?H91)Gd88PZQMy=d&xCJ2H0t*n@Uz&^mK
z6fdeiLm%B!uY#W&cOG$ZKtlpEy}5>({Rz0TMt2+vtqx!2S1meTOt_n_j~
zn=;lPG#vx!K0cM5M>b+tPxj~2q1#&1?mc=GiDmMuWk5oe+7z;K{-Q+>1o|AyFsd~c
zbEdM92n0|prY1&PQpu)Ia*xC)&N6H6W`6#?5C@F?%o&qI2Bf9}lt|wThHLsMQ`E`~
zn+R~5+L1OZjpXGalpYS0jfAXqlw9Nf+pC`K>Q3JaQIYN-ww|++Y|*5ZBX9*j8t!aH
z>12i}{E^srqYv6c_QDo5pCHG0cZ-Y2SMW*nwqL@;_!kt)s#)x^rgIBIdLIP@*i^~G
z0fTA52kU?w*xTG;uP5mI2U8*x$ZcGQw&xVtxTYkofYd+zu#Q)FAPK#Ew)&0v~X%CtG1pjk^m<%Uv>3ZjBeuV32s;xRopzFW-O+il!HT4ntXoh
zaBJGp@+PSNQV2}@SGkgl-uS~(R0b^ImzDi`-P>EbJH2U16N;4aq>kn;bo=6d$IiIN
zAHd)etFWyeyJ~!E-^XX5-jf*(860tiyQ8ZZZOZcUR6lZrJp76Faw)$M?}gX^eal&4
z0|==SQxI&F_`lFrq;5y7pr@|KTE83e=H8_Z9)yZ7Ztpsy=5cCY_=L;e1cnJdp!;n*
zg^k!ty}Hntc0m5E#&a&Vfv<`vXkarsH+)&VJU39R2{{<8ZkCrfw#ylOn9&7XCA)PT
zilwXl>xlCJeOqlu2C0wmx)x8;E8>ULy>bH6UQcWUn
z|DAP_NCj-4b|FG`@g($RdYE-w#(rEdKs)VM+B~n{a8jp>++HW6j?E2{o*r%CexR?{
z&!v!Ur_9N*19orpw}ObbKYwCd4#EjbyAv@n5NACiIXiH=nlZ%`sS%iD?+}@s5dJ98
z_E}BAR{)oOSozMnz6H_LnaE`niy`OLMXN*LXCOBDT81ZsMEv5_QelQRg!0od9F~6f
z_N~7{npLHW=R^~4Wr)&3!cVcqchy^XrOT>_Q@dWMRUyb<)O;5
z&L#gY0A4e;sgBVj?KIKj;u%turtT4QE{f8QfECR)cl0EoHt;Wi8G!Sqh**we``=?3
z`{z@WJ)Rw~C6OKoBR8YdvlF0^w*(o9`5ixg+yPtjRIMmVnB2{Fvd(9l-qYxy?it6&
zXR)%LSar2J6*bu~p`Of!Fx#uVuO?46L~WUwzmdvNeU_N76tWD#Ljr$Kt)b48F%L&}
zF8}!_D;_bNO6YT>v*U-SCO~hXmW~qpLg)N%$XA`nH*xxK6P(
zYTlNVdhg~Q-JySWop7O4xMft52*>PK28TXxd;FZ1+rv2gA=ySa|LjVod%tCM=mHDg
zoT+Ar6slJ{wvQVJwnlEgX||X(kM}E2&_YQthd-n@df^8b^PgNuXa185dGBOdyb_Lv
USnE should be investigated!')
                 valid = (\
diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py
index 3cc3c74..2315353 100644
--- a/class4gl/setup/setup_goamazon.py
+++ b/class4gl/setup/setup_goamazon.py
@@ -179,14 +179,15 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
         dpars['STNID'] = current_station.name
         
 
-        # there are issues with the lower measurements in the HUMPPA campaign,
-        # for which a steady decrease of potential temperature is found, which
-        # is unrealistic.  Here I filter them away
-        ifirst = 0
-        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
-            ifirst = ifirst+1
-        print ('ifirst:',ifirst)
-        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        # # there are issues with the lower measurements in the HUMPPA campaign,
+        # # for which a steady decrease of potential temperature is found, which
+        # # is unrealistic.  Here I filter them away
+        # ifirst = 0
+        # while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
+        #     ifirst = ifirst+1
+        # print ('ifirst:',ifirst)
+        # air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
+        air_balloon = air_balloon.iloc[:].reset_index().drop(['index'],axis=1)
         
         is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
         valid_indices = air_balloon.index[is_valid].values
@@ -299,29 +300,29 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
         # make theta increase strong enough to avoid numerical
         # instability
         air_ap_tail_orig = pd.DataFrame(air_ap_tail)
-        air_ap_tail = pd.DataFrame()
-        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
-        theta_low = air_ap_head['theta'].iloc[2]
-        z_low = air_ap_head['z'].iloc[2]
-        ibottom = 0
-        for itop in range(0,len(air_ap_tail_orig)):
-            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
-            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
-            if (
-                #(z_mean > z_low) and \
-                (z_mean > (z_low+10.)) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                #(theta_mean > (theta_low+0.2) ) and \
-                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
-
-                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
-                ibottom = itop+1
-                theta_low = air_ap_tail.theta.iloc[-1]
-                z_low =     air_ap_tail.z.iloc[-1]
-            # elif  (itop > len(air_ap_tail_orig)-10):
-            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
-        
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if (
+        #         #(z_mean > z_low) and \
+        #         (z_mean > (z_low+10.)) and \
+        #         #(theta_mean > (theta_low+0.2) ) and \
+        #         #(theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.00001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # 
         air_ap = \
             pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
         
diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py
index dd71324..3c71599 100644
--- a/class4gl/setup/setup_igra.py
+++ b/class4gl/setup/setup_igra.py
@@ -260,8 +260,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
                         logic_afternoon['afternoon'] = \
                             (c4gli_afternoon.pars.ldatetime.hour >= 12.)
-                        # the sounding should have taken place before 2 hours
-                        # before sunset. This is to minimize the change that a
+                        # the sounding should have taken place before 1 hours
+                        # before sunset. This is to minimize the chance that a
                         # stable boundary layer (yielding very low mixed layer
                         # heights) is formed which can not be represented by
                         # class.
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 0303d5a..3008f65 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -66,6 +66,8 @@
   'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+  'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
     'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
@@ -275,6 +277,9 @@
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
+                    if exp[-3:] == 'SM2':
+                        c4gli_morning.update(source=expname, pars={'wg': c4gli_morning.pars.wg - (c4gli_morning.pars.wg - c4gli_morning.pars.wwilt)/2.)
+                        c4gli_morning.update(source=expname, pars={'w2': c4gli_morning.pars.w2 - (c4gli_morning.pars.w2 - c4gli_morning.pars.wwilt)/2.)
 
                     c4gl = class4gl(c4gli_morning)
 

From bce24936acaa0a72b255f50697026f5afbe6a0f7 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 6 Nov 2018 15:34:29 +0100
Subject: [PATCH 101/129] full update

---
 class4gl/interface/test.png | Bin 17020 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 class4gl/interface/test.png

diff --git a/class4gl/interface/test.png b/class4gl/interface/test.png
deleted file mode 100644
index cdf991517e5da6823c279fbf801c610d55b5e0e2..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 17020
zcmch<1yq%57cKmPg^2}NfCv~gD0)!JK-{3Bbb}&F2&jb8cxNd)#Ng_ZlrH%kZ(E_tgeU_p*-;@J)<2OdG#Y
z3(}q(8x)O+IcHpVb1E-9-}xo)r`7A{XgRSY?C<%OQyFKJr4q}xV~5VNWy|~qq-V{V
z#j$GD1)9lcUgNbCWg@})i;C8cr584rUtPTRW|u+sg+xO!zub`@3+?Nk?Y*|`+|L6_
z%F4pqwrLhW*-@=3p7V1sJH*x8sjD`;VY2&tlyOdGrbAoEaD%6bn4hX0W6~+HT0iml
z6&z%{O>G$Nto@s7kFS8BfPlYWObO%sMqy#qH`my8QmiU}I%P>8f6ErI)3G4B*yNkN
zhGzA*99QuXL)uv5CH9Iqqg_|7Txly1j<5Lf?pC22<4Jt{WZ_iy5zhxHY6a(}Jc{3Y
zudU2=ca4|d@^PrEt}^SqosyE$7AYxpIoBbP!H1XjURz>W5pne9ty>xv77@#rFLxNO
zKOY?&tVrMHHgZPVqBQuehs?C$%E^i19Ss&A?(&_>s*f_tO676u-M#wivw;DJ!LB-;
zqeqY4ym6zqtsBdcN!HocLk$ac1u8>%sT=sxtI^4(59^U+SBl2K9X0CRVnkHaM!3=MBn
z#v7AfzPweR-gz!V^X17y^=|#eH*eprXqcWlZ*;!i(KtW5@BLaUM45iF1s|=Ts7Uk0
z;eem#GPc=%{R3fP(39PN2VoWd^y#`okGFm77)`5pda`DRZH(T_lOH7(Ire-r>g(&%
z&vnZf8|-Lx?EKwa=-BM5BP1z#6tOhc^2BbS);iwuTefqQR`f%GNQ6X7Us{7l7S?&2
z>(FVNy3|9{J=2p4m600CJ_=JKbH+nOO6qgnvs?z-!)t1EnG>CV7&CuBK?Lw$kbzCe7qs}icA4{5D4G9g+xikK|P`on5S_hB2
zEqA(#C95|3MrbC~*cum;zLr<(eJ(U?7W=zK_^SWzFTUC@I;3`9v+2y82n#c-tXSDL9
z*fki}XF5ionV!%O@*B(!h!>emG%w_I|NU{(8rcg8?NzbSJ-OqD7jlVd4tLk5SXYZ_
zMXHAvN$WSqWp$Thk;uwRTUQ-anCQK<-S%rleD0WvO>J`DXpT-;2tolD@;sl%^&rxt
zI^t;PK+)C`i(jSBT)to7a_DW`?Y(x#;ql=fMnsSt!$I;>^ShBwA!bBGe`8O-zNn^_
zE~vVQCMb_kD{k@_uWMN2*j^#|DNxSZcGtO|*Ox6_YAQR?`=%xJXz2dciHV8tk+eO7
zJ1@;+-~QD$?QDC+7T~AZ``fFp3N)oLCT$WQKR+B`b@}SmHF|n_Evcc(K5KUG-i`OS
z7TKuD_Fd-tSd(TKRNa58mPh7XSoM&{Z;`F8i`O`K={-*
z#)LDTcT4KaQzdI0JF29CM3SdIt+OBRYs&ZT?LA{wlW?ph_4k09-uOiICj(kRIj>;A
zi$hPgucqHNFMQ~m^}hT;O2gte5?B;Tr_LJny5EC2tsNPYy>HkT)80=GH`v)4q*(d#
zeY7YGxl-*UEbprFyRc;ToAf_glVXC5nq?T~u`grdyE?{MxOn}%#8;Ha4L_}lGwK+;
z&>C>QX!6qB#pPw8%7d@w@%qH^faU*q*Sa`^C_
zk`hrQR;FXe13Ene_dHGpfIRmEm|S73WZ7;T07cvl)-XlUwTj9id;mk-w7z7Cl@fP_bQ?~l@styPLo
z+=jc+(AR&wgje41bWnUxPQTXAuPWtROJqaaE2FJzlPz0PLkfdjhr26(e!O2ek){77
zNIR(T*5DVdhdT?$YPEBD&-9&{KO$9zdSwzYyF6M)Jfq$=2k6)S<>iYo`)4r-gRx>e
z4}RKCr%wN*q@;m~{*sQ-)*$i307SLRbS%5wSTa&~@O~(Exe(%(pIX0Ww{x^|{FbettUL{pWMd^6#__
zw0*YvntnFX9k^QNIn~eGpN=C%qD;KIgZqg@$d-TpTnUWl-sN;qMnsCHjeNCJj|j3oi+OO%sg#kRdu2L`OtzmXb=+eZs&0_GYKGd!4O%!xduX{kS?_
zMvFK;988IIQ)v9bNA28)GKqv77^<_ge)Z=Z6pe~}UQbdH9_3$<<)M`$*tVUnNmyQI
z`6Vg5x}k(^zPeOUQc~Sg{A956Wy!gxIr
zb=Mo5Hlfvt%`q8&Q{AtVJxPgPfur0Z*{%VDR~p;PX>D9W=M=R+fVyfAFK{pW-RQR)?j94
zCVjGKmG|nc`;~oow2-g^HD*4Rc-UOwc7;)!e@&iya0YQAXv=sIB!?vV?dIg$I`J_4
zQ&R5GyRodf)@Rv+F9`?=Mgk68n!I-mykBby;)FD
zDOxAK>Qs=scBk~M{nBySzg5gno$BzM9uLas`EZY4P;8UtQSDqtiB4d@cGVQyqD3Ox
zfAphL#a1jUyD4PN;Ns-u%revi&3gCl-4+Ff>5xGiJwO5Nw@Vj?u=X$H)B^{x4^Mm4
zJ%)7M#R~hsXazEK%p(ETMqhg7Jy^`|<#*%!H)mxN&q>F`u4-e~Sm#g=aEr0wp7?4L
zPos>C3>Q@UTigmLC^^$pG=*bxl+@#PcRe
zSMNDJ*_;2L>Fhx1NG4+)D-n78HCCH?XTklHhR)FwVLzi&ws4onH!}U
z8RF$d6+halObF!E)G))krRdRPDf63gxnou|zoD0{$DbA^c_5m5wDdYeI;?eC##ziA
z=AT^Cv|%j8+*Dw->O+yfogTRPs{(JD&#i82YZFX;^yrc0ZSGxF*L{3;{CVk;x4-|Y
zP?XH~{Ur)^JQ~Tj^MDR2v7!nJ4H4*LzB^@k18Ds&6!H7W9ydJuj$^-~qF0979!nd)
ze*eDrhYtrrReXis0}$-o5L1Gw8J^P~
zW}tifaSz_5rAI>6wx+ABV?EnX}g
zr4w&EfmoJAF_T1-$iI2>2i;7E$m)Lg$<8e_TE0m1%q9hvJp`sH0~OY_BLin?N=qKBr@9S?lq
z_lW?%^2ljCb^3J7Rd(Ju^te_an)>Nyp8f9cPjMd~T3-DI=R3kYpD~aBY}P_i!}KFy
z=27}-wz1)=e%ekl4BV`-OIJ$ugM0U$;mHVY+=!UhbGy#cYyHo8iPG?$N0I#aKPfo;
zFK870!3AP@pYUKnsbY|P1kGs})t6H4M-T_J)^Ca^RKA9bDQfdA^oscKlj%FVma&x1Q}S~U4GAHF
zO3m&2b`9rRuGr!3=kfplBDbv2{4&STb|WKk@2iVq_4mK-bBwymybJ1`1giy~PkyO_
z)4K8og6+3a2l#e7h*DIq
zTG+~k+zO6jRMmCU0d|TS5oZm);!z|>E_+&;O1`_A%9tjvOXBraED(yE6shE;L_Vrv
zVJY2{KOc9TH
zItcQH4I3UjeHxRUE!*$^@%|?3!S+i1jC0}JPuB#aV~qBko|3OLo11rlh+4fSsf)cf
zsRdSWpa1%d%lN`>)b1T=Yp*_g5#*r{*w<*tb+1e^SH*b_USsFgwz7&6*s^Pvfq3pn
zNT5smmcd_Tdr_|{(NYe4+!SO1RN;ubu}qPoOy}XqV7Zl$Ok%>q1me5Khq^TO@4s5@
zlvWpehkKV54o?q@HArhOdbHIz-_WsA3;7f1e9*Z$U`s{hpL=MxnJdkS2dw$FBc2h5
z6Im8M6n!^je&WOdI%)7zm6esBj7v#Ltz5o51XRq|l+kgH}&-XeX}W<@qyJe
zbuZPZHCBbTb0Fv)%UYHPz2?xO*&&bFHdy{ARcLnCm
znfsynoKblu;`F1qU(#O&
z5v%@eK6h=R*)?#v2vne06UH~U!)VgV%gZ15`){UM)uqOwI_2E?lH;1TWZfU8JQ{SM
z473erxtkLYrhhJB
z_m|ah6bNtHq->jJfJGUTS5TCj%G|B>=FepNljqF+pd725onkb!HhgLpNX?p8YQhoz
zi`oy>)hmdf(tj%<&Z2XU508WkdG?{?zTyEdBp72ECHmtSrvybM%x4W&l5H_uvwS_4
z*lxDst%N1`Tf#!}GBb6y@1~O3?knPQjFwWO$0w4PuP5m`vWtAeEeK*Z^B6a~Sr!E=
zGfx|*d)bv-0Q2)69EO6`$iOpvIQGhV)(>ptMCb9#1=*NckcB%hQWp0@^zy`*S747f
zCbE~93UUE4&aja06tg->E(TJJ$l#14Vy4IwF1-HFbdwQ>*UVulJDvWKV&%rK5T{VHzXv4ocnuOg1!$E9>4Oa{^!|TjyrO!Gxaiq=rabpuv@zD~s5t2%zOq#lrQz~#a
z#i31oF^{Y+l9tHc+A+G%K-X31xl?1-gr1|pREJnvFz^^61(<%pr%zQ4@;f8Ad2ghZL+y5KDOw$G7MIyBgc4OE`&UFW+sPn-yYu<2NB
zbiNHLfHqngurWT`%{zC7cFOPES^L!wtDvpYuDXBlZW%`
z=H_iY^w(+bmoBOa4#!*n7Xg=~|5k$|w
ze0jFqNaH0k1p&zpv%kNG$o^Y2o=4#4&6_vNNBWw4{rz=YR_8I^?(`zafVlMGk&A+$
z){e%T^mqt(a%eKYpofijXQ`nVUL)s{3^lDHT$L+U@8$ku$Bvy`3!EOczJfFP`ujd
z@8Q3p&kLRdm+21jM;x9>kPRf
z2zZT&dj!+AWX`+G{G|c-rS34N=G{N&a-6Jj%`7G+XG}}60$yeint5twsreU%U!KWh
zazy=krGoz|P`yTZe0BCzy$-R05x0r___2lOZ;XG!wn&82;E2t4p$*h*ElnV1oLvcMN%XCCu?;UQP^JG${?Nnk8SxY6MHQ5rAqK!Z+p9kz`(L&FL+on!6VXiN|A
z>@3mv8|VLQZno^KNsQ8a`FsOEKanlXPo8|36=?70RWkW#87>Q~b~UvFoe{b%D`1j-
zmQyTde4)6~qd=|}N&D7Ml@p`=Ak!Pd!^5|1*s$m0$BzYtgaj&aqBLq}fjLIL#tPy_nVQgn%TUZJV-0qo^ZU0ZOOS@n
zM#I4&SzSYe=*UFTe8*){ag#%slzLh@I;@IhOAVWE*?QpRLiTd+o%y}Qv`Knj?0Ie3YN!((SW097QQ8=0CjzJg`=i@ff3ZUpKm`m#dlId5i<9nQ93+9-*-`WBmxa9vnz%iC_Gn^6s_ruy
z4*15b>4`$?&z}eA3*;1d-^h94I1Qt2?2Tw`amQQwTPMedJKA0Z5mCO?`m?!|Q>U(b
z-#ads$-!#zG3(+$IRo3YI=8p^`KrH-mt30w*O~|@VM(xS$kk6h{YYR7%mA2yRA9EQ
zLs$=Hrvn*zCR;Hmo$N4~E+6k<#6vZFTxWLtxIkph?Qc+x?M+ZF7c5xtz}Hu+U6QfQ
zv0W33`oss_A5HyP>jZUx35~_ARJu*wcE_$-Eqbz5r4U>U)N-o;l
z^xpe(ogE`~8j2$dac^zV-Mhhm^=Mxo-oGCT)&>5eLMO8oBKw;;^?Zw+HE)T3{)9Sa
zCE$SBJoNL_Pon!Y_PFKF>#c~?h|*7q5@^dA{-%Kwax7gMj9P7pzshsz(n&nNyU8(@
zuQ2rt(#+%GPOucr3JROgS;8vdGEk;mnSRy`c8On@4{913K1D7RnJNF*iXFE)kM8i0
z#JefK{JcDrbLjC}Fob-DjvNh*03$0NIjcK&^5
zI5Ae=pg>=Up_ybkObp$ANhXG1w&4eEfu`YKYxDca_YF6-%z$gffJzj%kRVKDbfW^F$mwuj#<%c-l}%t!9|THEy9;4!2n|MBC;N>0w_
zP5D07V2ESHaq#@m+G5PxcGNrEuXG&uwKth_UTk!xGzF;LS0u9`Oe-2{)6a$yZwSO>
zkWu~2mdDl*Zcckh*EL7|+E~71>r`Y>w*95Z@ska2)g{ofNp8G2^2dAu^bH8D3fyFD
zRQ|(}ji4c>o8&!LunCl?U5YrNfM4g$R$RE!n>shmiw0#DYw(;;F0%BS&q+Hof>vkF
zP1~6+iwIS$r*180wuCiaf-JqEKJsLG2gQL5d}ws|ia2b(rkCApnf#`vA(N<(k0M$Lg68zTUbtY%lI>~d
zyT9r|;U-<
zSsnx
z6yQ!CwHfv=V3v>(PPf=rq*s`CmvGPNe&^Nx(2Rxn_;!<-qpy(GK(n7oJ6MG#l4bxP
zrF$gA{Vww!Mr2vj(brAFNuz}{UJ2;)@}h{C1i`=R^86J=UtS+poO!(cY^KMKJp&05
zq5%gVLaG*_5e5fp3QXq<45Qf)tBGDG?
z&U=;Qo+mr(sx;K(!s~3a4#E1OUDZC!`q2}iH&!&0{3`8`b;7o`L3EbkDW&oi65=Pc`p9Q&Q
zoZ|oeW6VzPqUfwfgi8c*Nnj`i1wS$}(s+FF|2p*?)8tyX8xgoG!TiL>;$k8}41BdO
z@lE&qp9wfElK)-6agVyCnn!TwPoXTYo4@L_(p-kSjef|3?GJobBW9TPy0mx!xG&71
zd=S}wE;~;Qbc>vkAGe6_(q-TmAC34#9>dLpKqMX(j=v9jT84GR8Z`R_C_KpYqdHie
zV2O!97eu3{+SW&HJ@x4Vw|%9SPNZ>;ON*r7$C45qDAmFfpkIBjLhyjmcEzBuqaUbiD>&z@DG(e63es(*k(X`6Y$%F2kN
zdoll7qu`MZSw>iPmhMalvZpTc3UVKM6vvoy>2DpkchM0Kk6g@wQ!ri)YCq0IiCKQfR_!kJ}2pJe(P
zaiYX?%+MyTte}Q1=@yoJ%-)~m+4Y&IAsNDqX)>q!R}x-lGDMOMiA>#m)`nYG(uq7m
zQjbrJU?=k!GXew>=?KNlq`5fUkL9F{gK1ou0Tc3#nTkg;S%6R!a^N|oPY7gP7x}j@
z{Bw#K&E`Kc^amKTm&VVQ77Qm*+_&u5k(N}rTLx;(Kk)ER?#@J?*N5Z(?c9>{0Wr+U
z+&@V0FR=Nqa6)u)<`=}6H@|j0(MDZ2=V%#
zTG1PK_~$uU&W4}c@6vp^wQUcbbhOE=mNJLdp7+aEM@UF0Dya}JY+iwz$#YJhn4LGD
z%?mI5ZF0|_m)9ETdU)}gZATD*VJD^xpBU^uZR5WH33$&YGj-
zT@spTz+v5eQPh>}-xr5aG<>Ua@pu2qpN~9k#~RnnxqMmcxb)C&28+3U;#Azz1~D<7
znE*bjN6rD5>G*Og$X5WI3^QZviIF~+#*6dBMkX*sM$dzuZR^5S9-ClIzjyB*Jq&m#
z1Q!EcIBDM4;>}NP4W4gSpWy!4U^mZ);#|3s=;Mzux5^Mj{t$H?J#$8F)CacU7wgs9
z`sb?r;Kiz(v6R_PdB8bIyjy6xtA_wo?r25@>8|xJ2hsi>O5KXJIk^j-uB%CDxQg+V%G!+AK!a8e~FXpr=UF>uIhv(Bo9bXga))8h8bAEI@;y&)0B1k}w{gf3&^uQ~16%|3hYK>;nP;oi{Oz
zB)TJ9P}8C=M*%G3p+9Uo{PYM~d{)+{&O6qzlqw@O1*aPJIyr4;jxPmIy*I|q{_Uu^
zujocv8U}nfM6k>d-Msk#Ld#c|gQ67M(#iZR?-&zx|Lbaqxlrrm-O@qv;jL;)k%LqK
zu0tmjJ_3TwO>b{4OqGf41>RRKYvM^J3ft>9Z!p5wv4rMnYy<1jJ~6*SH*C%@APfez
zRgfmE6E{%H3$jm!D{uqQ!+7LeE_4kKF2f`mQj=e1JPb70Lor&py*sl*4BldY_q&fD
zTMZQ^9D76dI*|UoMc%J498U1A21bKy_a%1uBQWj28K!=Gno|!Re8Mo$J#B`if?xSg
z1@^|6b_dDWpS&R*XOQ-(It{Czep&1zn<+q!$8+K=H%j)~$so7iH=y}MLhRKbrl4)u
zaI%^jDiXDBzE?dx$F)Cr>%sv_IY*4-~4yaQhS3l(B;&r($Yyq{&AbzHwLp
zRLD?K8!ZJ8UK2jY7dmxdh}HM_m9u%Uvq3mzopqM{CB20S$SF{Lm)TH^Ba
zp0WcBQ0-(h&f6O+Fsc(L?Lmtq*`Yzm6#5_Rr%~K{bO-F%}E4jHvX;B7ganRP%&mY<(P
zj6NJWnT4j7+}(^!X=$
zi9~jTl#@L`V}1F&j``f@ku#|^l8_auMAN>8%;UKb9Aq5CL+nB)*<1uUO^Nu(tFB_z@l3IoXxCtO
z{kTru0Sr~4D!yQ5zX23?BBK+L1&SJORQ=i#?xINoc|r2-$+q=7$>6#?L-nOn7bjfD
zc4W%{%I6_^GPcfuH3Z1HYGX!<%N|<8?TRrr3@GB*D}foo79bz?+QjMDK-;OtrX2wq
zS(2Oe(L10QajaY!0dJ-8_uqbGt^jkczs+QloyQjjSpwMm@hsOPbvMJ(0~zzPeTiJ_
z;8&u$NJ*8uJ#DZ9j`E1Li8MkLFcfD@B;}5^N2~idgIAFKYDDd75v=Pc-YNPvv-gVt
zADJ~%`%uQW~
zgXo&#(tj*)g#1P_IyUWFyVf{)*7aHEJ0SdEBgCo5$r^}Uz`o>~&8C)o;
zKP4MH>r}B&p25365oKEU|>EpMHX8tKBzQ?mx~b5`ZN86$d-2T`Q-^wn?b0npiSU5#1yD
zI;wOYmtTCiX!x+7-n)0yY8qRn42b!=J7?H91)Gd88PZQMy=d&xCJ2H0t*n@Uz&^mK
z6fdeiLm%B!uY#W&cOG$ZKtlpEy}5>({Rz0TMt2+vtqx!2S1meTOt_n_j~
zn=;lPG#vx!K0cM5M>b+tPxj~2q1#&1?mc=GiDmMuWk5oe+7z;K{-Q+>1o|AyFsd~c
zbEdM92n0|prY1&PQpu)Ia*xC)&N6H6W`6#?5C@F?%o&qI2Bf9}lt|wThHLsMQ`E`~
zn+R~5+L1OZjpXGalpYS0jfAXqlw9Nf+pC`K>Q3JaQIYN-ww|++Y|*5ZBX9*j8t!aH
z>12i}{E^srqYv6c_QDo5pCHG0cZ-Y2SMW*nwqL@;_!kt)s#)x^rgIBIdLIP@*i^~G
z0fTA52kU?w*xTG;uP5mI2U8*x$ZcGQw&xVtxTYkofYd+zu#Q)FAPK#Ew)&0v~X%CtG1pjk^m<%Uv>3ZjBeuV32s;xRopzFW-O+il!HT4ntXoh
zaBJGp@+PSNQV2}@SGkgl-uS~(R0b^ImzDi`-P>EbJH2U16N;4aq>kn;bo=6d$IiIN
zAHd)etFWyeyJ~!E-^XX5-jf*(860tiyQ8ZZZOZcUR6lZrJp76Faw)$M?}gX^eal&4
z0|==SQxI&F_`lFrq;5y7pr@|KTE83e=H8_Z9)yZ7Ztpsy=5cCY_=L;e1cnJdp!;n*
zg^k!ty}Hntc0m5E#&a&Vfv<`vXkarsH+)&VJU39R2{{<8ZkCrfw#ylOn9&7XCA)PT
zilwXl>xlCJeOqlu2C0wmx)x8;E8>ULy>bH6UQcWUn
z|DAP_NCj-4b|FG`@g($RdYE-w#(rEdKs)VM+B~n{a8jp>++HW6j?E2{o*r%CexR?{
z&!v!Ur_9N*19orpw}ObbKYwCd4#EjbyAv@n5NACiIXiH=nlZ%`sS%iD?+}@s5dJ98
z_E}BAR{)oOSozMnz6H_LnaE`niy`OLMXN*LXCOBDT81ZsMEv5_QelQRg!0od9F~6f
z_N~7{npLHW=R^~4Wr)&3!cVcqchy^XrOT>_Q@dWMRUyb<)O;5
z&L#gY0A4e;sgBVj?KIKj;u%turtT4QE{f8QfECR)cl0EoHt;Wi8G!Sqh**we``=?3
z`{z@WJ)Rw~C6OKoBR8YdvlF0^w*(o9`5ixg+yPtjRIMmVnB2{Fvd(9l-qYxy?it6&
zXR)%LSar2J6*bu~p`Of!Fx#uVuO?46L~WUwzmdvNeU_N76tWD#Ljr$Kt)b48F%L&}
zF8}!_D;_bNO6YT>v*U-SCO~hXmW~qpLg)N%$XA`nH*xxK6P(
zYTlNVdhg~Q-JySWop7O4xMft52*>PK28TXxd;FZ1+rv2gA=ySa|LjVod%tCM=mHDg
zoT+Ar6slJ{wvQVJwnlEgX||X(kM}E2&_YQthd-n@df^8b^PgNuXa185dGBOdyb_Lv
USnE
Date: Tue, 6 Nov 2018 15:40:04 +0100
Subject: [PATCH 102/129] full update

---
 class4gl/interface/interface.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index 4531b5b..eb9311d 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -20,6 +20,7 @@
 parser.add_argument('--figure_filename_2',default=None)
 parser.add_argument('--experiments_labels',default=None)
 parser.add_argument('--tendencies_revised',default=False)
+parser.add_argument('--obs_filter',default='True')
 args = parser.parse_args()
 
 print('Adding python library:',args.c4gl_path_lib)
@@ -128,6 +129,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       globaldata,\
                       refetch_records=False,
                       tendencies_revised = args.tendencies_revised
+                      obs_filter = (args.obs_filter == 'True')
                     )
 
 if args.make_figures:

From 25ae1c6bd6ead82d85a4dad5268054030112d5a6 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 6 Nov 2018 15:44:35 +0100
Subject: [PATCH 103/129] full update

---
 class4gl/interface/interface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index eb9311d..bb234fc 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -128,7 +128,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       args.path_forcing,\
                       globaldata,\
                       refetch_records=False,
-                      tendencies_revised = args.tendencies_revised
+                      tendencies_revised = args.tendencies_revised,
                       obs_filter = (args.obs_filter == 'True')
                     )
 

From df6907926582f248c27f45c177f5b41857ec7bb5 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 13 Nov 2018 18:47:26 +0100
Subject: [PATCH 104/129] make batch pbs work with arbitrary modules

---
 class4gl/simulations/batch_simulations.pbs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index 92e496c..830aef7 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -7,7 +7,8 @@
 #PBS -m a
 #PBS -N c4gl_sim
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+#module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+$LOADDEPSCLASS4GL 
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
 

From de6af0f9a7910461e310e9c86410322354c58450 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Tue, 20 Nov 2018 14:41:42 +0100
Subject: [PATCH 105/129] update

---
 class4gl/simulations/batch_simulations.pbs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index 830aef7..a8da5e2 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -8,6 +8,10 @@
 #PBS -N c4gl_sim
 
 #module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+module purge
+source ~/.bashrc
+
+echo $LOADDEPSCLASS4GL 
 $LOADDEPSCLASS4GL 
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"

From 1839a2a145e5ec547be9c597631d5605dfd64cd4 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Wed, 21 Nov 2018 09:52:27 +0100
Subject: [PATCH 106/129] fix load modules batch_setup_era.py

---
 class4gl/class4gl.py                        |  23 +-
 class4gl/interface/interface_new_koeppen.py | 334 +++++++++++++-------
 class4gl/interface/world_histogram.py       | 237 +++++++++++++-
 class4gl/setup/batch_setup_era.pbs          |   6 +-
 class4gl/setup/batch_update_input.py        |   1 +
 class4gl/setup/update_input.py              |   8 +-
 class4gl/simulations/batch_simulations.pbs  |   3 +-
 class4gl/simulations/simulations.py         |   8 +-
 class4gl/simulations/simulations_iter.py    |   1 +
 class4gl/simulations/simulations_veg.py     |  44 ++-
 10 files changed, 494 insertions(+), 171 deletions(-)

diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index e0a307a..2bdf6b4 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -924,6 +924,7 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
             #    self.update(source='globaldata',air_ac=pd.DataFrame({key:list([np.nan])}))
 
         #print('keys 2', keys)
+        print(keys)
 
         for key in keys:
             # If we find it, then we obtain the variables
@@ -1058,15 +1059,21 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None):
                         if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]:
                             ilonmin = ilon        
                         
-                        if ilatmin < ilatmax:
-                            ilatrange = range(ilatmin,ilatmax+1)
+                        # for the koeppen climate classification we just take nearest
+                        print(key)
+                        if key == 'KGC':
+                            ilatrange = range(ilat,ilat+1)
+                            ilonrange = range(ilon,ilon+1)
                         else:
-                            ilatrange = range(ilatmax,ilatmin+1)
-                            
-                        if ilonmin < ilonmax:
-                            ilonrange = range(ilonmin,ilonmax+1)
-                        else:
-                            ilonrange = range(ilonmax,ilonmin+1)     
+                            if ilatmin < ilatmax:
+                                ilatrange = range(ilatmin,ilatmax+1)
+                            else:
+                                ilatrange = range(ilatmax,ilatmin+1)
+                                
+                            if ilonmin < ilonmax:
+                                ilonrange = range(ilonmin,ilonmax+1)
+                            else:
+                                ilonrange = range(ilonmax,ilonmin+1)     
                             
                         if 'time' in list(globaldata.datasets[key].page[key].dims):
                             DIST = np.abs((globaldata.datasets[key].page['time'].values - classdatetime))
diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py
index 27bc478..e5d7161 100644
--- a/class4gl/interface/interface_new_koeppen.py
+++ b/class4gl/interface/interface_new_koeppen.py
@@ -1,3 +1,4 @@
+'''
 import numpy as np
 import pandas as pd
 import sys
@@ -121,12 +122,40 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       obs_filter = (args.obs_filter == 'True')
                                             
                     )
+                    '''
 sns.reset_orig()
+
+
+
+lookup_symbols= {
+ 'A':'equatorial',
+ 'B':'arid',
+ 'C':'warm temperate',
+ 'D':'snow',
+ 'E':'polar',
+ 'W':'desert',
+ 'S':'steppe',
+ 'f':'fully humid',
+ 's':'summer dry',
+ 'w':'winter dry',
+ 'm':'monsoonal',
+ 'h':'hot arid',
+ 'k':'cold arid',
+ 'a':'hot summer',
+ 'b':'warm summer',
+ 'c':'cool summer',
+ 'd':'extremely continental',
+ 'F':'polar frost',
+ 'T':'polar tundra',
+ } 
+
 key = args.experiments.strip(' ').split(' ')[0]
 xrkoeppen = xr.open_dataset('/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc')
 koeppenlookuptable = pd.DataFrame()
 koeppenlookuptable['KGCID'] = pd.Series(xrkoeppen['KGCID'])
 
+from matplotlib.patches import Rectangle,FancyBboxPatch
+
 def abline(slope, intercept,axis):
     """Plot a line from slope and intercept"""
     #axis = plt.gca()
@@ -217,14 +246,12 @@ def brightness(rrggbb):
     print(np.sum(kgc_select))
     koeppenlookuptable.iloc[ikoeppen]['amount'] = np.sum(kgc_select)
 
-#koeppenlookuptable = koeppenlookuptable[koeppenlookuptable.amount >= 200]
 koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
-# koeppenlookuptable = koeppenlookuptable[:9]
 include_koeppen = list(koeppenlookuptable.KGCID)
 
 
 if args.make_figures:
-    fig = plt.figure(figsize=(11,7))   #width,height
+    fig = plt.figure(figsize=(11,8.0))   #width,height
     i = 1                                                                           
     axes = {}         
     axes_taylor = {}         
@@ -245,7 +272,7 @@ def brightness(rrggbb):
              "Normalized root mean square error")
         if i == 1:
             axes[varkey].annotate('Normalized standard deviation',\
-                        xy= (0.05,0.27),
+                        xy= (0.05,0.37),
                         color='black',
                         rotation=90.,
                         xycoords='figure fraction',
@@ -406,6 +433,14 @@ def brightness(rrggbb):
                     #                )
 
                     icolor += 1
+
+
+
+
+
+
+
+
     
             latex = {}
             latex['dthetadt'] =  r'$d \theta / dt $'
@@ -428,6 +463,60 @@ def brightness(rrggbb):
         abline(1,0,axis=axes[varkey])
         i +=1
 
+
+
+    axph = fig.add_axes([0,0,1,1])
+    axph.xaxis.set_visible(False)
+    axph.yaxis.set_visible(False)
+    axph.set_zorder(1000)
+    axph.patch.set_alpha(0.0)
+    
+    axph.add_patch(Rectangle((0.006,0.01), 0.99, 0.13, alpha=0.71,
+                           facecolor='white',edgecolor='grey'))
+
+
+    idx = 0
+    koeppenlookuptable_sel = koeppenlookuptable[koeppenlookuptable.amount >= 200].sort_values('KGCID',ascending=True)
+
+    for ikoepp,koepp in koeppenlookuptable_sel.iterrows():
+        xy_box = ((0.059+np.floor(idx/4)*0.335),0.127- 0.09*((np.mod(idx,4)/(len(koeppenlookuptable_sel)/4))))
+        xy_text = list(xy_box)
+        xy_text[0] += 0.009
+        xy_text[1] -= 0.00
+        xy_color = list(xy_box)
+        xy_color[0] += -0.045
+        xy_color[1] -= 0.019
+        axph.add_patch(Rectangle(xy_color,0.049,0.023,
+                                      edgecolor='black', 
+                               #       boxstyle='round', 
+                                      #xycoords='figure fraction',
+                                      fc=koepp.color,
+                                      alpha=1.0))
+    
+        axph.annotate(koepp.KGCID,
+                    xy= xy_box,
+                    color='white' if (brightness(koepp.color)<0.5) else 'black', 
+                    family='monospace',
+                    xycoords='figure fraction',
+                    weight='bold',
+                    fontsize=10.,
+                    horizontalalignment='right',
+                    verticalalignment='top' ,
+                    # bbox={'edgecolor':'black',
+                    #       'boxstyle':'round',
+                    #       'fc':koepp.color,
+                    #       'alpha':1.0}
+                   )
+        full_name = ""
+        if koepp.KGCID is not "Ocean":
+            for char in koepp.KGCID:
+                full_name += lookup_symbols[char] + ' - '
+            full_name = full_name[:-3]
+        axph.annotate(full_name,xy=xy_text,color='k'
+                    ,fontsize=8,xycoords='figure fraction',weight='bold',horizontalalignment='left',verticalalignment='top')
+
+        idx +=1
+
     
 
     i = 1
@@ -601,7 +690,7 @@ def brightness(rrggbb):
     # ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10)
     
     
-    fig.subplots_adjust(top=0.95,bottom=0.09,left=0.08,right=0.94,hspace=0.35,wspace=0.29)
+    fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.35,wspace=0.29)
     
     
     #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
@@ -779,9 +868,9 @@ def brightness(rrggbb):
         #sns.set(style="ticks", palette="deep")
 
         
-        exppairs = {'obs|ref'     :['soundings','GLOBAL_ADV'],
-                    'fcap|wilt'   :['GLOBAL_ADV_FC'  ,'GLOBAL_ADV_WILT'],
-                    'allveg|noveg':['GLOBAL_ADV_VMAX','GLOBAL_ADV_VMIN']
+        exppairs = {'obs|ref'     :['soundings',keylabels[0]],
+                    'fcap|wilt'   :[keylabels[1] ,keylabels[2]],
+                    'allveg|noveg':[keylabels[3],keylabels[4]]
                    }
         current_palette = sns.color_palette('deep')
         exppalettes = {'obs|ref'     :['white','grey'],
@@ -792,11 +881,11 @@ def brightness(rrggbb):
         data_all['expname'] = ""
         print('making alternative names for legends')
         expnames = {'soundings':'obs',\
-                    'GLOBAL_ADV':'ref',\
-                    'GLOBAL_ADV_WILT':'dry',\
-                    'GLOBAL_ADV_FC':'wet',\
-                    'GLOBAL_ADV_VMIN':'noveg',\
-                    'GLOBAL_ADV_VMAX':'fullveg',\
+                    keylabels[0]:'ref',\
+                    keylabels[1]:'dry',\
+                    keylabels[2]:'wet',\
+                    keylabels[3]:'noveg',\
+                    keylabels[4]:'fullveg',\
                    }
         for expname_orig,expname in expnames.items():
             data_all['expname'][data_all['source'] == expname_orig] = expname
@@ -807,6 +896,9 @@ def brightness(rrggbb):
 
         icolor = 0
         
+        koeppenlookuptable = koeppenlookuptable[koeppenlookuptable.amount >= 200]
+        koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False)
+        koeppenlookuptable = koeppenlookuptable[:9]
         fig, axes = plt.subplots(nrows=len(varkeys)*len(koeppenlookuptable), \
                                  ncols=len(exppairs), \
                                  figsize=(8, 13), #width, height
@@ -820,114 +912,114 @@ def brightness(rrggbb):
         irow = 0
         sns.set_style('whitegrid')
         for ikoeppen,koeppen in koeppenlookuptable.iterrows():
-            for exppairname,exppair in exppairs.items():
-                for varkey in varkeys:
-                    varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
-                    ax = axes[irow,icol]
-
-                 #   axes[i] = fig.add_subplot(len(varkeys)*len(koeppenlookuptable),len(exppairs),icolor)
-            #sns.violinplot(x='KGC',y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
-            
-            #ax.set_title(input_key_full)
-                    current_data = data_all[(data_all['exppair'] == exppairname) & (data_all['KGCname']  == koeppen.KGCID)]
-                    sns.violinplot(y='exppair', x=varkey_full,
-                                        hue="expname",split=True,
-                             palette=exppalettes[exppairname],
-                            # palette=["m", "g",'r','b'],
-                             linewidth=1.0,inner='quart',
-                                        data=current_data,sym='',legend=False,ax=ax)
-                    ax.legend("")
-                    ax.legend_.draw_frame(False)
-                    ax.set_yticks([])
-                    ax.set_ylabel("")
-
-                    # if varkey == 'q':
-                    #     ticks = ticker.FuncFormatter(lambda x, pos:
-                    #                                  '{0:g}'.format(x*1000.))
-                    #     ax.xaxis.set_major_formatter(ticks)
-
-                    if varkey == 'q':
-                        title_final = r'$dq/dt$'
-                        xlabel_final = r'[$\mathrm{g\, kg^{-1}\, h^{-1}}$]'
-                    elif varkey == 'theta':
-                        title_final = r'$d\theta/dt$'
-                        xlabel_final = r'[$\mathrm{K\, h^{-1}}$]'
-                    elif varkey == 'h':
-                        title_final = r'$dh/dt$'
-                        xlabel_final = r'[$\mathrm{m\, h^{-1}}$]'
-
-
-                    ax.set_xlabel("")
-                    #sns.despine(left=True, right=True, bottom=False, top=False)
-                    if irow == (len(varkeys)*len(koeppenlookuptable)-1):
-                        #ax.set_frame_on(False)
-
-                        ax.set_xlabel(xlabel_final)
-                        ax.tick_params(top='off', bottom='on', left='off',
-                                        right='off', labelleft='off',
-                                        labeltop='off',
-                                        labelbottom='on'
-                                      )
-                        ax.spines['top'].set_visible(False)
-                        ax.spines['bottom'].set_visible(True)
-                        ax.spines['left'].set_visible(True)
-                        ax.spines['right'].set_visible(True)
-                        #sns.despine(left=True, right=True, bottom=True, top=False)
-                    elif irow == 0:
-                        ax.set_title(title_final,fontsize=17.)
-                        ax.tick_params(top='off', bottom='off', left='off',
-                                        right='off', labelleft='off',
-                                        labelbottom='off')
-                        #ax.set_frame_on(False)
-                        # ax.spines['left'].set_visible(True)
-                        # ax.spines['right'].set_visible(True)
-                        ax.spines['top'].set_visible(True)
-                        ax.spines['bottom'].set_visible(False)
-                        ax.spines['left'].set_visible(True)
-                        ax.spines['right'].set_visible(True)
-                        #sns.despine(left=True, right=True, bottom=False, top=True)
-                        #ax.axis("off")
-                    elif np.mod(irow,len(exppairs)) == 0:
-                        ax.tick_params(top='off', bottom='off', left='off',
-                                        right='off', labelleft='off',
-                                        labelbottom='off')
-                        #ax.set_frame_on(False)
-                        # ax.spines['left'].set_visible(True)
-                        # ax.spines['right'].set_visible(True)
-                        ax.spines['top'].set_visible(True)
-                        ax.spines['bottom'].set_visible(False)
-                        ax.spines['left'].set_visible(True)
-                        ax.spines['right'].set_visible(True)
-                        #sns.despine(left=True, right=True, bottom=False, top=True)
-                        #ax.axis("off")
-                    elif np.mod(irow,len(exppairs)) == 2:
-                        ax.tick_params(top='off', bottom='on', left='off',
-                                        right='off', labelleft='off',
-                                        labelbottom='off')
-                        #ax.set_frame_on(False)
-                        # ax.spines['left'].set_visible(True)
-                        # ax.spines['right'].set_visible(True)
-                        ax.spines['top'].set_visible(False)
-                        ax.spines['bottom'].set_visible(True)
-                        ax.spines['left'].set_visible(True)
-                        ax.spines['right'].set_visible(True)
-                        #sns.despine(left=True, right=True, bottom=False, top=True)
-                        #ax.axis("off")
-                    else:
-                        ax.tick_params(top='off', bottom='off', left='off',
-                                        right='off', labelleft='off',
-                                        labelbottom='off')
-                        #ax.set_frame_on(False)
-                        #ax.spines['left'].set_visible(True)
-                        #ax.spines['right'].set_visible(True)
-                        ax.spines['top'].set_visible(False)
-                        ax.spines['bottom'].set_visible(False)
-                        ax.spines['left'].set_visible(True)
-                        ax.spines['right'].set_visible(True)
-                        #ax.axis("off")
-                    icol +=1
-                irow +=1
-                icol=0
+                for exppairname,exppair in exppairs.items():
+                    for varkey in varkeys:
+                        varkey_full = 'd'+varkey+'dt ['+units[varkey]+'/h]'
+                        ax = axes[irow,icol]
+
+                     #   axes[i] = fig.add_subplot(len(varkeys)*len(koeppenlookuptable),len(exppairs),icolor)
+                #sns.violinplot(x='KGC',y=varkey_full,data=data_all,hue='source',linewidth=2.,palette="muted",split=True,inner='quart') #,label=key+", R = "+str(round(PR[0],3)),data=data)       
+                
+                #ax.set_title(input_key_full)
+                        current_data = data_all[(data_all['exppair'] == exppairname) & (data_all['KGCname']  == koeppen.KGCID)]
+                        sns.violinplot(y='exppair', x=varkey_full,
+                                            hue="expname",split=True,
+                                 palette=exppalettes[exppairname],
+                                # palette=["m", "g",'r','b'],
+                                 linewidth=1.0,inner='quart',
+                                            data=current_data,sym='',legend=False,ax=ax)
+                        ax.legend("")
+                        ax.legend_.draw_frame(False)
+                        ax.set_yticks([])
+                        ax.set_ylabel("")
+
+                        # if varkey == 'q':
+                        #     ticks = ticker.FuncFormatter(lambda x, pos:
+                        #                                  '{0:g}'.format(x*1000.))
+                        #     ax.xaxis.set_major_formatter(ticks)
+
+                        if varkey == 'q':
+                            title_final = r'$dq/dt$'
+                            xlabel_final = r'[$\mathrm{g\, kg^{-1}\, h^{-1}}$]'
+                        elif varkey == 'theta':
+                            title_final = r'$d\theta/dt$'
+                            xlabel_final = r'[$\mathrm{K\, h^{-1}}$]'
+                        elif varkey == 'h':
+                            title_final = r'$dh/dt$'
+                            xlabel_final = r'[$\mathrm{m\, h^{-1}}$]'
+
+
+                        ax.set_xlabel("")
+                        #sns.despine(left=True, right=True, bottom=False, top=False)
+                        if irow == (len(varkeys)*len(koeppenlookuptable)-1):
+                            #ax.set_frame_on(False)
+
+                            ax.set_xlabel(xlabel_final)
+                            ax.tick_params(top='off', bottom='on', left='off',
+                                            right='off', labelleft='off',
+                                            labeltop='off',
+                                            labelbottom='on'
+                                          )
+                            ax.spines['top'].set_visible(False)
+                            ax.spines['bottom'].set_visible(True)
+                            ax.spines['left'].set_visible(True)
+                            ax.spines['right'].set_visible(True)
+                            #sns.despine(left=True, right=True, bottom=True, top=False)
+                        elif irow == 0:
+                            ax.set_title(title_final,fontsize=17.)
+                            ax.tick_params(top='off', bottom='off', left='off',
+                                            right='off', labelleft='off',
+                                            labelbottom='off')
+                            #ax.set_frame_on(False)
+                            # ax.spines['left'].set_visible(True)
+                            # ax.spines['right'].set_visible(True)
+                            ax.spines['top'].set_visible(True)
+                            ax.spines['bottom'].set_visible(False)
+                            ax.spines['left'].set_visible(True)
+                            ax.spines['right'].set_visible(True)
+                            #sns.despine(left=True, right=True, bottom=False, top=True)
+                            #ax.axis("off")
+                        elif np.mod(irow,len(exppairs)) == 0:
+                            ax.tick_params(top='off', bottom='off', left='off',
+                                            right='off', labelleft='off',
+                                            labelbottom='off')
+                            #ax.set_frame_on(False)
+                            # ax.spines['left'].set_visible(True)
+                            # ax.spines['right'].set_visible(True)
+                            ax.spines['top'].set_visible(True)
+                            ax.spines['bottom'].set_visible(False)
+                            ax.spines['left'].set_visible(True)
+                            ax.spines['right'].set_visible(True)
+                            #sns.despine(left=True, right=True, bottom=False, top=True)
+                            #ax.axis("off")
+                        elif np.mod(irow,len(exppairs)) == 2:
+                            ax.tick_params(top='off', bottom='on', left='off',
+                                            right='off', labelleft='off',
+                                            labelbottom='off')
+                            #ax.set_frame_on(False)
+                            # ax.spines['left'].set_visible(True)
+                            # ax.spines['right'].set_visible(True)
+                            ax.spines['top'].set_visible(False)
+                            ax.spines['bottom'].set_visible(True)
+                            ax.spines['left'].set_visible(True)
+                            ax.spines['right'].set_visible(True)
+                            #sns.despine(left=True, right=True, bottom=False, top=True)
+                            #ax.axis("off")
+                        else:
+                            ax.tick_params(top='off', bottom='off', left='off',
+                                            right='off', labelleft='off',
+                                            labelbottom='off')
+                            #ax.set_frame_on(False)
+                            #ax.spines['left'].set_visible(True)
+                            #ax.spines['right'].set_visible(True)
+                            ax.spines['top'].set_visible(False)
+                            ax.spines['bottom'].set_visible(False)
+                            ax.spines['left'].set_visible(True)
+                            ax.spines['right'].set_visible(True)
+                            #ax.axis("off")
+                        icol +=1
+                    irow +=1
+                    icol=0
 
         idx = 0
         for ikoeppen,koeppen in koeppenlookuptable.iterrows():
diff --git a/class4gl/interface/world_histogram.py b/class4gl/interface/world_histogram.py
index 2b53ab2..b1fcec6 100644
--- a/class4gl/interface/world_histogram.py
+++ b/class4gl/interface/world_histogram.py
@@ -139,26 +139,229 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
 
 
 
+
 import matplotlib
 import matplotlib.pyplot as plt
+from matplotlib import rc
 import numpy as np
+import seaborn as sns
+# activate latex text rendering
+sns.reset_orig()
+
+data = [220,14.2,150,400,420,100,150,30,60,20,500,]
+error = [10, 1, 20, 60, 10,10, 1, 20, 60, 10, 5, ]
+
+ini = c4gldata['GLOBAL_ADV'].frames['stats']['records_all_stations_ini'].set_index(['STNID','dates'])
+vals = c4gldata['GLOBAL_ADV'].frames['stats']['records_all_stations_mod']
+stats = c4gldata['GLOBAL_ADV'].frames['stats']['records_all_stations_mod_stats']
+
+ini_fc = c4gldata['GLOBAL_ADV_FC'].frames['stats']['records_all_stations_ini'].set_index(['STNID','dates'])
+vals_fc = c4gldata['GLOBAL_ADV_FC'].frames['stats']['records_all_stations_mod']
+stats_fc = c4gldata['GLOBAL_ADV_FC'].frames['stats']['records_all_stations_mod']
+
+vals_obs = c4gldata['GLOBAL_ADV'].frames['stats']['records_all_stations_obs_afternoon']
+stats_obs = c4gldata['GLOBAL_ADV'].frames['stats']['records_all_stations_obs_afternoon']
+
+ini_common_index = ini.index.intersection(ini_fc.index)
+
+vals.index = ini.index
+vals = vals.loc[ini_common_index]
+
+stats.index = ini.index
+stats = stats.loc[ini_common_index]
+
+
+vals_obs.index = ini.index
+vals_obs = vals_obs.loc[ini_common_index]
+
+stats_obs.index = ini.index
+stats_obs = stats_obs.loc[ini_common_index]
+
+
+ini = ini.loc[ini_common_index]
+
 
-data = [220,14.2,150,400,420]
-error = [10, 1, 20, 60, 10]
-x = [i + .5 for i in range(5)]
-
-fig, ax = plt.subplots()
-bar = ax.barh(x, data,0.1 +np.arange(len(data))*0.9/len(data), align="center", yerr=error)
-plot = ax.plot(x, data)
-ax.set_xticks(x)
-ax.set_xticklabels(('wt', 'N23PP', 'N23PP/PEKN', 'PEKN', 'N23PP/PEKN/L28F'))
-ax.set_title(r"Everything in the document can use m$\alpha$th language",
-             y=1.05)
-ax.set_ylabel(r"Rate (s$^{-1}$)", labelpad=10)
-ax.set_xlabel("Mutant",labelpad=10)
-ax.yaxis.set_ticks_position('left')
-ax.xaxis.set_ticks_position('bottom')
-plt.savefig('test.png')
-plt.show()
+vals_fc.index = ini_fc.index
+vals_fc = vals_fc.loc[ini_common_index]
+
+stats_fc.index = ini_fc.index
+stats_fc = stats_fc.loc[ini_common_index]
+
+ini_fc = ini_fc.loc[ini_common_index]
+
+dlat = 10
+blat = np.arange(-55.,60.,dlat)[[2,3,4,7,8,9,10,11]]
+lats = (blat[1:] + blat[:-1])/2.
+
+fig = plt.figure(figsize=(10,6)) 
+variables = ['h','theta',r'q']
+labels = [r'$h$',r'$\theta$',r'$q$']
+units = ['m','K','g/kg']
+xlims = [(-10,3000),(280,310.),(2.5,17.5)]
+#var = "theta"
+for ivar,var in enumerate(variables):
+    ax = fig.add_subplot(1,len(variables),ivar+1,)
+    data = []
+    data_025 = []
+    data_075 = []
+    
+    data_fc = []
+    data_fc_025 = []
+    data_fc_075 = []
+    
+    data_obs = []
+    data_obs_025 = []
+    data_obs_075 = []
+    
+    data_ini = []
+    data_ini_025 = []
+    data_ini_075 = []
+    
+    lnts = []
+    
+    for ilat,lat in enumerate(lats):
+        print(ilat,lat)
+    # 
+        query = 'latitude >= '+str(blat[ilat])+' and '+ 'latitude < '+str(blat[ilat+1])
+        print(query)
+        select = ini.query(query)
+        if len(select) >= 7:
+            lnts.append(len(select))
+            #print(stats.iloc[select.index])
+            print(stats.loc[select.index])
+            data.append(vals.loc[select.index][var].mean()) 
+            data_025.append(vals.loc[select.index][var].quantile(0.25)) 
+            data_075.append(vals.loc[select.index][var].quantile(0.75)) 
+            data_fc.append(vals_fc.loc[select.index][var].mean()) 
+            data_fc_025.append(vals_fc.loc[select.index][var].quantile(0.25)) 
+            data_fc_075.append(vals_fc.loc[select.index][var].quantile(0.75)) 
+    
+            data_obs.append(vals_obs.loc[select.index][var].mean()) 
+            data_obs_025.append(vals_obs.loc[select.index][var].quantile(0.25)) 
+            data_obs_075.append(vals_obs.loc[select.index][var].quantile(0.75)) 
+    
+            data_ini.append(ini.loc[select.index][var].mean()) 
+            data_ini_025.append(ini.loc[select.index][var].quantile(0.25)) 
+            data_ini_075.append(ini.loc[select.index][var].quantile(0.75)) 
+        else:
+            lnts.append(0)
+            data.append(np.nan)
+            data_025.append(np.nan)
+            data_075.append(np.nan)
+            data_fc.append(np.nan)
+            data_fc_025.append(np.nan)
+            data_fc_075.append(np.nan)
+    
+            data_obs.append(np.nan)
+            data_obs_025.append(np.nan)
+            data_obs_075.append(np.nan)
+    
+    
+            data_ini.append(np.nan)
+            data_ini_025.append(np.nan)
+            data_ini_075.append(np.nan)
+    
+    data = np.array(data)
+    data_025 = np.array(data_025)
+    data_075 = np.array(data_075)
+    
+    data_fc = np.array(data_fc)
+    data_fc_025 = np.array(data_fc_025)
+    data_fc_075 = np.array(data_fc_075)
+    
+    
+    data_obs = np.array(data_obs)
+    data_obs_025 = np.array(data_obs_025)
+    data_obs_075 = np.array(data_obs_075)
+    
+    data_ini = np.array(data_ini)
+    data_ini_025 = np.array(data_ini_025)
+    data_ini_075 = np.array(data_ini_075)
+
+    if var == 'q':
+        data = data*1000.
+        data_025 = data_025*1000.
+        data_075 = data_075*1000.
+
+        data_fc = data_fc*1000.
+        data_fc_025 = data_fc_025*1000.
+        data_fc_075 = data_fc_075*1000.
+    
+        data_obs = data_obs*1000.
+        data_obs_025 = data_obs_025*1000.
+        data_obs_075 = data_obs_075*1000.
+
+        data_ini = data_ini*1000.
+        data_ini_025 = data_ini_025*1000.
+        data_ini_075 = data_ini_075*1000.
+    
+
+    
+
+    data_left = np.zeros_like(data)
+    data_right = np.zeros_like(data)
+    select = (data >= data_ini)
+    data_left[select] = data[select]
+    data_right[select] = data_ini[select]
+    data_left[~select] = data_ini[~select]
+    data_right[~select] = data[~select]
+
+    data_diff_left = np.zeros_like(data)
+    data_diff_right = np.zeros_like(data)
+    select = (data_fc >= data)
+    data_diff_left[select] = data[select]
+    data_diff_right[select] = data_fc[select]
+    data_diff_left[~select] = data_fc[~select]
+    data_diff_right[~select] = data[~select]
+
+
+    
+    
+    #bar = ax.barh(lats, data,blat[1:] - blat[:-1] , align="center", xerr=error)
+    
+    
+    erb = ax.errorbar( data_ini, lats+2.*dlat/8.,xerr=[data_ini-data_ini_025,data_ini_075-data_ini], fmt='s', color='darkgrey',mfc='white', ms=3, mew=1)
+    erb = ax.errorbar( data_obs, lats+2.*dlat/8.,xerr=[data_obs-data_obs_025,data_obs_075-data_obs], fmt='s', color='darkgrey',mfc='black', ms=3, mew=1)
+    erb = ax.errorbar( data,    lats-1.*dlat/8,xerr=[data-data_025,data_075-data], fmt='s', color='black',mfc='black', ms=3, mew=1)
+    er2 = ax.errorbar( data_fc, lats-2*dlat/8.,xerr=[data_fc-data_fc_025,data_fc_075-data_fc], fmt='s', color='blue',mfc='blue', ms=3, mew=1)
+    
+    ba2 = ax.barh(lats, data_diff_right - data_diff_left  ,(blat[1:] -
+                                                            blat[:-1])*0.85 ,
+                  align="center", left=data_diff_left,color='red',
+                  edgecolor='lightgrey',linewidth=2.)
+    bar = ax.barh(lats, data_right - data_left ,(blat[1:] - blat[:-1])*0.85 ,
+                  align="center", left=data_left, color='none',edgecolor='black',linewidth=2.)
+    # ax.set_yticks(blat)
+    # labels = [ w.get_text() for w in ax.get_yticklabels()]
+    # ax.set_yticklabels(labels)
+    # ax.set_yticks(blat)
+    ax.set_yticks([["test",-30.]])
+    if ivar == 0:
+        plt.legend([r"morning observations",\
+                    r"afternoon observations",\
+                    r"afternoon control",\
+                    r"afternoon $\it{wet}$ "])
+    
+    #er2 = ax.errorbar( data_fc, lats,xerr=[data_fc-data_fc_025,data_fc_075-data_fc], fmt='s', mfc='blue', ms=3, mew=1)
+    
+    
+    
+    # plot = ax.plot(x, data)
+    ax.set_yticks(lats)
+    ax.set_xlim(xlims[ivar])
+    ax.set_ylim((-65.,65.))
+    #ax.set_yticklabels(('wt', 'N23PP', 'N23PP/PEKN', 'PEKN', 'N23PP/PEKN/L28F'))
+    #ax.set_title(r"Everything in the document can use m$\alpha$th language", y=1.05)
+    ax.set_title(labels[ivar],fontsize=17.)
+    ax.set_xlabel("["+units[ivar]+"]", labelpad=10,fontsize=15.)
+    if ivar == 0:
+        ax.set_ylabel("Latitude [°]",labelpad=10,fontsize=15.)
+    ax.yaxis.set_ticks_position('left')
+    ax.xaxis.set_ticks_position('bottom')
+    ax.tick_params(labelsize=15.)
+fig.tight_layout()
+fig.subplots_adjust(left=0.10,bottom=0.13,right=0.98,top=0.94,wspace=0.24,hspace=0.20)
+fig.savefig('test.png',dpi=200)
+fig.show()
 
 
diff --git a/class4gl/setup/batch_setup_era.pbs b/class4gl/setup/batch_setup_era.pbs
index 1ee9b9b..18a6018 100644
--- a/class4gl/setup/batch_setup_era.pbs
+++ b/class4gl/setup/batch_setup_era.pbs
@@ -7,7 +7,11 @@
 #PBS -m a
 #PBS -N c4gl_setup
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+module purge
+source ~/.bashrc
+
+echo loading modules: $LOADDEPSCLASS4GL 
+$LOADDEPSCLASS4GL 
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
 
diff --git a/class4gl/setup/batch_update_input.py b/class4gl/setup/batch_update_input.py
index d2b06b0..355f532 100644
--- a/class4gl/setup/batch_update_input.py
+++ b/class4gl/setup/batch_update_input.py
@@ -20,6 +20,7 @@
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling')
 parser.add_argument('--updates')
+parser.add_argument('--global_vars')
 parser.add_argument('--subset_input',default='morning') 
                                         # this tells which yaml subset
                                         # to initialize with.
diff --git a/class4gl/setup/update_input.py b/class4gl/setup/update_input.py
index 07000f2..635e1c0 100644
--- a/class4gl/setup/update_input.py
+++ b/class4gl/setup/update_input.py
@@ -19,6 +19,7 @@
 parser.add_argument('--first_station_row')
 parser.add_argument('--last_station_row')
 parser.add_argument('--updates')
+parser.add_argument('--global_vars')
 parser.add_argument('--station_id') # run a specific station id
 parser.add_argument('--error_handling',default='dump_on_success')
 parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv'])
@@ -51,7 +52,7 @@
 
 # iniitialize global data
 globaldata = data_global()
-if 'era_profiles' in args.updates.strip().split(","):
+if (args.updates is not None) and ('era_profiles' in args.updates.strip().split(",")):
     globaldata.sources = {**globaldata.sources,**{
             "ERAINT:t"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc",
             "ERAINT:q"     : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc",
@@ -230,6 +231,9 @@
                                                 record_input.index_start, 
                                                 record_input.index_end,
                                                 mode='ini')
+                if args.global_vars is not None:
+                    c4gli_output.get_global_input(globaldata,only_keys=args.global_vars.strip().split(','))
+
                 if args.diag_tropo is not None:
                     print('add tropospheric parameters on advection and subsidence (for diagnosis)')
                     seltropo = (c4gli_output.air_ac.p > c4gli_output.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
@@ -242,7 +246,7 @@
                             print("warning: tropospheric variable "+var+" not recognized")
 
 
-                if 'era_profiles' in args.updates.strip().split(","):
+                if (args.updates is not None) and ('era_profiles' in args.updates.strip().split(",")):
                     c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp'])
 
                     c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp})
diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs
index a8da5e2..4f12ccc 100644
--- a/class4gl/simulations/batch_simulations.pbs
+++ b/class4gl/simulations/batch_simulations.pbs
@@ -7,11 +7,10 @@
 #PBS -m a
 #PBS -N c4gl_sim
 
-#module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
 module purge
 source ~/.bashrc
 
-echo $LOADDEPSCLASS4GL 
+echo loading modules: $LOADDEPSCLASS4GL 
 $LOADDEPSCLASS4GL 
 
 EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID"
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 3008f65..20fc34f 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -69,7 +69,7 @@
   'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True},
+    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
@@ -277,9 +277,9 @@
                     c4gli_morning.update(source='pairs',pars={'runtime' : \
                                         runtime})
                     c4gli_morning.update(source=expname, pars=exp)
-                    if exp[-3:] == 'SM2':
-                        c4gli_morning.update(source=expname, pars={'wg': c4gli_morning.pars.wg - (c4gli_morning.pars.wg - c4gli_morning.pars.wwilt)/2.)
-                        c4gli_morning.update(source=expname, pars={'w2': c4gli_morning.pars.w2 - (c4gli_morning.pars.w2 - c4gli_morning.pars.wwilt)/2.)
+                    if expname[-3:] == 'SM2':
+                        c4gli_morning.update(source=expname, pars={'wg': c4gli_morning.pars.wg - (c4gli_morning.pars.wg - c4gli_morning.pars.wwilt)/2.})
+                        c4gli_morning.update(source=expname, pars={'w2': c4gli_morning.pars.w2 - (c4gli_morning.pars.w2 - c4gli_morning.pars.wwilt)/2.})
 
                     c4gl = class4gl(c4gli_morning)
 
diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py
index 08d28b8..5c1bd2b 100644
--- a/class4gl/simulations/simulations_iter.py
+++ b/class4gl/simulations/simulations_iter.py
@@ -66,6 +66,7 @@
   'AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+    'GLOBAL_ADV_SHR_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True},
   'GLOBAL_W_ITER':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
   'IOPS_NOAC_ITER':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
diff --git a/class4gl/simulations/simulations_veg.py b/class4gl/simulations/simulations_veg.py
index b2f7ad8..6dbcdac 100644
--- a/class4gl/simulations/simulations_veg.py
+++ b/class4gl/simulations/simulations_veg.py
@@ -66,9 +66,9 @@
   'GLOBAL_ADV_VMIN':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_VMAX':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_V0':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ADV_L025':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_L001':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_ADV_L100':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ADV_L600':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
+  'GLOBAL_ADV_L099':    {'sw_ac' : ['adv'],'sw_ap': True,'sw_lit': False},
   'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
   'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
 }
@@ -290,26 +290,38 @@
                                              'z0h': 0.001,
                                              }\
                                             )
-                    if expname[-3:] == '_VMIN':
+                    if expname[-5:] == '_VMIN':
                         c4gli_morning.update(source=expname, pars=\
-                                             {'alpha': 0.2868081648656875,
-                                             'cveg': 0.08275966502449594,
-                                             'z0m': 0.05760000169277192,
-                                             'z0h': 0.005760000169277192,
-                                             'LAI': 2.0,
+                                             {'alpha': 0.2868081648656875,\
+                                             'cveg': 0.08275966502449594,\
+                                             'z0m': 0.05760000169277192,\
+                                             'z0h': 0.005760000169277192,\
+                                             'LAI': 2.0,\
                                              }\
                                             )
-                    if expname[-3:] == '_L025':
-                        c4gli_morning.update(source=expname, pars=\
-                                             {'lai':.25}\
-                                            )
-                    if expname[-3:] == '_L100':
+                    if expname[-5:] == '_L001':
+                        #c4gldata['GLOBAL_ADV_SHR'].frames['stats']['records_all_stations_ini'].LAIpixel.quantile(0.01)
+
                         c4gli_morning.update(source=expname, pars=\
-                                             {'lai':1.0}\
+                                             {'LAI':0.111/0.325,\
+                                              'cveg':0.325,\
+                                              'z0m':0.17425,\
+                                              'z0h':0.017425,\
+                                              'alpha':0.270,\
+                                             }\
                                             )
-                    if expname[-3:] == '_L600':
+                    if expname[-5:] == '_L099':
+                        # select = c4gldata['GLOBAL_ADV_SHR'].frames['stats']['records_all_stations_ini'].LAIpixel >= 4.38
+                        # np.mean(c4gldata['GLOBAL_ADV_SHR'].frames['stats']['records_all_stations_ini'][select].LAIpixel)
+                        # np.mean(c4gldata['GLOBAL_ADV_SHR'].frames['stats']['records_all_stations_ini'][select].LAIpixel)
+                        # np.mean(c4gldata['GLOBAL_ADV_SHR'].frames['stats']['records_all_stations_ini'][select].LAIpixel)
                         c4gli_morning.update(source=expname, pars=\
-                                             {'lai':6.0}\
+                                             {'LAI':4.605/0.807,\
+                                              'cveg':0.807,\
+                                              'z0m':1.78,\
+                                              'z0h':0.178,\
+                                              'alpha':0.195,\
+                                             }\
                                             )
 
                     c4gl = class4gl(c4gli_morning)

From e0b41c3557240adff0b246876d6d7c804f7de52c Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 22 Nov 2018 09:32:58 +0100
Subject: [PATCH 107/129] make batch pbs work with arbitrary modules

---
 class4gl/model.py |  12 +++++++++++-
 test.png          | Bin 0 -> 92416 bytes
 2 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 test.png

diff --git a/class4gl/model.py b/class4gl/model.py
index 28fe5d0..d3b1fdb 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -1775,6 +1775,11 @@ def store(self):
         self.out.wthetav[t]    = self.wthetav
         self.out.wthetae[t]    = self.wthetae
         self.out.wthetave[t]   = self.wthetave
+
+        self.out.advtheta[t]   = self.advtheta
+        self.out.advu[t]       = self.advu
+        self.out.advv[t]       = self.advv
+        self.out.advq[t]       = self.advq
         
         self.out.q[t]          = self.q
         self.out.dq[t]         = self.dq
@@ -1875,7 +1880,7 @@ def exitmodel(self):
         del(self.theta)
         del(self.dtheta)
         del(self.gammatheta)
-        del(self.advtheta)
+        #del(self.advtheta)
         del(self.beta)
         del(self.wtheta)
     
@@ -2019,6 +2024,11 @@ def __init__(self, tsteps):
         self.wthetav    = np.zeros(tsteps)    # surface kinematic virtual heat flux [K m s-1]
         self.wthetae    = np.zeros(tsteps)    # entrainment kinematic heat flux [K m s-1]
         self.wthetave   = np.zeros(tsteps)    # entrainment kinematic virtual heat flux [K m s-1]
+
+        self.advtheta   = np.zeros(tsteps)
+        self.advu       = np.zeros(tsteps)
+        self.advv       = np.zeros(tsteps)
+        self.advq       = np.zeros(tsteps)
         
         self.q          = np.zeros(tsteps)    # mixed-layer specific humidity [kg kg-1]
         self.dq         = np.zeros(tsteps)    # initial specific humidity jump at h [kg kg-1]
diff --git a/test.png b/test.png
new file mode 100644
index 0000000000000000000000000000000000000000..cb8d3fbb6dbfa33ca9cb45b795db2ce0becd8795
GIT binary patch
literal 92416
zcmeFZcT|+u+b;Yd5fY`u7zC*rQBkA7|Eb3B%0u>}T(L-{rcld;ejm
zug$rQe;bBjoI0m|Ifr5FZ5Z~=i*MQBou{Jjs^PzHa6ju9e+#d`Z|#4F-@m(Z$_$5L
z!mrUk7WX!9b$C%;{wBacg7zP6qYn47KF6;}^0SEpb1_&9jtoIKq3$VwfNl9kwV
z*~{ySvb6L+ACU6!bd;9gPJfGGdoZ0}jvHS~oE!*vVCfJiw&7voZX$Yb`+)=ByxqFx
zz)exjQu4sTn`~~!UoK^Q`c}EU>KF2KzR=X!ODtN7;T`3bXP$B>w;w!wW?xDEiI>8hdmd*zcJt+=H@
z`CNFhz|+7hWN%(c|JJ{L#x(c0X|n(IbI9dX*1vwz{DIT!%im8uxz6?FN1dZ>U#_s{
z4>sj5Z;HPA`gKwMu`h4l`-N5L%a2k2M=u((cLKjbV#dYC$Cu!DNm*Q&j>539$9f40
zl$4F}l)2>PO0kY~J>8(7poxhIS+~BTL|m&9eKr!Gn55zxQ*j1HmHqOwCohIkK7X#|
z=B=9_Z4o!xk6{a)E`+KW@x0QXhE}_Y1wm^|T;!CXrC!yw$wGQBDR}cw8uSK(5o8lf
zjXYx*ht8D1*gTaaz4`q=8y|`n+PwPB!n@0)!Y!G&{N65-61g!JIoGDe&8g`P-!c}5
zX}y(LS6BDktT0`MdRi0d%ZsP?kTSMNc~8&9I4qhOsM-~X|6eO
zG-xKky2OpFV3@WA!wel@A^1Fn{YLI{kzRJf{AY4C#Y&S>7`-VDwC-FtoL&9hmY$l8
z#pl*F>P<0{P11OJ6WsEE?@*BMrIvV;W5EOE-t-C68s#Nnn4ub+Sw--ctZ5}19l6fjhoQ($^B&mNpkQ=NOe
zFI34CtlPNcT-F2Tsx{Nac6zdq)VkX-@m<<*{avAi(CxgtM$hM2^g1r0?_S&duIlWc
zJN>4Ld$!^7&@8(u4qmf^2RC|DG1n+wQCzn-Oz<3^23SZ{-gekmuN<$HZO1~ccWx|A
zJw17+5v`wX4Ls3^Vhvp{IdHDQm0TjZHbt~X&l_!^t~6YCM;CS^i8wh&ePZ`+jo?7r=dk?@gH?Q|n_0P4?`0(WAFf#&n28cKNR{)YOLkfu;3%b$%)m5F+X};n{QF1CJe1#nyy+Ngn8hqkK~s>EW#Mm#&wyP
zI@B&sy$I}L;XA;AP*zKiRU
zJ3{uR1+fP8Op#OkC$bWK`kefR)&^rMW~u{-E%TyI?(VI=)$>V$zWrDI=#<#CDl07H
z@pQe$##LKYpUL-$exq>%RvE>`#WKQ2e!V%WguTSXAD%Az6fW+$@-d96GDL3`PN3?{
zjXjMVt(o_zT@Rt{I@;2dOB{?}u9^wztqux^?J+C7
zq^G|6F7ndsr#C^CKVewS@+yr$&Cp;tZQ9866?s3?qw?}Rb>80IRoR?a$h4EQb7Pps
z`p5pj`IZ6Y>Fk`m#{M$gJb7rNL0P4=Zha5tTUDg*
z+_|F@)m!LD?Z=T?{t!!kM66!S?JdJOp3lAba0a%yS-F?f$M@FjpLorx1Iu1;VQmKP
z6;mayy#;A9zDvE1%a?Oat%HhX@(6^PtCkq%WuU98+p0`^-hZ`TD3rcq-^mfR89xK`
z&Y1)SNxQm;2DEVKg7jD1!S`ZfVniFL8caIZ%qu?wt}$tF+8n#52$k=#;oTCXyT`?0-;X{R6woQIc;~R#txy#UFNEKhWb^ET7TLSKv1&AdjyGmSASs5pg%dzt{O*M#<-A^0}4oKHa=pLV~
z4dW&~IdOX%Zi-;P1xxjC9VnLyTGlr6vbVAA0UR_Bf1dgvo`yyjSEm
zKN|RHXUf6mBnTw*QN^0F`t4?eli(~{z*d&H=3#a2Q$vEEUgv^mtzcUv)-a|713$mA
zw0g?mF?gySIqi*>xX5d~9ky?<`4E^f#+V5g&wC4wgIovhqX`}-p|3BGn9-`fHpzwC
zzDr{49IJ4u7_z?~AZElAyY%cD)mK~W9ANug<-~lRMy#YrR^@>q-}PDkSYKTPK`Yo&
z1hdHufZaErG!vL1;|o7Ie){u|fs2mQ{bfR<{nbKa2DMEv7>=7f;^)1t>9@lM8Fwel
z|GCrP$2xbOtyr5zAAaR(f9X{vSnGhIZU~3lmE6I{G*$Y|`DS%-@
z#U82B7Ns&wh)?ZLi+4$0G`^gga_!R}1LbZ9F>JuMmQ(S^I%*0&c!EZ&sM3eM5vIiH
zm8wA}LkxO7M`_wVj3#-vlBYef`$C0}n^n+a*Xu-c{1$8>a*1={2Df^tHtndQBDH#L
zTD-Z6G3D{sHk;1MNN$|y%Gr*?Z&2}rPm5tk8}mg9D#_wTo5!!ZVM&k(*_^tJ-@2M#
zFHD}27xo`}{Q6qsgF|tuex8JhEtt(RvhLva3=x5LV{z$A}*K$H?vpY>mOr|lb%w8ZH}4Bvu{iAYLH
zqCIQt&SWglyt}Be)=Szpyj$+l!b*!dhLxu?-bjGYqrICVM~`T13~Ow&n`vNS=%Fjl
zz(944jErpa#Ew@4u~v!cUBfWtp8o!RTleNiM>c(~Jo5Io(>FxcZ6ax4_bz=JcO54>
zv(&6y#@o|E^VyW!hLEK#^x;2w!{VcGOzd%D3VPcLVX64Zyz7I};xW@@)MS)Mgu+^2
zAsfW2!*#D$Bt86eBfF9Ng|KV(Xg*o+?@KAR1^iHek}s}T))&~}A=joWJe-}KVe`!#
z?Bp3*A648w>{~f36sO|rPH<+$QgPr{L$A8Ik^0U;H1ORv3<+X$ibhSnH;Y#&M8+Fd
zYvT25pXgMzs&FAJr1cK0Tl4|F!_8F?$OkUkK{_Oqj1L}9i(M*LK$EaBeLO+IZDNTZ
z#jdBnF3S6|yG%OSB~t39vSMJ)K3OT?O~?%9;Jerf@Z
z+FCLz!kiVM8f=UhtUiOwF1drXhtBScRBfMCzjXF*P$*$hlnbF0JtJgr@op$krNb)|+aI?u2oRB;(iX@sovp81}wG2{-yA4<3Ikf{fA%C~AK`BgTaZS(~IEHA*3@XXA(k()mjZKnww^`^cJb>p+)v}L&
z|4swd`|~7?8X4b){jnpEw@!7e!j%i*{ruW~4aiHp#@`FLm+r;SE8+{uZiCURY`>vN
z==&?*NYT(JE*?-}R6+9KbouhZ=I?0Wrd%MA*{$a9WjkH79?Oh^Y+`2ikx9;ZiUD)M
zV7u=g<)!y_^KYbs^M@wB!BU^fkyPb|>m#L`E%A&+l$(~<+m(0>B^w0KMbkfrX(U4C
z2CK@oCUN0KD#>N*WdmfxxyWws%&?0DzL1$7H<<))X$dd&rTj>q(HKed7>FtZ$i{4g
zArYoGnpQ^*M{1};mh6&{qU`PLUQHk65K#}*bY{aoa4s$`(nrKJdTlofQy8urE0N4&
zkf|&efd5tf{F1e7JUwA9%OL15yWkbgyz12nQIAUIQp!dL{;;45guFBHN1Ze$*l#J|
zZ+v58=z`zA4E8|>_b2Q6{)JT+#6#-F8pNQo**bwj+uBF`}aueRf?mS8`A!$*{f{hqp;nOY6R8&8tj^5ksk5xcq(
z2%XaoYC1S4B1PL>Ut74LhC!~OfDuEmwl2M58zW(c(iCp{G7PJ1fOOE4>_()xc?6ipM#gF2gN3hs
zxDFd|W-MAocgw;}WDMum?x}$XYfe%jl4(UX;g+y<`Hsz8t5@-)Ci!0KD{@MjR&nny
z4Wu_&w?^&PEickQRszn1-r^A)jnaScTg}4D32^;tNODJEIL(Plxc2t;C~c9Tq8E|N
zBO%w%b@cW1U7UtQ4K`GIR@Nbnl~>%JO_IbB$Prqh07-CPh666KA`1
z`0q;iaNS~$b;@?B(o{E9cna6S%CZO^7Og+bOaCgG4UecY1rLgS{rD|5WkdbHD`Mja
zh?+mv@sGXKZ2wX(r~af0X)_vytv;&|->I)3y}3m6AmrpUp{)cotNA7E<=jG)!^StY^
zSHk!z?aN*n#IT0E%DGsOxWG~|`*vUC2pF=s?`}*}V-2mgU8Gt`ze8I+`Zzy)Tn9e>
z_;FNW;@{JWX>CY182KvS!qx1*){8*~A{L!ODmha|9aq2h1B-pVVu@Z_a
z4W3Z4ndruJXod}3n=gErekLvo>ZKPEU0`Bw=6_@tGvmT;v6;DFiK1XOmorBssJo`W
z%*OYBPa5|BFRbIs@cz#NDF64OXa@f8ocM^CTe^cJj<
z(o*J2X?%zZecJ%NtOu(+;*Dqfgyl)`58Kt(9#n9IWT#)}3K|3d)ZCt?3j^dR6-x;$
zs7;7p6hA+5j~ic#F8!2C?I<_F(tKJLOO+V0T1qDrJ_Z2wK~n;5=B&2R!!VKH(ic@Q
zj!%P3|G6-YGsEk2iWF;Vc3G&(mksxCfaqV7?}oxXRFjlp5#}}Zx%-hnmY_`aHm|gP
zRA*P017uW`gF7;}R0PL*)y@2XHJ+GG>uWH_40{h#EC#yz13rfGj(Tj2daUMhVTU14
z^#Z?b#4jLFI0NO3Pk^1>FlPFV@l(qhT`cunLzS_L-*)Vb3>CKq!X|CSdRMI>Ilc(DKg#!nVe>1yk5>|`>B98n4ed-jcPK&8WBNtV`pRrpZmyn-3oRs&lRamEk<2#caf7n#V~X=APRhA8
zQD(cR=44P3XFz<#NUoYdwg$L!owTG12-6ioXR?f)*OmucyWo0+%DK}7m&xaMTRlA>
zl!1Fc_x^khN!oZCYK$Zrq2*eMiD?%)c%L1Dx?A1t?Dv0V@n&84;aq-w5!1
z8Nqk|;Sj^!&nf_$9KU5bz?dJb^utM2uYBT~2T;X(X$a4R*?&0Wa_rkmCSyk0_q&(h
zAAKL};Tsc>Gi4!fOyYE4@-O}*u%zJIU@pYjl>#x&UM8tRS;bTlN=864Y0JvWO;reW
zLHH{dxry=c8WZ5Phh`u}QlTx8QFN_jJ$OELijTW3<19>w88w(2^2EHq0
zndVi>9U8NL1mmYX{iqSU&5GSEZQY?pF9UR`!Yj}e9}9*;fe+7|y@Nfd*Gvpx{|(pm
zLH6EI>|YZd$jvJA$mHUvtGoO3(hO=Is0$U7VBD^VT@8+Q@C5k?tn;i+TmzWV1M
zWrdddEUBwbsYXX&-93kbmMOiCDSBF3T6Gb-qglC*?a>zUHcmH4v6`McXKEU|X)sYR
zi4Fj#2`zP4WVBDN4o(^DI~nozu8>MNPG`a@sCdY_!h)aZLCTrhV1jj4tW&FKz6~q0
z!??vRQ{VUM!Rsp}47N`$-?o`QOs254+t91Px@i_BLTqy`ojtuTGRkxZw~Q&9<*gd`
zmF0*nc6L`^z0u#=+65@#ZWSNbnQfCl3nftop=P*s>(=WM;$302%pv9$zU-rIhe{_4
znmhsESYBCybZ$?NrLQINt{>EXp73w9PQarXARnI!l8#u?15)6qoYG9?yuXFHwTqjZ
zo}HatSre9R0Y-?d1$+_54+VmZ8$%h&%9~pX*c|$5@bJc8yq|W1prNty$?D&J4XNqZEcQ|
zD|Bdzk+7~-m9AO$Brl=z&7A1oLJD3gEHS5X6rH%eYrl>@Al&_UCL`2MEb+?EF;bQn
z?-qm{E~NQ>r&*lcEur1>myfU^*()7B{+FAGRG!FWA#jGwwP`vMg4c&ds5f`&kIij>
z&5ySSlQfutt!m5tKcCW|k3{1D@VT@!+3SXDX=y=^JO@mQ0zeizQEzVZQloUG8XlTj
z^-{_)?AA|^p{4pmB8O2PUvIV2OIB6t$S|0h_m-|&Xh+$$TVT767zMVKcNlp#0)Xt4
zjfD){=U422B(^#wF#CF{V^D$IT;$ZTTB!hV{LeA0Sf*8hAg1Y8+QY><4S4HY4d%QC
z@s9Rg!8lr8g*$;%;5QR6l(3oB1Mrau^+TmK4eHT-Ux$;_q3QS%FmsK@)+7~j9h6<}
z2nra#n!;SCZXZr?FF&NGrx#>}5UvAFESRx0`YMipjHA@ty(~>;1HRgqHhhVcoCKw3
zwcC213xTpk_L%8F<-5xNbRW%JU~I(CKff%%EuyXpp~y2fQiJlqXxp&4M^!Q5G(a6Y
z(eo@q8jF>mWKCizcJ9i4eGU8=Dz)LmFKLX
z6`e^^mIpMu7hos3pw)%-hLm-k>XkRrqX1!!cl$@GuU@td1R&FgwxY$QWKW>6V4vB)
z-nY>XXpYnZ22{$2A`%i33f;&hs`JfqWq?%RC=X390lZ6ZkF9>tp|6tLvDlqwY0f7C
zsOCVnBNTz5vjANdLKzbwqET%QKmo_rf3951Gq%uI;Lzl`I-WtjZN2_0@KVd=Gf>EH
z8wM!fn^uh6PI-Cx>wcgj+H>VGY=!mf3bz_XGP
z1p!6(W-V^D2M7r-n<^pqEkPt{0rQIkCxM{v01IDoiCgGZ;0}o{IRt_Mgii7bAT(1;
z{9xAWRz$_4N4)~{u#nSxo4`)dB0%|)S_v-Y7zA!qA)^QlA+vW%T`bA#9D;ZpdJ7!r
zZDHyU#FDQ)aU;h9Y@T@K^><1@8uE
znd+GgM@CK{3-I%M0qloG0}A>SA=i2^!ZgpKjLfLJD`d`Bv(TsV7b>YOYaG3p&iT!Z11xezS
z41>;|9{5K2*3QY^!tKKVd;8G5lYw!1eXTW7X+T*xmALDGS&DrvV0|UIR#oLw=e?rE
z3|hh5*h-Ft2uD#uPK)}oc-@g;XSl!wo7DsdDY(9Gvn5Y>(+X&hzln}MC5qj+!KNJa
zi4toq6)`jyL!9)fDZ^2B)d3aIyfj5F1O(4(+(~~c9)6(`!
zYd^dd^tTqE^7CuW*l*fg3Iq-R7?BFeyH82Q!{!f;EPLueZACYVL^rz5E_^V88@RNA
zX+?*|2h9nJgekK#Dn65MfznIpFwm&dB_Q|DQwOx{rRCdO%b%%`ekPY*wS(kg8_wID
zc&VmZDtpU=@^b5}&M9hx$}+1@a2w?UI2Ikg94;sDd2M0MJ10!)%xh?>}zB
zfHg@0p+<;y@}WHf;$I=wI@r&48ii_z9!uAnfiv=awUxK%jx|JOi)$Z4h1_SCFClU{
z9^yuxo2uu?TMI;jX7gY+GXHEfa7ndSfb#$98F
z3i1O8Ngip4RtZW1h(rcx9E14`{Az|D6noL4fR0%sG}7*_(gEOio59USA>s|9@VSrg
zHP#9>ns`ht-YGI`UkbaufAP3!Ezg{oSDVpAJ5Okhuu{ckoogWpkcuXn9~?3&`1MWr
zh7^0MX3UMpX)1nhdlpR;9#n)pJ{j}$qOmjEcrKumg|%{|NTtz7>#&l#T3XaB`_oaU
zPl?j7T}
zl$%H(tfTApVNkr7mAINg@F*eMsl$4YLgbZH&G~LjWT%+^?xrDYCR5z0>Ou_FOq$=Blz;kWx%(kMfE-VX>dEa
zd??r{QRcCXskODn_(pcL2?;|`9GLcf{hindf}dMeW-5iNxY{&!NuB8?82C&+I$!E$5!G4L-27QJ
zRx~bg(JaQU>FiH0@9k}3p&Ss{vXCgjzR>-8UAx=S(`UWwW>LwSj3%Tex0uxeNBukt
z!blC+=K%h(BE-FVwklCYb%0lnf@FaT{7lnr>FP&+QT`4<j0#Rf5bF}?T)
z9{4pfy-uQ-OEvPZJkxz06
z#OTzJ4pH<)X>v-^BQNcjQ$kr%1&+4$<^pp>8HqL!ZtD>Q3U6@st}n-!%WF;N<@Ya+
zKQ^BD6r9zf>!-`y)j>KiN)c_y;~o>1w>v3pRZ;SS1bo_kN>eQN6$ebH!PBrRTS;!o
zTcL7snHfxdC0FP?U=->GrM&$#hzLq%H5pJ-9g_LpUTG|DaP)di%vGI=i1v0_DdOPj
z$-{miJBTyM5kBOqvHl^hY+g?(E%9t#=NtA94^DR_DUqW|AseaJxnwkh@z3r@
z&3cXx9jof@d{FTXX7erIwzzz9?-k4XER&qZdB?8l)&)#+>v)htX>o1y4en~c0TmTj
z>td;_l9lc@nNrz|K(>XjydY5!z-+$Q5R>-T>vsU(|9Xkea(IQX6xwQ)iH`-$Q)_1X
zpS7N8m?`*aeywH?7IN6MB`&|F;G>FTb91)00ng}YtD3CuVDGHbGwfws7g^AIG|mw~
z_gJ3MyGoaA-K!SwI6kcbLby5G1ds?)(h)h|lBPoxSq<^_d#uU>9*qy|--NRq{5Cb*
zDV3Gu-XA-gERPT6OnnME<80LUXJvoQ8_4dqbS3Uy6BpnTQIK*XGCzMz$XO+A!^Q%t
zTG;lTbk$jFKJx3IF(uw%+(Z4gYoBe0509!5`W}>*Si5;U>W5ta@DEN40-m<*(PA~T
zGeRmp&!EzKk(-{jzgE%1->&Al$XGl0MVo@Xm?nL)@q$X7!71ABJ3`%nn%}Lye#fUR
zubJ&-GK{m$t|^##Aro@z1Bf(q`y1%tFk!(Axi-Hq(Uh+fC04Uod$cvh`u%xG&L9hZ
zCg{Loyq|dGe+!&{+CA2mfq?Qv2tfb=u~|S82_DNfFWvZb%j2+^tS1}9(NNw-%PrW0
zJN-?A3*O)mu@Fx`+%!f|+rO6ky=d7)VQXCflfUMgy7Be(C+V(gsuWG3e
zeu07LZw};M0L%L~lKp?t5!jx;84K*c76L}uLje)u3<3aN2fzklp0fWL4&R8s9WYQ~?7_2QhEBpj@u;qUuC2QG^e@;k5@1I{>A&D}0LQ4yL6t5DD+-rC
zeC|J1Q_sjK?(h@BbH9&UNfTQD7xY;DdH<*}eH)f-3MB*=u!}@;@ETA&gZ&ndat5l^
zz|uH=adLR4>T>?{+N2SniRHkt91ICw?a@f
zrC%LZ9w4{_#IX9Bmo4=tY-~4OAPVWjAhx=$1rO~6FptB#qQK6PZ8fos78D;TMn6^I
zwl8{8xi%n$E!FdKs+J-EMSuYxfx>adov5gwz;)#WCM17HAhwRNEDiW^o6DS{C{nou
zuT4?cgfQiNah3D&mNOlb>hBIbG(CbLtP%zc{FYu6U?0!&^4tk)n)@djGp?*@f%*eD
zsD{fXD
zet6oAIY?G2G(HSvu+{h?P4AJ+UQp^CYOSJ3`caB|!o4{KVxG;PG3Qs1AQYJs1W&q=
zyB%GMMTH8_>o!^8xe}SzPaAe*li7ukUP1|q!6;E6c`dkrfA06{&~qcxXta5dxn|T}
zMX)d+y23=QvMT!8qgNgVLy5zij5K
z-DZvm)j(BqneKIAgkm<%l^bXhuhFyqOzcy)4x)%#H{8s#R{=XS
zc^qG8iNoQ_bl>LyrJkcJe;4qfj6~1V1nU{3v;(^F{G$Xlw~N86&2n$br79#^hU>ho
zdSWD%C#a+M|Z;4w*>p@^Lw;pb#?^z0JPfLuiJ!apbUzAFGY#I;1Pb{%@Nqk1%8&IB>aQW=O>(Wrq>smjjHZP{3xv7VL<
z*><$EP_RpTgRPeffiz2b=SXp~lA4Q9Ig*1n=2LvX9asBzgjC&+G2OS=P>N)pim>W73G;<}hZ}0EJ4S4rVE%wSGr7Lc;z6Sgdhp_lDkX_&6
z|5~H7>ar$D(LI<`H5AzS`ca?*k&CtTF^lX04l?)ws8&kufM4uyj+f8r7*=P}NxXGv
zgn&r+Td>2>peFhYiM6qDO6|znuNyOhIt2Ra5C#h3z<|m@Nn-ywKK#N{k*W5pDCYo?
z>ySb)K-Ngnx?IZjpD^0Xmw*BjfR~!jj|`FkMoI_410zKp`m|Ypq}lHsm@Ghgd>vWPHv|HFHr3^yb3XTQ1Qqo4
z2ri+oMC&VO$yinSdlM#DFuvxm_5K53^#T^4rGSF8W#l?*=4wwNK!MB7El49HqYi(o
z@LmUHdO^=3hF!by&z+%qCL+xS67u%u+i6FQ05JD946-UKLIr9*Sov^+d%AlT8Ysjg
z6y5tD0VKIxE(JjPFJI9_Kd00}bL)+{SaR|oMb@q%by%4P32e$bQQ$t28Wz#iQT717lmS7t
zy6$~=f2Ih?tOnHC5Lx>-I+`ir{!u?_1fYzTBq-h#s)1HPDLR!P2aEf9B`wx!qI(u*
zllFaQ{T+cG8jvg5abK;V@zXZ1C{g6k$@m(Ypy%M5~HgxR6c
z79pqvEg+!kf4ziSKFqyKhVYB?v_3ScD9~BIJ$;RJiD$R^=T?L7$jkr0NEd~8V=>P8oSbu
zgt{b`c(ni@+qJ%8mx@JxG~>3a&~V^>{yRwI!fu2PkzrtK|9mu
zIma@qv$)Xb(3|Dli#~-0x@^k<<^q5&!B&Frl$TEut!LU}Cb0!YPA|4jK*nS}1}mRi
zl*u*-5M#@i2Lo_Gc=(R5pyWkH9k`UW@?TOL&m8t;nKS{e^?=@-$%G5wSrn=3dr!x(
z9&Ryg-hJbUb%TP8jD6XJ{QKY;ZXV9@^`IQ~iWdeZ(8LjJcHReDHFtyUCJVg-`{BC-
ztDBJjb{FUi$gNfPsMhP00w74=b{&$$8eMtyjg?`)57*iI>X7i<^ChVF{&73TyI|n`
zbZSb9ZS4krXUEvEdm#LY!mZnEJAUK+zHK+(
zB@&%AuAKV1`DL?J04Y$NG|WmPcK6l9#!Xob6%SSQ@OKoiKNVpdtO3>F>cET#prYb2
z0J~3uXq(#)@bv{fho$ezU
zDWsx+r^n)z5*@Z+ZQD@{3P#(Ff#detHj8=`{uXSCHXwaLB~>d!*H
zcoRhBS!0f;Y&Z?B)G3}XzLctvD_tIP);ZXxzbQup=u8r*Ou{MzQyamq&Vz|MI$ePR
zSS|i!OQc3}T&-74tL1${i1U5Q1Swf~hVLSDmy!C486w3QVt$MrYYUU5R64C{e}kKr
z>BSZ^=rnUu`Sg2uKH$Uo=b;l!&zdN)RIE
z=bEIcrZuZ3D6I$&A_0H5-ZHeVx5I%|{_^Q}TO-b~R^3E2fBKnO^WW~DjA3#9KG7aNSb`q!M=uA?DYC(k$$qp
zk?!{PPfDDfs+?ZiaEQW13_C#MZ9H`Cm>XQKCN}bH3HbELF3Ot*V&kDl
zt*x!$-f2r(9gn(nqMXcP!#rJg>@P@2uUew69|_~O5_`GTLu9E0dLCFkJ!mB77wpnR{vEbn!Qk}qku6S-O+1IA6NtW
zu&zhhs3U!UP~au@R73u;$r2R3>jgL~?2x(@8b9I)K$0QG4z@PQ9@OTW@M`m;`=HP)
z72DRC1!I6sx5G7#b
zZ%eT{pCg*z?YiUX7eY?kOLAm>l)UteifbHPymH4VAj^uQE>JJPaQ5sjEE=@(V
zF>pREXKiHcl};u+ek)BT8k~-_^Qj+EFVqSUvGe9jTQzBYIla?bs>d$fdlUsAJd9o)g*0T1_#W8&uVAtj}x>+2L||uIZET
zzNYj0Z}^Q_U%En$JnGVOcTz&>M|)AB-1_1HEa
z(mGvb+MzRL+)qkZ4!^P;nl9dU*`3d&B$i(ueQZ|Z?C|D&rJpvmdL=KLZN+>@h9j|{
zg8+`U{&3xBi|Wza6+-pC%4#apejENCI-ca@*mJm1N2l)5kYHkhx|&&e(YCED98qk_
znnI6c)8LKCxx^dbM7(}G=*QD
z)gK9H{u3t7NW^a6?Ds?W=3bV#WRfJliwKnZ3f0U4c6aMtk(0_sKO_XoDk|dL5Bta+
zr61FT$PSjpx~!PZkI_buEt6U~ii6rmycA4=m#Yz~byeHN+3^5traa(p90_5h*ds&@BKwOm+0+AYhcpW*(};(Sln0kcQkg&9Y9#bLKUDq=cJdHC;j!UuC6nqc
zA*+itd6$CJNWt08iLQq3Tpl~OLPy6pR1NFU#_;^b7q#J1EH=c;skPtx0epg#GEbVI
ztMcse4M>2xi%Lq+ZeXtr^6M`xYE97q_4NxCi+>Rg2Bg-y$0>SPz1y#8*<0df-bnF$
z=U})Wr2=SleDs8m=cG4B*C+R5Jr#N>o~5NZe4Cvm0@);Ni8g6YulJlt9QJNMpoy7#
z5t`_d2XXp|E@tzn^yYtC;CFPFCDxe$B+D^qL=Z3M$c%?*1hd9#S@-)<-|2qT(s}~0
z-ANWI`()O|26Dk6%#f2nB2`gRVOS&3^mXt_(QA1{MMub#;FGWg*xVrBGAppxc?VD@
zCjXKS04#?Eo?d1lh59AKc~}-ke)!+(ocmdYZvBarSD@kE1^UO^B_$;fnkeD<=gJa+
z3p>u!m0?XV8{W{vWKJK`u#pLWyE(9&(-hB(F;bG{5w=8FxpTsbd1JC6>S*zffh(GS
z$;$mBx*Evd7p~!Q9md<6m1in{JDhE>{Sv8lUsEhEh(QgvwsP}D{4`hMx((~hK4(tO
zyf$?84fKe4Z>+=8uYUNKvHz`q$M)#7U+`ivlt%K&qT*u8liN~_kgHhdyMx;+TkJNE
z7KP=6lFTR^(HdF5I$&x}skIm~`Ql=EkZ~9gXaEuBSnM!tf{UJ^1+RHa8jQJA
zup~k^N^w1%mMf{MQX^Lc;mXx?%r$I%i)a!LqeLm5N&*V5WBI|L6@!+WQ;v=q
z2mISfmuYDYP`r###yfH>2Vam~nqtB!OV1qH
zTs;WmOp&#H>_HMb@_TFGcouVXi_
zcuqE|(!gJT=Tua6K2nq+bQ^j!;!&Nq)wr9uF*iR5Elw%H6!ERb=R-1*Cyp$D2yU0*
z+qQc*_;mQgrM?6Z%=!BNO9bJ#0PtZ?P}}44s5we&;}HTbBdNjjAe6bME46#B>|_+V
zX5~$hUUL784>Bg`a?CN@fh~9(EBN!2Hy{SwW9u*TgBNyBGPO^POr4N(ywsa*!efUz
zjsZYtFRGzxynK7EY4;UqJ~|O=7>%0ZSQxHLP{|=ct=75?<0u-HOi-j&fkMC}GqZfD
zuORJL`nX51oEtFVN4i0uhKQ(e6PBQNni&Q#eRQNBK-w!mxLN^lwM9fCYN_n?s9~TI
zLl80tCs)DvjI(7Htw^7Z3@MI(n01R{F_kDNYLho{z{av-QhrzA@J4E{>;CYWl
z#x9gm)9n;M%(=ES_0UcyUeQAsO~g4z$0&O+yfRow|K}-3o!akColdi*SFybHoaCy1
zc3ohMe%+2SCii~GcZXjEXgUka$ivWK%T>CRM8cc#-inmamIvGOmh3tI?A9$IUf$`M
z3;k_v_W)rfW*09yIJPh-RL`=+pq{*3XSWlVPZv~?U9JHtM^)|omecESAkeQ3Fst*R
z5&H@Ba;M5H1}RGcGgZE*3pNl`i6m%_uQJEEL+xX|@|t%JIvDq$2PXi{*?E@!zFs{B
zd~qr5m#TxX?|?s$EMiLyhGPrtp=A}^v&{p@u2vI8Wa1mjWs2;TY+g*vUXajWUDRYv
z+J+Py_d$WcacvF61tZYR>w|g(H)Y$1l{y|Yx5#AFFk?|lYgXn-ks<~_-{$;BZu~LD
z{wrFDJ_8M&_e&hd!=#k;i=Z@uPTljH?#fc0ev|K>Xp4a2c?o7-{*kGH8Z%%v7XPpb
zdAz6T_#!yrqtM6RH!s4b9mC3&{kMN=d1!T=KlOU6)ExmuDG*IHLibc6(!N-oXb&BIPQ
zpLh(lmv6RegET<`hz5aj{1}$#*?34N3XV>=1V=VN`l+$8g8Jl4a@jN8G6I(hd78d>
zd(#M^poMpvIjq?(nZ~!CGJ$Anrvf)vYazv7Jr`O3$&y0tQSo1zT)$czzGVz#Ry=i)
zA}NKf%Hjx~&_DL^CQ?7q21N%X%?dQXP!#E$Q}(L$&gO=bJ!;gTrOoQi3exRB{WtBI4EL{T0zKA1
zv1-85B&xS~_NwV1wIYb3#E{GyAg%H`R2?=b0%2c%A8s?TDenVeL)Pb28P)HBS3vrT
z`$*t+2lcKaMdez*BTP44gGpr?M_A%o((VDXD*r`y
zW5B!gzs|%hs2;SQtYsMc4)SIxq60AA%mGGoEL90G_y`z$3sBB*Iz~V%eHn+
z$5Y5B@id_hMgkck3$h*_JJ3Y7hfv3>r`(7G3Lk=?@p%)6t3lxAz>Oj9YbFjx$GwF!I8LrkJ5ZQ@^41O(J
zGw;?t*`8Vw1g)0MP-%CuxCH=kPi5CwIi9Yc)JH{%)aMrlyGnwPiUysikq71!?AI0v
zm_7g7zpLMa&`20C`Mj{Tu^zfw6G6|&aIb(uIGh5|*Ktx$P|84=aVH25E1DKvdh&!8
z?6zQ?o9g9F5JL;ZZfZUtO-+0HbRUXafXDlmxhQ|5rq9O5rf
zQ0pdEchQEYK;g*K1i#Kq=N6zE7xn|STs(*`
zRqBNUT&)6U--tG#cK<|(>3%&GgI#uO7r&bTvDk;rhjserVYy?kI7YU7AsODh=Y>#j
zMFem#NIV8<3y+hC!Hm^#3TP*}R*!&e`}jZR4#dEzzz?hO{(Gu(tqLGA)4_OQM6`m5+JPQ!!4**uQJkyCTk7=TKo?rC*Sse<8a0(>Z9L~wp
zWP}z&cSaD~A|KXOB-IcvIwd5e)?ExaK})Vk$`&vda5^8p`slD+rGlLySW_;mVI
zlnV~1$lm^C%a3o=+IGtV(8<$ePIRBj2gQnTJd#|&L+C|S0=Mi3J5&}>SeBNnKzNx-
zkf75?F=y+oz7Nvt3d#@I%Pq?)&0|S9`w{D1{*4W&OkZ9$67QwG;k7;kY4V#3^XYnV
zG90)&>8{F#vJ(43CA8H9B}e
z=(bd9Yp!L5f>B2IvO#`fVd2;hAp@XMetNdbk?`^84+Z0z4KE_TDG*$AFA~J@RTl4l
zHKPRjlM|+fCbGPXb^H5w|9~8LBZavlu~vF@SQdI6tcXF&6+@Q!>nQKl#6g4i>y!B-Sb9Ame}#1*-?)Qfz13UvSIti;S_4NPGL+s
zGFs=02|of+yTM|_aN2F{WQnw|%0XB0}vOuh<;E9{%$;;Y|F^#KgU5+a2M
zg|snA+B`*zNvTTBZbhu{Y^*WyBd$gO&kMR{Ct!3hNmW3
z-$3@<&IUM<=QMeTws8cimLY6xHTk9z;uz0GbMyIHkqsF*yN4V48f76;3wplY8wVZn
zC?N*EiyG&c+<;OcaBjkDNOq18$+aX$=w;hHMr^14vAw9HCTe6;KM#S8g1QG$n|gP5
z49Iv!VZw6XFT-|j0E}4fDAUH~Eo_Ale)%@tDwJO~{-|-nh6Mhd)l|IK1sDRJ*c19u
zF<|HWt((#f$$+zo*Tb39CsV$w>Cr}+?fJ*
zOE~mQprg0fEI+R^_%eT?vW~1IZO-uS)YM+=;5`X;L-L55jj6p-$o=t0s*k+mVx~k=
z5?kE~y}*XvA$U*0v1`KqnIT)Q!#Qx3t>7_|p#5b(+Bm2|03A%mn8u-u3<)xjG#b>$
zSK4qJ8O((gbizX5$L-c{k#bypX3-1Kh|zXk=@xJsiRZG8j`@YWGABv_Wjf9rpm<*du7Ag4S0iS})cRRs4!t7uD
zwlxlloO>;1Qn7^qOZ{gNg<9ZHi3iItx;MdbPcj@^f(~rZLAqMg#G8ox(v6YGdJmf>
zk_3+o`uE*R$#{S({T1=S?e%52(7)kI{*Xg{kH>ykEwWjfR4st~;ypUy3w4*KhTAOk
zbzKd9c8_v$W9Cuc$MsZyXL-e$yr1Tmp2USgrTGw5nnH2VR>eK8%1Sw%#!+|qSZkB#
zKBg}RW4BWobhL
zaQi=4&OhS8Kh@e;wh@Da-tjEBZ>$R~_l=VlRAbZPG}gC5ij>{u>^^9DViZbk?v?@*
zadAL!A&T)z3%C1WAp>&lo+Mfl1OYllT4VypsDx>{N=%fEY9_TmZ((7$D+c)&cscQG
z+v1-Ffc1+*M&Ws>zMQO;Cv~(M^5v{n|ct1Tfr)(v%}u
zXU_e=Gw#lPkEWGI7WwPX2)>(^7-53a2*&%qd_m#|q`hZYf8Vk|6lfKrzZM;9E51bLm7XNH(yA
ziN`(eCyIW3BL{_Q?u8y0_a&%5SU|G+^6efERxCoDoAO??A)H{H&`qBg?=CJXvImv?
zi3eM~hW`ab+XMhnafnX9`(FUS_Q~F@CdMhjRhRS-{Au!OJ>=EFpFEn8sXW5wS$!Dm`1~8%*VTmFyGMx8sjzPXsU1K@s1~~X=$c3F_O?HAnHEh~#x$zC=QxBdu8vIJZnC$A&FX!Z)h&}tbtV{`7Lk&BR(bdazv;cp4C#YKKcbHGs4M(g8qwClCBLKzUJAF~jjfaMRQS0EB)8<@@@!
zW&d8i?%%?{{OgvGvyDp4W9P&-2YSm0RFZ>7?M`-4gPa#9D&dgFHy3Ji2eoSF=*
zqDA(oV3}^Mwg=mVKy6a;$4>D&_3rnNpj}J64#6%h1#l#)Li~_+U!1ShY0qGyw_SAq
zYTr--bPOJXMhx%UtV4+}?R62TYK48n0~-T#9AnndW0
zIaB~sYLc-Bv$+s4x_sYfG-rrth22XtVQgSiGg_kjNRKDLPgO?COc
zSV%iuXO?oKmPFlxDzEBZy@-I@TmG;{JuYUDMS+IGfAa4GHVX@Jk;&r5rXKZD*pkF@
zXZiKy+GFW6h932euhv_`-;O1vor~I#bI3BzzD>2RQq#aDEY$=B!e0Om^h9^;v4xZ`
zqb$cR7_JL_ud4R*`hPXu5(LS3X61ULxR!!8zR|8ubX>zDwmzbvX~0>?J$IbJd^%lo
z10bxN(>z_=Ws!)8aHYOV2h*MbU@qW!4IqfxQ({wme(q(In8!gc)6-S`TjaIY9!=&O
zEFGzyUv!70l#cph?HK43KgJSS8jwdVriJbMLb<7!7!`FZUgF?d1yrQdONxXM@9A1R@z!*t}rN5xjV&1
zGeB>;xZS+0puP#TLoy`J@C8}8xvyN2)enZ5*K6D#0+
z2(X5^+n-34h&rDZ7uz)G;D?hUQM5=R##AjtmpXhqJtw2wO>f-xnd~+5>T%t3Cv@2b{A8
z;4B^Lb>WuHz`6$&CJU`>ANSapWh0v_LlWwg4(+Ti_H`!c&`+8U)}G{DjFm`7&h!~K$qKs(6kjGpg}ZRsDcPZ8s{pC17>
z-5Z%TT$BJzaIG#u5g<$!$|sk?(07^T0YIWSWt)r;U50hDjXud6B}h}*wE_TOr%y<3
zA%Y_U%E>&nCTSqVf#2pK@s+5gEu%rfc9bp5mYD+|8LZGqk5CcXqKK288SL{YSW`<6
zS2i|gG|c=L65X5=zTPk@ChKWQlY=NZGR0UU(X9x>saiK1Ug0fuLuo1eWos{V6`Jk1
zv3W{;PdOP()4LwGoMmCptg3tRYT;O2U0wGSL(GGY&vJYBj7G0>6k%kP7O`k-5zBt@
zViS?2-!GX#9jXUdX|1lwdO6t{D-Mnmj9grb(`60#M>R@VQR}d`ahV@>piOgJ?+%AL
zpYS7)A41L+c75)bmXZPhG|NY#gUyJ)=IPbwLXPHdclMV-eBvO%voKjeocfxjmM|>0
z!}RrmAHS;s3KaPKhoH_4D;DwTOU6!yhF7kL0&0QUUBZ((N`4wS{^OPB{C6z2qT8h(ET<@A>yp^e8zifY^dEsc8%r~AtYF$=(a)Q{_
zu_s2TOF)ALWIItu+R1izd5u_%wbFTLU8FmAu!h;R<#X$1Uo|>rmu&KnwWIMrSAa8(
z8ScvVO4vCVd9qTb!u_~-VuKZR4hms41Xd3
zB&DN_NM$WGg|5@i867|eBLCA(ymiB4F^-w7vYeN|^V3qgER(S${%Wdyoc)-8EnDW?7^!*Pv_kcVC^&
zrKw@JSfUUnIeZ#XaX}*Q&8RZ$C4-y$KS-~9xUD;BV3MzLPgTqvK!8@r?({d!XSYFC
z!AOMmoDi7|F$7BPu5p}naqJp0_=uq<3J2$$)7KdYt4|Lpju7mUh7=la7Sd}
z0p(4FV=~Qm0`V2IEekV_AIr-pP7kjMoRmKy)%_~E1L>KvMB&MxK``Zifdph^-9Nxo
z7X`K;)=PC8L5$}HNUUqN&J>|sF*1{j?Xj-`G%f2JHbwm`{8LSph*d%+2~rie4(J^r
zsEaKllY`9hGwzyj*wQ`ig`Bo{wEJVauUe$~1ISq|e*a$}-bjQ771Z_)zNl(KWZq*o
z?VGVzZKg-N5d~Z7^p(sIV7=;M1oLEk``<{+C_!SdpvR~i8yS6oOueOZ92HoEx>m~d
zFF{|Wzx=gE$d{za?(b)&ymN$7a}mO!{UKp)@`lNpf9FnC;5ibxm^TcsIm7N2hs+->?|3)1olmxCz9gOC4*f|zic{9l>+nXT
z`;G(ImI3=Y-X{}FQ+LhQWA90>B_~L+TMik&#qj9=&;kquy4ho!pigw1=w}!aJWp||
z!0NuJXB@-fDSZ?%TkTwfHxA*HRk=F}7ljt1-fzcJg6KsZ8>FS*s&_=}Rr<|+6s!Kn
ze7)Y0+KJ%>X~d4-Yi0|?sTe^RDow3Uo3H>Yt!zJ8D+B55$pTLEV<@C*+T7MF5`enS
z=Ao;B;;R)Gtf8QPjY&(AxKoiNm6i|w&yVa}l9(}8Gp3NOtHsVj^Rdcsfm~ugFNSx~
zQAgvpDoRP&MqV{BDFd<+uDN>0D%@?u+q27WfvOrs%A%$HWtVYJ&%}MWIz#6I;pFjW;uqiC>p39P
zak9?i+{R+Dde*u6llFps48Gfn!kX>w_uIR#o%-Nl`Y|*C?uP8sD8{xPdvj*#HqU(p
zMOh893o@|)gNKH&40gzvXfb0|VWZ#(^u)|tOY}c6M
z*xcZuej^jT{7rP20^2*MNE%o@eXTzX8phzG7kt1K9&~x|_@m(_$0B3ab>>5X48#6KXage1%G6*5XR-k$)!K1C9KNPP0ubb{29
zco+glyUn32A4ZzL?5*&Xlnbv2fBT(Sl|J&^nEu_>BO#UdWj=w-c}`@RE&UFYIXomN
zK4LxjRG^al@T3AH=$9KQHKcCUd3IDJl}}C6vig2?Lld64!7}cx?N|?4CNz7|cj7Vp
zBq~POC<;1}(6K(rHYgxcJ(d?`=vv+FNBu5;>Ydki%&J#Vz6eyCA^aDr?au<&w4j0?
zRU%!gv}Who0Xg@o7O_5&&K;XSj+8O)Q;j*7D1>Q%Dw`;RRnsIclc5o=@*s3{P(&Q!ygHqDD$)n$ZkIG-T0|EsX7iNPNh{Y3$0XtB58hk|5{bA
zeQFIo9eW2kx>$P%0E>g~@ZFO)dEtLjjB9f^iA4A^yC%p8J4hvvH5wwS8h`A9>)joB
z8@lGs!$tPni3@%JR$EtXeU0->Sf`5y|FJjcMEvAl>?t&>S`jn1>fnK#cisvI!Y8;d
zLEoq#8!vJE@%>MQ4~6rgEu3ATr-R!6iwM;E8U5ExiuL?7eMxo_!Hc}E;RWKZR0Kma
z5Lf~SjRy4*)e%mPv&kZLS#uuU4yF%J&Ac%9(8E`l5LtXcGRbxIlJ)dhwEDd(oaeFD
zZY~zH*#XgOgPbfNv`0~MG*xcepHSsusiQh8ArHO#RD1O-v&?9@hFyOm*#LjUnU!Gi
z-U%lT=k<3>B(dKR5-HW126l7>Ze!T7
zLwe*NJn6^gX06z0gAb37*>At<{z)Yky5X%1TRGgKf9rv$dAA-|1J;C-H6R8
zX?FY0IXO12n^%0J&Yg`NIBp_EaL|Es1O58lM_wv%#c@`Jo-1KjzTRdGXrMzQGe7e&u3FBwm`Ehj|o@r2Tq%z}c{Q{XX?ZNGe
z#u2t#{tNH+JM&`Wo6~?+Ew2>$1PgbCU39A1WpG&X^zc95)&YxCRk=5{FD%o&nc%+H
z0`l`2ch8K_Ii35ww=Nz3(fvR;|Jb&6`+`u}e7DK!-8ATTU-AXDUwYJ54v26c7<{yv
zO~q<#VP$nDQaCaBwQ0^#>7?Q`LqB9;zVvoyqU=cP`(W9?fBc00=??3*qu-I=W!7Ki
zgRF^+_wN5u#!GqWNBi@AKDrP2gtHv3o$D-%QLjN*GML`A%Im7Q{+}R*|CaIo8!@EG
zc&CS<*afmn5pXqsToH0cJZ*M4Wfc*GK3ujegJJ4_>UhV_S=HOtUc6nxxEVsbRnD-0
zVWHe6n6?T!-X*3?bU08TzWM1PUs2ICcRt1MN?vW8W{u63TQ81|e_=5Ha;*%ujhMZ76d9N)dikyWauV)UMS*#;fe;
z=4M(J=C7X=yF&T{RBTZyApTp(crl=Nbnd_gxQiTu0d#M!eJfEen)3Ni9NfMHTvqxC
zK6XVL`duIF>x=xPlCE;K1OiUl!|=d?#H~VUrkbO(i{GhA3b&_|Q6yThKA=L?{^E*+
zwr(dFRdwg$JP2}732|<&t0Mtqg!ht!|1LsI?>58`2Y>WWa?%X1|A|=#`KpFEYY9*A
z#60>D`SC_Z)gZ_kgG=|RL-i4^A1@zpF`Ho;J6_Y+gu%y9#tDnerzoWre*C<=2_LX!
zJSmxR0_6_Ui42387XQ>xP4$-)JrT*&i{Jm>b6J0?DgWIDN>O(=Kd~MYVO1jv0-K>c
zHrn!v{gUvJ;s~-b^j)fIitiJ{7|?CYi$D(q{fZU*{@(Efq5d?1HN@Za?`%H27h5f!
z#JQZ(SM&=~H_Qj&YL>TFO+_!7m|q@?y%aLvpQsw;3?txd8S%cx;lRY*Mm(crGb57V
z@H3K1YyS&yf9GF-G;C4QnTHrCFkE&*AP$TdHi;fBT!?vOa!8Y&BP6aJb2t>J5UHOb
zJE+8sBV`EqB@QS}
z$81FT+cuHf_JlX?&xeWS>lQEKiXDB7qa+WqcaSuiFvrPKC6a{g)8t7s=P#;ae3r%E
zFVA_~LN`Z@^TbW|l|&#MI&_F66SWi37HJGbx?_btGd_av&+SdWX=CmHZZV>>KHD5zf1N1{X)@p{p5ATOwzazf*F$nEJsF<
znIL_{(wiFL32l=~CiY6&!>|FviwJx8q&L(6(&QQFwoSzD>h@zf43hre_B?MPiE>TZ
z#g)wI+-}U0lUBP*;XX45y|1F1ruk{{1t^(eN9be(vz+@I1{c58ccfb6i`Hmz_Pqck
ze3FM#{hq!SsvLxL5KJt7>!}1r4cz(qDX+hOhvFo2Ch->yy#3?<4oahNM5
zSz&x6(Rai*IU^)|Z0YwuS86egcAm#BxGQinm!IS+@G0u+cRG!q$5KENh%p!4VaFs&
zZGbJi{6iN1aL1ojRWjuye@}|^2J#ivZhmR1CG^NDb#QQDesJNJB{lu)(w&W$QkH9_
z2)SKhQR0Zad&gJ)KEyL;U1Y4P2!D#yqE)BlHu5{r)LK|IoepnS>#CJbSPf_RdJ7-fI;d7A!yY?g^
zg2>X)^t2O)UQbsUKl=szW?g@A_To*VDmT*2kr?n`sJ3`{k#f@pA&Cb8OKt?nP2Q4o
z1tf6A(9jJzTDeC>Gg*nO&>EdSXb+zkRb|wc#H(sV%3T*zn@yg(Srxe<5?sq3dtNiEp0
zX^-+Jcpkez(=0sKt$;!FbSsiYPK
zV(S?FH1w#Od3hfe=E82S+gbWNGoTlUOmQmTEbv^WG}7piJf>anQiLQ}#j$4_?gyBj
z6l}6~GBtQ}kL~v%Q}OYRi+{)-S@bCe@f^-{);fEZ9k|FLJiYwZhj$6yLIR$-%Scd2
z!Jo@k@+x{C_U4lXEi}GA@%sx=d?N5lEG*#tW&_{6d4nsYgSrURfqpLfV5Ma)0Oe$1
z)4FkyPjNMd99kl^uGF6VgXawDYx1C`3cArxLvqJKmiPkw%aTw?(^EMp!CX3Qg>
z-6V7D3}o*GNH=MP1K9&$;%-@}0t>*+u6^|Ak%1oqX2}C@y@=j6oftWW&_8cSzxem>
zQ@R-3dVY!)?!z9x_d!doh9VGiD@eDd2Lh;*zq+Zs6eHSgFBM{wVZI&P&tw
z_X((|4|`0AjV_bqpj;_?(tgM;tKQMj>zm^SCapk;a$#ZNnd#qW9`Px{Y#C~1RCL}t
zOVF?I60c5K@7vJO%h~1$Ue5NGox9I?q9T-6ytBzPqljKvYSOP?vth
ze<&*}vpnGwR6lMIQ5UEjY`-m{J)r9@Atq&EW^5?3nTP(+|A6VskcAYJa=Gyfp
zhixy`se9ux-Zh)4HUghoc6Pjy(YJcSz;u77b$`9(yHrGPvXCe+O$kGlj7;;+{bw{Y
z>YIDhEgv;B2no0K+t0W3mA$iW?lwDiRK7r?uCleI%&z^P;g6k+7RUc6DuHUHygwGg
zwjr=;2|3f~8C5PZus0|mBfm(?(sK4|Qo{<{_h2^jNx29twb0CBYTV}T$
z6WJ!WEcGutZ$25rSNwza(l*+5jvQI>{fh&u{Iq>Db>SU@?DcndCgxG3X5Yn$SRr?i
zqm_<<$sOc(9`avSv#`JgW`#^MebI&rSw{rn&i>6ayI;0=u?h?d>>8n;MjKMZYNlHc
zdJ=!w5NcHg`P|Qc-)aL}>UB>fO@L&?GfkXo$+WZHhB$eGbEg1T*1>`qiQN-VI@ni9vq*7!e;gyN_zS
z5jtcD=J;Hs@~npV+ym1(!m0XWAi=RUJ&7JF7|Gg{Y9-IKkqyF!uy`pIsB#JJyi`Tr+G5;RQCj!V
z=r;Yb><(Y`v)Y02M4ahG_>q_*Iwyyjvl^SYj7p8NHw?}DUYDea{CQOpp3oVw{NqQWTlEeM*IuaTFkT
zF5+6)6}@wDA#~xNP)5*f{~XvS9ul`sx}wG+s3QeLa2w7IZYS+~PTQ;Rwhzh5l0~({
z4XLg>-U>f9Q#Q~?u4KUqkBImvB8pugjq{_!d1rL$ya8@e+6dmgolQ0#k6t3gjPy{{
zWMkPaZN=I?S!kEc7#0z6@0m$fYR)*xB}w_C`ZpyvH<{x7f``A)Pj9}tJ@M;amd=Y*
zme#V!6xVqSW^qW5hegH4R`fEiIPCjUZrCn#u-~4e`NYV(C1#iP9S;FSzKfU}e@ho8
zZcdlJ_-KEXzGf~K&CYSRru32Vg%?u1=OWC`yH!_L&(A3(xdl5rK>cxd+Q(Mx
zRAv!<(pXUszs$cp{k??qF&rcd-Rj$-TF1aV9%wnfN=
zHxP||Os5oV!8*ukh{f4l9c5^wR;CncI^P1r?v?2La6$tVAZgNUErqnh?WX87mWw@#
zjkKU#O*#;-RJmA|=-6wBb-V%&&G>8YoR)}P{_xn}lsavgbgj5|uhV=mXFTVqT5~};
zTf32fF*3jZ(^TSRc6lP9Gqi-IBBLy635(d}KvQa2NHbMC)l6^PRQ72TPM#VhB}zJ(
z-qy4M(~o4A43WHyNFId!lSnKkxSMVO$<_gk^1x;=&V)1vZbd(YLan6i^?)Ztd3hm^
zTmulUux^}q6rEbWF00&$-KA1w2`<*v#f$Zam)STP*J^H}BIjR8pkMOg?3FS#b6%Z7
z*@k8|mQ&K48Ey$HNks4Xkkb-6Cflsc&RRQTw~rvR7Nq%KAsW2kxI_~x0yDfIB+Z8t
zV-4tKBO;Y1`b@55OxQ`dE*P{9&?{
zoi2@t$=X2L@0opSg0pjIs?0Gf$4`)W`EK9{ZT8PJsfGz;x8HCXWh2c>M|(y(Jfj1W
z@fGAr-mA#7;wa&5l1XdvCu(e)F7wKnb6fCw3HJK3{W&VBkRLv
zowBoCL`bE9AV-;i&Yp-EPozQb{C*ffH75oL?WW(+0HQ8$#%Syt1|HT*IyZjGB5Q8x
zFuv(mQ;+88G(`gKx|CvYdAUADcfIeGq4<+yXm=+^#>YxFkq-gKv*f@$4S`-Lz*Wy8
zzi74K>Pn7sw}k?=&>zC#LHtb=qZW28-w!{5ve=Z<<+>J1Un9yRp$d~3KDv!mw8ZB?6i4E=s01aJCH5QgS
z`kV5(*o|2nhxr}7zn%_WNmTl014w)939fxNu9H>@WZCRXLu1K2;#NwYrcn0oAgU7!
za%rCnyV2Xq=aQj`%RWTR9}WL0yCgn@Xtq4`^VPvh%r$Nx*N`z6*N>7~b|SSb55KKa
zUf-QclCih(z%DFP?+DgTa{nAc1-zg{{z?ElDPp(uL1+w`eJGeNpRgh)Vo+-`V#r1F
z|2pa1*K~!JL)
zMfv?IXoYZ2c{l22!r78$wT%$9n1bX!1dA5#;oQ6$>PO;^orRXt4G>#CNocZ~vd95M
zZsyCtxPkQ+1M@%yCk#u4XNf$A8F_A$G$QKx{?4x|m@l8mtj6vLp~MDKfTQ%w$6=$D
zplAdX;^oCILaULH({@NzRathK)DUE9?b~o38^7UcKS8qOyvC8YW&fJAjIh;ML!mkn
zNz0FyXt9qD4|ju{?Ug3IO@fJ1Ojua=5a$g2n0;qo5z2wOHh30ke;`h-=2Zpj<<`@I
zr32=HW%d-xd%2%?lWXl9dzVk(rUQ4{w;?U|`>R`*MrX`V_dYi^xpKu7(vc7}eWm!&
z`l`@|fF>+cpWQaS;ei)kMiz??8J>c~o|X1>^#E%lN+s|3SxEiz(cc9(1!LCS7d=)T
z%JDqqyMMm8CON~9W;7XPPxutc6)&2};LJe7o`Wc@$fZgjKr`lh0NgF@JRz~|*v3Ol
z&k@W{&60w=dlYiNPERv6kaS|1;tL3-h=myMpLGoFXgER5Dtw>>`>~P;(O}gAJa-Wt
zlb4SSkMSD6s_E|WpYA1HVSoi%z*eJ{YW`Y$Nqj?>f-yk7*c(HweW=y48(#*^W1+Bm
z%20)Vz~K3YBEr717SkIO|m?`1za(CsxSWJ)>D^%Bw;6RBl(SS
z#RUFHo|psT!HyYS*8$mAKAHGs!%hwtj@2(4)rE~(?6P~dbM*UNtUJ##1??g(5CgyK
zN=0Iaif+Ez?|W-jFuCPu$D2*lHJwgn2YF;#grmEvjpLX?!UZ;Tjm@MMsOnn{Hgl0O
zl&mWRpUEeCKBi(+_nV1AeL!@_eP*hvs-#baO$;-|iH?l!BV`LMf=2TbK_(z6^34A&)%T~4m5OF1R1y0L6uQb;^e+=6;@pZWp
zo#cmTX}t9|Qq_?zOkPXq=r7g4(kn$w#_Ab2Os$$w#I->nD|n@ZI`wUL#OyN`y8+?q
zN1$@@0N+ffaRzyjG1duztJ_o62%S@+gBR=6+7Aot!w}24d$XJLpGKFS=Nt1C&a{8kc0;v;o3X
zSE8Y#XX1Qpb=E>>es(GrT^epoevy%@gwL)O;uLq{U={;Ry6dz>a1|^XF&(MWYIBnauLw`tJ
zlH7a4K{VubaIoF-VBVq$^LIUTk-hgi)9q{DUf~qu)z^gY$C1t_DlX+OyL3F$UU)<{
z^Xed%V+bj|uQgaS!y+TyG1T})a3lQ3aC1G0C)5-^wtNU!o~@~b6nhPUO1TFe7iLy!
zVjr+083eRp^43w$lI)X!L*l%&A&+1cevy7>6@0JI^N@aJL$JB>LvR+?Vv2-WSJF2P
z9Gz5);h*AWm5MCS_9!GIop^6^nw#Z8!ZFYA8#27S`T3#h8~9%<^ET}o&ya;hZd5QX_xQ8#X+5d#M13#n4Xd3>Pm#khJ?$oq1Bfp&;_$3Rkbtuk0-=$k6yoi
z!cDs%bUjCuny2eA%K^zj6kXMJn)$K1n|(!&zo2$))~lhmq?+rG58=i=(AhFtg?*5R
z(7ZVP7GVEnBgZF%#^%??>XNxMbO?!nk`vlXi*iDbvvBN@?^{qiWhuW($Y}rQIlF@a
zk_M-?IYRk&4U~r$)Y)sZ1cELQ^^3A0`+3j5Mxnh9J3FnFY=z<}7AQ7;k^i`x-pr*H
zw)&^tJs3^pb3Y{rJO3k;PCs3lnbj%|e=sg-GDaOMRO(04R|!alOh12-lw|CA&+W$M
zgO)OHUEMt;KB6f9jMs@Cv$PVu?Y>?3fT?$f)2WJp*XPdn@{Skoj7mS`)7a^nSz+d6
zIpzNJT2savi}UBZ9b}(xWwR92(yOLs;{>Uj6A+_*NIn>3dG(H+zg6+qwq*B|T_~8frF+
z&pt|r-!k#*&dZyB$XB?h1vn#dVGwGo0G8}wO>U}kLxyBOZa}`f%*6ZdqLkY8!a4_A
z*nrB~N5;gbyE9UJ(#QAh##W}DI-D%o;0e1OcZ}oi+u?C^bWJ{77mWGYZE#$
zF=9{~h*eq}*&Z^@ufxNIVDedqitR3R9C-lln;0G~UaE3!A0zk)hj&=@1<2vpA+6!T
zXJBwQJnHFJ9v*%Jn~JSWS(;_9+1DSxJRsyIV!rjE
zV9^hvp-axKbrsNEK1G~GzdwzWL*$hWU8S6h2-U{p3LmLuE@T^O!*20P9mAIP2nmhc
z&_yz3Wx25cyFFU`wvu5{xU&NQ_CvV=GhrwF_lh;?EdEG9Q1!M{ISFWqGR0~5E{bI<
zyJnc^6>uyh(`Va^ZNCc#kXAUwKme(0S6v=Z@%ZAz#E1NPddca6uiGAUROW}
zEL$1o#iyS;`S*sd{&8coiOVz2KkuxPI3aC9NLZJMsu}1nXowGgm=W1bH}StW^EMY|G0#i+*W-l9iG1Bec#U
z*mS=Y=fSUaLR{V(;WhbgTLPm`)A;W!-$G_){42$Iq{LgDDf#K&UrL
z&Aj=Ba2U0e+R8I-{X-V9_r9!25a|ku0#;rg$Cth~I*&pZISp4UAynpelSAu?UU-JL@lJzrpSJMy#E_ytq`d3!2NY9!d
zXu|qlrU@SaHkVf+tXq(~ik
z9GiJ@gjO#3@yZLQc7a&-<^$UO{n1t67V13k0X_)Yqf!NA#?4wVYvZOk!2P_~v{9>t
zE;IJ~Xxqe(fl%e6N;gLgFMtE)s$Qd|x!LZ;r}FU*|AVWmKyKbNIUm6ha`%9VS^n2>
zCRpmFnm`0J$wV*qeRrCVxWG6{I-26syw=5gCw7wVF)@5KJ_Js$w)e+s+vT|86V)Wy
z`eC|6!-RfI{;c+Vhju<1qE@y2{FUq#w#=i=y9&~!O)%6E{!9aaNME(-_PsyeXUw0^
zze{pvGy@ASwZcl};VPr=-EuvLu_=qe_P+-+ajz!ZE`IzjZM=nr(bsbJ@2-(^>9}|w
zp>lTrv-op?ls~op$=qpgY4bxk*5!#8Yko34J>A!q!z{Ni@5ti2=!(IzCyQ!3fr!1x
zBPdY#>94`_^cUs+>nmQjcJ8j3b2xR+qS57D80!lV8Keta#t<|fhqgZK9_Pd?M;AMu
zGmFa-RZ=jVei}#bRwQk>0<)3;l*P+yTeWL(h&wgoXk&|bbE3?E4=)ib3GRheL$lXJ
zQ<6l9)%SNtv5mkv1cnSZG*}RYB;W6vCk{e{WON6ii8iepSnM`{W_PePYRQs;c2)UIggrFEx__2lfgn=+cwo(izDw#4DCN3JpX1lwL
z@OmVRC9asbcC>mH=V*{4c`fARVO1TULR-1uxVX5FLlB)aG$N&-lbqzSGTlJh6G5PC
zq89lKr{PDoboRLSbp72}PN8FQY*QD^l5G&JDKYuRmt7{;e77V+YdlP!bkiuO>728B)*PE%Ap2Xh08
zV+QT%ZPLl0wp-VL6WU)nw+2a!mjrHScN2hQy6LrfO`i+orIf?^%w~>Rv7qc7s?jr
zzEkBHa4Mu3;XPNGk|E-H>wcS=mL^5(QDVLi!;uVRBA6nvG@ek;$w8kH-{yiN4b2<%
zcKXLMVh@7*ca|X%%}0}K>ebc6X!F3~BVYQM_&CW^P7=@KwO4~tnX$5$-rrAw&a4fw
zcp~a^9Of
zv>M-Ty1dSyx$yOFP|>kh)k@PsUwXQ!MQ83AET|9ZJScY>9}lqGBw*N@jub-7i6KKE
z1uaTi0nH3v|1?Fyz=*DdY)|%QML0=IjC>>sRFWitpoR=M!-7;$-sKPbX`e`US4jXC
z=s`o^d@?X`CvwT`$cGPz6^0(;NQA3k-C746fE@MTL`R=U|Do78qEfiKyn%7UE&;-d
zSg|jzEIxNZ2rPc>q@xt4A2dkny|@WEY=Mwao{n~%9{{a}3POM#6RZMSLo4=C7B!D0
zPL*Hmk4*`
z8$5IrYMA=l3xtQ&d;hCk_}8-i|LvE&;VVqJc~9{TsRf*VOEDS(=K-CS4f1
zFvM)C8yh)+(i`|GSK@Z816Xl5eHBGOdf3`P=*~^Sz$KKtE{F@=fnC4WJg7yOF~ZlR
z%n^o@G?8{%yCfudBjYG0@4D6e`xXkt=OO;M0Gb%^X&Y6se=NR1
zN*?uRt@rP$k2*nRsN9VO3z<)AF#c?#kfuxQ+n=^vyY^t$?hqAaQ#;d~J6DX1j^Vv_
zCASZ@s&&!cYe*zIGkZ$TeoH9sQUNL>zBaYf{fvqVa$}Fn%g@3*=z(z^s=Pr7*Xsoj
zd4X0WG%2R9>PjU2MRFKC6NS&@`atCx;^x-y0kfz>G}|AarQD>jB`>CY>Xa-UwjlP>sonv!a2joUn#)S^}60s)-tb*pK|2x>~RxK2!X_IPJY8Q
zE6qAFUT~Fxa^zl_oZG`jrctE*`P>DQi;Ly{;(*J5wH?$P&caJ;U(R)|q4=zsde*!F
zwLhA3i1TKgpal6(&c57R*iSVf?bIwg8*UjCT@t6&t9W^f+Ge~rz`9sRm1X_rs#oQ`
z>84yve_X}1q3X-={TN7w3*!xrvIw)tL!BXQ6a#h`@KW&Hl*L8|<0Y)wS+kj^jHVg}
zLz)piQzC18H>=pzt}`CvIe2f}gBnH(OWs1Newe_gGC4?ys$UKfcnF($M>NN7q2}h)
zoq>8CB~WGDb(J}5JZQwmQc+oX$|5zh>ixY@HFR$~PQ<|VTP#TXted14&)a!~D!S+A
z=R=?-RMbf`DfO=oI?oN!LBi3lN;c-Vvi*)T1e+C(P6^JL2AM5+(3A6Qf?Oa|ZrOIH
zRyEfa;XLo`Ijx@O$Y`B)c#$UM@bS=D6UV8@_21uex1*MlvD+8x*}o_6Wl!Fli`G{E
zEM==VXq4=wQ`jGJQ0rB8_~G;)!(M>?XC%?@zrJFDBcX{z7r)m0gVyp9@3+THgM5sx
z13W@h1*L$}tiCw%mKzlx>gd_qm(r}|+)%-*^bCzGcS;!5yE8EpwU2aTpGjF%GK>|P
zj+g3_q6G+HaIt-c6-B^DXQ44(h4rdzBhC(9`GNmal2ELF)@J9o=hmP#`?n{{e;n<`
zi5+-4LjM$|CYG|I>Bx8l(m6mJu8B~K@x(%vhx8L&2=$cJ)Yu5vOvq#cc-H6|BXb7=
zWKNIF#<7d^3!Z!ye3MF@_
zYHDszrppX=KJjU#XvXjGwma@a@&@5XFp>6st_6CK3;VztH1lv=i~6K~KbJK)^Pv)i
zD{~{&^NO+Y0IjMceeCiO+_nbc2(A0MBIHmqJ)dt{@+t&%9){TyD|V%%OOl6OFpmol
zzH7cDDZt}Uz{a1-)wP__f|s||Yre}%2mQC2xLdGPqf#(w#7V%kaEo6qF)j^bvI(=*0qyXOG)Etfg@(Ex&Fw0-U
zdc@jsU~=lWO#b5iUkY1BKVj!a{mZV5>Z)r=flWVkVN>ybn{QKxOuZYNo6D*Eyu2g}
zb1pg%a6CcH4tjBOVW1Ml1L>}0o=>x5l6=S5>tFs^FMjE8vKMIwT|iRY;-j%flL~je
zgs}#E`(h4rz1sSsFObQ>NeFKU87GU>vMO`*)#nM{%fe#*^}}6K2phq8(9Bl0Z{*kp
zy(O0%WtZ3=^1b!X!|d*;i0CvSUS6UsQU`xrRE@30_tx;|5wBK0+(zXGR!dj|3B^o1
zx8M-ohM1GCyJiBXd&2JO|-Jkq^CE7~K%d4C@ldr(0$x0X<@`tp0
z_{D`Mt0fkKSr6c>F*#eFk+yNh_UEro3^dV1k63E0n*887nM6ss9tW}_c4-j8DagcW
zBbHSwO`MiVM1yBjy7ff6AR57L|H(1tDbPK4b)Bq^aac>}e@^ocVolBZU?5CMA)NSG(OW=~>^Z0d*|kM1YbG|@~oB;pxCdo2>+-=Fa@fDNw+MQ^_7lvX!?
zOA=k+H!}x$Wa-)YO1~^t$j16C0-?msCn*yrIuXrFbM4ZY9#%F+3#KCMljgqY3Rc@a
ziI8_;TFWLs25fdjkDBahiKs~h8jVD_8BEmCpmP+;_%Egz8cpg}WVw0x81%7&8E
z-;S>&Kt}ow6IXqdF8cdz!+3n8X^dkOB$H$R+uzgd_#N0@ETtS!5J?tesC8Eik!S+{
z4p4PtR~#u62a-knU4_t6PVEeu1wc}KRu(cJDJ}rgWs97?R5SW8Nj(Q$Ht>rslYmV$
z5kVX>$P1a@uHOWrtf5yPi`5gAaM_*H{Ew`d{8AslM>RM-ZE!Q8@Hutgq%@Ut)4pC
z13lRa`m;Z@0L_JGqobnwD@-T2vT+3E?r$-MteHl7cU5Nqx2692(3RBJ1*8cvnU>!1
z14PJ2nz?EYq<`n$D`F=YduB>&G@-=16f=&CW%=u~1RPCab0&)sfQi5bYXG_27Ew40
z-}7>uZL5G#M^gg2y-9!qEfG`ReBB?1fUh6-?M?sJzad5JT^0@S{I>#ya>s#IzlIBx
zD3O*}&u~N>xU=dmbe5O>urt`H?;QhRpw>@zu=fD6F&~V;YTr`=#@VWJ2eV
z^cOeudmn*IEtjmWvILUBvOM@PPjT0L{7D)*$ynvnHra<~(lF
z=biNJl|9Q;)Bq5U6}cEa-}Iiix~h(iRm`itjAhxB2%3Snf2n{LQGn^7$@36yyM7IM
zGV<|AtsC>zAL{+m;bwE|0)lUL-=xgd;kAEf+5Ru3fGZ|qE{+x}v^bP*z+pYzzB4Ep
z5kBR6uGO=Wq5$%DrUvK+JQ-v8YYuOxI(}YdG2RQctUIE_6uQp
zyf>TNM$JSpdiCSE3!747?d0vNS@*M+OnX)iwLQj)QvS_{!XwF^bh4ql!kTed-maSZS
zjMRw?>~g99ljJtq8KiiDWK0mC`Sna+eWEbUm=-w$EcJQvQ#=hVb)6;&Yei^~ynF_>
zvLTXjPYY_TVPqHQ1LB~a3nRXNjjlan^*u>j#q`B_q*x~sRwCc6xJghJ6;-au5LLJ(
z7ACzG))S~_yqTTk%tL1^ZAyv|hyvnuTW*e&q#)Oz6~e8O=%e0zS!hfzucxQy)w8sy
z;cLQ5fu%cj%Ix2yi5oYj_#a)mGz8kuE{>`9e#7kLYtJ%CCELWGytzUT2D*y!a!WII
zHkQ1{=X&sVo`#+*qm3m3SGu~!qu~IF#PRP(Vz(y=U%jw~XuznRBCQI;-^w|IZ&tiv{cH#bZGH!=Z|
zm^s#YY(Cf?3E6B()*TS)(tW2jHT!qd_xaIFg^{itsL4{0G!`5ZvW1s-0|!SN=}I~{
zNgySJtFP6b>rq>&zU6-C!xL_`34+0imVb;t8|wG=EmU3`1Kzl{mi6f7Y`;XhAaiJv
z+}e*i$zTQ0@Mq2TJTA89Tns166Z@y_DF0^^B$KYC%kX*%-*l!bR#N#@X#<0-`a}l8
zjpTe%S?{-h0>&mX$nuR1s%q
zU7Cb9yB-N>0j9bI^i)~oYlR6~Wcidi>7b^tyIQuH#CkczhH6^s>ABm87PFo=tYg*H
zJ;2Eln>b=hNAKQulk!e$SNUBe@mWoNwckY2-Dz|&4!XK1&|_igy#Z`Vr~Ul{Q`l4s
zP`GTsCW*oGG%)iX2Qa>?mrHYhkt1
zr?B8mASQfM);1&O>7y|gh1DNzqeApl`WmJk$Lm@P14F2nL*%-f)yx-c1#8%xa2Ff{nRD=QFX*fM|P7+jMjzUD>B(!1*lm8{C%~lQ7(Y6R)n`T!bWP3G&yC
z*&)w~^~)%==e0Xu+QYFgaVC298cxIf$2|)s3dQqu4o14(*&A7GT3u~&@Mv*@zh%6B
zAS<)n>~-x&^gcUas2RRI+BI9MR#DRXN=5!Wgc2im*C~`0A}uy%+6EoP;YD9qWuu!#HAdb^||V7XpIV0mZf(Vg7nR
z@hdN%>o4gC@OGIfvw-q1_dWXPE#3>3e|`CdyQ^H3hK!MBWt#M(=TLN#rdN?iK{iSD
zyzvndG8Q$iy$_8?dCb2-uzT3pKkzPaitK~}Sjeo5Jqi{x0IsgJ4=a|W9i`)%cK$br
zJiTm+gW_aXSzEilSldAo#-|mC!e4~VH~#N1E5d|}XyQTOw$?vE-!0%C3=I4z6g|ax
zk>h#yMLTv*?E=Ig$I|^8^UxZo;1iwcO7}>;yjz-~7#~>KFC=
z*U!uW`TyH5<>giDmr*)8NCk{mBB7e}%gTS?r%S>}x)||QVA`g8V@{^Kdn=*dAV^5P9%Tyx8s10XDf1Wf2eaB78{BzmwDtk7%
zxgFfU>F}fGKb)xl*=;{C$h>Eym~W@xt7EIR$_h3!rq394^hH?TNt$Um;XK~DVh;eGhWoF?De
zco&2J4MkQ~7W%eF@i|oXqbE=L<>|!Qlw68^?-MZa%GGIj;*bt+*?Kxk(CW_{UK)O>
zf6>0)dMUNr#>S>Ql%CO_>tI;Fw1bA)PH`hG^Hf_^t%hW2!Qdj{&HFUgnV9IiED74M
z(U~czG10L4$k5PGDHEMuAKXRD=7ZW2?bDxntp7q@q-N{3W6K$s_DaLZXnaFMS7p0~
zv=3+735r1Gvkkk-lMKI|wSE06a3u4x>~*J+H(U&U+mz4se!{a+cfZ^IV^EUM^!xH3
z_B--ld=GxuuE~E({;TO@0J#Y;^kTmoG1X93J-ma5)}q
zS%>@Y&a2Na)L(^EM>sA(a<^*c*5lz$u6RacP7U24@h|mgDec#2`bYHR$Lmp&)K!^U
zn}3{+*N^DyBoSI-ip-tDBli=&IxciJ(oyuhev@(%e-IE5osh7r`i*jqu?+-o%MShK
zqm>pIShNU?
zvSPoZDsmHzr`y%vxG#uU%1oYL=hSlt15&XPRC@6%p7^@R`MDXk8}>jdj~qRE+SbXluQ{U@^fiS*^`}zoSmJIF#o5#N4#->f&1p$
z57Xd>st=v@dKs}oUqg5AdCSy%L5pmaimNVeX?rFkZq*3q#A<1Jp8S09G`p-^(E;Va
zVs-}yH@n{L{PJh4+&EW08fpoNW^gQCs??OS_lG`)3>u0M-Rdsd7vKNye@v(w^irHD
zH->`82bv;4*9Z#=D*B0PpE+^D6LTsg2%>-ErgJdj_}x2NXV0)%SokQKWWDs{k`8_K
zif?LqT4?WHbu5p{s1Kh+F^n^=-w~wJUlVoy_2}p*Z!hlU81yPCy?>gw%i$a&dFn#o
zGwc3ST@O#5xuo7<*wnHRJ?i9yUp#%PYG$facq1=GfIHueI;K)DW~Z97`(U-7-G?(S
zxC~i)x|(0zwc?gF8mWiJF4!b%DzwV}<*@EHmtm!*e3m>e{u`m8;k`bB9Xv{6H?*=e
zf~RLSS5W+2ev{%;ymPS&7VA?tZ)PABAQBCt*VS#^u|o+B?`xpGs{&oyt7UCw7N?`D
zn|O1|EacUz+Nh6Vr}<^jATbSc*cdw*0slH95mmc~g?x%fPM)j*IUaP#KPBN-wpt8#XHUwxg#F^_`Zy`y5?qcDm2
z(kVSR^>8lw>nfcOL%!L&v2+$$KlYVNsCk>6nOqD%MTWh7yJPp2a_VlwDkUqcB=3h0
zk!E4_%;Z%lmLvFXo~>T9W(>}Rb?FNbXW)2nI@t6^s#O0?(H$E4MMqX2r0p*8uT0rfgt%1&eq$_H2F{X
zmt)<(M&FGZ;}e#hwJabpMsu=`#pRYocl|X}
zEczjc8Bqj6n=ELD#7VcrtlC=j=W>qCX^j}w_BfXAxn-d^JUUv1;XjJ%z9C~C!gkhP
z8zn3g5=(Jy5hBK>D~Y&}qT9boMK(4bu{yByC~*+WLTX(bFt_|Gpgc$5B>M>|K_)L#T^-bO1e3lx*%(4~6Zz`sooHn?EuIgz#=
z2X%@M^Xe{vL?gL>
z4W?n9F_9abr@eBx2<-xQ-1ct3_i9yiry7~eD;zqt4
zyA4%>*Kd$8Hj2MF&?t#Ppfx`~zoiGa;qyT(MZa!}>EWQ97jNDO*n?Ba3zxR&QYs0Q
z)7H|`y8Q|CX5!gs&<1yPhR4Pp)ft_!wM`pKgzs*L5}xS23+;+jW7WdWZ*w@PEiE-A
zjMv>$2(%x60-e(+bOBvS4aUN8$LF2Xuow^7W@cnq%+K1m*snVTB<*M;>W(0(
zi?k7DNgF5vaw4daDj}N(_Ej{e4-xPdx2pg>rEX5X9n;56o`xE340!Qrz`TBZnwhq$
z0EtnarIaPI{~-8+i1p-`sUT->HMPjn998!iXsY&ul7zo$Mi0zhdD
z)|GAuo>h+=af^$OA0tW~vOFWMwd?IkZEfvm9SI2us|y}wAD@@K*9LLxZD0N^Te@Lf
z5irfh6Fyp|D@g_hnI#3}QtoA5ZlRHkTXOrazuo}zE&A}mhwu0kaRn%<0?8Vheod&D
zD#OS3Dwp<#*8F?k4j)*Wxtc__++M{w2$n~DFdjr5Zb$`CEWz&j5kFvVVepsMM`>e0
zeAdz4-o83&5cg>reUF2K!xo}C3)&{;D=bTUQ*rp*H_7mQ=ke!nwp+;RHr7*JTz63;
z8BoMoh8}NLHa7m89euKE1o!Q0ygb!a@p8*KwfX5KOO^~zOo%G;;3MkYjnx>U-I_;?
zx~Z+Y11kOVwZ%FA?Q?e%as3!TP&xs1%mHS;zR-BrTW6aKM-!$Tr^BAh9qq0sk5k@_ZN%arZQA-*
zN!-1A_x}9z&%RFxna?stZpR$rWxZeZ^%#7^S7?*ipb@R4x~
zDW5GzO?%I7+v4E4G1_#O6?KYSsyD{r!~Yk1?*Ucixo(YOjX`4)6%`R|3q=HpC`FW_
z26c%ADbhitS5caP2qsY?f@O&i1q218Nf&7XDhdM9dlwPupwguO^C99s+4tV_|L2Z-
z#<=$%mvQ#l*`chpzVH3sXFhYzXTHWBXF~NnR`dY)JWv`gV!s-4F2Ha3&bIMqn^v$X
z9bpt7r}~wkW?341MN>DA;~TbtTbX!R1uA8nv2BlH3gQv%Sg&~A&XKW|	Re
zc#z3tMku;T+5(4L*)0$gz&&^E{IGweXTzT@N3*XEZS&rD-12b|`vKN_zy3J^nr!kS
zpKuBDV73{AwvzQB5!=o3T0=baqH{)&du2Q|IUyTu2F6k}A1(>kH-^^VKbVIHNc-RS
zKn4@4QPb_achw(=szhs+pa9qov>)#>b&mo`2?o^U0(iv_`@_02JT}G;5iOQro^i4h8|Sl(X_bP3TY+>
z0w;w-sB^!1HDR+;^Q9Fw{RLtxQZjHK^O~BPQpaJo4@YgSO|y?VlJ4QLC>=EODhZpg
zf-V4M6-0SlO#r>ZR>AWhi$d#*?ZvwIBoX-QCfH}r+S*oNT?b<(BCn|ESafuBD$w>~
zp50-{{o1?!;$G~mf|aRz_wL0ZN$i6iWd{J1Siy*LKaLM5j}V;-7%#Pwt@gk-
zq70mRqz>mfc0g6$JTZTc42N}g&8+%o>Rq+{rS1wCWVJvgdYUz>o36$)5oL$^H_83k
zcrsdB-`t594_AE*4spxo@b+VcqpUCGbJ>=y_}-@a_N`k%Kn$b=%T^8!4_6_|eh{`R
z%xZEtabxSKK$XG8x8U~Rx_lpD|H_$@qmd1w*m=tUh%D|^%w-dEUu*Q8)toK8f=-&6
zn#-2Gy!Tr_J?-_crx)%ffHppjiiw05uV~e7ji_z=j!ov(`iqO1rTF86SeKpHoJ!DN
zc=`Kl89U#~4Tf%G42pBr&a=U@=gf&QOTT&ZW^Iaf=vSytpvE#CzhH{FsA;HfCdrm}
zG|jFwTv5T!PLK1luA*U{DZd!E52K=N^F+i@;ewypEX8RJ-u}62vCJnt$F8L&p@I@2
zj(Ha(WG<(r9l4XM&#Ide)*b9*F3#96mraxwQhAhy7{+3cu)NUv;&OCgpyAM=L#3_x
zJ_d2BNsK`
zNR?M^Dw|g$-tMUf`cuo^ZOu(PHD@-zO#0M>Q+9>2+K8o3dv9;Fgl(rL+%Naoj84){
z;E`;{w=>&cY$E~l{E6MWfs|1s&GYKDw3ORi0z%H4o;V$Qq7yT%$BMWvn5!hXm6pDVm^3seq9H)j=xq#kC;QII1c;hGL=qIPud4U#5NzszK)Ozg*TOc2!80G78S4`C;
zW^lqvOH0l3o<2FPlJN5ezn{qP?V5KPmOM83pD!poF-xD@`|w5Cn)wO~2?>D}dc&zZ
zE9^z`rT@qg(4F}9<;wU6UvTgpXFD7JGdy56I5S#e*CMYtY`$s&%YCa3FTZ+NSNEv<
z0zMZP#zu)<7`0i8mUdM%_n~WNHepGxxpKu+@rOc@v1@D6t&YGt@OLhNk1)tKxgQsn
zJ>RV8w^W|j`)8is?w{C{mg0kXr6D4r9d>LxyOs2oBEFB_hLUj{pVG_ft!qLS-wN<@
zA&Hhf|98kjK0SmSTus;~GcOlN>eZ<;b}L=Wkh399?>#~UZ(b^zJ8wRSJFD*fV;-30
zOnLhBDM-TGCq;ezxo5lbgFbN;7IB$8Z{$XAFK|`!Dy+g~tsvfP>=v-On*O0Nb9Nqm
zf9*f7<$rpi|Nn0y-s}I<7a_NxW)@q88d@U-MMc}ePGvx9@m)Cf_Jcbmm}fOBOuZnaY+uwC!pZ;efU!Q^l3
zUUBrDsWmHChJhJkwXwOfqnCQ$(h_vz#!p}X52bo56**n|I&;Cc?AO`ce0o4}ag&+w
zLml;)%VrgXX;=0I3u3qGJUv%(rR3{hmubb8&8J65Cz@Aqx1^;1>L1XOw)OM~f@JHM
z3JCi8gKR_C&GY_xb?PP~B{fxjDR=+kLV&q}~Qx$}Rk5BO%-
z5KuTDUKtl0Y?Z_=zeDRJB-D}jcK`l;+?B-wfSQi=JUh?6tZANkXO=(FX8i4In!T6b
zlgu@9H3H+Utx*+RKUbhV;k?}3{fq08E7z`_u(5gkw-`ddT7UAde{8`2CURiM;kQKN
zT4}^DSBg0<5re#CBu8XqWJG8rMX%qmVGvNY=J75dfQ`rQJwm0<%gG^%d6l?TAUs?r
z-b)BBzY3H1gV(PQ;%*A!UpaPO2t?uYW^1x>idq4a4bl|WVH!K*RyPw57y)8bYs-0OP4PnVqxhIVopx2
ziA!2qx&eL^^;8>^n|+>)DD8B0V%*S@wf6Kx!2(5bm0(#z8FYzZJPgT@v^O+-h@tc#
zFgBa{_zu!D1I&ZZn5dt-L`}f^)bQ=U{tAQ5;slY>;7ib2E#u@2r*VK9JMcenf5-RU
zS|wr{h9HwvK=whnJ}BJk(vzmA4hb+5oe??RsELdB`O~Ko*nS>C^yq2IY|>4m4?Nt3
zKoqZ=H}AtFCn3-mZw8Qm@@w&Eycq(zz%qO^o
zeGAxs_@M%B5wL#`_a=!<#UQoxqVgSF&^mge|E>OIQC4+^b6QE@o;hiQct}0ruM*cp
zT0xr!K7O5r@7Pi_>Z^gyU!S8t`p4zV%Z)Rwn)jMb)K}rJdcwC_Wo2!hkaqO+>C-JZ
z+&&?#5W#+x>V0*mGX>?POw0N9u}WZ0(ZlevonGF=l`B`yd}&k_SWY<$i|Z;QRf>?y
z7)d{}kF^ay?&i$Vip#Vg?(O?y%@{HkrW=vu+-T7lZ*cW^=uvLBiE#w1#MK=DE4|I8
zy?C$Q4gJR!b(NN5j~)q-nMB}mR9adZoNv3+dRA=JG>%F#$Y0(<
z1_y;71O+X0)=7wu_W;4b#qxmOQ|EZD_RE(mckbM2SJx)FTzaf~dsbatU2_|`F1m-#e16Xi!m5+CtfNV1
z=e~Tdwv8GgJBxx$PhwO^-ucYEn91rKpi&*>*4=#@*(xewCcb^|p0>Uuv?py46*9lJ
znza|-mjKf+2`_E<>86a&LqnA?z%AqAio_`zokU1s7PCA?M>-@Vqyq*YWuL8D@6l0A
z!360zSfO$o%e^G%fEoJr&*%v{q&ICk0CT1y)DlKGh}BiQ>>l0SVe$anwkgh%R!=m&
zJ=9$j#3Sj*w{G1YJV}dNxP|rqB(fhP*Jkj_fbOZnm8t-_JlnSA-Mazg`?SOPRDtuO
zR?yyMVq&76ayF##!k7ioEy$^S%w>Ou{&CLyMa5WLI~+di4|dlm;;)lHfVjH|2pYzC
z-Oi@WOdOY{7&t-wX`>U;G_Wh}IP>n;#M4dMoSd9!=*y6!aUU^D?a=9}ICP>>!6Kc8
zdnIhzZ!k7PhPb}_^Pj){{=4m$kGkmBs1e;7ScB0CMJxEC(#Hu>Tf}46P&K=Z)IC2Mtp6V*}0#j@v
zhnUQ`v|iXImFTkbGU2R`x%}grXl#RMus7pEBGc6yJXHws0U*{CaXYv(HX&Ux4F0S8
zcwefR%L&9M3WboMX7uvMmA}Z~gH>TJmU_%--Sp+FR~IP!1=Zqz#Xv;`i25iCt{ujK*z}CC+dE=rlEH+3K^{MB
zQTg+dCGV`b`sEHDl*jwJedo?*Y&fr8zBC3wM;j^1%ns*K8*Zuds_>n8U%!4G5rWOJ
zF*v193vW^d>q+Fo_&L1d(aXAgTeluXGKmU+5~dWsx9w}x+Oubms0B0&lb*(O4c3lR
zFBkm4vs?A~^XEjMK-3w;W?_Q<`c>oBuhrJ|*3MnO|ERRT!eIU=E{Z4G@D(G_yG@PU
z92-6twhs*q?4ZNVX4__rf0i@zhn5+H%bN|BioM)d(&)pFkm@JGg?oqtQ`3SVbetcb&z^w9?9lh@gWqkC%jEr!C?-!6a
z!bQ1UV=l_z#QNovzkmnZ%mF{>iI2G=tRDv+tU@WPwAJccH1Pe>jjCgmEOhbg)mNt$
zt&yr%$8hA%OO^@Pk{W8pu>b7`Ww}nvw>iMYw-@7$*dH-VHG6w>f4tbjNfi6UXc+KD
z?CS79bAqxFV{oE?&a1p>fB6Zc1*ZyHt<1tDi5kSz!&0|Fj8&}L$7=5CGSf_Vta#WB
zhBq7Y?%YojRR#6~iLA^0`03L&)a`}kn>$r)9y<>i1}2?uYLwWud+XL{83)kWwuZ@|
zF&kPW?B}OW+J~|^Z4*+9f2rPvz@*sC$RFGe@$;%kR((msPyB}rB*c{GE4TVaCtv8+
zLtiCS*k7?`Y$fQ#$OykShxronvhXXc7|~mKIoPbIq|o`qnZ|(nHY={n$MtoTpTmNu
zVv9ZPSr;yOrCLL7ZZ1y$xC_DcL|`L0<*4n}F7Ug}Zl#6BsEF3mz_AqZ87>a#m@`Zr
z`QXdT#OYEhfX`Xyf`WpsO7=IIi<;qlOgkf=>s(@mqp9i1a_{ggpTII-_>>0{8yI%C
zE<`xR4T?K}1yn}Z;cQzFF2R5O^_RjcAMB49$qpt>?Vn*WXw{%>yi&4EUgG@>FbLD7s+Hn-vqEyG!CF=I!Zl}
z{plpKI5$JTS9q=`jZHyKO^u~LyfQDp@yraM4Lc8_KwcKeEw$RVz%ZeqPC5o6Cw6a0
z;6m}4h}#N$x$VSb>4}bGi}Q)x=9Y9+LXK4PlXDIZPysX{PQ^Cz?5@IkON8HtS=D$I
z+h%o77Tz_4IvRHh#N9FjWt|E%(^FaCa<%)#0CWXQ1$CBbdD#7rAFZ%`M#|Ll>I&Z(
zz+2uaa2gyIQ0_zg;uaARWoQ+Z@qi@Rqfjh%>lyrneIr~K=$e*k|)VKMScNoh+u4L#O~3De7MXlNL{a30ll
z-cSI8?qKTK)6_|Mm7lJ#-m)dBeG!Lg-m?rRv3QXyg599VfN$t`ug{E9?slA
zr3)aOZ`PJC`QkOWoRSpPeT8YNz+vTkp^T
zjsiF=^lB?S9E1H9<3_qk*KwUNU88|=?)
z8q2v^-*OT=Fyyhv8f}g)nZT)o$Za~r!Xed)mo8oEz;scSijH##x}0U{>t-s*A$UVv
zO$m|d-nn{J?xyYM%KTkHCsOSqKWKvK6?}XhN_vfjkB;t@@$qP}UKU@kM0Zph$Zpnq
zEAR7LRaB~$SlC~{g_u2iw!7apzqEVf&J@N2w8W~fueWJ_F;7r4SssVB-JlNuy9DMn
zp7(OkI=l1@FA)T82Cp2B?ZQS=TrAI=ce>@mP`4UJOOBQ%?%G*Bw&YiAy$Gy6iZO^2
zI3)0Dmc9*4SC7~0rD~^)JrExg8WsmWcl7(9a<>T&rlDHj$1|XqCtB7_6(jI36l<#i
zk&B~P7~#Nz%QNtsjCUa~B>Gj6WJ;d~Pb<aKJ;kD
zf@9DQV2BaS{nS%81qZ$AX~{Jl93f~xyMW$P9S0+@CBgKiyVA5e_I~iYDITx=Cc`>v
zz|9X&#Arw9To{q133ALG;YzCj%uM4FJeA9!27USR<*ern0W2?iPJWZTEV++fOJFM@
zC)UkDvcC=YYAx4Q)^Iqjqg@^Kn70F%(XLL}2nsm1ccq&Nk=AF=t}dLt_1gH@{n)yp
z6-Ls2yX=nwuwU93Xq}XMd=038%3$WL9VVp=SmR949gV)fb!-5*bF-wR)-lOJFQCek
z=@sW1>)!hw-q?Uh=*3l9mHRzb-p5hpM<8D#Me`;MB5OOQiVniZq5?d%@NpVC3?wCP
zWny^Kjs4BZqR1U%Ny%r2Ujh4)^wBObP~Y-QU-))s?{~5(IIF-0X^aQ4GKcYoOi|1T
zJ_9ubAsa&HyB~SHe0)?=Ud=rAe3FKNbCkWHk$>J~Z)XUch1prfx0}RLEQRm5cX#dg
z5z8F`CSRs*j}Jr_1S2P247f3uIdYBQk>lI2p(V9Sv2twP_U($;n(V8om0Gf7346~5
zAs|27-g-5{t5V}1_c5ysW!juSoNF|)Yikrue}J}3wJLzC361olgdpw$sU~a6Y0t5-kz^Unp$Qbc1vK<82u
zFP)At7DB8OYSHhjZ
zg!&f8AcZgj!flq+3L)+8SaUE8m7EpWjdwJ&XAA<(#1QQ&rq(<->Hd6_YC^f~&$>PN9;Ac?0-C=Kw
z8MA9dgf8pEBUJ$M#U#L%fO{B3t>XibLh3G_vxLWITK50#`$8-CLRSa94SI5HY3T!)
zEaCgRnXCmFg$N}P|J@D>EIJc84AiWs#er+9!_goZe|-BKneMj$dN-oeM;Bp+4vH`f
zBe$wb1tA5oCcDh5e|h!ll?i4+MzKfI67=NUzF#ZV=D584jcCo32MAB@MBiA9KF154
zDE{$FYGceeI^PPAP!`p(dw^O_n3_f~a*|H_L`x<~;=N(p4Z*>JTgV`X6hEl$o;Dxe
zaAKs$2BlF8yj_3wW_eWlc0L*^9rXK(qyTCuS+4
z&d8S8<^L0@i;KA>yd1tB8N=opFALV!Jr{UV=lrKjh_47F*&KnHk2jk36M6!1)7sUg
z@oA4l2?8Zag2k~k6{Sq_PkEo
z@}R$Z6FHFaG(CL~0al$T0~MIZj0U_(m+{A7ZwHd766+>WX6R;#sND;|7dHCn8#ivu
zHdu51<3(ce0NXp!e^Y%OM6Ipu$;rtHE$-n9N84@&J-zaM3SNee^W9sAtSuG$23sE}
za1XH{amQ=C#7xT%VuDf6)<}h~2Q6T5Y;h(qQ4zA#B;k816F|T6FH~&LcC@3?IJyY$%_jDCmO$
zQ{xBFlqiG3Yz1`$U||bO4VoW?y?cLyxvaZeev4nOere|9pb(+m&W5xkfFgZSUk>zF
zxG*Nz%shyh+g(0q8Z4>L3DRtpAhf`7VJJ$!E&hjiRV6@LcPh&&+kfYiWx^h@daO@
zUoaE8ZyFUI!5vDp?K*+ceQQ%lgg~3*C?%&fj3|c=9*mW$OaJAV)JU)t2tOF0HDCga
zE0oIQf!n7N`~!iB24ta=9E4xSD{ml6uJriF-yxY8q#HP^rD2ccShFU^N9TeXI(!vX
z)xXdT=}Im~iHO45JO+CxXkkI0X_b_2nnAvPwbP$MR=<<$2xyZJPSA5b1w6L;gp46J
z`}+E3wS7NFEaGXVMqlPs29NW=@l6Nj)~^&7tKlzj;8C74-J@UaKR=iwHJ>eTG=jg)
z0XTw<{|#s>{Rx+xYux@T$wfK+C9{X54}*8`ZNLR_RyD(-PBi^<-WFCcp6mz;o#j~d
z?PudOr+z;$i|zG>ojcWj{`qH+^a{Agw%9ImQI$3wyEjdgJLC8d_~>t89f81W?zWmC
zu-Vu#gu%wQ9X3$)C+2t1`xz$HgV{m@ScSWw`26O#pIqh%U>kUN2W-hij^Xhh!~@!i
z@qY<8@bTPotXdU;hN=^f?+G9af9{z?I5t)*HFaK>DzaB#N=1xtK8(
z;3&T5GU>4Cz??t<+IKGNPLJA5zrGB|vqd&+H@yD(i;qc9j$b!dP=l|Phn9_3>&eg(&i%A|bmTrxV@5Hf9BPK!bT_x;RJji;UK
z5ClkogW5$1gJpwq;wv#S_Ik^%T^h8#L#pCfy;=ZHX=0|ZLtJlk>=FduwX2@o#7H*^
z$M7&K7;=#ruovJFsaUYY<07nQ)+V0`61fJ5kJfeai{2o%96}GDKbMk;48o80=FOXB
zGeR+=QHdI33rhz7+O;xxml*0JJM1=Qp9ui(Qd$7oABoIA${F%Elw8P4f=q!Y_`4G`c}
zb~Zicbj4(3TvP3OB7pjq*9?(?D^@qFJeau@Q&zckbs8WivGRh@1O?LAM)43e0ockz
z@gPUwKmmL{1z>v-IM^*3QY&=R#&fQ}I#q&rKSBe*Dk{K%?$@qWLQ67=8b&=@ks?W!
z!Kgv|?rL2{Z!fQ4OcNTvzOjz3C&*{oRe>CSSS>D(U4Ul7>SA*B6)+CWbV+GWffV-o3msuNLY;>}xyVD`DtlR}-&|FJ3Kv@-bel
zShTshdGYnzkpEY*Jm~+U*b8G?gZ;7e`7c%%hnumbi4v{r*0pHi!tE>;Yjs8_tbrnc
zb_+e703P?NtKZF%diU-fJ?qPB4*&+kAfT7zj4dy!Zwj)7;W;O#I^y9fkYRHtOWeH$
zD#m^|q8EDN(2N)2#7GV~T)z&S%Lxo&U0sCbL=+JkaN
zq}h8X#HIhwm9vHZ&3^gjqCj!l96uee$wyJdS^;j5A_)R%Q3*8E0(Qw59g-uSEqc0-
z1W0K?Ypx>Z*&^t{gYsytT}%bs5C)Smu{R&?7$FFB
z4TrHWBy*^BG%Z)j`PK!22+M$=04vPE2-ia0rG5tY$YkVm>%7r}|22*0qw_d!0m1W%
z7#$tLRDkXJD=%a1ks^ehuL}Xq@^F=MEL*13E)}=wzalR_?{&I2Eu;e(4}!*0L-@*Z
zu-60NJ;+*In{+yWNIrV)zNh`yj-;Wp+e^C)qk{xTK>Kho0_4o3rV=>c})Znu2K~}1LUo@h$RPgQ<(4b)n2jkf?
zfsli#pehKv97~t#rH}24M$sg0mv$V62^t=0=Rs$P
zBcvL5K`fD-Tqtuqo(vdCnG;8-g8^DHDYGL03__g2L=Ra8d`o~6av|@gO=jqHM`4gy
z-S}yyHpBuPlXhq1mk|+tqnF
z(N$-IcIS>3nmRlVBq?APZ@
ztk*w}8+iCI{7&u3THDw2MaouZ*!GBiNqCdz=B<^+aneM=dtc34`41J3U$5g3Wd1DT
z_uWrM*0bjp_Wjsfp0xBTb*HEV*wUScj7W>Pi_OkV(GU#E{yUH>
zca59-UGel3sii31f{~FCQ3DK!oc3}+Yh)3vlfL&kIi~Z*!AtKKeF9|efcEG_6^K0Z
zH-b)_Rm-73#qad-^3vD0uJb)$m{8;A<70FIn(Q0a8!YC85&sm&$){MKi^pO?G_Btr
zUhrYc*%H!=Vef~wHs7++($$qBz6Bo;Gx;j)bYD{$00-B^7jUxvPn4DBr*8S++}uqd((2ND^7z9`{433(={QH~M4$J?(`r
z1~;0?U!Asl=gyS!F_03cx_wr|r~u4RW0?Xc4k)KXEUz0k-aamXF(?AW&Z|@x=yW6=
z`}V^_GlkSI28VvHj(>q4C>rASrk^bEF}07Iv0K7>>~Ly`f<^s$2*7N#Nn5g
z)}fXX1A&G~_i&dRU8bBJoSeFF^Ks}(%<9G<>>YP81C`Z<6GyY%e<&QXO?qTyh0;^|
zMhjaDj8@{0ws7cIph;FVOi%--X{-K(p}DOuW(eC&BymP-+AjV9_mz|eF!vNoVs`YdgxR@l{oHzKR%sEp}S+MJa~km>W4!hQB7Za
z@)I7pNPxSLS~n!&9b^HeQPU(LScjRqbPTuA2rRZ_WvmllVT_1%B6T>fPul9OjGNr<
z6+B~T&mToo0)Jk9{w+q@o?owG7bfUVW`I+t9su&eg0huKW|HHDYFwDUqLGS3>$anj
z5-P~>EJasuID8kN4bY{utjr=?VKv%6a{dSZHlh`!E|S&?(RuLY(qrw&elG^4G5{`s
zhN)sTo_t(BOT`SD6tPvJc&f>DIAgv1OWN5O=;eiU3a~c~P
z+XDUoN1T;_$J_ySEvS8Mouk-nAK
z7F~&(G;PbBYVpTnmUD7$Ub80PR8td6(BfG0n+mkc8d`Ep=C*5aeOyzG2Fsnx|C6yv
z7IfuKYx_z9jc3oE?e;z+y1c`sCB;$T)fvf_^wh=*^(4!l$K;VY7|-1gRga&Ea^wYQ
z`4HM@7s!=LtPZ~?J{Ww#5@oV*4jyl)b%DMxr*&D0=B@W1FI{>8YVPFRt{#K?8inW!
z$f;ykhk*p*$;dW?O)?zInVz*zPFx%u29H)Aznhs9{nzj%+5P+T3JN|2s1$zahOoUc
z{O!CyP%q8E$PguY)K9$m{7*Ai&S#zL;y{I6P$3^Yc(4Q82A+(9*9_bDC}bgxp#Ug4
zDw1+*2TVb2Rh3^$cvSJ?VtS->Bq}La4!lWc9j}8lsEab%xp>i{9o}hMBYpq_i!GyC
zP*G`qw83s!NqZ;FZH_`tLn(m
zUN*Q>LgUcWWDBp52csJG8Q#JEz=`4b33~sCNBR!Gqm8Da45fS!7{e5ZaupngjW}5?
z=xFnb@2I`T7)`1B!7m#d1
zJQx)MqVmv4D2E*4*zmd@J5LnA*o!x}OMBg5hs5!VQA8}ll18&N1dX8)lD!E%@?9zC
z)DMOJk=VyFmBwbUjf?W!Vr+#OW4&AA-+;KnsJOEJ&NXY+p!F@_Xy8`_bS*^+ID
zx3l))&NIXGw8QnpR*$Z>gEOBpPi?N+({@n0HjUp=)?#B@>nV3kf4an#s(r$q$ZI6}-cO-5!rATEw2OB76<
zzdk^&nf%^X-|^GVvED_XCKitg@M+RW+}w|cH!9w=Q2rI3j$f{!Vd;zYGiYjK@gF^r
z$k6NS!-H>v_qjF2UX5?{`G&_Px60x%RLElYj@;_E^R&qZ;C}R|f+x7(>%sn?8ZG7X
za?oSA(@~`kVn~^tK-JalhJb!V|AVH!48_F4FG=9~RwfK;BDHEzJj;eO3y`Ql(6%HL
z4-do+CXU0o$-psg`{?mIl1|UDkihQok6DX-wbVK7m6B3myzrYB*8#tREL8JJ&CbxkLjz4bboles`;7JzI(qUkzWLs%g`PkaVr@(imaGxo;G1tj
zeT5Bk<;4PQ4;oshQIWB_1(}GUyYAttr%M|LvTb?8-?m-@3{ACJU3IEF>^O^k_A^LN
zacMb&5av8iPN9q23_a4VM^G!ZoPS^aI}A!7Mm#zAw{05Pr*SB~PCIt@#dQtv|Hk=^
zRNN_=>YJL7-oTZH?T%QkEITZ?9T?l;hJ9c6j#&x#24GS-e$~{5`14NKWN^y2eNEKT
z)vL|RM-Z}#VMskiLx|&df6@J|%;~#%^EhjU3$rzssl0Jg5qBO%Se90&O~Bxb$VFbt
z^JmT)7_U&e=IJm?(+5nSxBZGk)9NFNq?gySg1@%yqof89Y|EA`c>tHN8JYz`f#n&_
zPCf!5jnljW;4@hV;3os)_IiP~sB!T#zA=>WN#R^iBwD`Tn()Dn}If;bJBZ=kNGj}G_6K)%68mLn4@5DhPfKxP99
zkgq5WgoC~3$CC<)yBs?Xi6*GbCNNb%GLAs(_9iA7qw;K;dLunoZrh7XR7P+Kg9JFy
zRvv6tDVgdqFAFz8J8=C?26-YJM_PUSpHxLCZy`>O!k|WndLm&`G(+R#2+y5o>uc=!
zA$68%1rd$p3j2A({&uV~6{H80ia+{J@J85VQ1kOzEH
z-&P07Uq?uP)YSCDS6k#jjKGsIZO^`O
zBM^;@?12OM;8Yc!^R&Kd2(8DX7d$!Mf2;u$<`5<21EieGF@*_4mychCYngHgJkwjU
z;3dE%_Zglcv1Qn}Wq1uyyzvgg;Rlnzl*e)b)swe!xnZUO1XTdL%A28sN68ka9A%(fH7U|V4}<_s(_&pO~`%XDy(~OlWu2bW@?}R
zBufe+vrButtqTv|+pUIU)ToGpW;563`F_!&a%izp
z)b;AJX-(r$65!BIm!idsiaz2eew~@Q?$F1NrC}w0fI+|$V=CdWY|9~k_wTQd
zDzBFGvhgQkwuP7y@Hj*+19-r3SMl%&7Y+47`4aZl7DDA0i_%O{z{&RVxXP_y!vld~iCnBOIs<
z8d(rJEz;W1_>r*y&FyVJzegWGeq3Ki{+0_9gDReDq_x535CN$S6&$+#bJd@pH#XMb
zpiuCL&cR4D?<46B9#{D8ln^mTrFv?-O`0a&@e}3E={R(klX@&-q9~!F0*x!I&!iGS
zHXX#>?|?zM81*3PgdATu^jUR-1TpjKV#^zuu>e@cfDn#wUwSHYV*MbVMvr9HQBaH(
zXz|zgp~4=cNg@z&nRxs>yZgJE-5qq6fH6OG^l0T{lQP264USz=Fgp5RNQWq0yhCAPW$7z&#;GmA$*!T1@H)VD90
zCI4H0GYhwD04H8`cxzbRM5#jKg8#!_tJ$bEhd&Akrp12qwrz1w(`K3$S4E!IhFNT3
z3t;jH2K?znliS+x^%yH$Hm{|^#j|8c9=?xplsxR6#P!k+hcZBHW@l;0RYnKdp+FtL
z*DzwYJUI_W0aJa4Z?DmH03nPAiF#mkT>OrwxW7nRr!akH$3?=+S{xL^AS*Kqr6=ob
z?>Et#EpH)i*bOhtT2gwFK-;}{Mbc_g
zBg|>15>5{gw84RrOhESJxXb}4YYZj`z{mV8=6xN&w&Z>#po>*b^KxjmBfuAr!mkzu
z6~?WF9%EchG~!4klanihK*51YIND*q<
zi59F1ROQw2!<2>rwyhJ~30WsdB!_8(92OYY+7h3^@E1R`m-aT$4&w;h!|W&692(F7
zEU;D=gIx4woR@56WY$75);fG_>>_f62D6NuBo!1Cb{M`{DJv_x8T1>F%yccq@omQ7
z&1rTiD;0mK3?3Z%W%Og%{|8efCCe>bw8>_GP%gi4H%p#jus!a#$
zOfTlg2>p}52(juIiKG5cnh$Gh-|P$bLw2*hf#GqM}5QloUa--B1vtsK;@ckv4f?G4eySOT`F40pSf2tqGIi(jG1;
zDcRb^HS@f@%eVBWpVmTcC%YP!OEJz6
zqy7pIS-gj4Kp+ssfEK|TI6`TE)cN7T?|{}hJ%9#iK~r&<-6#PXrVhW_g5ri~{7EU%
zRC#))1dJ+~o^WCFi;9XKjuYA>ck;AL1TGq&xGmrV&YU?z?i8StAR16bR}>rW1O<`c
z52)8|!q3n&$AC;gzELNpTzqiUtGzR@uVZ&y4v|LHk8h$Tqaoh{01izb@5YPFN5H<6
z=`#($`YY7$o%~u*!ID5r9<;((w0yg(*fGh(mW3
zAo+LV$^o|fkgSQ^JhzGDdu@hyvHqPP>b-Un@RWJqt>#b;3Rh{zPS
z^^o$k**VwO2z}nfV7y%EmJk4A9Ux2?NI+dFVB;(jl|P{Wy32bfD=VuYB_+iUr)ONn
z7=na3Wu$Je-FyfPK9mlSJ|cY$X;qeYfOHGGF(2ive-G_m!65L$stMSuMAqy+SKK^7
znTe4`qX@{W_R1|P60-+eB#_(P+^jpCf{v)lxCI1Ee9Aop4`v`Y+(VdrYe()t+x6wW
zgF%o2+>Y3k3!?)j)^H%;mBB51Ku_o^4Ahb>Nx)aiY3R|BJ1~E*GI)3_p!Z|K8AEI-
z`?-)#?^c##GnhFKebV-{l$1NXcaREanAD4g*HajpUtCIWM4Kpj@gUC!n)y#hA!mEU
zgBRImWMqWo5SaFle*hO@Cy|Zuy71SN_iel?>=g};v-%vA7Vp+W{FdhVqNXqBvN4FB
z%%4BJ+bM9)?Q+;nl-?i1yjxr?x=z0MDEA16qJUKP=Nf=Z;nujN6np+0*y8Zv&Ahy?
zsyFQh@FWSn;VjIBv~n4ooW<90IFvGYvMrSp;MK$7j+jz`wYmDE_a_MY*cbdLkH%5Hp@@chaAK{`t#L|Ix#TcXS>sMQ!)cGqQM$L016vC*jtKFnp4e
z&@Kjl1LK(0+;V69P^w^SWHBVcfmJ;nNDgVjg6nsbnUG;
ziUzE60w+-cbHTo`|DoNP)w}H8Sl)muT!8|{EJMK;FOGquW>rpGQ9x3vH#EQ1Ntng%
zki=kbB&IgI@QWnl!)z2)`|D?Gj}$#5$j
z?K?hQ%LQ^^fRWAEebOWg6fBmbU0;W}KA~@hjbqm|RP8YAhXg4-P9TDN%S|
zRk~k||EP~cmliT@=xo5=?ka
z(t|OY?4-dm>_82}6pZYWbq=>!|$GNoM*k*2_{{!1>H!Lk=
zfo&;!yA~|VA{B!}L!v{@LY)cZDF#QnK6iAxT}bKcpE5B%>S@q=b=R8UQm$|B#b6r7
zz=QmRD5&?xnkTMqxdnvYQe86H%o;$>L-P9zSnboPexa>YvRl3&Z}@C(dj4Ke4;YmL
z_16;%3BzpwNkJ=Z*^TaibZ*FwOAw{r1#UWD34_f@BnB;aivdaK74F@1SlKwc(UhT;3}j(ud{fiXO43FLae8PXiqx-PDN_QbsA
z0ovPTBP|h&dg2Xp*}TatS1q_5HfVSlt9gePe*gVVrPwK`(r|W|HKZm&0@=tUKg)lC
z!NVty?*6_pM==A?n4@|fo_7z|_l
zBr6z4$Gwg~P}tvJN}_1MmDK5mpDn=|9K~pc63uIbLD-xoTO*NLz>ex@8SQf?1Zwio
zqOIC?>JPMMVTcF(+786=6Yt7(@g>c1q1{L$b`(AWV?Z(L9Z{#FK^cSt@sA=EK&v!|
zMV>K_iF7x}xwzHSvlpOkC<~DEd3Bg*7#*m^C$95In%Dk?(u}kaOm2ljJcTHC=Fs88
z<>+~_q6LUp0a5A0Aoim*^}Rh}8%2=F$}A<-8M2NsOg-uehMuHIn^y3{AEZn`?T_!5
zKI+^rD{F#3#|p*=jD$miCif`!nznj0U4E1gjQWE5)`8V1ka$M*!$Sr|UDF?|s26yu
zo|V`Rl#c7PiAf)oc4}agPEc#u6eCF+Cqsq8p8ZoG1?8l^nb7lY9w?DT2pXn5N4pb>
zrj2hHZY3y;U}LEIsKnvL#l=gSXtp&KHzX0mG88ilZ8|B{QqVOlRgOKTfe)q8B>an2NFT+Y+=Nm^WF`7Tl7~^+
zJ$NVN%CI}YOmaFLjdBF8c)pYdnty7Eh>b-&ps{
zz06OaJZ7;apHcSi8~J=}Vul~W-wvFOt#jrzkl%#L{ICDD_6plS4yE|t`kSgd5t>3K
zS5k4H6W&Dh5#}>sOvz6(`*uzE4F0C=`2MeGyE&lk-e!_3fCk<`2$SZMsGBt(QKi$0
z3>v`!M5wIf>;Q`A>+5|dv=N65z*$6L3m8^(u%Dq#UAdMZlJFxki=%*XA$B>!RF|J3
z=9-8$?g*?mHyJ2EseVo~x9pkc6-)?xi9vlj#KL5ZYN6yTbIkWJS)kAGn5zAY`+J-H
zD;qc4B=z4c1YN1oPvQVm0A#qyK}z{ppPu>5$=jwI0OaPD5F3g1Ws88oVUY3g0V(2v
z;Bo5SW>KG9VwwJ8?5+lev~tCbgzqc^zx6kcd%>p&pGBlF7(g&|c6~Qt>I5^WfQ{7)
z(h{TyvS||K11@@T&H$7`^+8ISA$A-r_6GQg0Q#wsUYrl(hzYh3Q)tK7pC97v27070
z*{F;spsZ*+J`Xx{gg~b)18juFeR?dZx4$9SD-jbu>1wR_L8nVtNI!S`1
zs&=Z!|HP|`8o=4IY}qo3NAuBk%(dJ_UI_&JEL*xX2odbCwM9Z?qER!147A3&K;xiW
z2Q7-RSx~URuy?SpX-Z=Mx2GyP`)_h?OGkCwgZzqUL5muN(TUB1xES<4%?sOLyoalj
z)DH41q~my3c%cs68QcOM2`g
zZQk!<^{oRS*Y^&hg6Q>~VUiX{Q(4M}@SqU0fxU)C95uA^5&Mnkvrzn?uMZ9;77v8o
zV7knGC{P5~_b3`YW*4|^*=x)ewjBI?XX4$-*55{bu$s+x69bZ4gjvmwZ@pvph8;3$)
zLLmdV`Jm7!^4oF*gEYoaA}KKnqxVsyGvodM1Et#a)`x>(I|2M%3d!FRq#Zxt*5Y%-+LRNj+Nmm8G&io3r`r_NP3kUATCwwni&O
z1!8tLE=|Y=e}g7jP(bAfo&&1|lQg!VdUR~8t~{;dp)bxG*&b<)p}-RFy(Xi0;2*Qw
zX!p=e$8T+PeV{o!il(>^dVh{X-QhIRiWRR6-1xp(cPgM2T*GwQ2Pk(>Yw)f%=Un6r
z?WonDL~IftFw(pf$$o$|jT_r{W8iSZM@iimb18i0#IO@W2X+9c5LFCc3UMhS1kRJ1
z-Q$+|w2mOB@i;ukraPwy+~ChD09;{T!|0bN!b-9uCSmIuz#Bfq!f}|Bpe$&ygA}LX
zWB{p3Ts{li2~bg$v_f|EChwzDWiZ##h{UfJM@SHK4Iry>wGnW@AtG;6v)O01RzJNT
zZ30=T9St)$jEKa+)`)tN;Uct@03S40jk7CQJX;Max{N(`c9QXc+l5
zQTCh*4a+~bz6#h#tS{>^DqNa@fb|og=u{-kM_ahge8|9RET!aTeIp~{n~_jM#|A(s
z!N{y4Y%>_4hDPZ)=_sPP9xy6{96^&fvF!R>ms-wTE{|Q#3Fr5DuhW6&`
z*c{ny!AOzLvvFfKpA(J}R_Kvy4rzq3V(qQm_X9bl0a(X7&HJ(Gr9G6|VOsvWIT5==
zOS087anOk3ptGaOh8F4wX%=x^Xn3k?SleH9sybw_t4T|Vo82_0|xK2sz*TM$s5y(-`~Y2?H+^dR-9flaO0
zFPB^JlD4|L`o^c?0r$7m@-4h1Jq%Cx!BL%|5p=;cam!>KnLmo99EulEpI3wHONW${
z;K<~FQ^UOC;?*Dg3`Xo|meL`vZibd;tGdPppMTAjgc_?YA2$AAY5B@5W<9Qj4_NL^
z;U|karum^ZD5|t$!DK53L21Cd#t&k;ll>UXyc$qTIqj`
z;dcJKqNQP6`b7P|OS(1S{Wi5T`Pnuhd?Vp2#L;1mzncL`U-Hq{0
zg*ud`GxeD+E^e*teR3Dl-Sfhp@I7rBMRAuq-xRMPUYthVqroi}tGm)#mdl_3jScZM
z?6ccxRuefSkFX3hqIl%R=S<-7DF%3AFlhq9q%8i3>lW?$*lw2#5KC80ARzkhuaMtK
z(~8v6h)@h|57?`5Ttxy!8cdGlOWKE)G(}o{6yCL~PB#SwCk&<}R&bH@lqTq3{!gHT
zFw5XM@r>+!R>wY}>7`&YRxpYL22fvySa^LW7jO)QE=(UWD{I_d+l^GZm~wurc6mf|0@8uvTI3ba7LFvsyTlx{&8Sp0gqGEn8jKGat1vAYT)
zaa9gDD4S7pA#56W6XH)NG0l1?{({=K3+uIb2m*RISnM#kUXhnr0)S_OK%^g5?KlW7
zl!$YJtFZ4JLanBWDY^4P*z=)DcA%(x!>oah{QXc0Lmy}&-7eNp-yHFJbKeZiKhs6m?bM;vZLwuVqpK1!|yCQaVjBfs!K1
z5&Lfl4ZHA2s-s-rfHMNiFq8)JFrz{Y6*=ociq%?lVT^<#0|ro0=~OS`q2gBU#SAKz
z>WO!sW_ldg*+hAnqM4x6a!6h!%O9n6#-6;WdAtulIV%n
zMMG-}(}W;5H??Gfs+^WcNudP4AC8CA3>&#!-4IUx!GS4PM%&oI0+pew2j%x883{?_
z#^F!qc@)8kT{N-INagYIO6=SUdjfHQ$C{+gSPw}!57*lDvtN~`%9Ax{$%+M80@wnO#o3S^0*q94~
ziDJAn&`Oxv!|UL>3sSBz5nMD;0&T#2^2_Em3abtE;O^+A76dAWVyV5zuxi5e9%4k98PX4#@rhvqe{|KkNZChe^4<
z6rdq|hrz^;!^KpSxcHk7MqC#o-5);p&Xq4g^zN^qaBedwdlN79D%#iE`ytqAOl_<7
z6#W1@OLJPTIy0l~_|A{hbjK%2mzEaJIc!!f5duy9`v0IdX1C8=X8iZT_5UD282`=R
z^!TCi{EQc1`d$2=zP{M*gc{g~zP?S@L*VA$p2B@)kz@wv-E@oYVhfl+qelaoeS9-N
z>WoKZML#GEr5io=*mlqMB5ypV7qDTl&5==lF5?pW$cv`iuB48C<-7{jVEDll<$8`Tx{684h0d
z-?_T;K^GFo@o{`PN9Y_wVi|_K32)!obuND-X4E>+yE^SjN0<-
z=c4hyL_YdMJ8>N|49{WAX3
zjoqKjoQ!ZNovrm;UYB1K4T9!Aj;%aDPRvy>W7F}Q+oX?|d`hbKEc!gv%Am|m9-eWb
zZq~adJ0<-$$CPZ|9It}iP~?Q~6?b?3wnp)6oi(o9Kq{u_E+`$t4-(CBn3i`Av0HiI
zMd?3pkQwwe%~e7EOA0WUkIV1!_diMg{jF=6t(J$qr-a-}pJ(=jz5Xj8to^T`=;OzI
zEQCD1oq-I#i@%5|K6C~WfzW$pQ(7*jz)&U25wy~k8PxPk<e?gh@3cRzTmk#X?wGLZ*s-*L?2_~bs}
zt}?QoT{XEg@%cR2baxJ=xsP1k|9J4n^5A)Y%CyftxR<$itwGzcoAj_@{N}XH3ArBz
z&+i`k^mcXO9qYp0P1c5O-ESy7oQ~B5chicbCUkQ2rV#W5|F_Gf#GJTOgGROH=DHayERRG>%bZF9umeSHPEy!5XmNfpx2I4@I8>6
zw_wpCfhSRbtF>Q%PV*|R{g3v({GsN&4V%H33C&|jCGyOSt&SqJmzo(dT5(RBN|cH=
z?fYWP*xEdq7VQl>)oG!9p&}{@(P@>UIHeVBQq+6hhk53if8c$8dHpgY)j8+${e14v
zeP8!=UH1d)ZBWNqx}hu7>I^c9D7kip$HW**RkQxMfzC=b8tNhFwKpQ?mX_JEPLvo*
zWB_dmQn{7mHi|oddqb(c(cXt%uMAWdx2^Lc?fgJtkcZwe1UQ3`av~y;C^iJcP}v~t
zp1^SHaWKwS0?Kha_xjJZw_93TiqUlmB@0ddGWUdusDgy~7I=vmGN6rxStwmZh+!Rm
z;llpx!pwM+eTo;&8P)+BylYB3>XX!t!x#JEMOnKj8DJ}yab*b(!RsndNNxungK6)%
zUglhkH2H0T+wTYKV3{&4nTUN)*%PXck5BYXAC|b5JdwNP6JJBWz!B-SJ9zty3fO!Z{
zdCt6^o|S~Q!S$cVQU;~88kpTn7g|3_tRJYKz`jJBj+uW|Vu4a;98&hXg=&
zks{qF><1L<<|JVH+U+|Sy#_4to|cjNt7$3*RlVq2g`g!NN)@epyHL&vUB61E3Kq<2
zM|1d*5OLKf9Y3qfcBS_+yJ93qcZ>7ddNsGAnaHq@PxCr>`mo3sw#7K-M8AVxdkc^J
z-*8a$(IYLuj0id)3ucdiFFyt^V))rcFoE%Zw;j+gBt|J3O>P_zffvgEQtY%;1@%g7xCj3Sr2kQle5L?7f(
zxeACCTa1f-^^v?bZ5}l+;gwhFW-?O0V_F+)wYBOL3LCP@36?JXg*?6h8d5#SECYTG
zY`|Ez=noJS!KT~~{ucWKy1f}t(Sc;vf|7&)Ztm=v-+w=fKu8Xukz!08>V}epD>0jy
z8U}egovM{M6t@7~qSgIHJ+5XsuN?j;^7z;*Owj?V_7b3!OSiVmmJ)r^`W
zLK*K&sQ7_X3;OsrwZ}2ljeI_Een@#lQ46KW&mRc^B_CL(gLCF9(af;N1P>uxc!sx^
z4CdCo92m#|$A!qW?5!w88gNSqiHQx2wbIcmxoBP|UvbIBSSSg+zQT~uG`U&;FfTnS20m!$EHEOGVzWW
z#nphELvfcbH=aQLRE<8;s_tTJT-|5Zt{@kl#!Z?&gAIIi6&RQ-evS^l{HiD9yUw)~
z1B>;x&?+i*!|_}sh1aY(PV+rjg?LNVU@CB)UbMZ0Vo(IC>}r$aWmfiWxm8`G4BAW8
zpqa1ke1R0Lq|pO3rciL%nPq5NiJ}PG775wSHtIK>G5_`%$N{?N`NF#NY~<83kOcpL
zAk1>aVcG>WJ3SLpr5p&4Q=jCxXdC3ZRJebbbUvd`x2)>c)htRFj>G2WI%hlF?
z6~pIChNT;}*J4u2Xi#`j$!O83H)iDEuLvN2I4w}h?B+k>py0=s4x0WN)_v_n*vjL;bp-#Qq99l*5PG#U
z%P!N#KBc-#gD-Xli*P*A?@@DyrV!JQ&+s7#}rOO@-X3T@BNB5FN|v{g~&pf%i{
z`w9wjEr*&~uR>dGS$(t@LeZjom|~Lg5YrR7)G;W{TenHd&&3XdP}U{>%lAd7yfzP$
zhmXWVTi578N?uMcOgOtmfR?T<_k*cYnPzA@8-d6<-4Fb8n(M;#HAP4C1!zd?nh|bd
z(;u#jHnq5tk|JahfE5#*zJ&&FR^tq;l%8}=xI*}0eeQ)QrV^$5X=`hz4;d-0&oC}|
zCh6z0<=_9lIf;IsR;DY)17C*K0NV#oo>3qbCZu{MQs#idmo!48-(fAN#O7@Oph#DS
zbSFCZfrH>0Fi4qN!9t76NPoi$))(G)sIa
z5`n}OS`&6{$kz{PWYkg-k)uB6qc<7vw||fCS5#3}fNxc!hd`17xEp)Z-ASz;JrJ+r
z-Eb6fAia~&)l5s?#Y}5nzyL0gElu9I4-;%DdI_2*L-%n0+X1J{SBjZIk1I!`aFUrx
zv(TTc$`&*)_w!C4&x|$M=PpxmNzzqz$CBF{-#(NQDxn*ZyS!8Hs!>j5huO(9Bmc9C
z=${%L{IPN;4{cw(aC
z8$?gP5PFS!Q!?iUwj-x7c1Oh}Ti10vw3y-i-L_=Wt26;l`+JtxdPcy@!y=oKeU*;S
zm2D;0X&iIkQdrheAB_C!bkrA4=Z3$Zt9w8lgjVqF34(CCOUqvq3c9Pz1N*M}IWTVz;l-}T4T
z0_V)H?v0D60{xaC3|G0s09g$cdA?H!-=OU2{`oG_Ub#3qu!
zS=2>ua9lfJk&mpCt&xolgpp|#mF?kYAJ;~JxPoi9jf+nPtFlAkR#X9%j>zK@>|68G
zFU?if`*^{2b+t138?uLsuBZv8E`mFN6DJF2`511tiG19xNXiZDy~BjQD-pkpG==R3
zk*U^@kTm+Wg%))O@VIUN`?xjfaht{a&fhCU7hcx$uUUIaUtZ8MDqWQkal9fpY-|v`<5$|i3BugHeyIR%&iRuW8YH2Y>jI%yRG2|@c&t_*@B0SN}i!@5Tkz`~1
zf(_;l+oaLJ+GMc30~L`TCEP&z$N01DpA>9U)hkEAKSh7F|2Ps5#&>V*k-qUD$hMBt
z>Sm?gq+vW^a)IWAktlv*j3o0Iq9F+gu~muBWYCR2s)zXb3NP-N!&5Z)omD6TH#?qv;uTl42MsWtPvJUngD6gn3f_|hdhza0x
zKIt%zXBA_Zk=b0rt0-i@P1^&2vaiQ;HP2{4nfmYns4?d3rjjwSwplfQsRK
zE=fnDO8eI44H{J;53F5_U9xY;FpYEG8zwX$rbSp)j7O8zHt;(iL_&BWk_9aAAx+$n
zOhjNJlUdo=FrYys=@zQ9wV2%bQBl|*hTsU{W;yM>Moag%S>)qLvU@*P*VMq7sy2NB
zT=7GgEz0@eAGFQkmj6Zb*)x-wK9Wj1vVQWr#J&iwm4rWLjsVo8@g2n6B^^DLZ@^4E
zr;HX{`=VET-)h8Uv`*S0Jb{{$v=fvo8o)wh+_8T8GuxHAD9{>10>fi
zBu33taw19{!?k{(*y&Hq48iwfd$MV|7#OAN9Fyu7S%p5Xte^OZ;T(f%of{ue`Tn1p
z9Sc&@|L2l;qXr%Y3tScyurJ5n3hZrDcz7xmMreFNF)H+92#&+Uf||F4UZv#-=-t7b
z5b5KW-|RGPjNZ~{FC=8M;P^HcE8bYN-XQerhwY$z(0CT+LHtUv{%7_NM7G-`x=RBzV9$|so4aKTEBDSAVZIVT(0y|
z`T7ankY-a^{{W4q9!01@LEB557KlnxrN
z2dbgq`BmyoSjC&Oy_lo8XLX~!K~+rT+xySLeCelPwvx~ntd7!|NS{U{ZLBlJIVdeU
zhzL+&MkDUwCPADI8o*#^<_<<1SbwCeLpGQO%e_Zka#icz-np2bK3yO5n4O^rN^pZN`v*T9-`#_TNV7~sf+c`d%!vyWqceP8g>p|b2#sHmroxAOyao&_2-B0V5)X87N3u__t>F?SH~Ns%M7QRY*jLVf{-!>#^q_yAhr$+
zf@pv+30L5s!W|mN>ND1J?usnINLWgGfpfJ+%+f(c5G0uo>6+mMWI!H1(88=GW2RC!yGqF59*|<}2%OZ0
z*qzDSsXiq$5D9Aw5^xU5O1N04pC=gR<=!PLXlP(x+F+p144;PAn@1m<1?71DFnEH4{O)t(WTCQP(Ta=NxgOBw4F76fpksxk_Kxua8oWSmrNoCw1P2bEwK
zO7EXEF__Qa3)JlFx+tO3HG+UD=*%z{qkBOEkoP0m7Sqe$t^g*&CtrZpB0~o>
zv*i5uU2C%!Ov-vWS$=)x=2La@nB$C8l9p*EZ)*)ppH&@T2YwZM3}MHNgJEO5Yh~Sz
zFCNa^8P(qvvVu2Kx48w>Q6jGPV+0xX%#l{4;N*Z_Vcg$wy{K#uu}H_=cf4Q!rCb)U
z*MAgMlSu>>mTvOgFjw;1Uh%GbX#cGiAFm21vbDe}JOK*hs42X|SpNbUbeJDm(FtPN
zdN?2VH$?tefk_7E+8KSbl8i(Yi8C8eZ=;n|hF7+%2ksqKK0D)!$5nK{T}tmMs-{<9
zO=%k{H>ERo*NthHzqAb2(<_`nT&kh~EFEcH@TQeqOdOjl-DFfX7JA)opr*F{_Z
zN2{SC)DH$vhq8j1#W(oqxKFs1qQRIwvRYm^V0V7W2KGypbh1|SgRC@h+x`zh{kLK!
z^S?#V&;w~)!Yry+XP(eG*Yj`O20b$tu6VTXvQW9kJ`mEl?m!Zw
zB7O-M2*oDV1YqGxdrV?0WxR(9T2`1e^V?^WTnG#SYuov}MpKmhD_&vl(WaI{m?ZoO
zp^xtrOmuaVb{4eRlT=RPYOY1TQyWHbS|n*X@_@piYX@FyQ-opT3>!6@prJtY25ZxE
z4^Y>{I4(VS0TF|==!uTt;p7yVVBZZJ#7t*Z)@Qqpm%OCF<$WsYw<%$OfQDApN4@VS
zsXbfH1(?6_t?n`dtihD+2m)t)+vhdts;EbnS(?7#W|G3Yuk>mHihQ28%h=`<8g$ja
zIok}9P!C0YPZ+te$4D_BZuZ+kiB)1%s&)m}@J}(>>H;XZ-Q3(zp;Iemz669AB$CHq
zS{q-Q4MdHW5r^Pl%CW>8i*H5r4)(rB)aysPcESySw2rxx0ArOrNYx%wZD+FYq){(cu1%C+kb1WdwU=6NnS
zcw5ugf`W3oaPqgRp5F^zj2J@B*0LHr3fCMS_?A|Um@>}V!3RFs+A$WKZNNJB55Qln
zji-cn(h(Y?@
zdt;#8%Q<8K^xoy+;gQ3`;5vKB*I3#?KsJpYnf#s}lW5J@`CH0tkkD15-^55M%g8RR
zZEQwACKRnii#ifRvqvCL*!~b!M7GLE^*zr1hQBX2*#Sk*QE*ezA0n^HXhgFGoK6uC
z*4@)}G1c_8u<;%;j@QbRcX+a$`5p=GonJ69w(tWIADu|
zD7CKsqRj2at3cdEF^0{9lTDDm@TlE;z!1`m7kO*>81ollYGF76D|&7BN;kdrleAso
zDV+@acoAw$7l5T~RQKeJoss&&#kZ*+b}W@_dn|!oR0F`)=AkB8t$UU{8F`z-(+;2*
zII?!b2IU(_C~c-PVK)-IDLOvhv_JAVa*?e-LbcoBqLP#Y%V=f^qL&!N$W99T)W|ur
z<};B03e0vlg~wyKoP@=GTUVwzsCS6NJfz`C&@3)S2YuyJBtMcAsQhr~n55QBX
z7@I^HcsywH0BIhAOGj+&6lP(=h@kZo#wF
z;}$xY{2(u1)R@C>hK4+bS>$DWjB3gW(Ml@0yHzgk!P)cY<+DqAR-W;%2O4U&GEV;=
z4|JQd^7Yn_$Gep8(ZE&Sl=x1x)cw$i*aaDdmOHg^&%C|`@wDE$@qZ;&b04N&>Pb7h
zU<{v6^qJpisFpj3G1hLcJ3AT+5u9?+MvrbkBYR`*y>FQTKyBQs
zgbZl}4zX-dKnTp-n}h=oc!A9)=FZC;-YU5lL?J?VvmL0L-=%o;6x2w>FI7}joKV4v
z;kwlU1k?bZa@gA^!@MlE-z@OsIJq`)>shGdvQ{#6Uh;VQhug`W2}z!o)H{%JAu!v=
zRsk3=KG?<-HggMyV2Om2uy<`{^+2QF0s>>gu%3ZI6pG*HZ=QBjyB#VI>bxMd0z>ln
z-#&U4*TGnp9m_^ON=jbUA-M8W0gAC)@Cc$G!gF;)io0(8dN?+<92TQ21@P&=!5G}%
zj;;!eMt5N(ATt1H2{|Dq!1A1I({XlnW}kuMnguUJgkfhNd#oLG6zysk!FderF8H6$
zBiD0sZG<`1u@9(BxF90;yYZ^#>zo
zo~CBvMF7}tDp32>RoxB?3Z#P*gd9_%3ykM3tR;zJgh%a$#w@c8*9^HwqK_J{^)biA
zQG<6lKx+0wP_3nvUo`~Fu%_k>=jA>74L$Xfckli}JT3x+$&(d-1=V+ju?Ier4@wds
zv)?lV5WdMa00qW@$5t%YM2{UpiMgjDyLI^kt3_7jb<$M*0{z@SH?j?T_C1A$9
zg@(?hs%4y{!F;8DRU$P(O-Ye5vEkueBXZDXI(~ri&q_Rrg7?BvyQ=$F(^eT&+jKE*
zOOAR~ZoyMI4GLClcOzIhM=o)gXhWmghG<+R{xTjW>|-n@M);nI%Bnm4iJ*E&Rhf%k~Up}1^?
zuX7jUxA7f4P!$djOG=)NTK@&prEaQL8DzlAEQl6<@;lBj4h@+A&7ecFl^I1;WcQDF=6D=
z={41t8VXIN2xfTcRvm`oHUvGzhYILN#I!%aZXgHT%gDN`3c2u1w}i^!^aE=~iwM+q
zj*G=rT8T)On*f28C89_*G6m4kA}u-g!%vQrHd#c~$1NxD*#dLwc9P9%wD3iNZqS-?
zx9$S@I5hCXi#5COs+t`JXvl|<98}0f%$@2)m7bQMIP-mPGeh(;gdfcUe^z4LI5Avn
zhpoel-Mx34_Xs+bU*g{q{K@@?d;toMe_8vxN%6d@U*Ir@Fd8PV%0Z<>coj0iy};8z
zgc%qfzBLaZ(JO?VCOmAga4+El05vAW#1@7BsX{)TWId}KKa6v=i{`FExdLf3l>b{O
zPQrK~<53|&h=_hf!A2MsShR?V&KcS3LaSwH#J?c#K6ucC3lW&;4)BV0;;4X!OH(dk
zR-me}b7vY}6MWJ1YjO^9aWtZrz7f9nx1HPSA@4ffN-U`Bakm8QKjXy#_kayTI)ZWmc|zb&UfUSVkdK0XLTf-jM+FG2ra}{fpGU
z{pYKoM*sUC=j{3WL*lFdKKK7$6w7d8?f<>yP5T~dE(W@b-i@c|CZATCS`^C!(VxWh>^%Y<%mA5gTrv!gCe=dU|`U
zgm$DWVYFX#kqlgPaL(fjxkyL=UFLQxlgR!$lrh-3t>BsK-zck&6%AzkTi9pIDI!>diZdYbK+qU9|)p|MHe?>35QL9%xH-|`|}}khSiQ)
z?_o0d3dl(28?s-E&~R6_DMX$zU13sSn*y+^Bu75d4$%$TmTf#ljRu{vxqHQFD|4;@
ztB^zv-_en_1$`^Q7soJs%U1~svFE!CR#DYUtxg)ZA~B1agFkWg{p50gr7LKBae&E5
zYzEq%TMDz==ehlE5ASbT><_DniTqS^fAG6pJw4~DFQBQw^yj7}2!cPNN$zQ+5(ux%
z?Epb6gzNF1_vQe4Oa)Eq2{*C)gy2orYoE`r%1`Kuon9<@31*UUBM_Bys7Z>{!?9l!
ztN4KV1|VYW_IX~5|J(Hz|l74B%-~A3a0>s1JI%+VY)?Gb#`gL
zWa1fo_PrXUr~+0G+Gu_+3|U_xqiK9LGa)ZOfen=0zX!bzWTEIEUxM|)=X)t7af7P{
z1c6An2m3f~(-S1WgEbY27EwKCK!(-_#MJ=AK`JRdJ-@-+QpB#CKPqYh?XJ;Wg1VP_79fPyjs=3QOAebeA72L!04Ec42Q`Btc0W3HN4`U3yBSK(
zqtpe0t6fp=0yOH;*=odKgWmEmJQ&t@Afu3wq>?2aRr45;
zv3jNgSPXq9uJrA=^=*NSk(tNf>?}~~qd;|plmzGKFMC&q^RyP%$n@}aJ#3=QE^kdQ
zEWZ5+w5VipRUaq=ZY3rqCT8RM?}vJj{&~vv*N-Gv#8N@McR<1tTSNUGuvHEMLY!_-
zGX?+x`?Y+#3fp&98A=DBL43z@4e3O!z{CwWK#?@O6?KVAk+IWUe=z4DTbFt#H@g*f
z5U^vaEcY6Q5CKJR=AEihV3iptSH4bHd1Im-ZzMxWnUw|_}Pj-MI+>+RW5`j)Q9Om_M
z#>*$W7m?cxN?-N|psRhkb<#{U;weeP!6V_!6PPlREAY*E->`nEn9EY5xTEqAt-yQ;
z;P*5jhZw_)y}Y2j-3nnd7~35Rk#4(=9AnGQp=bGqVObix2(q|;6%+%SIlmcN}QgY
zJPNY0PxrYE@ToZ>ISrWK7A9Xr^A?AR0=Q%u+72sjOg)9X2anRBP03sx+#6EgOe?DG
zv3*uv1`K}s+oX;F=zuyftC3wbihRco=ZaiG((RG3(PS%{9r*;K)3K@s!vXWs5{pkM
z&$;>HgS$`z`CyRi^*OPCB!7#&2PEy_s^^s)@E)@Dj
zST_EQ;UvG2a|p9u_CM_Xjk2#dzX1LO#{Y-rCN8pPNJQ=?lGiRu_4Rs*nK-3UM(_SS
zBfcNoMYYWD{m6(rIl}3l1B9bmlnYpps3?`UZ+EmZkYjLc(DTS`v!56va}Y#~NTUtF
zR>+kh^}%eBn?M&8PL#&Lvn(QV<@|FGFF
zt{ycSIfPqrd;wUb_ERVr6i`dF!A3!Ix&h?Kr;R`G7ZGU6#3`G;dXs2i_)q($sE@(C
zzTo(?PV5KbL-8C$PXCFzs2?Z3@R|2U+;l*$$y6@B^C8~7wdVD$Y!Xmldk$+O_bTGHgB6P%k`=Q2G(JUw@oD?V6KV>waA<6L1d1*0}
z2ztejs3e`W5yPvpzCmDY!M4}-2Dj%rWe8`M!6tc*>;w4b>-53N8;&`HmV&eUk-7jO
zF~Gq!b96-k#2!PRak;qo@QBCSQQt#VlXJZU4L(f~P%jt*#zBgD_6>ymeojVq14+{w
zaWv+<0|dS;#nr{dwDUB>9ari&SnTaws}f9-?QpuiggpkGz!<_w^f8kK&wi^FSNrtT
z@S}6m3K5Uka2{`tfWNMsnR}ByS?kC7EWnL;@YEV-ul`1f=>B$lzJ*WU2u>E8QC?Si
zVh8F$?qwem83MthVPY=c^xKib+_latVX46fj3+nO>&wrzfqH=)GHgK94FVA=_`e{4xKh^}r7(lI^5z(MZfN?=RKAke0Rj2c4A=kD~2WqL4&%Ny4;L!Z_zF?-aOFMyMVmoeUK(W3MAz#MqIwz31>7L5XP8KB4sEkhI~jjdf{{9
z|CRxV5$Xuw154I(7+k&I+z=q-$Dx(JiS|Mh6D~}HeF!lziTe(IdwUD@Ta-GQ0e8{3
zXy4NBS3FwBH?Y&o*z;kH4^GCm=c%YYQKl_eE
zJEiV^8o;?4MtF53zpS4|v?J>o;x$(_kHS%ef2!!B#GoQ``ve?eqVzyW!F#Q6!Z!G?
zH{?t+<6SH>*3Ox;ux-!IA6fjCB#vNRQlrCke?CtL
zvHI>0WLv1Zn7BcnVqmRX<~ZoU#G5ENU?3l^XAG;(dG1~O#V4r5E0Z{w2n5@Cq@56Q{N-`5G7s;_}pFT4ma9Qr~&KrYDs
z_M!9}x$X(?1~v5^3X*g4EXLb;dE%e+QC?Z{NKoY
zY)=D%I7!y7zP|PVi9QCa>P)yAkctbMh4wL
zFw!IQIw)k&7Ez;D!pNzVgUEA{eh*;3gdY*FV2%0Cxw*}GktYg;EsHN}kE5wmXX=T}
zoNr{Uy!mY>&u}6iIDEc!;JasVbV7AK%m^3a@~EP
zXDRh@r#d+iRp6+F_>&lzzYo@Y07ED4sp;VdtPF&Z
zc}?Oq;EzoO2T_ab1P{;QZ2lk2OQmt%H!8{4QnuibtUIXTkk_^y*Q3tGEw>_ODTcSV
zf7tmP3J~&Nj(pr9Py~Zqe;_(U<#WTO5p5&~R~po`Ey%ftya8xD2GxiGK#TLQf4mMF
zAlvbl3H%0Ql!>IF34PH83l~zQMeaxF^N*t+un|HqRuQ`Sf`*oc^5~&FLWY|H2XC@k
zLLe&b4q^-zm8)SMFF6aqgpUZVEO-f|&wwYJ42(j4T>ONxm+0xk#n<7E@%c}h7a?2-
zm!CEb`c6NBC>HsMF>nVkT;Jm$(xDQ=a41lH&Q15u6yZ;S3_q^!PRz0B+UfW2`SX)d
zp&S@OQIQTH32kH7bX_zC3y`R!RN~9}O*+9o?0$z?_Ffu}hg>orWMEu*8UQz1kS`Gyfs;odG+M_rE(gUu{EMZ~
zYeh@!MBHDn6aV9$pbG8pKNtSG>-k^$|2XldmHB^zh&Rm6lgoIzS1>MjC%uh58oE2P
Je>!yX{{W;O>e&DQ

literal 0
HcmV?d00001


From b9f2e3c46e4bbf7f3d5d43cc3ea5544fe2ad7eda Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Sat, 12 Jan 2019 14:09:42 +0100
Subject: [PATCH 108/129] add MANIFEST.in to include everything in pypi
 distribution

---
 MANIFEST                                      |  49 +-
 MANIFEST.in                                   |   3 +
 class4gl/interface/interface_new_koeppen.py   |   3 +-
 class4gl/interface/interface_show_profiles.py | 569 +++++++++++++
 class4gl/interface_multi.py                   |   4 +-
 class4gl/model.py                             |   2 +-
 class4gl/setup/batch_setup_igra.pbs           |   6 +-
 class4gl/setup/setup_bllast_noon.py           | 719 +++++++++++++++++
 class4gl/setup/setup_goamazon_noon.py         | 755 ++++++++++++++++++
 class4gl/setup/setup_humppa_noon.py           | 733 +++++++++++++++++
 class4gl/setup/setup_igra_20181217.py         | 361 +++++++++
 setup.cfg                                     |   4 +
 setup.py                                      |  32 +-
 13 files changed, 3228 insertions(+), 12 deletions(-)
 create mode 100644 MANIFEST.in
 create mode 100644 class4gl/interface/interface_show_profiles.py
 create mode 100644 class4gl/setup/setup_bllast_noon.py
 create mode 100644 class4gl/setup/setup_goamazon_noon.py
 create mode 100644 class4gl/setup/setup_humppa_noon.py
 create mode 100644 class4gl/setup/setup_igra_20181217.py
 create mode 100644 setup.cfg

diff --git a/MANIFEST b/MANIFEST
index 1dde1bb..c4510cb 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -1,10 +1,55 @@
 # file GENERATED by distutils, do NOT edit
+setup.cfg
 setup.py
-bin/__init__.py
 class4gl/__init__.py
 class4gl/class4gl.py
-class4gl/data_air.py
 class4gl/data_global.py
+class4gl/data_soundings.py
+class4gl/era_advection.py
 class4gl/interface_functions.py
 class4gl/interface_multi.py
 class4gl/model.py
+class4gl/interface/interface.py
+class4gl/interface/interface_cloudiness.py
+class4gl/interface/interface_koeppen.py
+class4gl/interface/interface_new_koeppen.py
+class4gl/interface/interface_show_profiles.py
+class4gl/interface/interface_stations.py
+class4gl/interface/taylorDiagram.py
+class4gl/interface/test_histogram.py
+class4gl/interface/world_histogram.py
+class4gl/processing/batch_update_output.py
+class4gl/processing/update_output.py
+class4gl/ribtol/__init__.py
+class4gl/ribtol/ribtol_hw.py
+class4gl/ribtol/setup.py
+class4gl/setup/batch_setup_era.py
+class4gl/setup/batch_setup_global_old.py
+class4gl/setup/batch_setup_igra.py
+class4gl/setup/batch_update.py
+class4gl/setup/batch_update_input.py
+class4gl/setup/setup_bllast.py
+class4gl/setup/setup_bllast_noon.py
+class4gl/setup/setup_era.py
+class4gl/setup/setup_global_afternoon.py
+class4gl/setup/setup_goamazon.py
+class4gl/setup/setup_goamazon_noon.py
+class4gl/setup/setup_humppa.py
+class4gl/setup/setup_humppa_noon.py
+class4gl/setup/setup_igra.py
+class4gl/setup/setup_igra_20181217.py
+class4gl/setup/setup_igra_pkl.py
+class4gl/setup/update_input.py
+class4gl/setup/update_setup.py
+class4gl/setup/trash/setup_global_old.py
+class4gl/simulations/batch_simulations.py
+class4gl/simulations/copy_update.py
+class4gl/simulations/runmodel.py
+class4gl/simulations/simulations.py
+class4gl/simulations/simulations_iter.py
+class4gl/simulations/simulations_iter_bowen.py
+class4gl/simulations/simulations_iter_test.py
+class4gl/simulations/simulations_smchange2.py
+class4gl/simulations/simulations_veg.py
+class4gl/simulations/simulations_wwilt_wfc.py
+class4gl/simulations/trash/run_test.py
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..174aa24
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+recursive-include class4gl *.py
+
+
diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py
index e5d7161..f9a2a3e 100644
--- a/class4gl/interface/interface_new_koeppen.py
+++ b/class4gl/interface/interface_new_koeppen.py
@@ -1,4 +1,3 @@
-'''
 import numpy as np
 import pandas as pd
 import sys
@@ -122,7 +121,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
                       obs_filter = (args.obs_filter == 'True')
                                             
                     )
-                    '''
+
 sns.reset_orig()
 
 
diff --git a/class4gl/interface/interface_show_profiles.py b/class4gl/interface/interface_show_profiles.py
new file mode 100644
index 0000000..734e8b9
--- /dev/null
+++ b/class4gl/interface/interface_show_profiles.py
@@ -0,0 +1,569 @@
+'''
+import numpy as np
+
+import pandas as pd
+import sys
+
+import matplotlib
+matplotlib.use('TkAgg')
+
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--experiments')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--load_globaldata',default=False) # load the data needed for the interface
+parser.add_argument('--make_figures',default=None)
+parser.add_argument('--figure_filename',default=None)
+parser.add_argument('--tendencies_revised',default=False)
+parser.add_argument('--obs_filter',default="True")
+args = parser.parse_args()
+
+print('Adding python library:',args.c4gl_path_lib)
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import c4gl_interface_soundings,get_record_yaml
+from class4gl import class4gl_input, data_global,class4gl,units
+#from sklearn.metrics import mean_squared_error
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+#import seaborn.apionly as sns
+import pylab as pl
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import kde
+from scipy.stats import pearsonr                                                
+from taylorDiagram import TaylorDiagram
+from matplotlib import ticker
+# import importlib
+# importlib.reload(mpl); importlib.reload(plt); importlib.reload(sns)
+
+
+
+
+
+latex = {}
+latex['dthetadt'] =  r'$d \theta / dt $'
+latex['dqdt'] =      r'$d q / dt $'
+latex['dhdt'] =      r'$d h / dt $'
+
+def abline(slope, intercept,axis):
+    """Plot a line from slope and intercept"""
+    #axis = plt.gca()
+    x_vals = np.array(axis.get_xlim())
+    y_vals = intercept + slope * x_vals
+    axis.plot(x_vals, y_vals, 'k--')
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculated root mean squared error 
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+
+
+
+# EXPS  =\
+# {
+# 'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'GLOBAL_ITER_ADV':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+# #'IOPS_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+# # 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+# }
+
+if args.load_globaldata:
+    # initialize global data
+    globaldata = data_global()
+    # ...  and load initial data pages
+    globaldata.load_datasets(recalc=0)
+else:
+    globaldata = None
+
+c4gldata = []
+    
+c4gldata.append(c4gl_interface_soundings( \
+                  '/data/gent/vo/000/gvo00090/D2D/data/C4GL/20181030/IOPS_ADV_ITER/',\
+                  '/data/gent/vo/000/gvo00090/D2D/data/SOUNDINGS/IOPS/',\
+                  globaldata,\
+                  refetch_records=False,\
+                  obs_filter = True,\
+                  tendencies_revised = args.tendencies_revised\
+                ))
+c4gldata.append(c4gl_interface_soundings( \
+                  '/data/gent/vo/000/gvo00090/D2D/data/C4GL/20181017_NOON/IOPS_ADV_ITER/',\
+                  '/data/gent/vo/000/gvo00090/D2D/data/SOUNDINGS/IOPS_NOON/',\
+                  globaldata,\
+                  refetch_records=False,
+                  obs_filter = False,
+                  tendencies_revised = args.tendencies_revised
+                ))
+
+
+profiles_morning            = []
+profiles_morning_raw        = []
+profiles_noon_obs           = []
+profiles_noon_obs_raw       = []
+profiles_noon_mod           = []
+profiles_afternoon_obs      = []
+profiles_afternoon_obs_raw  = []
+profiles_afternoon_mod      = []
+profiles_location      = []
+profiles_morning_datetime      = []
+profiles_noon_datetime      = []
+profiles_afternoon_datetime      = []
+
+for i in range(15):
+
+    if i == 0:
+        location = 'HUMPPA'
+        c4gldata[0].sel_station(90000)
+        c4gldata[1].sel_station(90000)
+    elif i == 7:
+        location = 'BLLAST'
+        c4gldata[0].sel_station(90001)
+        c4gldata[1].sel_station(90001)
+    elif i == 11:
+        location = 'GOAMAZON'
+        c4gldata[0].sel_station(90002)
+        c4gldata[1].sel_station(90002)
+
+    # if i == 12:
+    #     c4gldata[1].next_record()
+    #     c4gldata[1].next_record()
+
+        
+    profiles_location.append(str(location))
+    profiles_morning_datetime.append(c4gldata[0].frames['profiles']['record_yaml_ini'].pars.ldatetime)
+    profiles_morning.append( c4gldata[0].frames['profiles']['record_yaml_ini'].air_ap)
+    profiles_morning_raw.append( c4gldata[0].frames['profiles']['record_yaml_ini'].air_balloon)
+
+    profiles_noon_datetime.append(c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime)
+
+    if i in [12,13]:
+        profiles_noon_datetime.append(c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime)
+        profiles_noon_obs.append(c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].air_ap)
+        profiles_noon_obs_raw.append(c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].air_balloon)
+        profiles_noon_mod.append(c4gldata[1].frames['profiles']['record_yaml_mod'].air_ap)
+        profiles_noon_obs[-1].theta = np.nan
+        profiles_noon_obs_raw[-1].theta = np.nan
+        profiles_noon_mod[-1].theta = np.nan
+        profiles_noon_obs[-1].q = np.nan
+        profiles_noon_obs_raw[-1].q = np.nan
+        profiles_noon_mod[-1].q = np.nan
+
+    else:
+        profiles_noon_obs.append( c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].air_ap)
+        profiles_noon_obs_raw.append( c4gldata[1].frames['profiles']['record_yaml_obs_afternoon'].air_balloon)
+        profiles_noon_mod.append( c4gldata[1].frames['profiles']['record_yaml_mod'].air_ap)
+
+    profiles_afternoon_datetime.append(c4gldata[0].frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime)
+    profiles_afternoon_obs.append( c4gldata[0].frames['profiles']['record_yaml_obs_afternoon'].air_ap)
+    profiles_afternoon_obs_raw.append(c4gldata[0].frames['profiles']['record_yaml_obs_afternoon'].air_balloon)
+    profiles_afternoon_mod.append( c4gldata[0].frames['profiles']['record_yaml_mod'].air_ap)
+
+    if i < 14:
+        c4gldata[0].next_record()
+        if i not in [9,10]:
+            c4gldata[1].next_record()
+
+
+
+
+            #axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12.)                                     
+
+
+'''
+
+
+
+numprofiles = len(profiles_afternoon_mod)
+
+
+per_row = 4.
+VARS = ['theta','q']
+
+for var in VARS:
+    fig = plt.figure(figsize=(9,8.5))
+    for i in range(numprofiles):
+        ax = fig.add_subplot(np.ceil(numprofiles/per_row),per_row,i+1)
+    
+        if var == 'q':
+            fac = 1000.
+        else:
+            fac = 1.
+    
+        label = profiles_morning_datetime[i].strftime("%H:%m")+'ST'+' ini'
+        plt.plot(profiles_morning[i][var].values*fac,profiles_morning[i].z.values,'b--',label=label)
+        #plt.plot(profiles_morning_raw[i][var].values,profiles_morning_raw[i].z.values,'b*')
+        
+        label = profiles_noon_datetime[i].strftime("%H:%m")+'ST'+' obs'
+        #label = profiles_morning_datetime[i].hour()+':'+profiles_morning_datetime[i].minute()+'ST')+' ini')
+        plt.plot(profiles_noon_obs[i][var].values*fac,profiles_noon_obs[i].z.values,'g--',label=label)
+        #plt.plot(profiles_noon_obs_raw[i][var].values,profiles_noon_obs_raw[i].z.values,'g*')
+        label = profiles_noon_datetime[i].strftime("%H:%m")+'ST'+' mod'
+        plt.plot(profiles_noon_mod[i][var].values*fac,profiles_noon_mod[i].z.values,'g-',label=label)
+        
+        
+        label = profiles_afternoon_datetime[i].strftime("%H:%m")+'ST'+' obs'
+        plt.plot(profiles_afternoon_obs[i][var].values*fac, profiles_afternoon_obs[i].z.values,'r--',label=label)
+        #plt.plot(profiles_afternoon_obs_raw[i][var].values,profiles_afternoon_obs_raw[i].z.values,'r*')
+        label = profiles_afternoon_datetime[i].strftime("%H:%m")+'ST'+' mod'
+        plt.plot(profiles_afternoon_mod[i][var].values*fac, profiles_afternoon_mod[i].z.values,'r-',label=label)
+        if var == 'q':
+            if i >=(numprofiles - per_row):
+                ax.legend(loc=2,fontsize=6)
+            else:
+                ax.legend(loc=1,fontsize=6)
+        else:
+            ax.legend(loc=2,fontsize=6)
+    
+    
+        ax.set_ylim(0,3500)
+        if var == 'theta':
+             ax.set_xlim((283.,315.))
+        elif var == 'q':
+             ax.set_xlim((0.,21.))
+        ax.set_title(profiles_location[i]+' '+ str(profiles_morning_datetime[i].date()))
+    
+        if (np.mod(i,per_row) !=0):
+            ax.set_yticklabels([])
+        else:
+            ax.set_ylabel('$h$ [$\mathrm{m}$]')
+    
+        if i < (numprofiles - per_row):
+            ax.set_xticklabels([])
+        else:
+           if var == 'q':
+               units_final = r'$q$ [$\mathrm{g\, kg^{-1}}$]'
+           elif var == 'theta':
+               units_final = r'$\theta$ [$\mathrm{K}$]'
+           ax.set_xlabel(units_final)
+
+
+
+
+    #ax.tick_params(
+    #axis='x',          # changes apply to the x-axis
+    #which='both',      # both major and minor ticks are affected
+    #left=(np.mod(i,per_row) ==0),      # ticks along the bottom edge are off
+    #top=False,         # ticks along the top edge are off
+    #labelbottom= (i==np.ceil(numprofiles/per_row)) # labels along the bottom edge are off
+    #)
+
+    fig.tight_layout()
+    fig.subplots_adjust(wspace=0.05,right=0.99,hspace=0.25)
+    fig.show()
+
+
+
+# if bool(args.make_figures):
+#     fig = plt.figure(figsize=(10,7))   #width,height
+#     i = 1                                                                           
+#     axes = {}         
+#     axes_taylor = {}         
+#     
+#     #colors = ['r','g','b','m']
+#     colors = ['k']
+#     symbols = ['^','x','+']
+#     dias = {}
+#     
+#     for varkey in ['h','theta','q']:                                                    
+#         axes[varkey] = fig.add_subplot(2,3,i)                                       
+#         #axes_taylor[varkey] = fig.add_subplot(2,3,i+3)                                       
+#     
+#         #print(obs.std())
+#         obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+#         STD_OBS = obs.std()
+#         dias[varkey] =  TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference')
+#         dias[varkey]._ax.axis["left"].label.set_text(\
+#             "Normalized standard deviation")
+#         if i == 1:
+#             axes[varkey].annotate('Normalized standard deviation',\
+#                         xy= (0.05,0.36),
+#                         color='black',
+#                         rotation=90.,
+#                         xycoords='figure fraction',
+#                         weight='normal',
+#                         fontsize=10.,
+#                         horizontalalignment='center',
+#                         verticalalignment='center' ,
+#                         #bbox={'edgecolor':'black',
+#                         #      'boxstyle':'circle',
+#                         #      'fc':koeppen.color,
+#                         #      'alpha':1.0}
+#                        )
+#         # dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#         # dias[varkey]._ax.axis["left"].axis.set_major_locator(np.arange(0.,2.,0.25))
+#         #dias[varkey]._ax.axis["left"].axis.set_ticks(np.arange(0.,2.,0.25))
+#         # Q95 = obs.quantile(0.95)
+#         # Q95 = obs.quantile(0.90)
+#         # Add RMS contours, and label them
+#         contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels
+#         dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f')
+#         #dia._ax.set_title(season.capitalize())
+#     
+#         dias[varkey].add_grid()
+#     
+#     
+#         #dia.ax.plot(x99,y99,color='k')
+#     
+#         
+#         for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+#             mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
+#             obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+#             x, y = obs.values,mod.values
+#             print(key,len(obs.values))
+#     
+#             #scores
+#             PR = pearsonr(mod,obs)[0]
+#             RMSE = rmse(obs,mod)                                               
+#             BIAS = np.mean(mod) - np.mean(obs)
+#             STD = mod.std()
+#             
+#             fit = np.polyfit(x,y,deg=1)
+# 
+#             if varkey == 'q':
+#                 axes[varkey].plot(x, fit[0] * x + fit[1],\
+#                                   color=colors[ikey],alpha=0.8,lw=2,\
+#                                   label=key+", "+\
+#                            'RMSE = '+format((RMSE*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+ '\n'+\
+#                            'Bias = '+format((BIAS*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+' \n'+\
+#                            r'$R$ = '+format(PR,'0.2f') )
+# 
+# 
+#             elif varkey == 'h':
+#                 axes[varkey].plot(x, fit[0] * x + fit[1],\
+#                                   color=colors[ikey],alpha=0.8,lw=2,\
+#                                   label=key+", "+\
+#                             'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+#                             'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+#                             r'$R$ = '+format(PR,'0.2f'))
+#             else: #theta
+#                 axes[varkey].plot(x, fit[0] * x + fit[1],\
+#                                   color=colors[ikey],alpha=0.8,lw=2,\
+#                                   label=key+", "+\
+#                             'RMSE = '+format(RMSE,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+#                             'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+#                             r'$R$ = '+format(PR,'0.2f'))
+# 
+#             if varkey == 'q':
+#                 annotate_text = \
+#                                'RMSE = '+format((RMSE*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+ '\n'+\
+#                                'Bias = '+format((BIAS*1000.),'0.2f')+r'$\,  \mathrm{g\,  kg^{-1}\,  h^{-1}}$'+' \n'+\
+#                                r'$R$ = '+format(PR,'0.2f')
+#                 ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9,
+#        horizontalalignment='right', verticalalignment='bottom' ,
+#         bbox={'edgecolor':'black',
+#                           'fc':'white',  
+#                               'boxstyle':'square',
+#                               'alpha':0.8}
+#                                        )
+#             elif varkey == 'h':
+#                 annotate_text = \
+#                                 'RMSE = '+format(RMSE,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+#                                 'Bias = '+format(BIAS,'0.1f')+r'$\,  \mathrm{m\, h^{-1}}$'+'\n'+\
+#                                 r'$R$ = '+format(PR,'0.2f')
+#                 ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9,
+#        horizontalalignment='left', verticalalignment='top' ,
+#         bbox={'edgecolor':'black',
+#                           'fc':'white',  
+#                               'boxstyle':'square',
+#                               'alpha':0.8}
+#                                        )
+#             else:
+#                 annotate_text = \
+#                                 'RMSE = '+format(RMSE,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+#                                 'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\
+#                                 r'$R$ = '+format(PR,'0.2f')
+# 
+#                 ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9,
+#        horizontalalignment='left', verticalalignment='top' ,
+#         bbox={'edgecolor':'black',
+#                           'fc':'white',  
+#                               'boxstyle':'square',
+#                               'alpha':0.8}
+#                                        )
+# 
+# 
+# 
+# 
+#             
+#             # print(STD)
+#             # print(PR)
+#             dias[varkey].add_sample(STD/STD_OBS, PR,
+#                            marker='o', ms=5, ls='',
+#                            #mfc='k', mec='k', # B&W
+#                            mfc=colors[ikey], mec=colors[ikey], # Colors
+#                            label=key)
+#     
+#         # put ticker position, see
+#         # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html 
+#         # dia.ax.axis['bottom'].
+#         # dia.ax.axis['left'].
+#         # dia.ax.axis['left'].
+#     
+#         i += 1
+#     
+#     i = 0
+#     for varkey in ['h','theta','q']:                                                    
+#         for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+#             istation = 0
+#             for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+#                 indices =  (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name)
+#                 station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices]
+#                 station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices]
+#     
+#                 axes[varkey].scatter(station_obs,station_mod,marker=symbols[istation],color=colors[ikey])
+#                          #  label=key+", "+\
+#                          #                    'R = '+str(round(PR[0],3))+', '+\
+#                          #                    'RMSE = '+str(round(RMSE,5))+', '+\
+#                          #                    'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+#     
+#     
+#     
+#             # # pl.scatter(obs,mod,label=key+", "+\
+#             # #                              'R = '+str(round(PR[0],3))+', '+\
+#             # #                              'RMSE = '+str(round(RMSE,5))+', '+\
+#             # #                              'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey])
+#                 
+#                 dias[varkey].add_sample(station_mod.std()/station_obs.std(),
+#                                pearsonr(station_mod,station_obs)[0],#annotate=symbols[istation],
+#                                marker=symbols[istation], ms=5, ls='',
+#                                mfc='k', mec='k', # B&W
+#                                #mfc=colors[ikey], mec=colors[ikey], # Colors
+#                                label=key)
+# 
+#                 istation += 1
+#     
+#             if varkey == 'q':
+#                 units_final = r'[$g\, kg^{-1}\, h^{-1}$]'
+#             elif varkey == 'theta':
+#                 units_final = r'[$K\, h^{-1}$]'
+#             elif varkey == 'h':
+#                 units_final = r'[$m\, h^{-1}$]'
+#     
+#             axes[varkey].set_xlabel('Observed')     
+#             axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12)                                     
+# 
+# 
+#         # if varkey == 'q':
+#         #     print('get_xlim not working well...STRANGE')
+#         #     limits =  [np.percentile(nani,1),np.percentile(nani,99)]
+#         # else:
+#         #     limits =  [np.percentile(nani,1.0),np.percentile(nani,99.0)]
+# 
+# 
+#         if i==0:                                    
+#             axes[varkey].set_ylabel('Modelled')                                            
+#         i +=1
+#           
+#         axes[varkey].set_aspect('equal')
+#         low  = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].min()
+#         high  = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].max()
+# 
+#         low  = np.min([low,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].min()])
+#         high  = np.max([high,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].max()])
+# 
+#         low = low - (high - low)*0.1
+#         high = high + (high - low)*0.1
+#         axes[varkey].set_xlim([low,high])
+#         axes[varkey].set_ylim([low,high])
+#         abline(1,0,axis=axes[varkey])
+#         if varkey == 'q':
+#             ticks = ticker.FuncFormatter(lambda x, pos:
+#                                          '{0:g}'.format(x*1000.))
+#             axes[varkey].xaxis.set_major_formatter(ticks)
+#             axes[varkey].yaxis.set_major_formatter(ticks)
+#     
+#     
+#     # # legend for different forcing simulations (colors)
+#     # ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+#     # leg = []
+#     # for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
+#     #     leg1, = ax.plot([],colors[ikey]+'s' ,markersize=10)
+#     #     leg.append(leg1)
+#     # ax.axis('off')
+#     # #leg1 =
+#     # ax.legend(leg,list(args.experiments.strip(' ').split(' ')),loc=2,fontsize=10)
+#     
+#     
+#     # legend for different stations (symbols)
+#     ax = fig.add_axes([0.08,-0.02,0.15,0.15]) #[*left*, *bottom*, *width*,    *height*]
+#     leg = []
+#     isymbol = 0
+#     for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows():
+#         leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10)
+#         leg.append(leg1)
+#         isymbol += 1
+#     
+#     # symbol for all stations
+#     leg1, = ax.plot([],'ko',markersize=10)
+#     leg.append(leg1)
+#     
+#     
+#     ax.axis('off')
+#     ax.legend(leg,['HUMPPA','BLLAST','GOAMAZON','All'],loc=2,fontsize=10,ncol=4)
+#     
+#     
+#     fig.subplots_adjust(top=0.95,bottom=0.20,left=0.08,right=0.94,hspace=0.28,wspace=0.29)
+#     
+#     
+#     #pl.legend(leglist,('EMI:WOC','EMI:MED','EMI:BEC'),loc=2,fontsize=16,prop={'family':
+#     #figfn = '/user/data/gent/gvo000/gvo00090/D2D/archive/report/iops_eval_report.png'
+#     
+#     if args.figure_filename is not None:
+#         fig.savefig(args.figure_filename,dpi=200); print("Image file written to:",args.figure_filename)
+#         fig.savefig(args.figure_filename.replace('png','pdf')); print("Image file written to:", args.figure_filename)
+#     fig.show()  
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index 72e1ba5..b5e5c6d 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -253,7 +253,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
                          #(self.frames['stats']['records_all_stations_mod_stats'].dhdt >  50.0000) & 
                          (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt >  40.0000) & 
                          #(self.frames['stats']['records_all_stations_mod_stats'].dhdt <  350.) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt <  350.) & 
+                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt <  400.) & 
                          (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt >  -.00055) & 
                          #(self.frames['stats']['records_all_stations_mod_stats'].dqdt >  -.00055) & 
                          (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt <  .0003) & 
@@ -453,7 +453,7 @@ def next_record(self,event=None,jump=1):
                 if 'current_station_file_afternoon' in self.frames['profiles'].keys():
                     self.frames['profiles']['current_station_file_afternoon'].close()
                 self.frames['profiles']['current_station_file_afternoon'] = \
-                    open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_afternoon.yaml','r')
+                    open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
 
         self.update_record()
 
diff --git a/class4gl/model.py b/class4gl/model.py
index d3b1fdb..92792f5 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -1880,7 +1880,7 @@ def exitmodel(self):
         del(self.theta)
         del(self.dtheta)
         del(self.gammatheta)
-        #del(self.advtheta)
+        del(self.advtheta)
         del(self.beta)
         del(self.wtheta)
     
diff --git a/class4gl/setup/batch_setup_igra.pbs b/class4gl/setup/batch_setup_igra.pbs
index ecf97f8..9777de5 100644
--- a/class4gl/setup/batch_setup_igra.pbs
+++ b/class4gl/setup/batch_setup_igra.pbs
@@ -7,7 +7,11 @@
 #PBS -m a
 #PBS -N c4gl_setup
 
-module load Python/3.6.4-intel-2018a IPython basemap BeautifulSoup xarray Pysolar PyYAML netcdf4-python Ruby
+module purge
+source ~/.bashrc
+
+echo loading modules: $LOADDEPSCLASS4GL 
+$LOADDEPSCLASS4GL 
 
 EXEC_ALL="python $C4GLJOB_exec --first_station_row $PBS_ARRAYID --last_station_row $PBS_ARRAYID"
 
diff --git a/class4gl/setup/setup_bllast_noon.py b/class4gl/setup/setup_bllast_noon.py
new file mode 100644
index 0000000..e846d25
--- /dev/null
+++ b/class4gl/setup/setup_bllast_noon.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+# Read data from BLLAST campaign and convert it to class4gl input
+
+# WARNING!! stupid tab versus space formatting, grrrmmmlmlmlll!  the following command needs to be executed first: 
+#    for file in RS_2011????_????_site1_MODEM_CRA.cor ;  do expand -i -t 4 $file > $file.fmt ; done
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
+globaldata = data_global()
+globaldata.load_datasets(recalc=0)
+
+Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
+cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
+Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
+epsilon = Rd/Rv # or mv/md
+
+
+def replace_iter(iterable, search, replace):
+    for value in iterable:
+        value.replace(search, replace)
+        yield value
+
+from class4gl import blh,class4gl_input
+
+# definition of the BLLAST station
+current_station = pd.Series({ "latitude"  : 42.971834,
+                  "longitude" : 0.3671169,
+                  "name" : "the BLLAST experiment"
+                })
+current_station.name = 90001
+
+
+
+
+
+# RS_20110624_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110630_1700_site1_MODEM_CRA.cor.fmt
+# RS_20110702_1655_site1_MODEM_CRA.cor.fmt
+# RS_20110621_0509_site1_MODEM_CRA.cor.fmt
+
+HOUR_FILES = \
+{ dt.datetime(2011,6,19,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110619_0521_site1_MODEM_CRA.cor.fmt'],'afternoon':[11.25,'RS_20110619_1115_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,20,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110620_0515_site1_MODEM_CRA.cor.fmt'],'afternoon':[11.25,'RS_20110620_1115_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,6,25,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110625_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[11,'RS_20110625_1100_site1_MODEM_CRA.cor.fmt']},
+# dt.datetime(2011,6,26,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110626_0500_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110626_1700_site1_MODEM_CRA.cor.fmt']},
+# dt.datetime(2011,6,27,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110627_0503_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110627_1700_site1_MODEM_CRA.cor.fmt']},
+ dt.datetime(2011,7, 2,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110702_0501_site1_MODEM_CRA.cor.fmt'],'afternoon':[11,'RS_20110702_1057_site1_MODEM_CRA.cor.fmt']},
+# dt.datetime(2011,7, 5,0,0,0,0,pytz.UTC):{'morning':[5,'RS_20110705_0448_site1_MODEM_CRA.cor.fmt'],'afternoon':[17,'RS_20110705_1701_site1_MODEM_CRA.cor.fmt']},
+}
+
+
+#only include the following timeseries in the model output
+timeseries_only = \
+['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+ 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+ 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+ 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+ 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+
+def esat(T):
+    return 0.611e3 * np.exp(17.2694 * (T - 273.16) / (T - 35.86))
+def efrom_rh100_T(rh100,T):
+    return esat(T)*rh100/100.
+def qfrom_e_p(e,p):
+    return epsilon * e/(p - (1.-epsilon)*e)
+
+def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
+        #balloon_conv = replace_iter(balloon_file,"°","deg")
+        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
+        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
+        air_balloon_in = pd.read_csv(balloon_file,delimiter='\t',)
+                                     #widths=[14]*19,
+                                     #skiprows=9,
+                                     #skipfooter=15,
+                                     #decimal='.',
+                                     #header=None,
+                                     #names = columns,
+                                     #na_values='-----')
+        air_balloon_in = air_balloon_in.rename(columns=lambda x: x.strip())
+        print(air_balloon_in.columns)
+        rowmatches = {
+            't':      lambda x: x['TaRad']+273.15,
+            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
+            'p':      lambda x: x['Press']*100.,
+            'u':      lambda x: x['VHor'] * np.sin((90.-x['VDir'])/180.*np.pi),
+            'v':      lambda x: x['VHor'] * np.cos((90.-x['VDir'])/180.*np.pi),
+            'z':      lambda x: x['Altitude'] -582.,
+            # from virtual temperature to absolute humidity
+            'q':      lambda x: qfrom_e_p(efrom_rh100_T(x['UCal'],x['TaRad']+273.15),x['Press']*100.),
+        }
+        
+        air_balloon = pd.DataFrame()
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon_in)
+        
+        rowmatches = {
+            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
+            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
+            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
+        }
+        
+        for varname,lfunction in rowmatches.items():
+            air_balloon[varname] = lfunction(air_balloon)
+        
+        dpars = {}
+        dpars['longitude']  = current_station['longitude']
+        dpars['latitude']  = current_station['latitude'] 
+        
+        dpars['STNID'] = current_station.name
+        
+        
+        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
+        valid_indices = air_balloon.index[is_valid].values
+        
+        air_ap_mode='b'
+        
+        if len(valid_indices) > 0:
+            dpars['h'],dpars['h_u'],dpars['h_l'] =\
+                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['VHor'])
+            dpars['h_b'] = np.max((dpars['h'],10.))
+            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
+            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
+            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
+            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
+        else:
+            dpars['h_u'] =np.nan
+            dpars['h_l'] =np.nan
+            dpars['h_e'] =np.nan
+            dpars['h'] =np.nan
+        
+        
+        
+        if ~np.isnan(dpars['h']):
+            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
+        else:
+            dpars['Ps'] = np.nan
+        
+        if ~np.isnan(dpars['h']):
+        
+            # determine mixed-layer properties (moisture, potential temperature...) from profile
+            
+            # ... and those of the mixed layer
+            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
+            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
+            if len(valid_indices) > 1:
+                if len(valid_indices_below_h) >= 3.:
+                    ml_mean = air_balloon[is_valid_below_h].mean()
+                else:
+                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
+            elif len(valid_indices) == 1:
+                ml_mean = (air_balloon.iloc[0:1]).mean()
+            else:
+                temp =  pd.DataFrame(air_balloon)
+                temp.iloc[0] = np.nan
+                ml_mean = temp
+                       
+            dpars['theta']= ml_mean.theta
+            dpars['q']    = ml_mean.q
+            dpars['u']    = ml_mean.u
+            dpars['v']    = ml_mean.v 
+        else:
+            dpars['theta'] = np.nan
+            dpars['q'] = np.nan
+            dpars['u'] = np.nan
+            dpars['v'] = np.nan
+        
+        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
+        # All other  data points above the mixed-layer fit
+        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]
+
+
+
+
+
+        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
+        jump = air_ap_head.iloc[0] * np.nan
+        
+        
+        if air_ap_tail.shape[0] > 1:
+        
+            # we originally used THTA, but that has a different definition from
+            # the variable theta that we need, which should be the temperature
+            # that one would have if brought to surface (NOT reference) pressure.
+            for column in ['theta','q','u','v']:
+               
+               # initialize the profile head with the mixed-layer values
+               air_ap_head[column] = ml_mean[column]
+               # calculate jump values at mixed-layer height, which will be
+               # added to the third datapoint of the profile head
+               jump[column] = (air_ap_tail[column].iloc[1]\
+                               -\
+                               air_ap_tail[column].iloc[0])\
+                              /\
+                              (air_ap_tail.z.iloc[1]\
+                               - air_ap_tail.z.iloc[0])\
+                              *\
+                              (dpars['h']- air_ap_tail.z.iloc[0])\
+                              +\
+                              air_ap_tail[column].iloc[0]\
+                              -\
+                              ml_mean[column] 
+               if column == 'theta':
+                  # for potential temperature, we need to set a lower limit to
+                  # avoid the model to crash
+                  jump.theta = np.max((0.1,jump.theta))
+        
+               air_ap_head[column][2] += jump[column]
+        
+        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)
+
+
+        # filter data so that potential temperature always increases with
+        # height 
+        cols = []
+        for column in air_ap_tail.columns:
+            #if column != 'z':
+                cols.append(column)
+
+        # only select samples monotonically increasing with height
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        for ibottom in range(1,len(air_ap_tail_orig)):
+            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)
+
+
+
+
+        # make theta increase strong enough to avoid numerical
+        # instability
+        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        air_ap_tail = pd.DataFrame()
+        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        theta_low = air_ap_head['theta'].iloc[2]
+        z_low = air_ap_head['z'].iloc[2]
+        ibottom = 0
+        for itop in range(0,len(air_ap_tail_orig)):
+            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+            if (
+                #(z_mean > z_low) and \
+                (z_mean > (z_low+10.)) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                #(theta_mean > (theta_low+0.2) ) and \
+                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):
+
+                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+                ibottom = itop+1
+                theta_low = air_ap_tail.theta.iloc[-1]
+                z_low =     air_ap_tail.z.iloc[-1]
+            # elif  (itop > len(air_ap_tail_orig)-10):
+            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        
+        air_ap = \
+            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+
+
+
+        # # make theta increase strong enough to avoid numerical
+        # # instability
+        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
+        # air_ap_tail = pd.DataFrame()
+        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
+        # theta_low = air_ap_head['theta'].iloc[2]
+        # z_low = air_ap_head['z'].iloc[2]
+        # ibottom = 0
+        # for itop in range(0,len(air_ap_tail_orig)):
+        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
+        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
+        #     if ((theta_mean > (theta_low+0.2) ) and \
+        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):
+
+        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
+        #         ibottom = itop+1
+        #         theta_low = air_ap_tail.theta.iloc[-1]
+        #         z_low =     air_ap_tail.z.iloc[-1]
+        #     # elif  (itop > len(air_ap_tail_orig)-10):
+        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
+        # air_ap = \
+        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
+        # 
+        # # we copy the pressure at ground level from balloon sounding. The
+        # # pressure at mixed-layer height will be determined internally by class
+        
+        rho        = 1.2                   # density of air [kg m-3]
+        g          = 9.81                  # gravity acceleration [m s-2]
+        
+        air_ap['p'].iloc[0] =dpars['Ps'] 
+        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
+        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
+        
+        
+        dpars['lat'] = dpars['latitude']
+        # this is set to zero because we use local (sun) time as input (as if we were in Greenwich)
+        dpars['lon'] = 0.
+        # this is the real longitude that will be used to extract ground data
+        
+        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['datetime'] = ldate+dt.timedelta(hours=hour)
+        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
+        
+        dpars['SolarAltitude'] = \
+                                Pysolar.GetAltitude(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
+                                    dpars['latitude'],\
+                                    dpars['longitude'],\
+                                    dpars['datetime']\
+                                )
+        
+        
+        dpars['lSunrise'], dpars['lSunset'] \
+        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
+                                         0.,
+                                         dpars['ldatetime'],0.)
+        
+        # Warning!!! Unfortunately!!!! WORKAROUND!!!! Even though we actually
+        # write local solar time, we need to assign the timezone to UTC (which
+        # is WRONG!!!). Otherwise ruby cannot understand it (it always converts
+        # to local computer time :( ). 
+        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
+        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
+        
+        # This is the nearest datetime when the sun is up (for class)
+        dpars['ldatetime_daylight'] = \
+                                np.min(\
+                                    (np.max(\
+                                        (dpars['ldatetime'],\
+                                         dpars['lSunrise']+dt.timedelta(hours=2))\
+                                     ),\
+                                     dpars['lSunset']\
+                                    )\
+                                )
+        # apply the same time shift for UTC datetime
+        dpars['datetime_daylight'] = dpars['datetime'] \
+                                    +\
+                                    (dpars['ldatetime_daylight']\
+                                     -\
+                                     dpars['ldatetime'])
+        
+        print('ldatetime_daylight',dpars['ldatetime_daylight'])
+        print('ldatetime',dpars['ldatetime'])
+        print('lSunrise',dpars['lSunrise'])
+        dpars['day'] = dpars['ldatetime'].day
+        
+        # We set the starting time to the local sun time, since the model 
+        # thinks we are always at the meridian (lon=0). This way the solar
+        # radiation is calculated correctly.
+        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
+                         + \
+                         dpars['ldatetime_daylight'].minute/60.\
+                         + \
+                         dpars['ldatetime_daylight'].second/3600.
+        
+        print('tstart',dpars['tstart'])
+        dpars['sw_lit'] = False
+        # convert numpy types to native python data types. This provides
+        # cleaner data IO with yaml:
+        for key,value in dpars.items():
+            if type(value).__module__ == 'numpy':
+                dpars[key] = dpars[key].item()
+        
+                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
+        # 
+                for column,decimal in decimals.items():
+                    air_balloon[column] = air_balloon[column].round(decimal)
+                    air_ap[column] = air_ap[column].round(decimal)
+        
+        updateglobal = False
+        if c4gli is None:
+            c4gli = class4gl_input()
+            updateglobal = True
+        
+        print('updating...')
+        print(column)
+        c4gli.update(source='bllast',\
+                    # pars=pars,
+                    pars=dpars,\
+                    air_balloon=air_balloon,\
+                    air_ap=air_ap)
+        if updateglobal:
+            c4gli.get_global_input(globaldata)
+
+        # if profile_ini:
+        #     c4gli.runtime = 10 * 3600
+
+        c4gli.dump(file_sounding)
+        
+        # if profile_ini:
+        #     c4gl = class4gl(c4gli)
+        #     c4gl.run()
+        #     c4gl.dump(file_model,\
+        #               include_input=True,\
+        #               timeseries_only=timeseries_only)
+        #     
+        #     # This will cache the observations and model tables per station for
+        #     # the interface
+        # 
+        # if profile_ini:
+        #     profile_ini=False
+        # else:
+        #     profile_ini=True
+        return c4gli
+
+
+path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS_NOON/'
+
+
+file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    print(pair['morning'])
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1]
+    
+    print(humpafn)
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_morning = bllast_parser(balloon_file,file_morning,date,pair['morning'][0])
+    print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)
+file_morning.close()
+
+file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+for date,pair  in HOUR_FILES.items(): 
+    humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1]
+    balloon_file = open(humpafn,'r',encoding='latin-1')
+
+    c4gli_afternoon = bllast_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
+    print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+file_afternoon.close()
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
+records_morning = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='morning',
+                                           refetch_records=True,
+                                           )
+print('records_morning_ldatetime',records_morning.ldatetime)
+
+records_afternoon = get_records(pd.DataFrame([current_station]),\
+                                           path_soundings,\
+                                           subset='afternoon',
+                                           refetch_records=True,
+                                           )
+
+# align afternoon records with noon records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
+records_afternoon.index = records_morning.index
+path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS_NOON/'
+
+os.system('mkdir -p '+path_exp)
+file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
+file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
+file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
+file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
+for (STNID,chunk,index),record_morning in records_morning.iterrows():
+    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+
+    c4gli_morning = get_record_yaml(file_morning, 
+                                    record_morning.index_start, 
+                                    record_morning.index_end,
+                                    mode='ini')
+    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
+    
+    
+    c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                      record_afternoon.index_start, 
+                                      record_afternoon.index_end,
+                                    mode='ini')
+
+    c4gli_morning.update(source='pairs',pars={'runtime' : \
+                        int((c4gli_afternoon.pars.datetime_daylight - 
+                             c4gli_morning.pars.datetime_daylight).total_seconds())})
+
+    
+    c4gli_morning.pars.sw_ac = []
+    c4gli_morning.pars.sw_ap = True
+    c4gli_morning.pars.sw_lit = False
+    c4gli_morning.dump(file_ini)
+    
+    c4gl = class4gl(c4gli_morning)
+    c4gl.run()
+    
+    c4gl.dump(file_mod,\
+              include_input=False,\
+              timeseries_only=timeseries_only)
+file_ini.close()
+file_mod.close()
+file_morning.close()
+file_afternoon.close()
+
+records_ini = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='ini',
+                                           refetch_records=True,
+                                           )
+records_mod = get_records(pd.DataFrame([current_station]),\
+                                           path_exp,\
+                                           subset='mod',
+                                           refetch_records=True,
+                                           )
+
+records_mod.index = records_ini.index
+
+# align afternoon records with initial records, and set same index
+records_afternoon.index = records_afternoon.ldatetime.dt.date
+records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+records_afternoon.index = records_ini.index
+
+
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = records_morning.index
+# 
+# 
+# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cache the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/setup_goamazon_noon.py b/class4gl/setup/setup_goamazon_noon.py
new file mode 100644
index 0000000..c99e913
--- /dev/null
+++ b/class4gl/setup/setup_goamazon_noon.py
@@ -0,0 +1,755 @@
+# -*- coding: utf-8 -*-
+
+import xarray as xr
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+import glob
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
# Load the global (gridded) boundary-condition datasets once; they are
# queried later via class4gl_input.get_global_input to fill the
# non-sounding model inputs (soil, vegetation, ...).
globaldata = data_global()
globaldata.load_datasets(recalc=0)

# Physical constants
Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
epsilon = Rd/Rv # or mv/md

# Directory with the raw GOAMAZON (ARM) radiosonde netCDF files.
path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'
+
def replace_iter(iterable, search, replace):
    """Yield each value of *iterable* with *search* replaced by *replace*.

    Fixes the original implementation, which called ``str.replace`` but
    discarded its return value (strings are immutable) and therefore
    yielded every value unchanged.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
# Metadata of the GOAMAZON site; station id 90002 is used as the Series
# name and for naming the output files.
current_station = pd.Series(
    {"latitude":  -3.21,
     "longitude": -60.6,
     "name":      "the GOAMAZON experiment"},
    name=90002,
)
+
# Column names of the HUMPPA-style fixed-width balloon files; the header
# row inside those files is unusable, so the names are hard-coded here.
# (The netCDF-based parser below does not use this list; it is retained
# from the original text-file reader, whose read_fwf call is commented out.)
columns = [
    'Time[min:sec]', 'P[hPa]', 'T[C]', 'U[%]', 'Wsp[m/s]', 'Wdir[Grd]',
    'Lon[°]', 'Lat[°]', 'Altitude[m]', 'GeoPot[m]', 'MRI', 'RI',
    'DewPoint[C]', 'Virt. Temp[C]', 'Rs[m/min]', 'D[kg/m3]',
    'Azimut[°]', 'Elevation[°]', 'Range[m]',
]
+
+#DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC)
+#DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC)
+#DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))]
+
+DTS = [dt.datetime(2014,11,15,0,0,0,0,pytz.UTC), 
+       dt.datetime(2014,12,29,0,0,0,0,pytz.UTC),
+       dt.datetime(2015,1,5,0,0,0,0,pytz.UTC),
+       dt.datetime(2015,5,7,0,0,0,0,pytz.UTC)
+      ]
+
+
+
+
+
+HOUR_FILES = {}
+for iDT, DT in enumerate(DTS):
+    morning_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf')
+    if len(possible_files)>0:
+        morning_file= possible_files[0]
+    afternoon_file = None
+    possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.12??00.*.cdf')+\
+                     glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.11??00.*.cdf')+\
+                     glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.10??00.*.cdf')
+    if len(possible_files)>0:
+        afternoon_file= possible_files[-1]
+        hour_afternoon = int(afternoon_file[-17:-15])+float(afternoon_file[-15:-13])/60.
+    if (morning_file is not None) and (afternoon_file is not None):
+        HOUR_FILES[DT] = {'morning':[5.5,morning_file],
+                          'afternoon':[hour_afternoon,afternoon_file]}
+
+print(HOUR_FILES)
+
+# HOUR_FILES = \
+# {
+#     dt.datetime(2015,5,7,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150507.052900.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150507.172700.custom.cdf']},
+#     dt.datetime(2015,3,13,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150313.052700.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150313.173000.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+#     dt.datetime(2015,3,12,0,0,0,0,pytz.UTC):{'morning':  [5.5,'maosondewnpnM1.b1.20150312.052800.custom.cdf'],
+#                                              'afternoon':[17.50,'maosondewnpnM1.b1.20150312.173400.custom.cdf']},
+# }
+
+
+
+
# Only include the following time series in the model output (keeps the
# dumped yaml files small). FIX: 'wthetae' was listed twice in the original;
# the duplicate entry is removed.
timeseries_only = \
['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'zlcl']
+
def esat(T):
    """Saturation water-vapour pressure [Pa] at temperature *T* [K]
    (Tetens-type formula; works element-wise on numpy arrays)."""
    exponent = 17.2694 * (T - 273.16) / (T - 35.86)
    return 0.611e3 * np.exp(exponent)
def efrom_rh100_T(rh100, T):
    """Actual vapour pressure [Pa] from relative humidity *rh100* [%]
    and temperature *T* [K]."""
    saturation_pressure = esat(T)
    return saturation_pressure * rh100 / 100.
def qfrom_e_p(e, p):
    """Specific humidity [kg kg-1] from vapour pressure *e* and total
    pressure *p* (both [Pa]); ``epsilon`` = Rd/Rv is a module constant."""
    moist_part = epsilon * e
    return moist_part / (p - (1. - epsilon) * e)
+
+
+
def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None):
        """Convert one radiosonde dataset into a class4gl input record.

        Despite the name (inherited from the HUMPPA campaign scripts), this
        version reads GOAMAZON ARM soundings from an xarray Dataset.

        Parameters:
            balloon_file: xarray.Dataset with variables tdry [C], pres [hPa],
                u_wind, v_wind, wspd [m/s], rh [%] and (optionally) alt [m].
            file_sounding: open file handle; the resulting yaml record is
                appended to it.
            ldate: local date (datetime) of the launch day.
            lhour: local launch hour (decimal hours).
            c4gli: existing class4gl_input to update; when None a new one is
                created and the global gridded datasets are queried as well.

        Returns:
            The new or updated class4gl_input object.
        """
        print(balloon_file)

        xrin = balloon_file
        air_balloon = pd.DataFrame()

        # raw profile converted to SI units (K, Pa)
        air_balloon['t'] = xrin.tdry.values+273.15
        air_balloon['p'] = xrin.pres.values*100.

        air_balloon['u'] = xrin.u_wind.values
        air_balloon['v'] = xrin.v_wind.values
        air_balloon['WSPD'] = xrin['wspd'].values

        print(xrin.rh.values.shape)
        # specific humidity from relative humidity [%] and temperature
        air_balloon['q'] = qfrom_e_p(efrom_rh100_T(xrin.rh.values,air_balloon['t'].values),air_balloon.p.values)


        # (leftover of the HUMPPA fixed-width text reader; kept for reference)
        #balloon_conv = replace_iter(balloon_file,"°","deg")
        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
        # air_balloon_in = pd.read_fwf(balloon_file,
        #                              widths=[14]*19,
        #                              skiprows=9,
        #                              skipfooter=15,
        #                              decimal=',',
        #                              header=None,
        #                              names = columns,
        #                              na_values='-----')


        # derived quantities:
        #   R      gas constant of the moist-air mixture [J kg-1 K-1]
        #   theta  potential temperature w.r.t. the surface (first) pressure
        #   thetav virtual potential temperature
        #   rho    air density [kg m-3]
        rowmatches = {
            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q,
            'rho': lambda x: x.p /x.t / x.R ,
        }
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon)

        print('alt in xrin?:','alt' in xrin)
        if 'alt' in xrin:
            air_balloon['z'] = xrin.alt.values
        else:
            # Fallback: integrate heights hydrostatically upwards from z=0
            # using the layer-mean density.
            # BUG(review): hydrostatics give dz = -dp/(rho*g); the division
            # by g (~9.81) appears to be missing below, which would inflate
            # these fallback heights by an order of magnitude -- confirm
            # before relying on this branch.
            air_balloon['z'] = 0.
            for irow,row in air_balloon.iloc[1:].iterrows():
                air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \
                        2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \
                        (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1])


        # recompute the derived columns (kept as in the original; the lambdas
        # do not read z, so this appears to be a no-op -- confirm)
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon)

        dpars = {}
        dpars['longitude']  = current_station['longitude']
        dpars['latitude']  = current_station['latitude'] 

        dpars['STNID'] = current_station.name


        # # there are issues with the lower measurements in the HUMPPA campaign,
        # # for which a steady decrease of potential temperature is found, which
        # # is unrealistic.  Here I filter them away
        # ifirst = 0
        # while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
        #     ifirst = ifirst+1
        # print ('ifirst:',ifirst)
        # air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
        air_balloon = air_balloon.iloc[:].reset_index().drop(['index'],axis=1)

        # rows with any NaN or a negative height are excluded from the fits
        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
        valid_indices = air_balloon.index[is_valid].values

        # 'b' selects the best-guess mixed-layer height (h_b) below
        air_ap_mode='b'

        if len(valid_indices) > 0:
            print(air_balloon.z.shape,air_balloon.thetav.shape,)
            # boundary-layer height from the bulk-Richardson method of blh(),
            # with upper/lower uncertainty bounds
            dpars['h'],dpars['h_u'],dpars['h_l'] =\
                blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD)
            dpars['h_b'] = np.max((dpars['h'],10.))
            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
        else:
            dpars['h_u'] =np.nan
            dpars['h_l'] =np.nan
            dpars['h_e'] =np.nan
            dpars['h'] =np.nan



        # surface pressure = pressure of the lowest valid sample
        if ~np.isnan(dpars['h']):
            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
        else:
            dpars['Ps'] = np.nan

        if ~np.isnan(dpars['h']):

            # determine mixed-layer properties (moisture, potential temperature...) from profile

            # ... and those of the mixed layer
            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
            if len(valid_indices) > 1:
                if len(valid_indices_below_h) >= 3.:
                    ml_mean = air_balloon[is_valid_below_h].mean()
                else:
                    # too few samples below h: average the lowest valid span
                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
            elif len(valid_indices) == 1:
                ml_mean = (air_balloon.iloc[0:1]).mean()
            else:
                # no valid samples: an all-NaN first row stands in for the mean
                temp =  pd.DataFrame(air_balloon)
                temp.iloc[0] = np.nan
                ml_mean = temp

            dpars['theta']= ml_mean.theta
            dpars['q']    = ml_mean.q
            dpars['u']    = ml_mean.u
            dpars['v']    = ml_mean.v 
        else:
            dpars['theta'] = np.nan
            dpars['q'] = np.nan
            dpars['u'] = np.nan
            dpars['v'] = np.nan

        # profile head: three synthetic points (2 m, h, h+jump) describing
        # the mixed layer; the tail keeps the observed points above h
        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
        # All other  data points above the mixed-layer fit
        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]



        # NOTE(review): attribute-style assignment of a 3-element Series to a
        # zero-row frame relies on legacy pandas behaviour -- verify on the
        # pandas version in use.
        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
        jump = air_ap_head.iloc[0] * np.nan

        if air_ap_tail.shape[0] > 1:

            # we originally used THTA, but that has another definition than the
            # variable theta that we need which should be the temperature that
            # one would have if brought to surface (NOT reference) pressure.
            for column in ['theta','q','u','v']:

               # initialize the profile head with the mixed-layer values
               air_ap_head[column] = ml_mean[column]
               # calculate jump values at mixed-layer height, which will be
               # added to the third datapoint of the profile head
               jump[column] = (air_ap_tail[column].iloc[1]\
                               -\
                               air_ap_tail[column].iloc[0])\
                              /\
                              (air_ap_tail.z.iloc[1]\
                               - air_ap_tail.z.iloc[0])\
                              *\
                              (dpars['h']- air_ap_tail.z.iloc[0])\
                              +\
                              air_ap_tail[column].iloc[0]\
                              -\
                              ml_mean[column] 
               if column == 'theta':
                  # for potential temperature, we need to set a lower limit to
                  # avoid the model to crash
                  jump.theta = np.max((0.1,jump.theta))

               # NOTE(review): chained assignment; pandas may warn or drop
               # the write on newer versions -- confirm.
               air_ap_head[column][2] += jump[column]

        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)



        # only select samples monotonically increasing with height
        # (at least 10 m apart)
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        for ibottom in range(1,len(air_ap_tail_orig)):
            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)


        # make theta increase strong enough to avoid numerical
        # instability
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        # air_ap_tail = pd.DataFrame()
        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        # theta_low = air_ap_head['theta'].iloc[2]
        # z_low = air_ap_head['z'].iloc[2]
        # ibottom = 0
        # for itop in range(0,len(air_ap_tail_orig)):
        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
        #     if (
        #         #(z_mean > z_low) and \
        #         (z_mean > (z_low+10.)) and \
        #         #(theta_mean > (theta_low+0.2) ) and \
        #         #(theta_mean > (theta_low+0.2) ) and \
        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.00001)):

        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
        #         ibottom = itop+1
        #         theta_low = air_ap_tail.theta.iloc[-1]
        #         z_low =     air_ap_tail.z.iloc[-1]
        #     # elif  (itop > len(air_ap_tail_orig)-10):
        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
        # 
        air_ap = \
            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)

        # we copy the pressure at ground level from balloon sounding. The
        # pressure at mixed-layer height will be determined internally by class

        rho        = 1.2                   # density of air [kg m-3]
        g          = 9.81                  # gravity acceleration [m s-2]

        # hydrostatic estimates for the three synthetic head points
        air_ap['p'].iloc[0] =dpars['Ps'] 
        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)


        dpars['lat'] = dpars['latitude']
        # this is set to zero because we use local (sun) time as input (as if we were in Greenwich)
        dpars['lon'] = 0.
        # this is the real longitude that will be used to extract ground data

        print('ldate',ldate)
        print('lhour',lhour)
        # local launch time; UTC = local time minus the fixed -4 h offset
        dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour)
        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-4)
        dpars['doy'] = dpars['datetime'].timetuple().tm_yday

        dpars['SolarAltitude'] = \
                                Pysolar.GetAltitude(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )
        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )


        # sunrise/sunset at lon=0 since we work in local solar time
        dpars['lSunrise'], dpars['lSunset'] \
        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
                                         0.,
                                         dpars['ldatetime'],0.)

        # Warning!!! Unfortunately!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])

        # This is the nearest datetime when the sun is up (for class)
        dpars['ldatetime_daylight'] = \
                                np.min(\
                                    (np.max(\
                                        (dpars['ldatetime'],\
                                         dpars['lSunrise'])\
                                     ),\
                                     dpars['lSunset']\
                                    )\
                                )
        # apply the same time shift for UTC datetime
        dpars['datetime_daylight'] = dpars['datetime'] \
                                    +\
                                    (dpars['ldatetime_daylight']\
                                     -\
                                     dpars['ldatetime'])


        # We set the starting time to the local sun time, since the model 
        # thinks we are always at the meridian (lon=0). This way the solar
        # radiation is calculated correctly.
        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
                         + \
                         dpars['ldatetime_daylight'].minute/60.\
                         + \
                         dpars['ldatetime_daylight'].second/3600.

        dpars['sw_lit'] = False
        # convert numpy types to native python data types. This provides
        # cleaner data IO with yaml:
        for key,value in dpars.items():
            if type(value).__module__ == 'numpy':
                dpars[key] = dpars[key].item()

                # NOTE(review): this rounding block is nested inside the
                # numpy-conversion loop above, so it re-runs once per
                # numpy-typed parameter and is skipped entirely if there is
                # none; it was probably meant to sit at function level --
                # confirm before changing.
                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
        # 
                for column,decimal in decimals.items():
                    air_balloon[column] = air_balloon[column].round(decimal)
                    air_ap[column] = air_ap[column].round(decimal)

        updateglobal = False
        if c4gli is None:
            c4gli = class4gl_input()
            updateglobal = True

        print('updating...')
        # NOTE(review): 'column' is a stale loop variable here and raises
        # NameError when neither loop above executed -- confirm intent.
        print(column)
        # NOTE(review): the source tag still says 'humppa' although this
        # script processes GOAMAZON soundings -- check downstream consumers
        # before renaming.
        c4gli.update(source='humppa',\
                    # pars=pars,
                    pars=dpars,\
                    air_balloon=air_balloon,\
                    air_ap=air_ap)
        if updateglobal:
            # fill the remaining inputs (soil, vegetation, ...) from the
            # global gridded datasets
            c4gli.get_global_input(globaldata)

        # if profile_ini:
        #     c4gli.runtime = 10 * 3600

        # append this sounding as one yaml record
        c4gli.dump(file_sounding)

        # if profile_ini:
        #     c4gl = class4gl(c4gli)
        #     c4gl.run()
        #     c4gl.dump(file_model,\
        #               include_input=True,\
        #               timeseries_only=timeseries_only)
        #     
        #     # This will cash the observations and model tables per station for
        #     # the interface
        # 
        # if profile_ini:
        #     profile_ini=False
        # else:
        #     profile_ini=True
        return c4gli
+
+
# Output directory for the per-station morning/afternoon sounding yaml files.
path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS_NOON/'

# FIX: create the directory portably instead of shelling out to 'mkdir -p'.
os.makedirs(path_soundings, exist_ok=True)

# Dump all selected morning soundings into one yaml file; the context
# manager guarantees the file is closed even when a parser call raises.
with open(path_soundings + format(current_station.name, '05d')
          + '_morning.yaml', 'w') as file_morning:
    for date, pair in HOUR_FILES.items():
        print(pair['morning'])
        humpafn = pair['morning'][1]
        print(humpafn)
        balloon_file = xr.open_dataset(humpafn)

        c4gli_morning = humppa_parser(balloon_file, file_morning, date,
                                      pair['morning'][0])
        print('c4gli_morning_ldatetime 0', c4gli_morning.pars.ldatetime)

# Same for the afternoon (evaluation) soundings.
with open(path_soundings + format(current_station.name, '05d')
          + '_afternoon.yaml', 'w') as file_afternoon:
    for date, pair in HOUR_FILES.items():
        humpafn = pair['afternoon'][1]
        balloon_file = xr.open_dataset(humpafn)

        c4gli_afternoon = humppa_parser(balloon_file, file_afternoon, date,
                                        pair['afternoon'][0])
        print('c4gli_afternoon_ldatetime 0', c4gli_afternoon.pars.ldatetime)
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM//humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
# Build/refresh the index tables of the yaml records written above (one row
# per sounding; index_start/index_end are offsets into the yaml file).
records_morning = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='morning',
                                           refetch_records=True,
                                           )
print('records_morning_ldatetime',records_morning.ldatetime)

records_afternoon = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='afternoon',
                                           refetch_records=True,
                                           )

# align afternoon records with noon records, and set same index
# (match the two subsets on local launch date)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index

# Experiment output directory (initial states + model runs).
path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS_NOON/'

os.system('mkdir -p '+path_exp)
# reopen the sounding yaml files for reading; the _ini/_mod files collect
# the model initial states and model output (closed after the run loop below)
file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml')
file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w')
file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w')
+
# Run CLASS for every morning/afternoon pair: the morning sounding provides
# the initial profile; the run length is the (daylight-clipped) interval
# between the two launches.
for (STNID,chunk,index),record_morning in records_morning.iterrows():
    record_afternoon = records_afternoon.loc[(STNID,chunk,index)]

    # fetch the full yaml record of the morning sounding (initial state)
    c4gli_morning = get_record_yaml(file_morning, 
                                    record_morning.index_start, 
                                    record_morning.index_end,
                                    mode='ini')
    #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)
    
    
    # the afternoon sounding is only needed for its (daylight) launch time
    c4gli_afternoon = get_record_yaml(file_afternoon, 
                                      record_afternoon.index_start, 
                                      record_afternoon.index_end,
                                    mode='ini')

    # simulated period [s] between the two daylight-clipped launch times
    c4gli_morning.update(source='pairs',pars={'runtime' : \
                        int((c4gli_afternoon.pars.datetime_daylight - 
                             c4gli_morning.pars.datetime_daylight).total_seconds())})
    # model switches: sw_ac/sw_ap/sw_lit are class4gl options -- presumably
    # no large-scale forcing, prescribed profile on; confirm in class4gl docs
    c4gli_morning.update(source='manual',
                         pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
    c4gli_morning.dump(file_ini)
    
    c4gl = class4gl(c4gli_morning)
    c4gl.run()
    
    # dump only the requested time series to keep the output files small
    c4gl.dump(file_mod,\
              include_input=False,\
              timeseries_only=timeseries_only)
file_ini.close()
file_mod.close()
file_morning.close()
file_afternoon.close()
+
# Re-index the freshly written experiment records (initial states and model
# output) so the interface can look them up.
records_ini = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='ini',
                                           refetch_records=True,
                                           )
records_mod = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='mod',
                                           refetch_records=True,
                                           )

# model records correspond one-to-one with the initial records
records_mod.index = records_ini.index

# align afternoon records with initial records, and set same index
# (match on local launch date, as above for the morning records)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
records_afternoon.index = records_ini.index
+
+
+"""
+stations_for_iter = stations(path_exp)
+for STNID,station in stations_iterator(stations_for_iter):
+    records_current_station_index = \
+            (records_ini.index.get_level_values('STNID') == STNID)
+    file_current_station_mod = STNID
+
+    with \
+    open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+    open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+    open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+        for (STNID,index),record_ini in records_iterator(records_ini):
+            c4gli_ini = get_record_yaml(file_station_ini, 
+                                        record_ini.index_start, 
+                                        record_ini.index_end,
+                                        mode='ini')
+            #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+
+            record_mod = records_mod.loc[(STNID,index)]
+            c4gl_mod = get_record_yaml(file_station_mod, 
+                                        record_mod.index_start, 
+                                        record_mod.index_end,
+                                        mode='mod')
+            record_afternoon = records_afternoon.loc[(STNID,index)]
+            c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+                                        record_afternoon.index_start, 
+                                        record_afternoon.index_end,
+                                        mode='ini')
+"""
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
# # create intersection index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/setup_humppa_noon.py b/class4gl/setup/setup_humppa_noon.py
new file mode 100644
index 0000000..72bbf89
--- /dev/null
+++ b/class4gl/setup/setup_humppa_noon.py
@@ -0,0 +1,733 @@
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import io
+import os
+import numpy as np
+import datetime as dt
+import Pysolar
+import sys
+import pytz
+sys.path.insert(0,'/user/home/gent/vsc422/vsc42247/software/class4gl/class4gl/')
+from class4gl import class4gl_input, data_global,class4gl
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+
# Load the global driver datasets used to fill in the non-sounding model
# input; recalc=0 presumably reuses any cached version — TODO confirm against
# data_global.load_datasets.
globaldata = data_global()
globaldata.load_datasets(recalc=0)

# Thermodynamic constants used throughout the profile conversions below.
Rd         = 287.                  # gas constant for dry air [J kg-1 K-1]
cp         = 1005.                 # specific heat of dry air [J kg-1 K-1]
Rv         = 461.5                 # gas constant for moist air [J kg-1 K-1]
epsilon = Rd/Rv # ratio of gas constants, equals Mv/Md (~0.622)
+
+
def replace_iter(iterable, search, replace):
    """Yield each string of *iterable* with *search* substituted by *replace*.

    Bug fix: ``str.replace`` returns a new string (strings are immutable);
    the original code discarded that result and yielded the input value
    unchanged, so the substitution never took effect.
    """
    for value in iterable:
        yield value.replace(search, replace)
+
+from class4gl import blh,class4gl_input
+
# Definition of the HUMPPA measurement station used for all soundings below.
_station_attrs = {
    "latitude": 61.8448,
    "longitude": 24.2882,
    "name": "the HUMMPA experiment",
}
current_station = pd.Series(_station_attrs)
# The Series' own .name doubles as the (artificial) numeric station ID.
current_station.name = 90000
+
# Fixed column names for the raw balloon files: the header row inside the
# files themselves is unreliable, so the 19 names are pinned here and passed
# to read_fwf with header=None.
columns = [
    'Time[min:sec]', 'P[hPa]', 'T[C]', 'U[%]', 'Wsp[m/s]', 'Wdir[Grd]',
    'Lon[°]', 'Lat[°]', 'Altitude[m]', 'GeoPot[m]', 'MRI', 'RI',
    'DewPoint[C]', 'Virt. Temp[C]', 'Rs[m/min]', 'D[kg/m3]',
    'Azimut[°]', 'Elevation[°]', 'Range[m]',
]
+
+
# Campaign days mapped to the balloon soundings used for each day.
# Key: the day at 00:00 UTC.  Value: {'morning': [hour, filename],
# 'afternoon': [hour, filename]}, where `hour` is the launch hour handed to
# humppa_parser (added to the date to form the local launch datetime) and
# `filename` is the raw file under .../SOUNDINGS/HUMPPA/.
# Commented-out days were excluded from the analysis (bad profiles or
# afternoon launches unsuited for a noon evaluation — see per-line notes).
HOUR_FILES = \
{ 
  # dt.datetime(2010,7,12,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071210_0300.txt'],'afternoon':[15,'humppa_071210_1500.txt']},
  dt.datetime(2010,7,13,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071310_0300.txt'],'afternoon':[12,'humppa_071310_1200.txt']},
#  dt.datetime(2010,7,14,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071410_0300.txt'],'afternoon':[16,'humppa_071410_1600.txt']},
  #dt.datetime(2010,7,15,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071510_0300.txt'],'afternoon':[15,'humppa_071510_1500.txt']},
  #dt.datetime(2010,7,16,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071610_0300.txt'],'afternoon':[21,'humppa_071610_2100.txt']},
  #dt.datetime(2010,7,17,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071710_0300.txt'],'afternoon':[18,'humppa_071710_1800.txt']},
  #dt.datetime(2010,7,18,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071810_0300.txt'],'afternoon':[21,'humppa_071810_2100.txt']},
  dt.datetime(2010,7,19,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_071910_0300.txt'],'afternoon':[9,'humppa_071910_0900.txt']},
#  dt.datetime(2010,7,20):{'morning':[4,'humppa_072010_0400.txt'],'afternoon':[15,'humppa_072010_1500.txt']},
#  dt.datetime(2010,7,21,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072110_0300.txt'],'afternoon':[21,'humppa_072110_2100.txt']},
  dt.datetime(2010,7,22,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072210_0400.txt'],'afternoon':[12,'humppa_072210_1200.txt']},
 # something is wrong with this profile
 # dt.datetime(2010,7,23,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072310_0300.txt'],'afternoon':[15,'humppa_072310_1500.txt']},
#  dt.datetime(2010,7,24,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072410_0300.txt'],'afternoon':[16,'humppa_072410_1600.txt']},
#  dt.datetime(2010,7,25,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072510_0300.txt'],'afternoon':[21,'humppa_072510_2100.txt']},
#  dt.datetime(2010,7,26,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072610_0300.txt'],'afternoon':[21,'humppa_072610_2100.txt']},
#  dt.datetime(2010,7,27,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072710_0300.txt'],'afternoon':[15,'humppa_072710_1500.txt']},
#  dt.datetime(2010,7,28,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_072810_0300.txt'],'afternoon':[15,'humppa_072810_1500.txt']},
  dt.datetime(2010,7,29,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_072910_0400.txt'],'afternoon':[12,'humppa_072910_1200.txt']},
#  dt.datetime(2010,7,30,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_073010_0900.txt'],'afternoon':[15,'humppa_073010_1500.txt']},
  dt.datetime(2010,7,31,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_073110_0300_01.txt'],'afternoon':[12,'humppa_073110_1200.txt']},
#  dt.datetime(2010,8, 1,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080110_0300.txt'],'afternoon':[18,'humppa_080110_1800.txt']},
  dt.datetime(2010,8, 2,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080210_0300.txt'],'afternoon':[9,'humppa_080210_0900.txt']},
#  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[9,'humppa_080310_0900.txt'],'afternoon':[18,'humppa_080310_1800.txt']},
#  dt.datetime(2010,8, 3,0,0,0,0,pytz.UTC):{'morning':[8,'humppa_080410_0800.txt'],'afternoon':[18,'humppa_080410_1800.txt']},
#  dt.datetime(2010,8, 5,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080510_0300.txt'],'afternoon':[18,'humppa_080510_1800.txt']},
#  dt.datetime(2010,8, 6,0,0,0,0,pytz.UTC):{'morning':[4,'humppa_080610_0400.txt'],'afternoon':[18,'humppa_080610_1800.txt']},
#  dt.datetime(2010,8, 7,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080710_0300.txt'],'afternoon':[18,'humppa_080710_1800.txt']},
#  dt.datetime(2010,8, 8,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_080810_0300.txt'],'afternoon':[18,'humppa_080810_1800.txt']},
  dt.datetime(2010,8,10,0,0,0,0,pytz.UTC):{'morning':[3,'humppa_081010_0300.txt'],'afternoon':[12,'humppa_081010_1200.txt']},
}
+
+
+
+
+
+
# Only include the following time series in the dumped model output (keeps
# the yaml archives small).  Fix: the original list contained 'wthetae'
# twice; the duplicate has been removed.
timeseries_only = \
['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'zlcl']
+
+
def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None):
        """Parse one raw HUMPPA balloon sounding into a class4gl input record.

        Reads the fixed-width sounding from *balloon_file*, derives a
        thermodynamic profile, estimates the mixed-layer height and mean
        mixed-layer state, builds an idealized profile (mixed layer + jump +
        free troposphere), and dumps the result to the yaml archive
        *file_sounding*.

        Parameters
        ----------
        balloon_file : open (latin-1) file handle of the raw sounding.
        file_sounding : open yaml file the resulting record is appended to.
        ldate : datetime of the campaign day (midnight).
        hour : launch hour, added to ldate to form the local launch time.
        c4gli : optional class4gl_input updated in place; when None a new one
            is created and the global driver data are attached to it.

        Returns
        -------
        class4gl_input : the object that was dumped.
        """
        #balloon_conv = replace_iter(balloon_file,"°","deg")
        #readlines = [ str(line).replace('°','deg') for line in balloon_file.readlines()]
        #air_balloon = pd.read_fwf( io.StringIO(''.join(readlines)),skiprows=8,skipfooter=15)
        # The in-file header is unreliable, so the 19 fixed-width columns are
        # forced to the module-level `columns` names.  Decimal commas and
        # '-----' missing-value markers are handled here.
        air_balloon_in = pd.read_fwf(balloon_file,
                                     widths=[14]*19,
                                     skiprows=9,
                                     skipfooter=15,
                                     decimal=',',
                                     header=None,
                                     names = columns,
                                     na_values='-----')
    
        # First pass: quantities taken directly from the raw columns.
        rowmatches = {
            't':      lambda x: x['T[C]']+273.15,
            #'tv':     lambda x: x['Virt. Temp[C]']+273.15,
            'p':      lambda x: x['P[hPa]']*100.,
            # wind components from speed and meteorological direction
            'u':      lambda x: x['Wsp[m/s]'] * np.sin((90.-x['Wdir[Grd]'])/180.*np.pi),
            'v':      lambda x: x['Wsp[m/s]'] * np.cos((90.-x['Wdir[Grd]'])/180.*np.pi),
            'z':      lambda x: x['Altitude[m]'],
            # specific humidity backed out of virtual vs. dry temperature,
            # clipped at zero
            'q':      lambda x: np.clip((1. - (273.15+x['Virt. Temp[C]'])/(273.15+x['T[C]']))/(1. - 1./epsilon),a_min=0.,a_max=None),
        }
        
        air_balloon = pd.DataFrame()
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon_in)
        
        # Second pass: quantities that depend on the first-pass columns.
        rowmatches = {
            'R' :    lambda x: (Rd*(1.-x.q) + Rv*x.q),
            # potential temperature referenced to the lowest-level pressure
            'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp),
            'thetav': lambda x: x.theta  + 0.61 * x.theta * x.q
        }
        
        for varname,lfunction in rowmatches.items():
            air_balloon[varname] = lfunction(air_balloon)
        
        # scalar parameters collected for the class4gl input
        dpars = {}
        dpars['longitude']  = current_station['longitude']
        dpars['latitude']  = current_station['latitude'] 
        
        dpars['STNID'] = current_station.name
        

        # there are issues with the lower measurements in the HUMPPA campaign,
        # for which a steady decrease of potential temperature is found, which
        # is unrealistic.  Here I filter them away
        ifirst = 0
        while  (air_balloon.theta.iloc[ifirst+1] < air_balloon.theta.iloc[ifirst]):
            ifirst = ifirst+1
        print ('ifirst:',ifirst)
        air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1)
        
        # rows with any NaN or a negative altitude are excluded from the
        # mixed-layer statistics below
        is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)
        valid_indices = air_balloon.index[is_valid].values
        
        air_ap_mode='b'  # 'b': use the best-guess boundary-layer height h_b
        
        if len(valid_indices) > 0:
            # bulk-Richardson boundary-layer height plus upper/lower bounds.
            # NOTE(review): wind speed comes from the *unfiltered*
            # air_balloon_in while z/thetav come from the ifirst-trimmed
            # air_balloon, so the rows are misaligned whenever ifirst > 0 —
            # confirm this is intended.
            dpars['h'],dpars['h_u'],dpars['h_l'] =\
                blh(air_balloon.z,air_balloon.thetav,air_balloon_in['Wsp[m/s]'])
            dpars['h_b'] = np.max((dpars['h'],10.))
            dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height
            dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height
            dpars['h_e'] = np.abs( dpars['h_u'] - dpars['h_l']) # error of mixed-layer height
            dpars['h'] = np.round(dpars['h_'+air_ap_mode],1)
        else:
            dpars['h_u'] =np.nan
            dpars['h_l'] =np.nan
            dpars['h_e'] =np.nan
            dpars['h'] =np.nan
        
        
        
        # surface pressure: lowest valid level of the sounding
        if ~np.isnan(dpars['h']):
            dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]]
        else:
            dpars['Ps'] = np.nan
        
        if ~np.isnan(dpars['h']):
        
            # determine mixed-layer properties (moisture, potential temperature...) from profile
            
            # ... and those of the mixed layer
            is_valid_below_h = is_valid & (air_balloon.z < dpars['h'])
            valid_indices_below_h =  air_balloon.index[is_valid_below_h].values
            if len(valid_indices) > 1:
                # need at least 3 levels below h for a robust mean; otherwise
                # fall back to the two lowest valid levels
                if len(valid_indices_below_h) >= 3.:
                    ml_mean = air_balloon[is_valid_below_h].mean()
                else:
                    ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean()
            elif len(valid_indices) == 1:
                ml_mean = (air_balloon.iloc[0:1]).mean()
            else:
                # no valid levels at all: all-NaN placeholder
                temp =  pd.DataFrame(air_balloon)
                temp.iloc[0] = np.nan
                ml_mean = temp
                       
            dpars['theta']= ml_mean.theta
            dpars['q']    = ml_mean.q
            dpars['u']    = ml_mean.u
            dpars['v']    = ml_mean.v 
        else:
            dpars['theta'] = np.nan
            dpars['q'] = np.nan
            dpars['u'] = np.nan
            dpars['v'] = np.nan
        
        # profile head: 3 synthetic levels (2 m, h, h+jump); starts as an
        # empty frame with the same columns
        air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns)
        # All other  data points above the mixed-layer fit
        air_ap_tail = air_balloon[air_balloon.z > dpars['h']]



        air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']]))
        jump = air_ap_head.iloc[0] * np.nan
        
        if air_ap_tail.shape[0] > 1:
        
            # we originally used THTA, but that has another definition than the
            # variable theta that we need which should be the temperature that
            # one would have if brought to surface (NOT reference) pressure.
            for column in ['theta','q','u','v']:
               
               # initialize the profile head with the mixed-layer values
               air_ap_head[column] = ml_mean[column]
               # calculate jump values at mixed-layer height, which will be
               # added to the third datapoint of the profile head
               # (linear interpolation of the tail down to z = h, minus the
               # mixed-layer mean)
               jump[column] = (air_ap_tail[column].iloc[1]\
                               -\
                               air_ap_tail[column].iloc[0])\
                              /\
                              (air_ap_tail.z.iloc[1]\
                               - air_ap_tail.z.iloc[0])\
                              *\
                              (dpars['h']- air_ap_tail.z.iloc[0])\
                              +\
                              air_ap_tail[column].iloc[0]\
                              -\
                              ml_mean[column] 
               if column == 'theta':
                  # for potential temperature, we need to set a lower limit to
                  # avoid the model to crash
                  jump.theta = np.max((0.1,jump.theta))
        
               # NOTE(review): chained indexing assignment — relies on the
               # selection being a view (pandas SettingWithCopy territory).
               air_ap_head[column][2] += jump[column]
        
        air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2)



        # only select samples monotonically increasing with height
        # (at least 10 m apart)
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        for ibottom in range(1,len(air_ap_tail_orig)):
            if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.:
                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True)

        # make theta increase strong enough to avoid numerical
        # instability: merge consecutive tail levels until the mean theta
        # gradient relative to the last kept level exceeds the threshold
        air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        air_ap_tail = pd.DataFrame()
        #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        theta_low = air_ap_head['theta'].iloc[2]
        z_low = air_ap_head['z'].iloc[2]
        ibottom = 0
        for itop in range(0,len(air_ap_tail_orig)):
            theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
            z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
            if (
                #(z_mean > z_low) and \
                (z_mean > (z_low+10.)) and \
                #(theta_mean > (theta_low+0.2) ) and \
                #(theta_mean > (theta_low+0.2) ) and \
                 (((theta_mean - theta_low)/(z_mean - z_low)) > 0.0001)):

                air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
                ibottom = itop+1
                theta_low = air_ap_tail.theta.iloc[-1]
                z_low =     air_ap_tail.z.iloc[-1]
            # elif  (itop > len(air_ap_tail_orig)-10):
            #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
        
        # final idealized profile: synthetic head + thinned tail
        air_ap = \
            pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)

        # # make theta increase strong enough to avoid numerical
        # # instability
        # air_ap_tail_orig = pd.DataFrame(air_ap_tail)
        # air_ap_tail = pd.DataFrame()
        # #air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        # air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True)
        # theta_low = air_ap_head['theta'].iloc[2]
        # z_low = air_ap_head['z'].iloc[2]
        # ibottom = 0
        # for itop in range(0,len(air_ap_tail_orig)):
        #     theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean()
        #     z_mean =     air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean()
        #     if ((theta_mean > (theta_low+0.2) ) and \
        #          (((theta_mean - theta_low)/(z_mean - z_low)) > 0.001)):

        #         air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom:(itop+1)].mean(),ignore_index=True)
        #         ibottom = itop+1
        #         theta_low = air_ap_tail.theta.iloc[-1]
        #         z_low =     air_ap_tail.z.iloc[-1]
        #     # elif  (itop > len(air_ap_tail_orig)-10):
        #     #     air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[itop],ignore_index=True)
        # 
        # air_ap = \
        #     pd.concat((air_ap_head,air_ap_tail)).reset_index().drop(['index'],axis=1)
        
        # we copy the pressure at ground level from balloon sounding. The
        # pressure at mixed-layer height will be determined internally by class
        
        rho        = 1.2                   # density of air [kg m-3]
        g          = 9.81                  # gravity acceleration [m s-2]
        
        # hydrostatic estimate of pressure at the head levels
        air_ap['p'].iloc[0] =dpars['Ps'] 
        air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h'])
        air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1)
        
        
        dpars['lat'] = dpars['latitude']
        # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich)
        dpars['lon'] = 0.
        # this is the real longitude that will be used to extract ground data
        
        # local launch time; UTC is local minus 3 h (campaign-time offset —
        # TODO confirm against the HUMPPA time convention)
        dpars['ldatetime'] = ldate+dt.timedelta(hours=hour)
        dpars['datetime'] =  dpars['ldatetime'] + dt.timedelta(hours=-3)
        dpars['doy'] = dpars['datetime'].timetuple().tm_yday
        
        dpars['SolarAltitude'] = \
                                Pysolar.GetAltitude(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )
        dpars['SolarAzimuth'] =  Pysolar.GetAzimuth(\
                                    dpars['latitude'],\
                                    dpars['longitude'],\
                                    dpars['datetime']\
                                )
        
        
        # sunrise/sunset at lon=0 to stay in local solar time
        dpars['lSunrise'], dpars['lSunset'] \
        =  Pysolar.util.GetSunriseSunset(dpars['latitude'],
                                         0.,
                                         dpars['ldatetime'],0.)
        
        # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
        dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise'])
        dpars['lSunset'] = pytz.utc.localize(dpars['lSunset'])
        
        # This is the nearest datetime when the sun is up (for class)
        dpars['ldatetime_daylight'] = \
                                np.min(\
                                    (np.max(\
                                        (dpars['ldatetime'],\
                                         dpars['lSunrise'])\
                                     ),\
                                     dpars['lSunset']\
                                    )\
                                )
        # apply the same time shift for UTC datetime
        dpars['datetime_daylight'] = dpars['datetime'] \
                                    +\
                                    (dpars['ldatetime_daylight']\
                                     -\
                                     dpars['ldatetime'])
        
        
        # We set the starting time to the local sun time, since the model 
        # thinks we are always at the meridian (lon=0). This way the solar
        # radiation is calculated correctly.
        dpars['tstart'] = dpars['ldatetime_daylight'].hour \
                         + \
                         dpars['ldatetime_daylight'].minute/60.\
                         + \
                         dpars['ldatetime_daylight'].second/3600.
        
        dpars['sw_lit'] = False
        # convert numpy types to native python data types. This provides
        # cleaner data IO with yaml:
        for key,value in dpars.items():
            if type(value).__module__ == 'numpy':
                dpars[key] = dpars[key].item()
        
                # NOTE(review): this rounding block is indented inside the
                # numpy-conversion if, so it re-runs for every numpy-typed
                # entry of dpars and is skipped entirely when there is none.
                # Rounding is idempotent, but the placement looks
                # unintentional — confirm it should run once, after the loop.
                decimals = {'p':0,'t':2,'theta':4, 'z':2, 'q':5, 'u':4, 'v':4}
        # 
                for column,decimal in decimals.items():
                    air_balloon[column] = air_balloon[column].round(decimal)
                    air_ap[column] = air_ap[column].round(decimal)
        
        updateglobal = False
        if c4gli is None:
            c4gli = class4gl_input()
            updateglobal = True
        
        print('updating...')
        # NOTE(review): `column` is a leftover loop variable from the
        # rounding loop above — debug output only.
        print(column)
        c4gli.update(source='humppa',\
                    # pars=pars,
                    pars=dpars,\
                    air_balloon=air_balloon,\
                    air_ap=air_ap)
        if updateglobal:
            c4gli.get_global_input(globaldata)

        # if profile_ini:
        #     c4gli.runtime = 10 * 3600

        c4gli.dump(file_sounding)
        
        # if profile_ini:
        #     c4gl = class4gl(c4gli)
        #     c4gl.run()
        #     c4gl.dump(file_model,\
        #               include_input=True,\
        #               timeseries_only=timeseries_only)
        #     
        #     # This will cash the observations and model tables per station for
        #     # the interface
        # 
        # if profile_ini:
        #     profile_ini=False
        # else:
        #     profile_ini=True
        return c4gli
+
+
# Destination of the parsed sounding archives (one yaml per station/subset).
path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS_NOON/'

# Parse the raw morning and afternoon soundings into class4gl yaml archives.
# Fix: the balloon files were previously opened and never closed (handle
# leak), and the output files were closed without exception protection;
# context managers now guarantee every handle is closed even when a parser
# error aborts the loop.
with open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') as file_morning:
    for date,pair in HOUR_FILES.items():
        print(pair['morning'])
        humpafn = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1]
        print(humpafn)
        with open(humpafn,'r',encoding='latin-1') as balloon_file:
            c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0])
        print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime)

with open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') as file_afternoon:
    for date,pair in HOUR_FILES.items():
        humpafn = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1]
        with open(humpafn,'r',encoding='latin-1') as balloon_file:
            c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0])
        print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime)
+ 
+
+# file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') 
+# for date,pair  in HOUR_FILES.items(): 
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1],
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_morning,hour,c4gli_morning)
+#     print('c4gli_morning_ldatetime 1',c4gli_morning.pars.ldatetime)
+# file_morning.close()
+# 
+# file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') 
+# for hour in [18]:
+#     humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/humppa_080610_'+format(hour,"02d")+'00.txt'
+#     balloon_file = open(humpafn,'r',encoding='latin-1')
+# 
+#     humppa_parser(balloon_file,file_afternoon,hour,c4gli_afternoon)
+# file_afternoon.close()
+
+
+
+# path_model = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/HUMPPA/'
+# 
+# file_model    = open(fnout_model+   format(current_station.name,'05d')+'.yaml','w') 
+
+
# Build the record tables for the parsed soundings.  get_records scans the
# station yaml archives; refetch_records=True forces a rebuild instead of
# using a cached table — TODO confirm against get_records.
records_morning = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='morning',
                                           refetch_records=True,
                                           )
print('records_morning_ldatetime',records_morning.ldatetime)

records_afternoon = get_records(pd.DataFrame([current_station]),\
                                           path_soundings,\
                                           subset='afternoon',
                                           refetch_records=True,
                                           )

# align afternoon records with noon records, and set same index
# (pairing is done on the local calendar date of the launch)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date]
records_afternoon.index = records_morning.index
# Output directory of the model experiment.
path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS_NOON/'

# Fix: os.makedirs replaces the former os.system('mkdir -p ...') shell call —
# no subshell, and it raises a clear OSError on failure instead of silently
# returning a non-zero status.
os.makedirs(path_exp, exist_ok=True)

# Run class4gl for every morning sounding and archive both the completed
# initial conditions (_ini) and the model output (_mod).  Fix: context
# managers guarantee the four yaml handles are closed/flushed even if a
# model run crashes halfway (they were previously only closed on success).
with open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') as file_morning, \
     open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') as file_afternoon, \
     open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') as file_ini, \
     open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') as file_mod:

    for (STNID,chunk,index),record_morning in records_morning.iterrows():
        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]

        # re-read the full yaml records of the matched morning/afternoon pair
        c4gli_morning = get_record_yaml(file_morning,
                                        record_morning.index_start,
                                        record_morning.index_end,
                                        mode='ini')
        #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime)

        c4gli_afternoon = get_record_yaml(file_afternoon,
                                          record_afternoon.index_start,
                                          record_afternoon.index_end,
                                          mode='ini')

        # the model runtime is the daylight time span between the two launches
        c4gli_morning.update(source='pairs',pars={'runtime' :
                            int((c4gli_afternoon.pars.datetime_daylight -
                                 c4gli_morning.pars.datetime_daylight).total_seconds())})
        # manual switches: no advection, prescribed profile, no literature values
        c4gli_morning.update(source='manual',
                             pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False})
        c4gli_morning.dump(file_ini)

        c4gl = class4gl(c4gli_morning)
        c4gl.run()

        c4gl.dump(file_mod,
                  include_input=False,
                  timeseries_only=timeseries_only)
+
# Rebuild the record tables for the freshly written experiment archives.
records_ini = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='ini',
                                           refetch_records=True,
                                           )
records_mod = get_records(pd.DataFrame([current_station]),\
                                           path_exp,\
                                           subset='mod',
                                           refetch_records=True,
                                           )

# model records were dumped in one-to-one order with the initial records,
# so they share the same index
records_mod.index = records_ini.index

# align afternoon records with initial records, and set same index
# (pairing on the local calendar date, as above)
records_afternoon.index = records_afternoon.ldatetime.dt.date
records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
records_afternoon.index = records_ini.index
+
+# stations_for_iter = stations(path_exp)
+# for STNID,station in stations_iterator(stations_for_iter):
+#     records_current_station_index = \
+#             (records_ini.index.get_level_values('STNID') == STNID)
+#     file_current_station_mod = STNID
+# 
+#     with \
+#     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+#     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
+#     open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+#         for (STNID,index),record_ini in records_iterator(records_ini):
+#             c4gli_ini = get_record_yaml(file_station_ini, 
+#                                         record_ini.index_start, 
+#                                         record_ini.index_end,
+#                                         mode='ini')
+#             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+# 
+#             record_mod = records_mod.loc[(STNID,index)]
+#             c4gl_mod = get_record_yaml(file_station_mod, 
+#                                         record_mod.index_start, 
+#                                         record_mod.index_end,
+#                                         mode='mod')
+#             record_afternoon = records_afternoon.loc[(STNID,index)]
+#             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+#                                         record_afternoon.index_start, 
+#                                         record_afternoon.index_end,
+#                                         mode='ini')
+
+
+
+# # select the samples of the afternoon list that correspond to the timing of the
+# # morning list
+# records_afternoon = records_afternoon.set_index('ldatetime').loc[records_afternoon.ldatetime)]
+# records_afternoon.index = recods_morning.index
+# 
+# 
+# # create intersectino index
+# index_morning = pd.Index(records_morning.ldatetime.to_date())
+# index_afternoon = pd.Index(records_afternoon.ldatetime.to_date())
+# 
+# for record_morning in records_morning.iterrows():
+#     
+#     c4gl = class4gl(c4gli)
+#     c4gl.run()
+#     c4gl.dump(c4glfile,\
+#               include_input=True,\
+#               timeseries_only=timeseries_only)
+# 
+# # This will cash the observations and model tables per station for
+# # the interface
+# 
+# records_ini = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=0,\
+#                                    by=2,\
+#                                    subset='ini',
+#                                    refetch_records=True,
+#                                    )
+# records_mod = get_records(pd.DataFrame([current_station]),\
+#                                    path_mod,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='mod',
+#                                    refetch_records=True,
+#                                    )
+# records_eval = get_records(pd.DataFrame([current_station]),\
+#                                    path_obs,\
+#                                    start=1,\
+#                                    by=2,\
+#                                    subset='eval',
+#                                    refetch_records=True,
+#                                    )
+# 
+# 
+# # mod_scores = pd.DataFrame(index=mod_records.index)
+# # for (STNID,index), current_record_mod in mod_records.iterrows():
+# #     print(STNID,index)
+# #     current_station = STN
+# #     current_record_obs_afternoon = obs_records_afternoon.loc[(STNID,index)]
+# #     current_record_obs = obs_records.loc[(STNID,index)]
+# # 
+# #     record_yaml_mod = get_record_yaml_mod(odirexperiments[keyEXP],\
+# #                                           current_station,\
+# #                                           current_record_mod,\
+# #                                          )
+# # 
+# #     record_yaml_obs = \
+# #             get_record_yaml_obs(odirexperiments[keyEXP],\
+# #                                 current_station,\
+# #                                 current_record_obs,\
+# #                                 suffix='.yaml')
+# # 
+# #     record_yaml_obs_afternoon = \
+# #             get_record_yaml_obs(odir,\
+# #                                 current_station,\
+# #                                 current_record_obs_afternoon,\
+# #                                 suffix='_afternoon.yaml')
+# # 
+# #     hmax = np.max([record_yaml_obs_afternoon.pars.h,\
+# #                    record_yaml_mod.h])
+# #     HEIGHTS = {'h':hmax, '2h':2.*hmax, '3000m':3000.}
+# #     
+# # 
+# #     for height,hvalue in HEIGHTS.items():
+# # 
+# #         lt_obs = (record_yaml_obs_afternoon.air_ap.HAGL < hvalue)
+# #         lt_mod = (record_yaml_mod.air_ap.z < hvalue)
+# #         try:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = \
+# #                 rmse(\
+# #                     record_yaml_obs_afternoon.air_ap.theta[lt_obs],\
+# #                     np.interp(\
+# #                         record_yaml_obs_afternoon.air_ap.HAGL[lt_obs],\
+# #                         record_yaml_mod.air_ap.z[lt_mod],\
+# #                         record_yaml_mod.air_ap.theta[lt_mod]\
+# #                     ))
+# #         except ValueError:
+# #             mod_scores.at[(STNID,index),'rmse_'+height] = np.nan
+# #     # # we calculate these things in the interface itself
+# #     # for key in ['q','theta','h']:
+# #     #     mod_records.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_mod.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# #     #     # the actual time of the initial and evaluation sounding can be 
+# #     #     # different, but we consider this as a measurement error for
+# #     #     # the starting and end time of the simulation.
+# #     #     obs_records_afternoon.at[(STNID,index),'d'+key+'dt'] = \
+# #     #                 (record_yaml_obs.pars.__dict__[key] -  \
+# #     #                  record_yaml_obs_afternoon.pars.__dict__[key]\
+# #     #                 )/(record_yaml_obs_afternoon.pars.ldatetime - \
+# #     #                    record_yaml_obs.pars.ldatetime).total_seconds()
+# # 
+# # mod_scores.to_pickle(odirexperiments[keyEXP]+'/'+format(STNID,'05d')+"_mod_scores.pkl")
+# #         
+# #                 
+# #                 
+# # # for EXP,c4glfile in c4glfiles.items():
+# # #     c4glfile.close()            
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# # 
+# #     
+# #     # {'Time[min:sec]': None 
+# #     #  'P[hPa]': None, 
+# #     #  'T[C]': None, 
+# #     #  'U[%]': None, 
+# #     #  'Wsp[m/s]': None, 
+# #     #  'Wdir[Grd]': None,
+# #     #  'Lon[°]', 
+# #     #  'Lat[°]', 
+# #     #  'Altitude[m]', 'GeoPot[m']', 'MRI',
+# #     #        'Unnamed: 11', 'RI', 'Unnamed: 13', 'DewPoint[C]', 'Virt. Temp[C]',
+# #     #        'Rs[m/min]D[kg/m3]Azimut[deg]', 'Elevation[deg]', 'Range[m]']
+# #     # }
+# #     # 
+# #     # #pivotrows =
+# #     # #{
+# # 
+# # 
+# # 
diff --git a/class4gl/setup/setup_igra_20181217.py b/class4gl/setup/setup_igra_20181217.py
new file mode 100644
index 0000000..2733f3d
--- /dev/null
+++ b/class4gl/setup/setup_igra_20181217.py
@@ -0,0 +1,361 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thursday, March 29, 11:30 AM
+
+@author: Hendrik Wouters
+
+The dry-2-dry global radio sounding experiment.
+
+usage:
+    python setup_global.py 
+    where <row-index> is an integer indicating the row index of the station list
+    under args.path_output+'/'+fn_stations (see below)
+
+this scripts should be called from the pbs script setup_global.pbs
+
+
+
+dependencies:
+    - pandas
+    - class4gl
+    - data_soundings
+
+
+"""
+
+""" import libraries """
+import pandas as pd
+import sys
+#import copy as cp
+import numpy as np
+#from sklearn.metrics import mean_squared_error
+import logging
+import datetime as dt
+import os
+import math
+
+import argparse
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+#parser.add_argument('--timestamp')
+parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
+parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+# parser.add_argument('--first_YYYYMMDD',default="19810101")
+# parser.add_argument('--last_YYYYMMDD',default="20180101")
+parser.add_argument('--startyear',default="1981")
+parser.add_argument('--first_station_row')
+parser.add_argument('--last_station_row')
+parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--station_id') # run a specific station id
+# parser.add_argument('--error_handling',default='dump_on_success')
+# parser.add_argument('--subset_output',default='morning') # this tells which yaml subset
+
+
+# args.path_output = "/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GLOBAL/"
+args = parser.parse_args()
+
+sys.path.insert(0, args.c4gl_path_lib)
+from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+
+fn_stations = args.path_input+'/igra-stations.txt'
+
+
+#calculate the root mean square error
+
+def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actual = False):
+    """ calculates the root mean squared error
+        
+    
+        INPUT:
+            y_actual: reference dataset
+            y_predicted: predicting dataset
+            z_actual: coordinate values of reference dataset
+            z_predicted: coordinate values of the predicting dataset
+            
+            filternan_actual: throw away reference values that have nans
+    """
+    
+    y_actual_temp = np.array(y_actual)
+    y_predicted_temp = np.array(y_predicted)
+    
+    if z_actual is not None:
+        z_actual_temp = np.array(z_actual)
+    else: 
+        z_actual_temp = None
+        
+    
+    if filternan_actual:
+        y_actual_temp = y_actual_temp[~np.isnan(y_actual_temp)]
+        if z_actual_temp is not None:
+            z_actual_temp = z_actual_temp[~np.isnan(y_actual_temp)]
+    
+    if ((z_actual_temp is not None) or (z_predicted is not None)):    
+        if (z_actual_temp is None) or (z_predicted is None):
+            raise ValueError('Input z_actual and z_predicted need \
+                              to be specified simultaneously.')
+        y_predicted_temp = np.interp(z_actual_temp,z_predicted, y_predicted)
+    
+    else:
+        # this catches the situation that y_predicted is a single value (eg., 
+        # which is the case for evaluating eg., mixed-layer estimates)
+        y_predicted_temp = y_actual_temp*0. + y_predicted_temp
+        
+    rmse_temp = (y_actual_temp - y_predicted_temp)
+    rmse_temp = np.mean(rmse_temp*rmse_temp)
+    return np.sqrt(rmse_temp)
+
+
+from class4gl import class4gl_input, data_global,class4gl
+from data_soundings import wyoming
+#from data_global import data_global
+
+# initialize global data
+globaldata = data_global()
+# ...  and load initial data pages
+globaldata.load_datasets(recalc=0)
+
+# read the list of stations with valid ground data (list generated with
+# get_valid_stations.py)
+# args.path_input = "/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/"
+
+# df_stations = pd.read_fwf(fn_stations,names=['Country code',\
+#                                                'ID',\
+#                                                'Name',\
+#                                                'latitude',\
+#                                                'longitude',\
+#                                                'height',\
+#                                                'unknown',\
+#                                                'startyear',\
+#                                                'endyear'])
+# 
+
+# ===============================
+print("getting a list of stations")
+# ===============================
+all_stations = stations(args.path_input,refetch_stations=False)
+df_stations = all_stations.table
+df_stations.columns
+
+if args.station_id is not None:
+    df_stations = df_stations.query('STNID == '+args.station_id)
+else:
+    if args.last_station_row is not None:
+        df_stations = df_stations[:(int(args.last_station_row)+1)]
+    if args.first_station_row is not None:
+        df_stations = df_stations[int(args.first_station_row):]
+
+STNlist = list(df_stations.iterrows())
+
+os.system('mkdir -p '+args.path_output)
+for iSTN,STN in STNlist:  
+    one_run = False
+# for iSTN,STN in STNlist[5:]:  
+    
+    fnout = args.path_output+"/"+format(STN.name,'05d')+"_morning.yaml"
+    fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_afternoon.yaml"
+    
+
+    # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
+    #                   for EXP in experiments.keys()])
+        
+    with open(fnout,'w') as fileout, \
+         open(fnout_afternoon,'w') as fileout_afternoon:
+        wy_strm = wyoming(PATH=args.path_input, STNM=STN.name)
+        wy_strm.set_STNM(int(STN.name))
+
+        # we consider all soundings from 1981 onwards
+        wy_strm.find_first(year=int(args.startyear))
+        #wy_strm.find(dt.datetime(2004,10,19,6))
+        
+        c4gli = class4gl_input(debug_level=logging.INFO)
+        c4gli_afternoon = class4gl_input(debug_level=logging.INFO)
+        # so we continue as long as we can find a new sounding
+                
+        while wy_strm.current is not None:
+            
+            c4gli.clear()
+            try: 
+                c4gli.get_profile_wyoming(wy_strm)
+                #print(STN['ID'],c4gli.pars.datetime)
+                #c4gli.get_global_input(globaldata)
+
+                print(c4gli.pars.STNID, c4gli.pars.ldatetime)
+
+                logic = dict()
+                logic['morning'] =  (c4gli.pars.ldatetime.hour <= 12.)
+
+                # Sounding should have taken place after 3 hours before sunrise.
+                # Note that the actual simulation only start at sunrise
+                # (specified by ldatetime_daylight), so the ABL cooling at the time
+                # before sunrise is ignored by the simulation.
+                logic['daylight'] = \
+                    ((c4gli.pars.ldatetime - 
+                      c4gli.pars.lSunrise).total_seconds()/3600. >= -3.)
+                
+                logic['springsummer'] = (c4gli.pars.theta > 278.)
+                
+                # we take 3000 because previous analysis (ie., HUMPPA) has
+                # focussed towards such altitude
+                le3000 = (c4gli.air_balloon.z <= 3000.)
+                logic['10measurements'] = (np.sum(le3000) >= 7) 
+
+                leh = (c4gli.air_balloon.z <= c4gli.pars.h)
+
+                logic['mlerrlow'] = (\
+                        (len(np.where(leh)[0]) > 0) and \
+                        # in cases where humidity is not defined, the mixed-layer
+                        # values get corr
+                        (not np.isnan(c4gli.pars.theta))\
+                                     #and \
+                        #(rmse(c4gli.air_balloon.theta[leh] , \
+                        #      c4gli.pars.theta,filternan_actual=True) < 1.0)\
+                              )
+    
+
+                logic['mlherrlow'] = (c4gli.pars.h_e <= 150.)
+                
+                print('logic:', logic)
+                # the result
+                morning_ok = np.mean(list(logic.values()))
+                print(morning_ok,c4gli.pars.ldatetime)
+
+            except:
+                morning_ok =False
+                print('obtain morning not good')
+
+            # the next sounding will be used either for an afternoon sounding
+            # or for the morning sounding of the next day.
+            wy_strm.find_next()
+            # If the morning is ok, then we try to find a decent afternoon
+            # sounding
+            if morning_ok == 1.:
+                print('MORNING OK!')
+                # we get the current date
+                current_date = dt.date(c4gli.pars.ldatetime.year, \
+                                       c4gli.pars.ldatetime.month, \
+                                       c4gli.pars.ldatetime.day)
+                c4gli_afternoon.clear()
+                print('AFTERNOON PROFILE CLEARED')
+                try:
+                    c4gli_afternoon.get_profile_wyoming(wy_strm)
+                    print('AFTERNOON PROFILE OK')
+
+                    if wy_strm.current is not None:
+                        current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                    else:
+                        # a dummy date: this will be ignored anyway
+                        current_date_afternoon = dt.date(1900,1,1)
+
+                    # we will dump the latest afternoon sounding that fits the
+                    # minimum criteria specified by logic_afternoon
+                    print(current_date,current_date_afternoon)
+                    c4gli_afternoon_for_dump = None
+                    while ((current_date_afternoon == current_date) and \
+                           (wy_strm.current is not None)):
+                        logic_afternoon =dict()
+
+                        logic_afternoon['afternoon'] = \
+                            (c4gli_afternoon.pars.ldatetime.hour >= 12.)
+                        # the sounding should have taken place before 1 hour
+                        # before sunset. This is to minimize the chance that a
+                        # stable boundary layer (yielding very low mixed layer
+                        # heights) is formed which can not be represented by
+                        # class.
+                        logic_afternoon['daylight'] = \
+                          ((c4gli_afternoon.pars.ldatetime - \
+                            c4gli_afternoon.pars.lSunset \
+                           ).total_seconds()/3600. <= -1.)
+
+
+                        le3000_afternoon = \
+                            (c4gli_afternoon.air_balloon.z <= 3000.)
+                        logic_afternoon['5measurements'] = \
+                            (np.sum(le3000_afternoon) >= 7) 
+
+                        # we only store the last afternoon sounding that fits these
+                        # minimum criteria
+
+                        afternoon_ok = np.mean(list(logic_afternoon.values()))
+
+                        print('logic_afternoon: ',logic_afternoon)
+                        print(afternoon_ok,c4gli_afternoon.pars.ldatetime)
+                        if afternoon_ok == 1.:
+                            # # doesn't work :(
+                            # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon)
+                            
+                            # so we just create a new one from the same wyoming profile
+                            c4gli_afternoon_for_dump = class4gl_input()
+                            c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm)
+
+                        wy_strm.find_next()
+                        c4gli_afternoon.clear()
+                        c4gli_afternoon.get_profile_wyoming(wy_strm)
+
+                        if wy_strm.current is not None:
+                            current_date_afternoon = \
+                                   dt.date(c4gli_afternoon.pars.ldatetime.year, \
+                                           c4gli_afternoon.pars.ldatetime.month, \
+                                           c4gli_afternoon.pars.ldatetime.day)
+                        else:
+                            # a dummy date: this will be ignored anyway
+                            current_date_afternoon = dt.date(1900,1,1)
+
+                        # Only in the case we have a good pair of soundings, we
+                        # dump them to disk
+                    if c4gli_afternoon_for_dump is not None:
+                        c4gli.update(source='pairs',pars={'runtime' : \
+                            int((c4gli_afternoon_for_dump.pars.datetime_daylight - 
+                                 c4gli.pars.datetime_daylight).total_seconds())})
+    
+    
+                        print('ALMOST...')
+                        if c4gli.pars.runtime > 3600*4.: # more than 4 hours simulation
+                                
+        
+                            c4gli.get_global_input(globaldata)
+                            print('VERY CLOSE...')
+                            if c4gli.check_source_globaldata() and \
+                                (c4gli.check_source(source='wyoming',\
+                                                   check_only_sections='pars')):
+                                c4gli.dump(fileout)
+                                
+                                c4gli_afternoon_for_dump.dump(fileout_afternoon)
+                                
+                                
+                                # for keyEXP,dictEXP in experiments.items():
+                                #     
+                                #     c4gli.update(source=keyEXP,pars = dictEXP)
+                                #     c4gl = class4gl(c4gli)
+                                #     # c4gl.run()
+                                #     
+                                #     c4gl.dump(c4glfiles[key])
+                                
+                                print('HIT!!!')
+                                one_run = True
+                except:
+                    print('get profile failed')
+                
+    if one_run:
+        #STN.name = STN.name
+        all_records_morning = get_records(pd.DataFrame([STN]),\
+                                      args.path_output,\
+                                      subset='morning',
+                                      refetch_records=True,
+                                      )
+        all_records_afternoon = get_records(pd.DataFrame([STN]),\
+                                      args.path_output,\
+                                      subset='afternoon',
+                                      refetch_records=True,
+                                      )
+    else:
+        os.system('rm '+fnout)
+        os.system('rm '+fnout_afternoon)
+
+    # for c4glfile in c4glfiles:
+    #     c4glfile.close()            
+
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..22efb32
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+# Inside of setup.cfg
+[metadata]
+description-file = README.md
+
diff --git a/setup.py b/setup.py
index 4dcb51d..524bd21 100644
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,33 @@
 from distutils.core import setup
 
+
+# I followed this tutorial to have both the git repository matched with the pip
+# repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56
 setup(
         name='class4gl',
-        version='0.1dev',
-        packages=['class4gl','bin'],
-        license='GPLv3 licence',
-        long_description=open('README.md').read(),
+        version='0.1.2',
+        license='gpl-3.0',        # https://help.github.com/articles/licensing-a-repository
+        description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description
+        author = 'Hendrik Wouters',                        # Type in your name
+        author_email = 'hendrik.wouters@ugent.be',         # Type in your E-Mail
+        url = 'https://github.com/hendrikwout/class4gl',   # Provide either the link to your github or to your website
+        download_url ='https://github.com/hendrikwout/class4gl/archive/v0.1.tar.gz',
+        # I explain this later on
+        keywords = ['atmospheric boundary layer', 'weather balloons',
+                    'land--atmosphere interactions'],   # Keywords
+        packages=['class4gl'],
+        # packages=find_packages(),
+        install_requires=['beautifulsoup4','pyyaml','pysolar','basemap','xarray'],
+        # long_description=open('README.md').read(),
+        classifiers=[
+                'Development Status :: 4 - Beta',      # Chose either "3 - Alpha", "4
+                #'Intended Audience :: Atmospheric scientists',
+                #'Topic :: modelling of the atmospheric boundary layer',
+                # 'License :: gpl-3.0',   
+                'Programming Language :: Python :: 3',      
+                # 'Programming Language :: Python :: 3.4',
+                # 'Programming Language :: Python :: 3.5',
+                # 'Programming Language :: Python :: 3.6',
+                # 'Programming Language :: Python :: 3.7',
+              ],
 )

From 3090b8c027496a4e0930b58a3228e8ce17698477 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:05:13 +0100
Subject: [PATCH 109/129] readme; changes for release v1.0: multi-processor
 runs without qsub; make visualization possible without global data.

---
 MANIFEST.in                               |   6 +
 README.md                                 |  52 +-
 class4gl/__init__.py                      |   3 +
 class4gl/class4gl.py                      |  11 +-
 class4gl/interface/interface.py           |  20 +-
 class4gl/interface_functions.py           |   6 +-
 class4gl/interface_multi.py               | 550 ++++++++--------
 class4gl/model.py                         |  30 +-
 class4gl/setup/setup_igra_20181217.py     |   8 +-
 class4gl/simulations/batch_simulations.py | 190 ++++--
 class4gl/simulations/simulations.py       | 752 ++++++++++++----------
 setup.py                                  |   8 +-
 test.png                                  | Bin 92416 -> 0 bytes
 13 files changed, 939 insertions(+), 697 deletions(-)
 delete mode 100644 test.png

diff --git a/MANIFEST.in b/MANIFEST.in
index 174aa24..9ba551d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,9 @@
 recursive-include class4gl *.py
+recursive-include class4gl *.png
+recursive-include class4gl *.pbs
+recursive-exclude class4gl *.pyc
+# include simulations/*
+# include setup/*
+# include processing/*
 
 
diff --git a/README.md b/README.md
index 2d11adc..ad8b3b0 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,50 @@
-# class4gl
-Chemistry Land-surface Atmosphere Soil Slab model (CLASS) | Python version
+[![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/cropped-class4gl_small-1.png)](https://class4gl.eu)
+
+_CLASS4GL_ (Chemistry Land-surface Atmosphere Soil Slab model for Global Studies) is a fast and easy interface to investigate the dynamics of the atmospheric boundary layer from weather balloons worldwide. General info and tutorials for using CLASS4GL are available at class4gl.eu, and video clips about the atmospheric boundary layer physics can be found on the [website of the original CLASS model](https://classmodel.github.io/).
+
+# Features
+  - _Mine_ appropriate observations from global radio soundings, satellite data, reanalysis and climate models
+  - _Automise_ mass parallel simulations of the atmospheric boundary layer and global sensitivity experiments
+  - _Foster_ a better understanding of land-atmosphere interactions and the drivers of extreme weather globally
+  - _Share_ your data, experiments, and code developments with the research community
+
+# Method
+
+### Description
+
+The framework CLASS4GL is designed to facilitate the investigation of the atmospheric boundary layer evolution in response to different land and atmospheric conditions observed around the world. The core of the platform is the model CLASS that is used to simulate the evolution of the atmospheric boundary layer. Instruction videos about the boundary layer processes and how they are considered in the CLASS model can be found on the [CLASS model website](https://classmodel.github.io/). Observational data from balloons, satellites and reanalysis are used to constrain and initialize the model. CLASS4GL uses 2 million global balloon soundings from the integrated global radio sounding archive and satellite data from the last 40 years.
+
+### Components
+
+  - A global data module that employs balloon soundings, satellite imagery and reanalysis data
+  - An interface to easily perform multiple simulations of the atmospheric boundary layer in parallel, and multiple batches of global sensitivity experiments
+  - Tools for pre- and post-processing the data pool of input data and experiments.
+  - A GUI data explorer
+
+The tool is under continuous development, and it can be downloaded and installed as described in the tutorials on class4gl.eu/#getstarted.
+
+In case you experience a problem or a bug, please don’t hesitate to contact us at class4gl.eu/#contact. You can also open an issue on the GitHub page (https://github.com/hendrikwout/class4gl/issues). Any feedback will be highly appreciated.
+
+### Data sources
+
+CLASS4GL employs the balloon soundings from the Integrated Global Radiosonde Archive (IGRA) to initialize and validate the CLASS model. The sounding data is supplemented with ancillary data to further constrain the model. Therefore, a default set of gridded global datasets from satellite imagery, reanalysis and surveys have been used that span a period of 1981–2015. A complete overview of the datasets can be found in the table. However, the default set can be replaced by alternative datasets as long as they are provided in netCDF format.
+
+Schematic overview of CLASS4GL:
+[![N|Solid](https://class4gl.eu//wp-content/uploads/2019/01/image4-1024x794.png)](https://class4gl.eu)
+
+A CLASS4GL data package is available that can be directly used to perform and validate ABL model simulations and sensitivity experiments. The balloon soundings were performed at locations across different climate regions, as shown on the map.
+
+
+These are the 150 stations from IGRA of the reference dataset to perform and validate the ABL model simulations with CLASS4GL (see Sect. 2.2 of the CLASS4GL manuscript). The different climate classes are indicated with the colors according to the Köppen-Geiger climate classification. The markers indicate the locations of the atmospheric profiles from three observation campaigns (ie., HUMPPA, BLLAST and GOAMAZON): [![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/image-1-480x300.png)](https://class4gl.eu)
+
+Data library of CLASS4GL: 
+[![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/image-5-768x492.png)](https://class4gl.eu)
+
+### Reference
+H. Wouters, I. Y. Petrova, C. C. van Heerwaarden, J. Vilà-Guerau de Arellano, A. J. Teuling, J. A. Santanello, V. Meulenberg, D. G. Miralles. A novel framework to investigate atmospheric boundary layer dynamics from balloon soundings worldwide: CLASS4GL v1.0. In preparation.
+
+
+# Get started: 
+see class4gl.eu/#getstarted
+
 
-This is the extension of class to be able to be used with global balloon soundings. 
diff --git a/class4gl/__init__.py b/class4gl/__init__.py
index f9c2212..d192b95 100644
--- a/class4gl/__init__.py
+++ b/class4gl/__init__.py
@@ -1,5 +1,8 @@
 # from . import model,class4gl,interface_multi,data_air,data_global
 from ribtol import *
+from setup import *
+from simulations import *
+from processing import *
 
 __version__ = '0.1.0'
 
diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py
index 2bdf6b4..a0de860 100644
--- a/class4gl/class4gl.py
+++ b/class4gl/class4gl.py
@@ -835,6 +835,14 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'):
             air_balloon[column] = air_balloon[column].round(decimal)
             air_ap[column] = air_ap[column].round(decimal)
 
+        # in order to avoid warnings: the ABL values should have the same
+        # rounding as the values profile.
+        dpars['h'] = round(dpars['h'],decimals['z'])
+        dpars['theta'] = round(dpars['theta'],decimals['theta'])
+        dpars['q'] = round(dpars['q'],decimals['q'])
+        dpars['u'] = round(dpars['u'],decimals['u'])
+        dpars['v'] = round(dpars['v'],decimals['v'])
+
         self.update(source='wyoming',\
                     # pars=pars,
                     pars=dpars,\
@@ -1923,12 +1931,9 @@ def dump(self,file,include_input=False,timeseries_only=None):
                 the yaml file, including pars, air_ap, sources etc.
         """
 
-
         if include_input:
             self.input_c4gl.dump(file)
 
-
-
         file.write('---\n')
         index = file.tell()
         file.write('# CLASS4GL input; format version: 0.1\n')
diff --git a/class4gl/interface/interface.py b/class4gl/interface/interface.py
index bb234fc..7ededbd 100644
--- a/class4gl/interface/interface.py
+++ b/class4gl/interface/interface.py
@@ -172,10 +172,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         for ikey,key in enumerate(args.experiments.strip(' ').split(' ')):
             # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
             # clearsky = (cc < 0.05)
-            # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-            # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-            mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-            obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+            # mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+            # obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats'].loc[clearsky]['d'+varkey+'dt']
+            mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt']
+            obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt']
             x, y = obs.values,mod.values
             print(key,len(obs.values))
     
@@ -218,10 +218,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc']
         # clearsky = (cc < 0.05)
     
-        # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt']
-        # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt']
-        mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt']
-        obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt']
+        # mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].loc[clearsky]['d'+varkey+'dt']
+        # obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats'].loc[clearsky]['d'+varkey+'dt']
+        mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt']
+        obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt']
     
     
         nbins=40       
@@ -373,7 +373,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         key = list(args.experiments.strip().split(' '))[ikey]
         data_all = pd.DataFrame()
 
-        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy())
+        tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_end_obs_stats'].copy())
         tempdatamodstats["source"] = "Soundings"
         tempdatamodstats["source_index"] = "Soundings"
 
@@ -403,7 +403,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         for ikey,key in enumerate(list(args.experiments.strip().split(' '))):
             keylabel = keylabels[ikey]
 
-            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy())
+            tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].copy())
             tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy())
             tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date
             tempdatamodstats['STNID']= tempdataini_this.STNID
diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py
index b880ae9..630439e 100644
--- a/class4gl/interface_functions.py
+++ b/class4gl/interface_functions.py
@@ -66,7 +66,7 @@ def __prev__(self):
 
 
 #'_afternoon.yaml'
-def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
+def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'):
     filename = yaml_file.name
     #filename = path_yaml+'/'+format(current_station.name,'05d')+suffix
     #yaml_file = open(filename)
@@ -96,13 +96,13 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='mod'):
     os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start))
 
 
-    if mode =='mod':
+    if mode =='model_output':
         modelout = class4gl()
         modelout.load_yaml_dict(record_dict)
         os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start))
 
         return modelout
-    elif mode == 'ini':
+    elif mode == 'model_input':
 
  
         # datetimes are incorrectly converted to strings. We need to convert them
diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py
index b5e5c6d..69a2cf9 100644
--- a/class4gl/interface_multi.py
+++ b/class4gl/interface_multi.py
@@ -6,10 +6,18 @@
 import sys
 from contextlib import suppress
 from time import sleep
+import copy
+import matplotlib.image as mpimg
 
 
 # sys.path.insert(0, '/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/')
 
+import class4gl
+path_lib = os.path.dirname(class4gl.__file__)
+path_worldmap = path_lib+'/Equirectangular_projection_SW.png'
+img_worldmap= mpimg.imread(path_worldmap)
+
+
 from class4gl import class4gl_input, data_global,class4gl,units
 from interface_functions import *
 # from data_soundings import wyoming
@@ -51,12 +59,12 @@
 os.system('module load Ruby')
 
 class c4gl_interface_soundings(object):
-    def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False,tendencies_revised=False):
+    def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False,tendencies_revised=False):
         """ creates an interactive interface for analysing class4gl experiments
 
         INPUT:
             path_exp : path of the experiment output
-            path_obs : path of the observations 
+            path_forcing : path of the original forcing, which is needed to get the end (afternoon) profiles
             globaldata: global data that is being shown on the map
             obs_filtering: extra data filter considering observation tendencies
                            beyond what the model can capture
@@ -74,7 +82,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
         print(self.obs_filter)
         self.tendencies_revised = tendencies_revised
         self.path_exp = path_exp
-        self.path_obs = path_obs
+        self.path_forcing = path_forcing
         self.exp_files = glob.glob(self.path_exp+'/?????.yaml')
 
         # # get the list of stations
@@ -115,23 +123,23 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
                                            refetch_records=refetch_records
                                            )
         # get its records and load it into the stats frame
-        self.frames['stats']['records_all_stations_mod'] =\
+        self.frames['stats']['records_all_stations_end_mod'] =\
                         get_records(self.frames['stats']['stations'].table,\
                                            self.path_exp,\
-                                           subset='mod',\
+                                           subset='end',\
                                            refetch_records=refetch_records
                                            )
 
-        if self.path_obs is not None:
+        if self.path_forcing is not None:
             # get its records and load it into the stats frame
-            self.frames['stats']['records_all_stations_obs_afternoon'] =\
+            self.frames['stats']['records_all_stations_end_obs'] =\
                             get_records(self.frames['stats']['stations'].table,\
-                                               self.path_obs,\
-                                               subset='afternoon',\
+                                               self.path_forcing,\
+                                               subset='end',\
                                                refetch_records=refetch_records
                                                )
 
-        self.frames['stats']['records_all_stations_mod'].index = \
+        self.frames['stats']['records_all_stations_end_mod'].index = \
             self.frames['stats']['records_all_stations_ini'].index 
 
         
@@ -141,46 +149,46 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
         self.frames['stats']['records_all_stations_ini']['dates'] = \
             self.frames['stats']['records_all_stations_ini']['ldatetime'].dt.date
 
-        if self.path_obs is not None:
-            self.frames['stats']['records_all_stations_obs_afternoon']['dates'] = \
-                self.frames['stats']['records_all_stations_obs_afternoon']['ldatetime'].dt.date
+        if self.path_forcing is not None:
+            self.frames['stats']['records_all_stations_end_obs']['dates'] = \
+                self.frames['stats']['records_all_stations_end_obs']['ldatetime'].dt.date
 
-            self.frames['stats']['records_all_stations_obs_afternoon'].set_index(['STNID','dates'],inplace=True)
+            self.frames['stats']['records_all_stations_end_obs'].set_index(['STNID','dates'],inplace=True)
 
 
             ini_index_dates = self.frames['stats']['records_all_stations_ini'].set_index(['STNID','dates']).index
 
-            self.frames['stats']['records_all_stations_obs_afternoon'] = \
-                self.frames['stats']['records_all_stations_obs_afternoon'].loc[ini_index_dates]
+            self.frames['stats']['records_all_stations_end_obs'] = \
+                self.frames['stats']['records_all_stations_end_obs'].loc[ini_index_dates]
 
-            self.frames['stats']['records_all_stations_obs_afternoon'].index = \
+            self.frames['stats']['records_all_stations_end_obs'].index = \
                 self.frames['stats']['records_all_stations_ini'].index 
 
             self.frames['stats']['viewkeys'] = ['h','theta','q']
             print('Calculating table statistics')
 
             if self.tendencies_revised:
-                self.frames['stats']['records_all_stations_mod_stats'] = \
-                        tendencies_rev(self.frames['stats']['records_all_stations_mod'],\
+                self.frames['stats']['records_all_stations_end_mod_stats'] = \
+                        tendencies_rev(self.frames['stats']['records_all_stations_end_mod'],\
                                            self.frames['stats']['records_all_stations_ini'],\
                                            self.frames['stats']['viewkeys']\
                                   )
-                self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                        tendencies_rev(self.frames['stats']['records_all_stations_obs_afternoon'],\
+                self.frames['stats']['records_all_stations_end_obs_stats'] = \
+                        tendencies_rev(self.frames['stats']['records_all_stations_end_obs'],\
                                            self.frames['stats']['records_all_stations_ini'],\
                                            self.frames['stats']['viewkeys']\
                                   )
 
             else:
-                self.frames['stats']['records_all_stations_mod_stats'] = \
-                        tendencies(self.frames['stats']['records_all_stations_mod'],\
-                                   self.frames['stats']['records_all_stations_obs_afternoon'],\
+                self.frames['stats']['records_all_stations_end_mod_stats'] = \
+                        tendencies(self.frames['stats']['records_all_stations_end_mod'],\
+                                   self.frames['stats']['records_all_stations_end_obs'],\
                                    self.frames['stats']['records_all_stations_ini'],\
                                    self.frames['stats']['viewkeys']\
                                   )
-                self.frames['stats']['records_all_stations_obs_afternoon_stats'] = \
-                        tendencies(self.frames['stats']['records_all_stations_obs_afternoon'],\
-                                   self.frames['stats']['records_all_stations_obs_afternoon'],\
+                self.frames['stats']['records_all_stations_end_obs_stats'] = \
+                        tendencies(self.frames['stats']['records_all_stations_end_obs'],\
+                                   self.frames['stats']['records_all_stations_end_obs'],\
                                    self.frames['stats']['records_all_stations_ini'],\
                                    self.frames['stats']['viewkeys']\
                                   )
@@ -213,18 +221,18 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
         # 
         # 
         # \
-        #        self.frames['stats']['records_all_stations_mod'], \
+        #        self.frames['stats']['records_all_stations_end_mod'], \
 
 
 
-        # self.frames['stats']['records_all_stations_mod_stats_stdrel'] = \
-        #        stdrel(mod = self.frames['stats']['records_all_stations_mod_stats'], \
-        #               obs = self.frames['stats']['records_all_stations_obs_afternoon_stats'], \
+        # self.frames['stats']['records_all_stations_end_mod_stats_stdrel'] = \
+        #        stdrel(mod = self.frames['stats']['records_all_stations_end_mod_stats'], \
+        #               obs = self.frames['stats']['records_all_stations_end_obs_stats'], \
         #               columns = [ 'd'+key+'dt' for key in \
         #                           self.frames['stats']['viewkeys']], \
         #              )
 
-        # self.frames['stats']['records_all_stations_obs_afternoon_stats_stdrel'] = \
+        # self.frames['stats']['records_all_stations_end_obs_stats_stdrel'] = \
         #        stdrel(mod = self.frames['stats']['records_all_stations_ini'], \
         #               obs = self.frames['stats']['records_all_stations_ini'], \
         #               columns = self.frames['stats']['viewkeys'], \
@@ -232,42 +240,42 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
 
         
 
-        if self.path_obs is not None:
+        if self.path_forcing is not None:
             print('filtering pathological data')
-            indextype = self.frames['stats']['records_all_stations_mod_stats'].index.names
+            indextype = self.frames['stats']['records_all_stations_end_mod_stats'].index.names
             # some observational sounding still seem problematic, which needs to be
             # investigated. In the meantime, we filter them
 
             print('hello',self.obs_filter)
-            print ((self.path_obs is not None) and (self.obs_filter))
-            if ((self.path_obs is not None) and (self.obs_filter)) is True:
+            print ((self.path_forcing is not None) and (self.obs_filter))
+            if ((self.path_forcing is not None) and (self.obs_filter)) is True:
                 print('hallohallo')
-            if ((self.path_obs is not None) and (self.obs_filter)) is True:
+            if ((self.path_forcing is not None) and (self.obs_filter)) is True:
                 print('exclude exceptional observations')
                 print('exclude unrealistic model output -> should be investigated!')
                 valid = (\
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt >  0.250) & 
-                         #(self.frames['stats']['records_all_stations_mod_stats'].dthetadt >  0.25000) & 
-                         #(self.frames['stats']['records_all_stations_mod_stats'].dthetadt <  1.8000) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt <  1.8000) & 
-                         #(self.frames['stats']['records_all_stations_mod_stats'].dhdt >  50.0000) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt >  40.0000) & 
-                         #(self.frames['stats']['records_all_stations_mod_stats'].dhdt <  350.) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dhdt <  400.) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt >  -.00055) & 
-                         #(self.frames['stats']['records_all_stations_mod_stats'].dqdt >  -.00055) & 
-                         (self.frames['stats']['records_all_stations_obs_afternoon_stats'].dqdt <  .0003) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt >  0.250) & 
+                         #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt >  0.25000) & 
+                         #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt <  1.8000) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt <  1.8000) & 
+                         #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt >  50.0000) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt >  40.0000) & 
+                         #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt <  350.) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt <  400.) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt >  -.00055) & 
+                         #(self.frames['stats']['records_all_stations_end_mod_stats'].dqdt >  -.00055) & 
+                         (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt <  .0003) & 
 
                          # filter 'extreme' model output -> should be investigated!
-                         (self.frames['stats']['records_all_stations_mod_stats'].dqdt <  .0006) & 
-                         (self.frames['stats']['records_all_stations_mod_stats'].dqdt >  -.0006) & 
-                         (self.frames['stats']['records_all_stations_mod_stats'].dthetadt >  .2) & 
-                         (self.frames['stats']['records_all_stations_mod_stats'].dthetadt <  2.) & 
-                         # (self.frames['stats']['records_all_stations_mod_stats'].dqdt <  .0003) & 
+                         (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt <  .0006) & 
+                         (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt >  -.0006) & 
+                         (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt >  .2) & 
+                         (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt <  2.) & 
+                         # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt <  .0003) & 
                          # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & 
                          # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & 
-                         ~np.isnan(self.frames['stats']['records_all_stations_mod_stats'].dthetadt) & 
-                         ~np.isnan(self.frames['stats']['records_all_stations_obs_afternoon_stats'].dthetadt))
+                         ~np.isnan(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt) & 
+                         ~np.isnan(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt))
 
                 for key in self.frames['stats'].keys():
                     if (type(self.frames['stats'][key]) == pd.DataFrame) and \
@@ -275,7 +283,7 @@ def __init__(self,path_exp,path_obs=None,globaldata=None,refetch_records=False,r
                         self.frames['stats'][key] = self.frames['stats'][key][valid]
                 print("WARNING WARNING!: "+ str(len(valid) - np.sum(valid))+' soundings are filtered')
 
-        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_mod'].index
+        self.frames['stats']['records_all_stations_index'] = self.frames['stats']['records_all_stations_end_mod'].index
 
 
         print("filtering stations from interface that have no records")
@@ -355,9 +363,9 @@ def update_station(self):
 
         # create the value table of the records of the current station
         tab_suffixes = \
-                ['_mod','_ini','_ini_pct']
-        if self.path_obs is not None:
-            tab_suffixes=tab_suffixes+['_obs_afternoon','_mod_stats','_obs_afternoon_stats']
+                ['_end_mod','_ini','_ini_pct']
+        if self.path_forcing is not None:
+            tab_suffixes=tab_suffixes+['_end_obs','_end_mod_stats','_end_obs_stats']
 
         for tab_suffix in tab_suffixes:
             self.frames['stats']['records_current_station'+tab_suffix] = \
@@ -365,11 +373,11 @@ def update_station(self):
 
         # go to first record of current station
         self.frames['stats']['records_iterator'] = \
-                        records_iterator(self.frames['stats']['records_current_station_mod'])
+                        records_iterator(self.frames['stats']['records_current_station_end_mod'])
         (self.frames['stats']['STNID'] , \
         self.frames['stats']['current_record_chunk'] , \
         self.frames['stats']['current_record_index']) , \
-        self.frames['stats']['current_record_mod'] = \
+        self.frames['stats']['current_record_end_mod'] = \
                         self.frames['stats']['records_iterator'].__next__()
 
         for key in self.frames['stats'].keys():
@@ -382,24 +390,24 @@ def update_station(self):
         self.frames['profiles']['current_station_file_ini'] = \
             open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
 
-        if 'current_station_file_mod' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_mod'].close()
-        self.frames['profiles']['current_station_file_mod'] = \
-            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-            self.frames['profiles']['current_station_file_afternoon'].close()
-        if self.path_obs is not None:
-            self.frames['profiles']['current_station_file_afternoon'] = \
-                open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+        if 'current_station_file_end_mod' in self.frames['profiles'].keys():
+            self.frames['profiles']['current_station_file_end_mod'].close()
+        self.frames['profiles']['current_station_file_end_mod'] = \
+            open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r')
+        if 'current_station_file_end' in self.frames['profiles'].keys():
+            self.frames['profiles']['current_station_file_end'].close()
+        if self.path_forcing is not None:
+            self.frames['profiles']['current_station_file_end'] = \
+                open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r')
 
         # for the profiles we make a distinct record iterator, so that the
         # stats iterator can move independently
         self.frames['profiles']['records_iterator'] = \
-                        records_iterator(self.frames['profiles']['records_current_station_mod'])
+                        records_iterator(self.frames['profiles']['records_current_station_end_mod'])
         (self.frames['profiles']['STNID'] , \
         self.frames['profiles']['current_record_chunk'] , \
         self.frames['profiles']['current_record_index']) , \
-        self.frames['profiles']['current_record_mod'] = \
+        self.frames['profiles']['current_record_end_mod'] = \
                         self.frames['profiles']['records_iterator'].__next__()
 
 
@@ -416,16 +424,16 @@ def next_record(self,event=None,jump=1):
             (self.frames['profiles']['STNID'] , \
             self.frames['profiles']['current_record_chunk'] , \
             self.frames['profiles']['current_record_index']) , \
-            self.frames['profiles']['current_record_mod'] = \
+            self.frames['profiles']['current_record_end_mod'] = \
                       self.frames['profiles']['records_iterator'].__next__(jump)
         # except (StopIteration):
         #     self.frames['profiles']['records_iterator'].close()
         #     del( self.frames['profiles']['records_iterator'])
         #     self.frames['profiles']['records_iterator'] = \
-        #                 self.frames['profiles']['records_current_station_mod'].iterrows()
+        #                 self.frames['profiles']['records_current_station_end_mod'].iterrows()
         #     (self.frames['profiles']['STNID'] , \
         #     self.frames['profiles']['current_record_index']) , \
-        #     self.frames['profiles']['current_record_mod'] = \
+        #     self.frames['profiles']['current_record_end_mod'] = \
         #                     self.frames['profiles']['records_iterator'].__next__()
 
         for key in self.frames['profiles'].keys():
@@ -444,16 +452,16 @@ def next_record(self,event=None,jump=1):
             self.frames['profiles']['current_station_file_ini'] = \
                 open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
 
-            if 'current_station_file_mod' in self.frames['profiles'].keys():
-                self.frames['profiles']['current_station_file_mod'].close()
-            self.frames['profiles']['current_station_file_mod'] = \
-                open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
+            if 'current_station_file_end_mod' in self.frames['profiles'].keys():
+                self.frames['profiles']['current_station_file_end_mod'].close()
+            self.frames['profiles']['current_station_file_end_mod'] = \
+                open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r')
 
-            if self.path_obs is not None:
-                if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                    self.frames['profiles']['current_station_file_afternoon'].close()
-                self.frames['profiles']['current_station_file_afternoon'] = \
-                    open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+            if self.path_forcing is not None:
+                if 'current_station_file_end' in self.frames['profiles'].keys():
+                    self.frames['profiles']['current_station_file_end'].close()
+                self.frames['profiles']['current_station_file_end'] = \
+                    open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r')
 
         self.update_record()
 
@@ -466,20 +474,20 @@ def update_record(self):
                   (self.frames['profiles']['STNID'] , \
                   self.frames['profiles']['current_record_chunk'],\
                   self.frames['profiles']['current_record_index'])]
-        if self.path_obs is not None:
-            self.frames['profiles']['current_record_obs_afternoon'] =  \
-                self.frames['profiles']['records_current_station_obs_afternoon'].loc[\
+        if self.path_forcing is not None:
+            self.frames['profiles']['current_record_end_obs'] =  \
+                self.frames['profiles']['records_current_station_end_obs'].loc[\
                       (self.frames['profiles']['STNID'] , \
                       self.frames['profiles']['current_record_chunk'] , \
                       self.frames['profiles']['current_record_index'])]
 
-            self.frames['profiles']['current_record_mod_stats'] = \
-                    self.frames['profiles']['records_all_stations_mod_stats'].loc[(\
+            self.frames['profiles']['current_record_end_mod_stats'] = \
+                    self.frames['profiles']['records_all_stations_end_mod_stats'].loc[(\
                         self.frames['profiles']['STNID'], \
                         self.frames['profiles']['current_record_chunk'], \
                         self.frames['profiles']['current_record_index'])]
-            self.frames['profiles']['current_record_obs_afternoon_stats'] = \
-                    self.frames['profiles']['records_all_stations_obs_afternoon_stats'].loc[(\
+            self.frames['profiles']['current_record_end_obs_stats'] = \
+                    self.frames['profiles']['records_all_stations_end_obs_stats'].loc[(\
                         self.frames['profiles']['STNID'],\
                         self.frames['profiles']['current_record_chunk'],\
                         self.frames['profiles']['current_record_index'])]
@@ -496,15 +504,15 @@ def update_record(self):
 
         # select first 
         #self.frames['profiles']['current_record_index'], \
-        #self.frames['profiles']['record_yaml_mod'] = \
+        #self.frames['profiles']['record_yaml_end_mod'] = \
         #   get_record_yaml(self.frames['profiles']['current_station']['filename'],\
         #                   self.frames['stats']['current_record_index'])
-        self.frames['profiles']['record_yaml_mod'] = \
+        self.frames['profiles']['record_yaml_end_mod'] = \
            get_record_yaml(
-               self.frames['profiles']['current_station_file_mod'], \
-               self.frames['profiles']['current_record_mod'].index_start,
-               self.frames['profiles']['current_record_mod'].index_end,
-               mode='mod')
+               self.frames['profiles']['current_station_file_end_mod'], \
+               self.frames['profiles']['current_record_end_mod'].index_start,
+               self.frames['profiles']['current_record_end_mod'].index_end,
+               mode='model_output')
                                 
         record_ini = self.frames['profiles']['records_all_stations_ini'].loc[
                        (self.frames['stats']['STNID'] , \
@@ -516,20 +524,20 @@ def update_record(self):
                self.frames['profiles']['current_station_file_ini'], \
                record_ini.index_start,
                record_ini.index_end,
-                mode='ini')
+                mode='model_input')
 
-        if self.path_obs is not None:
-            record_afternoon = self.frames['profiles']['records_all_stations_obs_afternoon'].loc[
+        if self.path_forcing is not None:
+            record_end = self.frames['profiles']['records_all_stations_end_obs'].loc[
                            (self.frames['stats']['STNID'] , \
                             self.frames['stats']['current_record_chunk'] , \
                             self.frames['stats']['current_record_index'])]
 
-            self.frames['profiles']['record_yaml_obs_afternoon'] = \
+            self.frames['profiles']['record_yaml_end_obs'] = \
                get_record_yaml(
-                   self.frames['profiles']['current_station_file_afternoon'], \
-                   record_afternoon.index_start,
-                   record_afternoon.index_end,
-                    mode='ini')
+                   self.frames['profiles']['current_station_file_end'], \
+                   record_end.index_start,
+                   record_end.index_end,
+                    mode='model_input')
 
 
         key = self.frames['worldmap']['inputkey']
@@ -546,7 +554,7 @@ def update_record(self):
         else:
             if "fig" in self.__dict__.keys():
                 self.refresh_plot_interface(only=['stats_lightupdate',
-                                                  'worldmap_stations',
+                                                  'worldmap',
                                                   'profiles'])
 
     def abline(self,slope, intercept,axis):
@@ -570,9 +578,9 @@ def plot(self):
         btns = {} #buttons
 
         # frames, which sets attributes for a group of axes, buttens, 
-        if self.path_obs is not None:
+        if self.path_forcing is not None:
 
-            for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_mod_stats'].columns)):
+            for ikey,key in enumerate(list(self.frames['stats']['records_all_stations_end_mod_stats'].columns)):
                 label = 'stats_'+str(key)
                 axes[label] = fig.add_subplot(\
                                 len(self.frames['stats']['viewkeys']),\
@@ -640,9 +648,11 @@ def plot(self):
         axes[label].lat = None
         axes[label].lon = None
 
-        label = 'worldmap_colorbar'
-        axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
-        axes[label].fields = {}
+        
+        if self.globaldata is not None:
+            label = 'worldmap_colorbar'
+            axes[label] = fig.add_axes([0.25,0.44,0.40,0.05])
+            axes[label].fields = {}
 
         # we make a overlying axes for the animations on the map, so that we don't need to redraw the whole map over and over again
         label = 'worldmap_stations'
@@ -661,7 +671,10 @@ def plot(self):
         buttons_upper = 0.28
         buttons_left = 0.25
 
-        button_types = ['dataset','datetime','level','station','record']
+        if self.globaldata is not None:
+            button_types = ['datetime','level','station','record']
+        else:
+            button_types = ['dataset','station','record']
         
         for ibutton_type,button_type in enumerate(button_types):
             label='bprev'+button_type
@@ -671,8 +684,13 @@ def plot(self):
                 button_width,\
                 button_height\
                                                      ])
-            btns[label] = Button(axes[label], 'Previous '+button_type)
-            btns[label].on_clicked(getattr(self, 'prev_'+button_type))
+            if button_type !='dataset':
+                btns[label] = Button(axes[label], 'Previous '+button_type)
+                btns[label].on_clicked(getattr(self, 'prev_'+button_type))
+            else:
+                btns[label] = Button(axes[label], 'Previous input var')
+                btns[label].on_clicked(getattr(self, 'prev_'+button_type))
+
 
             label='bnext'+button_type
             axes[label] = fig.add_axes([
@@ -681,8 +699,12 @@ def plot(self):
                 button_width,\
                 button_height\
                                                      ])
-            btns[label] = Button(axes[label], 'Next '+button_type)
-            btns[label].on_clicked(getattr(self, 'next_'+button_type))
+            if button_type !='dataset':
+                btns[label] = Button(axes[label], 'Next '+button_type)
+                btns[label].on_clicked(getattr(self, 'next_'+button_type))
+            else:
+                btns[label] = Button(axes[label], 'Next input var')
+                btns[label].on_clicked(getattr(self, 'next_'+button_type))
 
         
         # label = 'bprev_dataset'
@@ -799,7 +821,7 @@ def plot(self):
     #         current_yamlgen.close()
     #         current_yamlgen = yaml.load_all(current_file)
     #         current_file.seek(current_tell)
-    #         current_record_mod = current_yamlgen.__next__()
+    #         current_record_end_mod = current_yamlgen.__next__()
     #     current_yamlgen.close()
 
     #     return records
@@ -920,8 +942,17 @@ def sel_dataset(self,inputkey):
             'after')# get nearest datetime of the current dataset to the profile
 
         print('seldata0')
+
         if 'level' not in self.frames['worldmap'].keys():
-            levels = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev']
+
+            if self.globaldata is not None:
+                if 'lev' in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims):
+                    levels = self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev']
+                else:
+                    levels = np.array([0])
+            else:
+                levels = np.array([0])
+
             self.frames['worldmap']['level'] = np.max(levels)
             print('seldata1')
 
@@ -936,27 +967,28 @@ def sel_dataset(self,inputkey):
             print('seldata3')
 
 
-        print('seldata4')
+            print('seldata4')
         self.sel_level(self.frames['worldmap']['level'])
 
 
 
     def sel_level(self,level):
 
-        if 'lev' not in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims):
-            raise ValueError('lev dimension not in dataset '+self.frames['worldmap']['inputkey'])
+        # if 'lev' not in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims):
+        #     raise ValueError('lev dimension not in dataset '+self.frames['worldmap']['inputkey'])
 
         print('seldata5')
 
-
-        if level > (np.max(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
-            raise ValueError('Level '+str(level)+' exceed those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
-        if level < (np.min(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
-            raise ValueError('Level '+str(level)+' is lower than those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
-        print('seldata6')
-        self.frames['worldmap']['level'] = level
-
-        print(level)
+        if self.globaldata is not None:
+            if 'lev' in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims):
+                if level > (np.max(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
+                    raise ValueError('Level '+str(level)+' exceed those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
+                if level < (np.min(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page['lev'])):
+                    raise ValueError('Level '+str(level)+' is lower than those of the current dataset: '+str(self.globaldata.datasets[frames['worldmap']['inputkey']].page['lev']))
+                print('seldata6')
+                self.frames['worldmap']['level'] = level
+
+            print(level)
         if "fig" in self.__dict__.keys():
             self.refresh_plot_interface(only=['worldmap','stats_lightupdate','stats_colorbar']) 
 
@@ -1086,13 +1118,22 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                 # x,y = self.gmap(lons,lats)
                 # #self.cont_map = self.axmap.contourf(x,y,field.T,cmap=gmapcm)
                 # self.cont_map = self.axmap.pcolormesh(x,y,field.T,cmap=gmapcm)
+        else:
+            # simplified version for only showing a simple image
+            label = 'worldmap'
+            axes[label].lat = np.arange(-90.,91.,1.)[::-1]
+            axes[label].lon = np.arange(-180.,181.,1.)[:]
+            axes[label].fields[label] = axes[label].imshow(img_worldmap)
+            axes[label].axis('off')
+
+
 
-        if (self.path_obs is not None) and \
+        if (self.path_forcing is not None) and \
            (self.frames['worldmap']['inputkey'] in self.frames['stats']['records_all_stations_ini_pct'].keys()) and \
-           (self.path_obs is not None) and \
+           (self.path_forcing is not None) and \
            ((only is None) or ('stats' in only) or ('stats_lightupdate' in only)):
 
-            statskeys_out = list(self.frames['stats']['records_all_stations_mod_stats'].columns)
+            statskeys_out = list(self.frames['stats']['records_all_stations_end_mod_stats'].columns)
             store_xlim = {}
             store_ylim = {}
             for ikey, key in enumerate(statskeys_out):
@@ -1107,12 +1148,12 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             key = 'dthetadt'
             x = self.frames['stats']['records_all_stations_ini']['datetime']
             #print(x)
-            y = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
+            y = self.frames['stats']['records_all_stations_end_obs_stats'][key]
             #print(y)
             z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
             #print(z)
 
-            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_mod'])))
+            alpha_cloud_pixels = 1./(1.+1./(0.15 * 10000. / len(self.frames['stats']['records_all_stations_end_mod'])))
             self.axes[label].data[label] = self.axes[label].scatter(x.values,
                                                                     y.values,
                                                                     c=z.values,
@@ -1124,13 +1165,13 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
             
             x = self.frames['stats']['records_current_station_ini']['datetime']
-            y = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
+            y = self.frames['stats']['records_current_station_end_obs_stats'][key]
             z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
             self.axes[label].data[label+'_current_station_hover'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=5,picker=5,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
 
 
             x = self.frames['profiles']['records_current_station_ini']['datetime']
-            y = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
+            y = self.frames['profiles']['records_current_station_end_obs_stats'][key]
             z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
 
             self.axes[label].data[label+'_current_station'] = self.axes[label].scatter(x.values,y.values,c=z.values,cmap=self.statsviewcmap,s=20,picker=20,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.8)
@@ -1141,8 +1182,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             for ikey, key in enumerate(statskeys_out):
 
                 # show data of all stations
-                x = self.frames['stats']['records_all_stations_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_all_stations_mod_stats'][key]
+                x = self.frames['stats']['records_all_stations_end_obs_stats'][key]
+                y = self.frames['stats']['records_all_stations_end_mod_stats'][key]
                 z = self.frames['stats']['records_all_stations_ini_pct'][self.frames['worldmap']['inputkey'] ]
                 qvalmax = x.quantile(0.999)
                 qvalmin = x.quantile(0.001)
@@ -1162,16 +1203,16 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                     self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
                          self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.4,lw=4)
 
-                x = self.frames['stats']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['stats']['records_current_station_mod_stats'][key]
+                x = self.frames['stats']['records_current_station_end_obs_stats'][key]
+                y = self.frames['stats']['records_current_station_end_mod_stats'][key]
                 z = self.frames['stats']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
                 self.axes['stats_'+key].data['stats_'+key+'_current_station_hover'] = \
                        self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
                                 cmap=self.statsviewcmap,\
                                 s=10,picker=10,label=key,vmin=0.,vmax=1.,edgecolor='k',linewidth=0.3)
 
-                x = self.frames['profiles']['records_current_station_obs_afternoon_stats'][key]
-                y = self.frames['profiles']['records_current_station_mod_stats'][key]
+                x = self.frames['profiles']['records_current_station_end_obs_stats'][key]
+                y = self.frames['profiles']['records_current_station_end_mod_stats'][key]
                 z = self.frames['profiles']['records_current_station_ini_pct'][self.frames['worldmap']['inputkey'] ]
                 self.axes['stats_'+key].data['stats_'+key+'_current_station'] = \
                        self.axes['stats_'+key].scatter(x.values,y.values, c=z.values,\
@@ -1183,8 +1224,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                     self.axes['stats_'+key].data['stats_'+key+'_fit'] = \
                          self.axes['stats_'+key].plot(x, fit[0] * x + fit[1], color='k',alpha=0.8,lw=3)
 
-                x = self.frames['stats']['current_record_obs_afternoon_stats'][key]
-                y = self.frames['stats']['current_record_mod_stats'][key]
+                x = self.frames['stats']['current_record_end_obs_stats'][key]
+                y = self.frames['stats']['current_record_end_mod_stats'][key]
                 z = self.frames['stats']['current_record_ini_pct'][self.frames['worldmap']['inputkey'] ]
 
                 text = 'EXT: '+ format(x,'2.4f')+ ', MOD: ' + format(y,'2.4f')
@@ -1204,9 +1245,9 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                 # axes['stats_'+key].set_title('relative deviation per station of '+ key)
                 self.axes['stats_'+key].set_title(key+ ' ['+self.units[key]+']')
                 # # highlight data for curent station
-                # self.frames['stats']['records_all_stations_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
+                # self.frames['stats']['records_all_stations_end_mod_stats'].iloc[self.frames['stats']['records_all_stations_index'].get_level_values('STNID') == self.frames['stats']['current_station'].name]
 
-                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_mod'],'2.4f')
+                #text = 'EXT: '+format(seltablestatsstdrel_statannotate[key+'_ext'],'2.4f')+ ', MOD: '+format(seltablestatsstdrel_statannotate[key+'_end_mod'],'2.4f')
 
                 if ikey == len(statskeys_out)-1:
                     self.axes['stats_'+key].set_xlabel('external')
@@ -1240,10 +1281,9 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
         #print('r1')
         if (only is None) or ('worldmap' in only) or ('worldmap_stations' in only):
-            #print('r2')
             label = 'worldmap_stations'
             axes[label].clear()
-            
+            #print('r2')
             stations = self.frames['worldmap']['stations'].table
             globaldata = self.globaldata
             
@@ -1261,6 +1301,7 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                     xlist.append(x)
                     ylist.append(y)
                 #picker is needed to make it clickable (pick_event)
+                print(label)
                 axes[label].data[label] = axes[label].scatter(xlist,ylist,
                                                               c='r', s=15,
                                                               picker = 15,
@@ -1306,7 +1347,7 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                     text = 'STNID: '+ format(STNID,'10.0f') + \
                             ', LAT: '+format(STN['latitude'],'3.3f')+ \
                             ', LON: '+format(STN['longitude'],'3.3f')+ \
-                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_mod'].shape[0]) \
+                            ', #SOUNDINGS: '+str(self.frames['stats']['records_current_station_end_mod'].shape[0]) \
 
                             #+', VAL: '+format(VAL,'.3e')
 
@@ -1332,8 +1373,8 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
                     # #pos = sc.get_offsets()[ind["ind"][0]]
                     # 
-                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_mod'])
-                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_mod'])
+                    # axes[label.data[label+'statannotate'].xy = (seltablestatsstdrel_statannotate[key+'_ext'],seltablestatsstdrel_statannotate[key+'_end_mod'])
+                    # text = 'STN: '+str(int(axes['statsview0'].focus['STNID']))+', DT: '+str(axes['statsview0'].focus['DT'])+', EXT: '+str(seltablestatsstdrel_statannotate[key+'_ext'])+', MOD: '+str(seltablestatsstdrel_statannotate[key+'_end_mod'])
                     # axes[label].data[label+'statannotate'].set_text(text)
                     #axes[label].data[label+'statannotate'].get_bbox_patch().set_facecolor(statsviewcmap(seltablestatspct_statannotate[cmapkey]))
                     # axes[label].data[label+'statannotate'].get_bbox_patch().set_alpha(0.4)
@@ -1360,7 +1401,7 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                 self.frames['profiles']['record_yaml_ini'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
                 # +\
                 # ' -> '+ \
-                # self.frames['profiles']['record_yaml_obs_afternoon'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
+                # self.frames['profiles']['record_yaml_end_obs'].pars.datetime.strftime("%Y/%m/%d %H:%M"))
             
             
             
@@ -1373,12 +1414,12 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             # #axes[label].set_title(self.morning_sounding.datetime.strftime("%Y/%m/%d %H:%M") + ' -> '+self.evening_sounding.datetime.strftime("%Y/%m/%d %H:%M"))
             # 
             #print(self.frames['profiles']['record_yaml_ini'].pars.h)
-            #print(self.frames['profiles']['record_yaml_obs_afternoon'].pars.h)
-            #print(self.frames['profiles']['record_yaml_mod'].out['h'].values[-1])
+            #print(self.frames['profiles']['record_yaml_end_obs'].pars.h)
+            #print(self.frames['profiles']['record_yaml_end_mod'].out['h'].values[-1])
             hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                           self.frames['profiles']['record_yaml_mod'].out.h[-1]])
-            if self.path_obs is not None:
-                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_obs_afternoon'].pars.h])
+                           self.frames['profiles']['record_yaml_end_mod'].out.h[-1]])
+            if self.path_forcing is not None:
+                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_end_obs'].pars.h])
 
 
                 zidxmax = int(np.where((self.frames['profiles']['record_yaml_ini'].air_balloon.z.values
@@ -1405,43 +1446,43 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
 
 
             #print('r15')
-            if self.path_obs is not None:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values
+            if self.path_forcing is not None:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values
                                     < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values)))
                 zco = range(zidxmax)
 
                               
-                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_obs'].air_balloon.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values[zco],"r*", \
                                  label="obs "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 self.frames['profiles']['record_yaml_end_obs'].pars.ldatetime.strftime("%H:%M")\
                                  +'LT')
 
                 #print('r16')
 
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values)))
                 zco = range(zidxmax)
 
-                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_obs'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values[zco],"r:", \
                                  label="fit "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 self.frames['profiles']['record_yaml_end_obs'].pars.ldatetime.strftime("%H:%M")\
                                  +'LT')
 
             #print('r17')
-            #print(self.frames['profiles']['record_yaml_mod'].air_ap.z)
+            #print(self.frames['profiles']['record_yaml_end_mod'].air_ap.z)
             #print(hmax)
-            valid_mod = len(self.frames['profiles']['record_yaml_mod'].air_ap.z)>= 4
+            valid_mod = len(self.frames['profiles']['record_yaml_end_mod'].air_ap.z)>= 4
             if valid_mod:
 
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values)))
                 zco = range(zidxmax)
 
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.theta.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_mod'].air_ap.theta.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values[zco],"r-", \
                                  label="mod "+\
                                  (self.frames['profiles']['record_yaml_ini'].pars.ldatetime
                                  +dt.timedelta(seconds=self.frames['profiles']['record_yaml_ini'].pars.runtime)).strftime("%H:%M")\
@@ -1469,12 +1510,12 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             # 
             if valid_mod:
                 hmax = np.nanmax([self.frames['profiles']['record_yaml_ini'].pars.h,\
-                               self.frames['profiles']['record_yaml_mod'].out.h[-1]])
+                               self.frames['profiles']['record_yaml_end_mod'].out.h[-1]])
             else:
                 hmax = self.frames['profiles']['record_yaml_ini'].pars.h
 
-            if self.path_obs is not None:
-                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_obs_afternoon'].pars.h])
+            if self.path_forcing is not None:
+                hmax = np.nanmax([hmax,self.frames['profiles']['record_yaml_end_obs'].pars.h])
             # 
             #print('r20')
 
@@ -1500,38 +1541,38 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
                              self.frames['profiles']['record_yaml_ini'].pars.ldatetime.strftime("%H:%M")\
                              +'LT')
 
-            if self.path_obs is not None:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values)))
+            if self.path_forcing is not None:
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values)))
                 zco = range(zidxmax)
 
 
-                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_balloon.z.values[zco],"r*", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_obs'].air_balloon.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_obs'].air_balloon.z.values[zco],"r*", \
                                  label="obs "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 self.frames['profiles']['record_yaml_end_obs'].pars.ldatetime.strftime("%H:%M")\
                                  +'LT')
 
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values)))
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values)))
                 zco = range(zidxmax)
 
                 #print('r23')
-                axes[label].plot(self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].air_ap.z.values[zco],"r:", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_obs'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_obs'].air_ap.z.values[zco],"r:", \
                                  label="fit "+\
-                                 self.frames['profiles']['record_yaml_obs_afternoon'].pars.ldatetime.strftime("%H:%M")\
+                                 self.frames['profiles']['record_yaml_end_obs'].pars.ldatetime.strftime("%H:%M")\
                                  +'LT')
 
             #print('r24')
             if valid_mod:
-                zidxmax = int(np.where((self.frames['profiles']['record_yaml_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
-                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_mod'].air_ap.z.values)))
+                zidxmax = int(np.where((self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values < 2.*hmax))[0][-1])+2
+                zidxmax = np.min((zidxmax,len(self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values)))
                 zco = range(zidxmax)
-                axes[label].plot(self.frames['profiles']['record_yaml_mod'].air_ap.q.values[zco], \
-                                 self.frames['profiles']['record_yaml_mod'].air_ap.z.values[zco],"r-", \
+                axes[label].plot(self.frames['profiles']['record_yaml_end_mod'].air_ap.q.values[zco], \
+                                 self.frames['profiles']['record_yaml_end_mod'].air_ap.z.values[zco],"r-", \
                                  label="fit ")#+\
-                             #self.frames['profiles']['record_yaml_mod'].pars.ldatetime.strftime("%H:%M")\
+                             #self.frames['profiles']['record_yaml_end_mod'].pars.ldatetime.strftime("%H:%M")\
                              #+'LT')
             #print('r25')
             #axes[label].legend()
@@ -1577,10 +1618,10 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
  
 
             #print('r26')
-            time = self.frames['profiles']['record_yaml_mod'].out.time
+            time = self.frames['profiles']['record_yaml_end_mod'].out.time
             for ilabel,label in enumerate(['h','theta','q']):
                 axes["out:"+label].clear()
-                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_mod'].out.__dict__[label],label=label)
+                axes["out:"+label].plot(time,self.frames['profiles']['record_yaml_end_mod'].out.__dict__[label],label=label)
                 axes["out:"+label].set_ylabel(label)
                 if ilabel == 2:
                     axes["out:"+label].set_xlabel('local sun time [h]')
@@ -1589,11 +1630,11 @@ def refresh_plot_interface(self,only=None,statsnewdata=True,**args):
             label = 'SEB'
             axes[label].clear()
             
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Swin - self.frames['profiles']['record_yaml_mod'].out.Swout,label='Sw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.H,label='H')
-            axes[label].plot(time,self.frames['profiles']['record_yaml_mod'].out.Lwin - self.frames['profiles']['record_yaml_mod'].out.Lwout,label='Lw')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.G,label='G')
-            axes[label].plot(time,-self.frames['profiles']['record_yaml_mod'].out.LE,label='LE')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_end_mod'].out.Swin - self.frames['profiles']['record_yaml_end_mod'].out.Swout,label='Sw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_end_mod'].out.H,label='H')
+            axes[label].plot(time,self.frames['profiles']['record_yaml_end_mod'].out.Lwin - self.frames['profiles']['record_yaml_end_mod'].out.Lwout,label='Lw')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_end_mod'].out.G,label='G')
+            axes[label].plot(time,-self.frames['profiles']['record_yaml_end_mod'].out.LE,label='LE')
             axes[label].hlines(0.,*axes[label].get_xlim(),'k')
             axes[label].set_ylabel('energy flux [$\mathrm{W/m^2}$]')
             axes[label].set_xlabel('local sun time [$\mathrm{h}$]')
@@ -1682,7 +1723,7 @@ def on_pick(self,event):
         #d.set_offset_position('data')
         xy = d.get_offsets()
         x, y =  xy[:,0],xy[:,1]
-        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_mod']*3600.,'ro', markersize=5, picker=5,label=key)
+        #axes[-1].plot(seltableoutput[key+'_obs']*3600.,seltableoutput[key+'_end_mod']*3600.,'ro', markersize=5, picker=5,label=key)
 
         #print("p2")
         if len(ind) > 0:
@@ -1693,8 +1734,8 @@ def on_pick(self,event):
             #    #seltablestatsstdrel = self.seltablestatsstdrel
             #    #seltablestatspct = self.seltablestatspct
 
-            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+            #    #self.set_statsviewfocus('STNID' seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_end_mod'] == pos[1] )  ].STNID.iloc[0]
+            #    #self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_end_mod'] == pos[1] )  ].DT.iloc[0]
             #    
             #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
             #    self.set_profilefocus(STNID=self.axes['statsview0'].focus['STNID'],DT=self.axes['statsview0'].focus['DT'])
@@ -1704,8 +1745,7 @@ def on_pick(self,event):
             #el
             if (label == 'worldmap') or (label == 'worldmap_stations'):
                 self.hover_active = False
-                if (self.frames['worldmap']['STNID'] !=
-                    self.frames['profiles']['STNID']):
+                if (self.frames['worldmap']['STNID'] != self.frames['profiles']['STNID']):
                 # WE ALREADY HAVE the correct station from worldmap/stats because of the hovering!!
                 # so we just need to perform update_station
                     self.update_station()
@@ -1734,19 +1774,19 @@ def on_pick(self,event):
                     self.frames['profiles']['current_station_file_ini'] = \
                         open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r')
 
-                    if 'current_station_file_mod' in self.frames['profiles'].keys():
-                        self.frames['profiles']['current_station_file_mod'].close()
-                    self.frames['profiles']['current_station_file_mod'] = \
-                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_mod.yaml','r')
-                    if self.path_obs is not None:
-                        if 'current_station_file_afternoon' in self.frames['profiles'].keys():
-                            self.frames['profiles']['current_station_file_afternoon'].close()
-                        self.frames['profiles']['current_station_file_afternoon'] = \
-                            open(self.path_obs+'/'+format(STNID,"05d")+'_afternoon.yaml','r')
+                    if 'current_station_file_end_mod' in self.frames['profiles'].keys():
+                        self.frames['profiles']['current_station_file_end_mod'].close()
+                    self.frames['profiles']['current_station_file_end_mod'] = \
+                        open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r')
+                    if self.path_forcing is not None:
+                        if 'current_station_file_end' in self.frames['profiles'].keys():
+                            self.frames['profiles']['current_station_file_end'].close()
+                        self.frames['profiles']['current_station_file_end'] = \
+                            open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r')
 
                     # go to hovered record of current station
                     self.frames['profiles']['records_iterator'] = \
-                                    records_iterator(self.frames['profiles']['records_current_station_mod'])
+                                    records_iterator(self.frames['profiles']['records_current_station_end_mod'])
                     # ... and go to the record of the profile window (last one that
                     # was picked by the user)
                     found = False
@@ -1764,16 +1804,16 @@ def on_pick(self,event):
                         except StopIteration:
                             EOF = True
                     if found:
-                        self.frames['stats']['current_record_mod'] = record
+                        self.frames['stats']['current_record_end_mod'] = record
                         self.frames['stats']['current_record_chunk'] = chunk
                         self.frames['stats']['current_record_index'] = index
                     # # for the profiles we make a distinct record iterator, so that the
                     # # stats iterator can move independently
                     # self.frames['profiles']['records_iterator'] = \
-                    #                 records_iterator(self.frames['profiles']['records_current_station_mod'])
+                    #                 records_iterator(self.frames['profiles']['records_current_station_end_mod'])
                     # (self.frames['profiles']['STNID'] , \
                     # self.frames['profiles']['current_record_index']) , \
-                    # self.frames['profiles']['current_record_mod'] = \
+                    # self.frames['profiles']['current_record_end_mod'] = \
                     #                 self.frames['profiles']['records_iterator'].__next__()
 
 
@@ -1817,8 +1857,8 @@ def on_plot_hover(self,event):
                     #    seltablestatsstdrel = self.seltablestatsstdrel
                     #    seltablestatspct = self.seltablestatspct
 
-                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].STNID.iloc[0]
-                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_mod'] == pos[1] )  ].DT.iloc[0]
+                    #    self.set_statsviewfocus('STNID'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_end_mod'] == pos[1] )  ].STNID.iloc[0]
+                    #    self.set_statsviewfocus('DT'] = seltablestatsstdrel[(seltablestatsstdrel[selkey+'_ext'] == pos[0]) & (seltablestatsstdrel[selkey+'_end_mod'] == pos[1] )  ].DT.iloc[0]
                     #    self.axes['worldmap'].focus['STNID'] = self.axes['statsview0'].focus['STNID']
                     #    #self.goto_datetime_worldmap(self.axes['statsview0'].focus['DT'],'after')
                     #    self.hover_active = True
@@ -1828,25 +1868,25 @@ def on_plot_hover(self,event):
                     #el
                     #print(label[:5])
                     if (label[:5] == 'stats') or (label == 'times'):
-                        # records_mod = self.frames['stats']['records_current_station_mod'][selkey]
-                        # records_obs = self.frames['stats']['records_current_station_obs_afternoon'][selkey]
+                        # records_end_mod = self.frames['stats']['records_current_station_end_mod'][selkey]
+                        # records_obs = self.frames['stats']['records_current_station_end_obs'][selkey]
                         
-                        if self.path_obs is not None:
+                        if self.path_forcing is not None:
                             if label[:5] == 'stats':
-                                records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                                records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                                records_end_mod_stats = self.frames['stats']['records_all_stations_end_mod_stats']
+                                records_obs_stats = self.frames['stats']['records_all_stations_end_obs_stats']
                                 (self.frames['stats']['STNID'] ,
                                  self.frames['stats']['current_record_chunk'], 
                                  self.frames['stats']['current_record_index']) = \
-                                    records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                                    records_end_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_end_mod_stats[selkey] == pos[1])].index[0]
                             # elif label[:5] == 'stats':
-                            #     # records_mod_stats = self.frames['stats']['records_all_stations_mod_stats']
-                            #     records_obs_stats = self.frames['stats']['records_all_stations_obs_afternoon_stats']
+                            #     # records_end_mod_stats = self.frames['stats']['records_all_stations_end_mod_stats']
+                            #     records_obs_stats = self.frames['stats']['records_all_stations_end_obs_stats']
                             #     records_datetimes = self.frames['stats']['records_all_stations_ini']
                             #     (self.frames['stats']['STNID'] ,
                             #      self.frames['stats']['current_record_chunk'], 
                             #      self.frames['stats']['current_record_index']) = \
-                            #         records_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_mod_stats[selkey] == pos[1])].index[0]
+                            #         records_end_mod_stats[(records_obs_stats[selkey] == pos[0]) & (records_end_mod_stats[selkey] == pos[1])].index[0]
 
 
                         self.frames['stats']['stations_iterator'] = stations_iterator(self.frames['worldmap']['stations']) 
@@ -1888,10 +1928,10 @@ def on_plot_hover(self,event):
 
 
                         tab_suffixes = \
-                                ['_mod','_ini','_ini_pct']
-                        if self.path_obs is not None:
+                                ['_end_mod','_ini','_ini_pct']
+                        if self.path_forcing is not None:
                             tab_suffixes += \
-                                ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                                ['_end_mod_stats','_end_obs','_end_obs_stats']
                             
                         for tab_suffix in tab_suffixes:
                             self.frames['stats']['records_current_station'+tab_suffix] = \
@@ -1901,7 +1941,7 @@ def on_plot_hover(self,event):
 
                         # go to hovered record of current station
                         self.frames['stats']['records_iterator'] = \
-                                        records_iterator(self.frames['stats']['records_current_station_mod'])
+                                        records_iterator(self.frames['stats']['records_current_station_end_mod'])
 
 
                         # ... and go to the record of the profile window (last one that
@@ -1922,16 +1962,16 @@ def on_plot_hover(self,event):
                                 EOF = True
                         if found:
                             #print('h5')
-                            self.frames['stats']['current_record_mod'] = record
+                            self.frames['stats']['current_record_end_mod'] = record
                             self.frames['stats']['current_record_chunk'] = chunk
                             self.frames['stats']['current_record_index'] = index
 
                         #print(self.frames['stats']['STNID'],self.frames['stats']['current_record_index'])
                         tab_suffixes = \
                                 ['_ini','_ini_pct']
-                        if self.path_obs is not None:
+                        if self.path_forcing is not None:
                             tab_suffixes += \
-                                ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                                ['_end_mod_stats','_end_obs','_end_obs_stats']
                         for tab_suffix in tab_suffixes:
                             #print(tab_suffix)
                             #print(self.frames['stats']['records_current_station'+tab_suffix])
@@ -2020,8 +2060,8 @@ def on_plot_hover(self,event):
                                 self.frames['stats'][key] = self.frames['worldmap'][key]
                                 
                             ## fetch records of current station...
-                            #self.frames['stats']['records_current_station_mod'] =\
-                            #   get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+                            #self.frames['stats']['records_current_station_end_mod'] =\
+                            #   get_records_end_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
 
                             # ... and their indices
                             self.frames['stats']['records_current_station_index'] = \
@@ -2030,10 +2070,10 @@ def on_plot_hover(self,event):
                                      self.frames['stats']['current_station'].name)
 
                             tab_suffixes = \
-                                    ['_mod','_ini','_ini_pct']
-                            if self.path_obs is not None:
+                                    ['_end_mod','_ini','_ini_pct']
+                            if self.path_forcing is not None:
                                 tab_suffixes += \
-                                    ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                                    ['_end_mod_stats','_end_obs','_end_obs_stats']
 
                             for tab_suffix in tab_suffixes:
                                 self.frames['stats']['records_current_station'+tab_suffix] = \
@@ -2044,7 +2084,7 @@ def on_plot_hover(self,event):
                             #self.frames['stats']['records_iterator'].close()
                             del(self.frames['stats']['records_iterator'])
                             self.frames['stats']['records_iterator'] = \
-                                self.frames['stats']['records_current_station_mod'].iterrows()
+                                self.frames['stats']['records_current_station_end_mod'].iterrows()
 
 
 
@@ -2054,14 +2094,14 @@ def on_plot_hover(self,event):
                             (self.frames['stats']['STNID'] , \
                              self.frames['stats']['current_record_chunk'] , \
                              self.frames['stats']['current_record_index']) , \
-                            self.frames['stats']['current_record_mod'] = \
+                            self.frames['stats']['current_record_end_mod'] = \
                                 self.frames['stats']['records_iterator'].__next__()
                         
                             tab_suffixes = \
                                     ['_ini','_ini_pct']
-                            if self.path_obs is not None:
+                            if self.path_forcing is not None:
                                 tab_suffixes += \
-                                    ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                                    ['_end_mod_stats','_end_obs','_end_obs_stats']
 
                             for tab_suffix in tab_suffixes:
                                 self.frames['stats']['current_record'+tab_suffix] =  \
@@ -2104,8 +2144,8 @@ def on_plot_hover(self,event):
                 self.frames['stats']['current_station'] = \
                         self.frames['profiles']['current_station']
                 #print('h3a*')
-                self.frames['stats']['records_current_station_mod'] = \
-                        self.frames['profiles']['records_current_station_mod']
+                self.frames['stats']['records_current_station_end_mod'] = \
+                        self.frames['profiles']['records_current_station_end_mod']
                 #print('h3b*')
 
                 # the next lines recreate the records iterator. Probably it's
@@ -2116,7 +2156,7 @@ def on_plot_hover(self,event):
                 #self.frames['stats']['records_iterator'].close()
                 del(self.frames['stats']['records_iterator'])
                 self.frames['stats']['records_iterator'] = \
-                    self.frames['stats']['records_current_station_mod'].iterrows()
+                    self.frames['stats']['records_current_station_end_mod'].iterrows()
                 #print('h4*')
 
                 # ... and go to the record of the profile window (last one that
@@ -2139,7 +2179,7 @@ def on_plot_hover(self,event):
                         EOF = True
                 if found:
                     #print('h5*')
-                    self.frames['stats']['current_record_mod'] = record
+                    self.frames['stats']['current_record_end_mod'] = record
                     self.frames['stats']['current_record_chunk'] = chunk
                     self.frames['stats']['current_record_index'] = index
 
@@ -2148,8 +2188,8 @@ def on_plot_hover(self,event):
 
 
                 # # fetch records of current station...
-                # self.frames['stats']['records_current_station_mod'] =\
-                #    get_records_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
+                # self.frames['stats']['records_current_station_end_mod'] =\
+                #    get_records_end_mod(pd.DataFrame([self.frames['stats']['current_station']]),self.path_exp)
 
                 # ... and their indices
                 self.frames['stats']['records_current_station_index'] = \
@@ -2160,9 +2200,9 @@ def on_plot_hover(self,event):
                 
                 tab_suffixes = \
                         ['_ini','_ini_pct']
-                if self.path_obs is not None:
+                if self.path_forcing is not None:
                     tab_suffixes += \
-                        ['_mod_stats','_obs_afternoon','_obs_afternoon_stats']
+                        ['_end_mod_stats','_end_obs','_end_obs_stats']
 
                 for tab_suffix in tab_suffixes:
                     self.frames['stats']['records_current_station'+tab_suffix] = \
@@ -2233,7 +2273,7 @@ def on_plot_hover(self,event):
                 #                 self.frames['stats']['records_current_station'].iterrows()
                 # (self.frames['stats']['STNID'] , \
                 # self.frames['stats']['current_record_index']) , \
-                # self.frames['stats']['current_record_mod'] = \
+                # self.frames['stats']['current_record_end_mod'] = \
                 #                 self.frames['stats']['records_iterator'].__next__()
                 
 
diff --git a/class4gl/model.py b/class4gl/model.py
index 92792f5..21e86c3 100644
--- a/class4gl/model.py
+++ b/class4gl/model.py
@@ -426,21 +426,14 @@ def init(self):
 
                 
                 if self.air_ap[var][1] != self.air_ap[var][0]:
-                    raise ValueError("Error input profile consistency: two \
-                                     lowest profile levels for "+var+" should \
-                                     be equal.")
+                    raise ValueError("Error input profile consistency: two lowest profile levels for "+var+" should be equal.")
                 
                 # initialize the value from its profile when available
                 value_old = self.__dict__[var]
                 value_new = self.air_ap[var][indexh[0][0]]
                 
                 if ((value_old is not None) & (value_old != value_new)):
-                    warnings.warn("Warning:  input was provided \
-                                     ("+str(value_old)+ "kg kg-1), \
-                                     but it is now overwritten by the first \
-                                     level (index 0) of air_ap]var\ which is \
-                                     different (" +str(value_new)+"K).")
-                                        
+                    warnings.warn("Warning:  input was provided ("+str(value_old)+ "kg kg-1), but it is now overwritten by the first level (index 0) of air_ap.var which is different (" +str(value_new)+"kg kg-1).") 
                 self.__dict__[var] = value_new
 
                 # make a profile of the stratification 
@@ -952,18 +945,15 @@ def run_mixed_layer(self):
         if (self.dtheta <= 0.1) and (dthetatend_pre < 0.):
             l_entrainment = False
             warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! temperature jump is at the lower limit \
-                          and is not growing: entrainment is disabled for this (sub)timestep.") 
+                          " Warning! temperature jump is at the lower limit and is not growing: entrainment is disabled for this (sub)timestep.") 
         elif dtheta_pre < 0.1:
             dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre)
             l_entrainment = True
             warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          " Warning! Potential temperature jump at mixed- \
-                          layer height would become too low limiting timestep \
-                          from "+ str(self.dtmax)+' to '+str(dtmax_new))
+                          " Warning! Potential temperature jump at mixed-layer height would become too low. So I'm limiting the timestep from "+ str(self.dtmax)+' to '+str(dtmax_new))
             self.dtmax = min(self.dtmax,dtmax_new)
             warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "next subtimestep, entrainment will be disabled")
+                          " Next subtimestep, entrainment will also be disabled")
             #self.dthetatend = (0.1 - self.dtheta)/self.dtcur 
 
 
@@ -1232,12 +1222,7 @@ def integrate_mixed_layer(self):
 
             if itop > 1:
                     warnings.warn(str(self.t)+"/"+str(self.tsteps)+\
-                          "Warning! Temperature profile was too steep. \
-                                  Modifying profile: "+ \
-                                  str(itop - 1)+ " measurements were dropped \
-                                  and replaced with its average \
-                                  Modifying profile. \
-                                  mean with next profile point(s).") 
+                          " Warning! Temperature profile was too steep.  Modifying profile: "+ str(itop - 1)+ " measurements were dropped and replaced with their average.") 
 
 
             self.air_ap = pd.concat((air_ap_head,\
@@ -1277,8 +1262,7 @@ def integrate_mixed_layer(self):
             in_ml = (self.air_ac.p >= self.P_h)
 
             if in_ml.sum() == 0:
-                warnings.warn(" no circulation points in the mixed layer \
-                              found. We just take the bottom one.")
+                warnings.warn(" no circulation points in the mixed layer found. We just take the bottom one.")
                 in_ml = self.air_ac.index == (len(self.air_ac) - 1)
             for var in ['t','q','u','v']:
 
diff --git a/class4gl/setup/setup_igra_20181217.py b/class4gl/setup/setup_igra_20181217.py
index 2733f3d..03da366 100644
--- a/class4gl/setup/setup_igra_20181217.py
+++ b/class4gl/setup/setup_igra_20181217.py
@@ -153,8 +153,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
     one_run = False
 # for iSTN,STN in STNlist[5:]:  
     
-    fnout = args.path_output+"/"+format(STN.name,'05d')+"_morning.yaml"
-    fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_afternoon.yaml"
+    fnout = args.path_output+"/"+format(STN.name,'05d')+"_ini.yaml"
+    fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_end.yaml"
     
 
     # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \
@@ -344,12 +344,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu
         #STN.name = STN.name
         all_records_morning = get_records(pd.DataFrame([STN]),\
                                       args.path_output,\
-                                      subset='morning',
+                                      subset='ini',
                                       refetch_records=True,
                                       )
         all_records_afternoon = get_records(pd.DataFrame([STN]),\
                                       args.path_output,\
-                                      subset='afternoon',
+                                      subset='end',
                                       refetch_records=True,
                                       )
     else:
diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py
index 5260a95..61c7da2 100644
--- a/class4gl/simulations/batch_simulations.py
+++ b/class4gl/simulations/batch_simulations.py
@@ -11,15 +11,21 @@
 
 import argparse
 
+from time import sleep
+
 parser = argparse.ArgumentParser()
 #if __name__ == '__main__':
 parser.add_argument('--exec') # chunk simulation script
-parser.add_argument('--first_station_row')
+parser.add_argument('--first_station_row',
+                    help='starting row number of stations table')
 parser.add_argument('--last_station_row')
 parser.add_argument('--pbs_string',default=' -l walltime=2:0:0')
-parser.add_argument('--station_id') # run a specific station id
+parser.add_argument('--station_id',
+                    help="process a specific station id")
 parser.add_argument('--error_handling')
-parser.add_argument('--subset_forcing',default='morning') 
+parser.add_argument('--multi_processing_mode',default='pythonpool')
+parser.add_argument('--cpu_count',type=int,default=2)
+parser.add_argument('--subset_forcing',default='ini') 
                                         # this tells which yaml subset
                                         # to initialize with.
                                         # Most common options are
@@ -27,24 +33,37 @@
 
 # Tuntime is usually specified from the afternoon profile. You can also just
 # specify the simulation length in seconds
-parser.add_argument('--runtime')
+parser.add_argument('--runtime',
+                    help="set the runtime of the simulation in seconds, or get it from the daytime difference in the profile pairs 'from_profile_pair' (default)")
 # delete folders of experiments before running them
-parser.add_argument('--cleanup_experiments',default=False)
-parser.add_argument('--experiments')
-parser.add_argument('--split_by',default=50)# station soundings are split
-                                            # up in chunks
+parser.add_argument('--cleanup_output_directories',
+                    default="False",
+                    help="clean up output directories before executing the experiments")
+parser.add_argument('--experiments', 
+                    help="IDs of experiments, as a space-separated list (default: 'BASE')")
+parser.add_argument('--experiments_names', 
+                    help="Alternative output names that are given to the experiments. By default, these are the same as --experiments") 
+parser.add_argument('--split_by',
+                    default=50,
+                    type=int,
+                    help="the maximum number of soundings that are contained in each output file of a station. -1 means unlimited. The default for array experiments is 50.")
+
+parser.add_argument('--c4gl_path_lib',help="the path of the CLASS4GL program.")#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+parser.add_argument('--path_forcing',
+                    help='directory of forcing data to initialize and constrain the ABL model simulations'
+                   )
+parser.add_argument('--path_experiments',
+                    help='output directory in which the experiments as subdirectories are stored')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 
-parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--path_forcing') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--path_experiments') #,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
 
 
 #arguments only used for update_yaml.py
-parser.add_argument('--path_dataset') 
-parser.add_argument('--global_keys') 
-args = parser.parse_args()
+#parser.add_argument('--path_dataset') 
+#parser.add_argument('--global_keys') 
+batch_args = parser.parse_args()
 
-sys.path.insert(0, args.c4gl_path_lib)
+if batch_args.c4gl_path_lib is not None:
+    sys.path.insert(0, batch_args.c4gl_path_lib)
 from class4gl import class4gl_input, data_global,class4gl
 from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
 from class4gl import blh,class4gl_input
@@ -64,72 +83,141 @@
 
 
 # #SET = 'GLOBAL'
-# SET = args.dataset
+# SET = batch_args.dataset
 
-# path_forcingSET = args.path_forcing+'/'+SET+'/'
+# path_forcingSET = batch_args.path_forcing+'/'+SET+'/'
 
-print("getting all stations from --path_forcing")
+print("getting all stations from "+batch_args.path_forcing)
 # these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+all_stations = stations(batch_args.path_forcing,suffix=batch_args.subset_forcing,refetch_stations=False)
 
 print('defining all_stations_select')
 # these are all the stations that are supposed to run by the whole batch (all
 # chunks). We narrow it down according to the station(s) specified.
-if args.station_id is not None:
+if batch_args.station_id is not None:
     print("Selecting stations by --station_id")
     stations_iter = stations_iterator(all_stations)
-    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+    STNID,run_station = stations_iter.set_STNID(STNID=int(batch_args.station_id))
     all_stations_select = pd.DataFrame([run_station])
 else:
     print("Selecting stations from a row range in the table [--first_station_row,--last_station_row]")
     all_stations_select = pd.DataFrame(all_stations.table)
-    if args.last_station_row is not None:
-        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
-    if args.first_station_row is not None:
-        all_stations_select = all_station_select.iloc[int(args.first_station):]
+    if batch_args.last_station_row is not None:
+        all_stations_select = all_stations_select.iloc[:(int(batch_args.last_station_row)+1)]
+    if batch_args.first_station_row is not None:
+        all_stations_select = all_stations_select.iloc[int(batch_args.first_station_row):]
 print("station numbers included in the whole batch "+\
       "(all chunks):",list(all_stations_select.index))
 
 print("getting all records of the whole batch")
 all_records_morning_select = get_records(all_stations_select,\
-                                         args.path_forcing,\
-                                         subset=args.subset_forcing,\
+                                         batch_args.path_forcing,\
+                                         subset=batch_args.subset_forcing,\
                                          refetch_records=False,\
                                         )
 
-print('splitting batch in --split_by='+args.split_by+' jobs.')
+print('splitting batch in --split_by='+str(batch_args.split_by)+' jobs.')
 totalchunks = 0
 for istation,current_station in all_stations_select.iterrows():
     records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
-    chunks_current_station = math.ceil(float(len(records_morning_station_select))/float(args.split_by))
+    chunks_current_station = math.ceil(float(len(records_morning_station_select))/float(batch_args.split_by))
     totalchunks +=chunks_current_station
 
-print('total chunks (= size of array-job) per experiment: ' + str(totalchunks))
-
-#if sys.argv[1] == 'qsub':
-# with qsub
-
-print(args.experiments.strip().split(" "))
+print('total chunks of simulations (= size of array-job) per experiment: ' + str(totalchunks))
 
-for EXP in args.experiments.strip().split(" "):
-    if args.cleanup_experiments:
-        os.system("rm -R "+args.path_experiments+'/'+EXP)
+experiments = batch_args.experiments.strip(' ').split(' ')
+if batch_args.experiments_names is not None:
+    experiments_names = batch_args.experiments_names.strip(' ').split(' ')
+    if len(experiments_names) != len(experiments):
+        raise ValueError('Length of --experiments_names is different from --experiments')
+else:
+    experiments_names = experiments
 
-    # C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
-    command = 'qsub '+args.pbs_string+' '+args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
-                str(totalchunks-1)+" -v C4GLJOB_experiments="+str(EXP)
-    # propagate arguments towards the job script
-    for argkey in args.__dict__.keys():
-        if ((argkey not in ['experiments','pbs_string','cleanup_experiments']) and \
-            # default values are specified in the simulation script, so
-            # excluded here
-            (args.__dict__[argkey] is not None)
-           ):
-                command +=',C4GLJOB_'+argkey+'='+args.__dict__[argkey]
+odir_exists = False
 
-    print('Submitting array job for experiment '+EXP+': '+command)
-    os.system(command)
+cleanup = (batch_args.cleanup_output_directories == 'True')
 
+if not cleanup:
+    for expname in experiments_names:
+        if os.path.exists(batch_args.path_experiments+'/'+expname):
+            print("Output directory already exists: "+batch_args.path_experiments+'/'+expname+". ")
+            odir_exists = True
+if odir_exists:
+    raise IOError("At least one of the output directories exists. Please use '--cleanup_output_directories True' to delete any output directory.")
+else:
+    for iexp,expname in enumerate(experiments_names):
+        if cleanup:
+            if os.path.exists(batch_args.path_experiments+'/'+expname):
+                print("Warning! Output directory '"+batch_args.path_experiments+'/'+expname+"' exists! I'm removing it in 10 seconds! Press ctrl-c to abort.")
+                sleep(10)
+                os.system("rm -R "+batch_args.path_experiments+'/'+expname)
+        if batch_args.multi_processing_mode == 'qsub':
+    
+            # C4GLJOB_timestamp="+dt.datetime.now().isoformat()+",
+            command = 'qsub '+batch_args.pbs_string+' '+batch_args.c4gl_path_lib+'/simulations/batch_simulations.pbs -t 0-'+\
+                        str(totalchunks-1)+" -v C4GLJOB_experiments="+str(experiments[iexp])+",C4GLJOB_experiments_names="+str(expname)
+            # propagate arguments towards the job script
+            for argkey in batch_args.__dict__.keys():
+                if ((argkey not in ['multi_processing_mode','cpu_count','experiments','experiments_names','pbs_string','cleanup_output_directories']) and \
+                    # default values are specified in the simulation script, so
+                    # excluded here
+                    (batch_args.__dict__[argkey] is not None)
+                   ):
+                        command +=',C4GLJOB_'+argkey+'='+str(batch_args.__dict__[argkey])
+    
+            print('Submitting array job for experiment '+expname+': '+command)
+            os.system(command)
+
+        elif batch_args.multi_processing_mode == 'pythonpool':
+            from multiprocessing import Pool                                       
+            
+            # # load moodule from absolute path
+            # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
+            import importlib.util
+            print(batch_args.exec)
+            spec = importlib.util.spec_from_file_location("module.name",
+                                                          batch_args.exec)
+            task_module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(task_module)
+            print('hello')
+            print(batch_args.exec)
+            
+
+            args_dict_current = {**batch_args.__dict__}
+
+            # we avoid to pass Nones, so that defaults are taken from the child
+            # script
+            for removekey,nonevalue in batch_args.__dict__.items():
+                if nonevalue is None:
+                    args_dict_current.pop(removekey)
+
+            # remove keys that are not relevant in the child script, so not
+            # passed (or those that are redefined in the host script manually)
+            for key in ['exec','multi_processing_mode','cpu_count','experiments','experiments_names','pbs_string','cleanup_output_directories']:
+                if key in args_dict_current:
+                    args_dict_current.pop(key)
+
+            args_dict_current['experiments'] = experiments[iexp]
+            args_dict_current['experiments_names'] = expname
+
+            print(args_dict_current)
+            all_tasks = []
+            for ichunk in range(totalchunks):
+                all_tasks.append({'global_chunk_number':str(ichunk),**args_dict_current}) 
+
+            print(pd.DataFrame(all_tasks)) 
+            def parallelize(analysis, filenames, processes):
+                '''
+                Call `analysis` for each file in the sequence `filenames`, using
+                up to `processes` parallel processes. Wait for them all to complete
+                and then return a list of results.
+                '''
+                return Pool(processes).map(analysis, filenames, chunksize = 1)
+    
+            def execute_kwargs(x):
+                return task_module.execute(**x)
+
+            parallelize(execute_kwargs,all_tasks,int(batch_args.cpu_count))
 
     #os.system(command)
 # elif sys.argv[1] == 'wsub':
diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py
index 20fc34f..3087fe3 100644
--- a/class4gl/simulations/simulations.py
+++ b/class4gl/simulations/simulations.py
@@ -9,375 +9,443 @@
 import pytz
 import math
 
-import argparse
 
-#if __name__ == '__main__':
-parser = argparse.ArgumentParser()
-#parser.add_argument('--timestamp')
-parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/')
-parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
-parser.add_argument('--first_station_row')
-parser.add_argument('--last_station_row')
-parser.add_argument('--station_id') # run a specific station id
-parser.add_argument('--error_handling',default='dump_on_success')
-parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv'])
-parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset
-                                                      # to initialize with.
-                                                      # Most common options are
-                                                      # 'morning' and 'ini'.
+arguments = []
 
+#parser.add_argument('--timestamp')
+arguments.append(dict(arg='--path_forcing',\
+                    help='directory of forcing data to initialize and constrain the ABL model simulations'))
+arguments.append(dict(arg='--path_experiments',
+                    help='output directory in which the experiments as subdirectories are stored'))#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/')
+arguments.append(dict(arg='--first_station_row',\
+                    help='starting row number of stations table'))
+arguments.append(dict(arg='--last_station_row',\
+                    help='ending row number of stations table'))
+arguments.append(dict(arg='--station_id',\
+                    help="process a specific station id"))
+arguments.append(dict(arg='--error_handling',\
+                    default='dump_on_success',\
+                    help="type of error handling: either\n - 'dump_on_success' (default)\n - 'dump_always'"))
+arguments.append(dict(arg='--diag_tropo',\
+                    default=['advt','advq','advu','advv'],\
+                    help="field to diagnose the mean in the troposphere (<= 3000m)"))
+arguments.append(dict(arg='--subset_forcing',
+                    default='ini', 
+                    help="This indicates which yaml subset to initialize with.  Most common options are 'ini' (default) and 'morning'."))
 # Tuntime is usually specified from the afternoon profile. You can also just
 # specify the simulation length in seconds
-parser.add_argument('--runtime',default='from_afternoon_profile')
-
-parser.add_argument('--experiments')
-parser.add_argument('--split_by',default=-1)# station soundings are split
-                                            # up in chunks
-
-#parser.add_argument('--station-chunk',default=0)
-parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
-parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations
-parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations
-args = parser.parse_args()
-
-sys.path.insert(0, args.c4gl_path_lib)
-from class4gl import class4gl_input, data_global,class4gl
-from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
-from class4gl import blh,class4gl_input
-
-# this is a variant of global run in which the output of runs are still written
-# out even when the run crashes.
+arguments.append(dict(arg='--runtime',
+                    default='from_input',
+                    help="set the runtime of the simulation in seconds, or get it from the daytime difference in the profile pairs 'from_input' (default)"))
 
-# #only include the following timeseries in the model output
-# timeseries_only = \
-# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
-#  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
-#  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
-#  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
-#  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+arguments.append(dict(arg='--experiments', help="IDs of experiments, as a space-seperated list (default: 'BASE')"))
+arguments.append(dict(arg='--split_by',\
+                    type=int,
+                    help="the maxmimum number of soundings that are contained in each output file of a station. -1 means unlimited (default). In case of arrays experiments, this is usually overwritten by 50."))
 
+#arguments.append(dict(arg='--station-chunk',default=0)
+arguments.append(dict(arg='--c4gl_path_lib',help="the path of the CLASS4GL program"))#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib')
+arguments.append(dict(arg='--global_chunk_number',help="this is the batch number of the expected series of experiments according to split_by"))
+arguments.append(dict(arg='--station_chunk_number',help="this is the batch number according to split_by in case of considering one station"))
+arguments.append(dict(arg='--experiments_names', help="Alternative output names that are given to the experiments. By default, these are the same as --experiments") )
 
-EXP_DEFS  =\
-{
-  'ERA_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-    'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True},
-  'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-  'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
-  'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
-  'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
-  'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
-}
 
-# ========================
-print("getting a list of stations")
-# ========================
 
-# these are all the stations that are found in the input dataset
-all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+if __name__ == '__main__':
+    import argparse
+    parser = argparse.ArgumentParser()
+    #parser.add_argument('--timestamp')
+    for argument in arguments:
+        name = argument.pop('arg')
+        parser.add_argument(name,**argument)
 
-# ====================================
-print('defining all_stations_select')
-# ====================================
-
-# these are all the stations that are supposed to run by the whole batch (all
-# chunks). We narrow it down according to the station(s) specified.
-
-
-
-if args.station_id is not None:
-    print("Selecting station by ID")
-    stations_iter = stations_iterator(all_stations)
-    STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
-    all_stations_select = pd.DataFrame([run_station])
+    args = parser.parse_args()
 else:
-    print("Selecting stations from a row range in the table")
-    all_stations_select = pd.DataFrame(all_stations.table)
-    if args.last_station_row is not None:
-        all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)]
-    if args.first_station_row is not None:
-        all_stations_select = all_station_select.iloc[int(args.first_station):]
-print("station numbers included in the whole batch "+\
-      "(all chunks):",list(all_stations_select.index))
-
-print(all_stations_select)
-print("getting all records of the whole batch")
-all_records_morning_select = get_records(all_stations_select,\
-                                         args.path_forcing,\
-                                         subset=args.subset_forcing,
-                                         refetch_records=False,
-                                         )
+    class Namespace:
+        def __init__(self,**kwargs):
+            self.__dict__.update(kwargs)
+
+    args = Namespace()
+    for argument in arguments:
+        if 'default' in argument.keys():
+            args.__dict__[argument['arg'].strip('-')] = argument['default']
+        else:
+            args.__dict__[argument['arg'].strip('-')] = None
+    print(args.__dict__)
+        
 
-# only run a specific chunck from the selection
-if args.global_chunk_number is not None:
-    if args.station_chunk_number is not None:
-        raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+def execute(**kwargs):
+    # note that with args, we actually mean the same as those specified with
+    # the argparse module above
 
+    # overwrite the args according to the kwargs when the procedure is called
+    # as module function
+    for key,value in kwargs.items():
+        args.__dict__[key]  = value
 
-    if not (int(args.split_by) > 0) :
-            raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.")
+    print("-- begin arguments --")
+    for key,value in args.__dict__.items():
+         print(key,': ',value)
+    print("-- end arguments ----")
 
-    run_station_chunk = None
-    print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
-    totalchunks = 0
-    stations_iter = all_stations_select.iterrows()
-    in_current_chunk = False
-    try:
-        while not in_current_chunk:
-            istation,current_station = stations_iter.__next__()
-            all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
-            chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
-            print('chunks_current_station',chunks_current_station)
-            in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
-        
-            if in_current_chunk:
-                run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
-                run_station_chunk = int(args.global_chunk_number) - totalchunks 
-        
-            totalchunks +=chunks_current_station
+    # load specified class4gl library
+    if args.c4gl_path_lib is not None:
+        sys.path.insert(0, args.c4gl_path_lib)
+    
+    from class4gl import class4gl_input, data_global,class4gl
+    from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records
+    from class4gl import blh,class4gl_input
+    
+    # this is a variant of global run in which the output of runs is still written
+    # out even when the run crashes.
+    
+    # #only include the following timeseries in the model output
+    # timeseries_only = \
+    # ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin',
+    #  'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta',
+    #  'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat',
+    #  'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw',
+    #  'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl']
+    
+    
+    EXP_DEFS  =\
+    {
+      'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+    
+      'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False},
         
-
-    except StopIteration:
-       raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
-    print("station = ",list(run_stations.index))
-    print("station chunk number:",run_station_chunk)
-
-# if no global chunk is specified, then run the whole station selection in one run, or
-# a specific chunk for each selected station according to # args.station_chunk_number
-else:
-    run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
-    if args.station_chunk_number is not None:
-        run_station_chunk = int(args.station_chunk_number)
-        print("station(s) that is processed.",list(run_stations.index))
-        print("chunk number: ",run_station_chunk)
+      'ERA_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+      'NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+      'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+      'W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+      'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+      'GLOBAL_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+      'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+      'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+      'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+      'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+        'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True},
+      'GLOBAL_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+      'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+      'IOPS_NOAC':    {'sw_ac' : [],'sw_ap': True,'sw_lit': False},
+      'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False},
+      'IOPS_W':  {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False},
+      'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False},
+    }
+    
+    # ========================
+    print("getting a list of stations")
+    # ========================
+    
+    # these are all the stations that are found in the input dataset
+    all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False)
+    
+    # ====================================
+    print('defining all_stations_select')
+    # ====================================
+    
+    # these are all the stations that are supposed to be run by the whole batch (all
+    # chunks). We narrow it down according to the station(s) specified.
+    
+    
+    
+    if args.station_id is not None:
+        print("Selecting station by ID")
+        stations_iter = stations_iterator(all_stations)
+        STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id))
+        all_stations_select = pd.DataFrame([run_station])
     else:
-        if args.split_by != -1:
-            raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.")
-        run_station_chunk = 0
-        print("stations that are processed.",list(run_stations.index))
-        
-
-#print(all_stations)
-print('Fetching initial/forcing records')
-records_morning = get_records(run_stations,\
-                              args.path_forcing,\
-                              subset=args.subset_forcing,
-                              refetch_records=False,
-                              )
-
-# note that if runtime is an integer number, we don't need to get the afternoon
-# profiles. 
-if args.runtime == 'from_afternoon_profile':
-    print('Fetching afternoon records for determining the simulation runtimes')
-    records_afternoon = get_records(run_stations,\
-                                    args.path_forcing,\
-                                    subset='afternoon',
-                                    refetch_records=False,
-                                    )
-    
-    # print(records_morning.index)
-    # print(records_afternoon.index)
-    # align afternoon records with the noon records, and set same index
-    print('hello')
-    print(len(records_afternoon))
-    print(len(records_morning))
-
-    print("aligning morning and afternoon records")
-    records_morning['dates'] = records_morning['ldatetime'].dt.date
-    records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
-    records_afternoon.set_index(['STNID','dates'],inplace=True)
-    ini_index_dates = records_morning.set_index(['STNID','dates']).index
-    records_afternoon = records_afternoon.loc[ini_index_dates]
-    records_afternoon.index = records_morning.index
-
-experiments = args.experiments.strip(' ').split(' ')
-for expname in experiments:
-    exp = EXP_DEFS[expname]
-    path_exp = args.path_experiments+'/'+expname+'/'
-
-    os.system('mkdir -p '+path_exp)
-    for istation,current_station in run_stations.iterrows():
-        print(istation,current_station)
-        records_morning_station = records_morning.query('STNID == '+str(current_station.name))
-        if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)):
-            print("warning: outside of profile number range for station "+\
-                  str(current_station)+". Skipping chunk number for this station.")
+        print("Selecting stations from a row range in the table")
+        all_stations_select = pd.DataFrame(all_stations.table)
+        if args.last_station_row is not None:
+            all_stations_select = all_stations_select.iloc[:(int(args.last_station_row)+1)]
+        if args.first_station_row is not None:
+            all_stations_select = all_stations_select.iloc[int(args.first_station_row):]
+    print("station numbers included in the whole batch "+\
+          "(all chunks):",list(all_stations_select.index))
+    
+    print(all_stations_select)
+    print("getting all records of the whole batch")
+    all_records_morning_select = get_records(all_stations_select,\
+                                             args.path_forcing,\
+                                             subset=args.subset_forcing,
+                                             refetch_records=False,
+                                             )
+    
+    # only run a specific chunk from the selection
+    if args.global_chunk_number is not None:
+        if args.station_chunk_number is not None:
+            raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.')
+    
+        if (args.split_by is None) or (args.split_by <= 0):
+                raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.")
+    
+        run_station_chunk = None
+        print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')')
+        totalchunks = 0
+        stations_iter = all_stations_select.iterrows()
+        in_current_chunk = False
+        try:
+            while not in_current_chunk:
+                istation,current_station = stations_iter.__next__()
+                all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name))
+                chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by))
+                print('chunks_current_station',chunks_current_station)
+                in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station))
+            
+                if in_current_chunk:
+                    run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])]
+                    run_station_chunk = int(args.global_chunk_number) - totalchunks 
+            
+                totalchunks +=chunks_current_station
+            
+    
+        except StopIteration:
+           raise ValueError("Could not determine station chunk number.  --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[')
+        print("station = ",list(run_stations.index))
+        print("station chunk number:",run_station_chunk)
+    
+    # if no global chunk is specified, then run the whole station selection in one run, or
+    # a specific chunk for each selected station according to # args.station_chunk_number
+    else:
+        run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])]
+        if args.station_chunk_number is not None:
+            run_station_chunk = int(args.station_chunk_number)
+            print("station(s) that is processed.",list(run_stations.index))
+            print("chunk number: ",run_station_chunk)
         else:
-            fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
-            if os.path.isfile(fn_morning):
-                file_morning = open(fn_morning)
-            else:
-                fn_morning = \
-                     args.path_forcing+'/'+format(current_station.name,'05d')+\
-                     '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
-                file_morning = open(fn_morning)
-
-            if args.runtime == 'from_afternoon_profile':
-                file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml')
-            fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_ini.yaml'
-            fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
-                     str(int(run_station_chunk))+'_mod.yaml'
-            file_ini = open(fn_ini,'w')
-            file_mod = open(fn_mod,'w')
-
-            #iexp = 0
-            onerun = False
-            print('starting station chunk number: '\
-                  +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)')
-
-            records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
-
-            isim = 0
-            for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
-                    print('starting '+str(isim+1)+' out of '+\
-                      str(len(records_morning_station_chunk) )+\
-                      ' (station total: ',str(len(records_morning_station)),')')  
-                
+            if args.split_by is not None:
+                raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.")
+            run_station_chunk = 0
+            print("stations that are processed.",list(run_stations.index))
             
-                    c4gli_morning = get_record_yaml(file_morning, 
-                                                    record_morning.index_start, 
-                                                    record_morning.index_end,
-                                                    mode='ini')
-                    if args.diag_tropo is not None:
-                        print('add tropospheric parameters on advection and subsidence (for diagnosis)')
-                        seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
-                        profile_tropo = c4gli_morning.air_ac[seltropo]
-                        for var in args.diag_tropo:#['t','q','u','v',]:
-                            if var[:3] == 'adv':
-                                mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
-                                c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
-                            else:
-                                print("warning: tropospheric variable "+var+" not recognized")
-                    
+    
+    #print(all_stations)
+    print('Fetching initial/forcing records')
+    records_morning = get_records(run_stations,\
+                                  args.path_forcing,\
+                                  subset=args.subset_forcing,
+                                  refetch_records=False,
+                                  )
+    
+    # note that if runtime is an integer number, we don't need to get the afternoon
+    # profiles. 
+    if args.runtime == 'from_profile_pair':
+        print('Fetching afternoon records for determining the simulation runtimes')
+        records_afternoon = get_records(run_stations,\
+                                        args.path_forcing,\
+                                        subset='end',
+                                        refetch_records=False,
+                                        )
+        
+        # print(records_morning.index)
+        # print(records_afternoon.index)
+        # align afternoon records with the noon records, and set same index
+        print('hello')
+        print(len(records_afternoon))
+        print(len(records_morning))
+    
+        print("aligning morning and afternoon records")
+        records_morning['dates'] = records_morning['ldatetime'].dt.date
+        records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date
+        records_afternoon.set_index(['STNID','dates'],inplace=True)
+        ini_index_dates = records_morning.set_index(['STNID','dates']).index
+        records_afternoon = records_afternoon.loc[ini_index_dates]
+        records_afternoon.index = records_morning.index
+    
+    experiments = args.experiments.strip(' ').split(' ')
+    if args.experiments_names is not None:
+        experiments_names = args.experiments_names.strip(' ').split(' ')
+        if len(experiments_names) != len(experiments):
+            raise ValueError('Lenght of --experiments_names is different from --experiments')
+    
+    else:
+        experiments_names = experiments
+    
+    for iexpname,expid in enumerate(experiments):
+        expname = experiments_names[iexpname]
+        exp = EXP_DEFS[expid]
+        path_exp = args.path_experiments+'/'+expname+'/'
+    
+        os.system('mkdir -p '+path_exp)
+        for istation,current_station in run_stations.iterrows():
+            print(istation,current_station)
+            records_morning_station = records_morning.query('STNID == '+str(current_station.name))
+            start_record = run_station_chunk*args.split_by if args.split_by is not None else 0
+            end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None
+            if start_record >= (len(records_morning_station)):
+                print("warning: outside of profile number range for station "+\
+                      str(current_station)+". Skipping chunk number for this station.")
+            else:
+                fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml'
+                if os.path.isfile(fn_morning):
+                    file_morning = open(fn_morning)
+                else:
+                    fn_morning = \
+                         args.path_forcing+'/'+format(current_station.name,'05d')+\
+                         '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml'
+                    file_morning = open(fn_morning)
+    
+                if args.runtime == 'from_profile_pair':
+                    file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml')
+                fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                         str(int(run_station_chunk))+'_ini.yaml'
+                fn_end_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\
+                         str(int(run_station_chunk))+'_end.yaml'
+                file_ini = open(fn_ini,'w')
+                file_end_mod = open(fn_end_mod,'w')
+    
+                #iexp = 0
+                onerun = False
+                print('starting station chunk number: '\
+                      +str(run_station_chunk)+' (chunk size:',args.split_by,')')
+    
+                records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] #  [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))]
+    
+                isim = 0
+                for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows():
+                        print('starting '+str(isim+1)+' out of '+\
+                          str(len(records_morning_station_chunk) )+\
+                          ' (station total: ',str(len(records_morning_station)),')')  
                     
-                    if args.runtime == 'from_afternoon_profile':
-                        record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
-                        c4gli_afternoon = get_record_yaml(file_afternoon, 
-                                                          record_afternoon.index_start, 
-                                                          record_afternoon.index_end,
-                                                        mode='ini')
-                        runtime = int((c4gli_afternoon.pars.datetime_daylight - 
-                                             c4gli_morning.pars.datetime_daylight).total_seconds())
-                    elif args.runtime == 'from_input':
-                        runtime = c4gli_morning.pars.runtime
-                    else:
-                        runtime = int(args.runtime)
-
-            
-                    c4gli_morning.update(source='pairs',pars={'runtime' : \
-                                        runtime})
-                    c4gli_morning.update(source=expname, pars=exp)
-                    if expname[-3:] == 'SM2':
-                        c4gli_morning.update(source=expname, pars={'wg': c4gli_morning.pars.wg - (c4gli_morning.pars.wg - c4gli_morning.pars.wwilt)/2.})
-                        c4gli_morning.update(source=expname, pars={'w2': c4gli_morning.pars.w2 - (c4gli_morning.pars.w2 - c4gli_morning.pars.wwilt)/2.})
-
-                    c4gl = class4gl(c4gli_morning)
-
-                    if args.error_handling == 'dump_always':
-                        try:
-                            print('checking data sources')
-                            if not c4gli_morning.check_source_globaldata():
-                                print('Warning: some input sources appear invalid')
-                            c4gl.run()
-                            print('run succesful')
-                        except:
-                            print('run not succesful')
-                        onerun = True
-
-                        c4gli_morning.dump(file_ini)
+                
+                        c4gli_morning = get_record_yaml(file_morning, 
+                                                        record_morning.index_start, 
+                                                        record_morning.index_end,
+                                                        mode='model_input')
+                        if args.diag_tropo is not None:
+                            print('add tropospheric parameters on advection and subsidence (for diagnosis)')
+                            seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 ))
+                            profile_tropo = c4gli_morning.air_ac[seltropo]
+                            for var in args.diag_tropo:#['t','q','u','v',]:
+                                if var[:3] == 'adv':
+                                    mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] )
+                                    c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo})
+                                else:
+                                    print("warning: tropospheric variable "+var+" not recognized")
                         
                         
-                        c4gl.dump(file_mod,\
-                                  include_input=False,\
-                                  #timeseries_only=timeseries_only,\
-                                 )
-                        onerun = True
-                    # in this case, only the file will dumped if the runs were
-                    # successful
-                    elif args.error_handling == 'dump_on_success':
-                       try:
-                            print('checking data sources')
-                            if not c4gli_morning.check_source_globaldata():
-                                print('Warning: some input sources appear invalid')
-                            c4gl.run()
-                            print('run succesfull')
+                        if args.runtime == 'from_profile_pair':
+                            record_afternoon = records_afternoon.loc[(STNID,chunk,index)]
+                            c4gli_afternoon = get_record_yaml(file_afternoon, 
+                                                              record_afternoon.index_start, 
+                                                              record_afternoon.index_end,
+                                                            mode='model_input')
+                            runtime = int((c4gli_afternoon.pars.datetime_daylight - 
+                                                 c4gli_morning.pars.datetime_daylight).total_seconds())
+                        elif args.runtime == 'from_input':
+                            runtime = c4gli_morning.pars.runtime
+                        else:
+                            runtime = int(args.runtime)
+    
+                
+                        c4gli_morning.update(source='pairs',pars={'runtime' : \
+                                            runtime})
+                        c4gli_morning.update(source=expname, pars=exp)
+    
+                        c4gl = class4gl(c4gli_morning)
+    
+                        if args.error_handling == 'dump_always':
+                            try:
+                                print('checking data sources')
+                                if not c4gli_morning.check_source_globaldata():
+                                    print('Warning: some input sources appear invalid')
+                                c4gl.run()
+                                print('run succesful')
+                            except:
+                                print('run not succesful')
+                            onerun = True
+    
+                            print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') 
                             c4gli_morning.dump(file_ini)
                             
                             
-                            c4gl.dump(file_mod,\
+                            c4gl.dump(file_end_mod,\
                                       include_input=False,\
                                       #timeseries_only=timeseries_only,\
                                      )
                             onerun = True
-                       except:
-                           print('run not succesfull')
-                    isim += 1
-
-
-            file_ini.close()
-            file_mod.close()
-            file_morning.close()
-            if args.runtime == 'from_afternoon_profile':
-                file_afternoon.close()
-    
-            if onerun:
-                records_ini = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='ini',
-                                                           refetch_records=True,
-                                                           )
-                records_mod = get_records(pd.DataFrame([current_station]),\
-                                                           path_exp,\
-                                                           getchunk = int(run_station_chunk),\
-                                                           subset='mod',\
-                                                           refetch_records=True,\
-                                                           )
-            else:
-                # remove empty files
-                os.system('rm '+fn_ini)
-                os.system('rm '+fn_mod)
-    
-    # # align afternoon records with initial records, and set same index
-    # records_afternoon.index = records_afternoon.ldatetime.dt.date
-    # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
-    # records_afternoon.index = records_ini.index
-    
-    # stations_for_iter = stations(path_exp)
-    # for STNID,station in stations_iterator(stations_for_iter):
-    #     records_current_station_index = \
-    #             (records_ini.index.get_level_values('STNID') == STNID)
-    #     file_current_station_mod = STNID
-    # 
-    #     with \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
-    #     open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \
-    #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
-    #         for (STNID,index),record_ini in records_iterator(records_ini):
-    #             c4gli_ini = get_record_yaml(file_station_ini, 
-    #                                         record_ini.index_start, 
-    #                                         record_ini.index_end,
-    #                                         mode='ini')
-    #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
-    # 
-    #             record_mod = records_mod.loc[(STNID,index)]
-    #             c4gl_mod = get_record_yaml(file_station_mod, 
-    #                                         record_mod.index_start, 
-    #                                         record_mod.index_end,
-    #                                         mode='mod')
-    #             record_afternoon = records_afternoon.loc[(STNID,index)]
-    #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
-    #                                         record_afternoon.index_start, 
-    #                                         record_afternoon.index_end,
-    #                                         mode='ini')
+                        # in this case, only the file will dumped if the runs were
+                        # successful
+                        elif args.error_handling == 'dump_on_success':
+                           try:
+                                print('checking data sources')
+                                if not c4gli_morning.check_source_globaldata():
+                                    print('Warning: some input sources appear invalid')
+                                c4gl.run()
+                                print('run succesful')
+                                c4gli_morning.dump(file_ini)
+                                
+                                
+                                print("dumping to "+str(file_ini)) 
+                                c4gl.dump(file_end_mod,\
+                                          include_input=False,\
+                                          #timeseries_only=timeseries_only,\
+                                         )
+                                onerun = True
+                           except:
+                               print('run not succesful')
+                        isim += 1
+    
+    
+                file_ini.close()
+                file_end_mod.close()
+                file_morning.close()
+                if args.runtime == 'from_profile_pair':
+                    file_afternoon.close()
+        
+                if onerun:
+                    records_ini = get_records(pd.DataFrame([current_station]),\
+                                                               path_exp,\
+                                                               getchunk = int(run_station_chunk),\
+                                                               subset='ini',
+                                                               refetch_records=True,
+                                                               )
+                    records_end_mod = get_records(pd.DataFrame([current_station]),\
+                                                               path_exp,\
+                                                               getchunk = int(run_station_chunk),\
+                                                               subset='end',\
+                                                               refetch_records=True,\
+                                                               )
+                else:
+                    # remove empty files
+                    os.system('rm '+fn_ini)
+                    os.system('rm '+fn_end_mod)
+        
+        # # align afternoon records with initial records, and set same index
+        # records_afternoon.index = records_afternoon.ldatetime.dt.date
+        # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date]
+        # records_afternoon.index = records_ini.index
+        
+        # stations_for_iter = stations(path_exp)
+        # for STNID,station in stations_iterator(stations_for_iter):
+        #     records_current_station_index = \
+        #             (records_ini.index.get_level_values('STNID') == STNID)
+        #     file_current_station_end_mod = STNID
+        # 
+        #     with \
+        #     open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \
+        #     open(path_exp+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \
+        #     open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon:
+        #         for (STNID,index),record_ini in records_iterator(records_ini):
+        #             c4gli_ini = get_record_yaml(file_station_ini, 
+        #                                         record_ini.index_start, 
+        #                                         record_ini.index_end,
+        #                                         mode='ini')
+        #             #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime)
+        # 
+        #             record_end_mod = records_end_mod.loc[(STNID,index)]
+        #             c4gl_end_mod = get_record_yaml(file_station_end_mod, 
+        #                                         record_end_mod.index_start, 
+        #                                         record_end_mod.index_end,
+        #                                         mode='mod')
+        #             record_afternoon = records_afternoon.loc[(STNID,index)]
+        #             c4gl_afternoon = get_record_yaml(file_station_afternoon, 
+        #                                         record_afternoon.index_start, 
+        #                                         record_afternoon.index_end,
+        #                                         mode='ini')
+    
 
+if __name__ == '__main__':
+    #execute(**vars(args))
+    execute()
diff --git a/setup.py b/setup.py
index 524bd21..9dc7e6b 100644
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,12 @@
 from distutils.core import setup
+from setuptools import find_packages
 
 
 # I followed this tutorial to have both the git repository matched with the pip
 # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56
 setup(
         name='class4gl',
-        version='0.1.2',
+        version='0.1.18',
         license='gpl-3.0',        # https://help.github.com/articles/licensing-a-repository
         description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description
         author = 'Hendrik Wouters',                        # Type in your name
@@ -15,8 +16,9 @@
         # I explain this later on
         keywords = ['atmospheric boundary layer', 'weather balloons',
                     'land--atmosphere interactions'],   # Keywords
-        packages=['class4gl'],
-        # packages=find_packages(),
+        # packages=['class4gl'],
+        install_package_data = True,
+        packages=find_packages("."),
         install_requires=['beautifulsoup4','pyyaml','pysolar','basemap','xarray'],
         # long_description=open('README.md').read(),
         classifiers=[
diff --git a/test.png b/test.png
deleted file mode 100644
index cb8d3fbb6dbfa33ca9cb45b795db2ce0becd8795..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 92416
zcmeFZcT|+u+b;Yd5fY`u7zC*rQBkA7|Eb3B%0u>}T(L-{rcld;ejm
zug$rQe;bBjoI0m|Ifr5FZ5Z~=i*MQBou{Jjs^PzHa6ju9e+#d`Z|#4F-@m(Z$_$5L
z!mrUk7WX!9b$C%;{wBacg7zP6qYn47KF6;}^0SEpb1_&9jtoIKq3$VwfNl9kwV
z*~{ySvb6L+ACU6!bd;9gPJfGGdoZ0}jvHS~oE!*vVCfJiw&7voZX$Yb`+)=ByxqFx
zz)exjQu4sTn`~~!UoK^Q`c}EU>KF2KzR=X!ODtN7;T`3bXP$B>w;w!wW?xDEiI>8hdmd*zcJt+=H@
z`CNFhz|+7hWN%(c|JJ{L#x(c0X|n(IbI9dX*1vwz{DIT!%im8uxz6?FN1dZ>U#_s{
z4>sj5Z;HPA`gKwMu`h4l`-N5L%a2k2M=u((cLKjbV#dYC$Cu!DNm*Q&j>539$9f40
zl$4F}l)2>PO0kY~J>8(7poxhIS+~BTL|m&9eKr!Gn55zxQ*j1HmHqOwCohIkK7X#|
z=B=9_Z4o!xk6{a)E`+KW@x0QXhE}_Y1wm^|T;!CXrC!yw$wGQBDR}cw8uSK(5o8lf
zjXYx*ht8D1*gTaaz4`q=8y|`n+PwPB!n@0)!Y!G&{N65-61g!JIoGDe&8g`P-!c}5
zX}y(LS6BDktT0`MdRi0d%ZsP?kTSMNc~8&9I4qhOsM-~X|6eO
zG-xKky2OpFV3@WA!wel@A^1Fn{YLI{kzRJf{AY4C#Y&S>7`-VDwC-FtoL&9hmY$l8
z#pl*F>P<0{P11OJ6WsEE?@*BMrIvV;W5EOE-t-C68s#Nnn4ub+Sw--ctZ5}19l6fjhoQ($^B&mNpkQ=NOe
zFI34CtlPNcT-F2Tsx{Nac6zdq)VkX-@m<<*{avAi(CxgtM$hM2^g1r0?_S&duIlWc
zJN>4Ld$!^7&@8(u4qmf^2RC|DG1n+wQCzn-Oz<3^23SZ{-gekmuN<$HZO1~ccWx|A
zJw17+5v`wX4Ls3^Vhvp{IdHDQm0TjZHbt~X&l_!^t~6YCM;CS^i8wh&ePZ`+jo?7r=dk?@gH?Q|n_0P4?`0(WAFf#&n28cKNR{)YOLkfu;3%b$%)m5F+X};n{QF1CJe1#nyy+Ngn8hqkK~s>EW#Mm#&wyP
zI@B&sy$I}L;XA;AP*zKiRU
zJ3{uR1+fP8Op#OkC$bWK`kefR)&^rMW~u{-E%TyI?(VI=)$>V$zWrDI=#<#CDl07H
z@pQe$##LKYpUL-$exq>%RvE>`#WKQ2e!V%WguTSXAD%Az6fW+$@-d96GDL3`PN3?{
zjXjMVt(o_zT@Rt{I@;2dOB{?}u9^wztqux^?J+C7
zq^G|6F7ndsr#C^CKVewS@+yr$&Cp;tZQ9866?s3?qw?}Rb>80IRoR?a$h4EQb7Pps
z`p5pj`IZ6Y>Fk`m#{M$gJb7rNL0P4=Zha5tTUDg*
z+_|F@)m!LD?Z=T?{t!!kM66!S?JdJOp3lAba0a%yS-F?f$M@FjpLorx1Iu1;VQmKP
z6;mayy#;A9zDvE1%a?Oat%HhX@(6^PtCkq%WuU98+p0`^-hZ`TD3rcq-^mfR89xK`
z&Y1)SNxQm;2DEVKg7jD1!S`ZfVniFL8caIZ%qu?wt}$tF+8n#52$k=#;oTCXyT`?0-;X{R6woQIc;~R#txy#UFNEKhWb^ET7TLSKv1&AdjyGmSASs5pg%dzt{O*M#<-A^0}4oKHa=pLV~
z4dW&~IdOX%Zi-;P1xxjC9VnLyTGlr6vbVAA0UR_Bf1dgvo`yyjSEm
zKN|RHXUf6mBnTw*QN^0F`t4?eli(~{z*d&H=3#a2Q$vEEUgv^mtzcUv)-a|713$mA
zw0g?mF?gySIqi*>xX5d~9ky?<`4E^f#+V5g&wC4wgIovhqX`}-p|3BGn9-`fHpzwC
zzDr{49IJ4u7_z?~AZElAyY%cD)mK~W9ANug<-~lRMy#YrR^@>q-}PDkSYKTPK`Yo&
z1hdHufZaErG!vL1;|o7Ie){u|fs2mQ{bfR<{nbKa2DMEv7>=7f;^)1t>9@lM8Fwel
z|GCrP$2xbOtyr5zAAaR(f9X{vSnGhIZU~3lmE6I{G*$Y|`DS%-@
z#U82B7Ns&wh)?ZLi+4$0G`^gga_!R}1LbZ9F>JuMmQ(S^I%*0&c!EZ&sM3eM5vIiH
zm8wA}LkxO7M`_wVj3#-vlBYef`$C0}n^n+a*Xu-c{1$8>a*1={2Df^tHtndQBDH#L
zTD-Z6G3D{sHk;1MNN$|y%Gr*?Z&2}rPm5tk8}mg9D#_wTo5!!ZVM&k(*_^tJ-@2M#
zFHD}27xo`}{Q6qsgF|tuex8JhEtt(RvhLva3=x5LV{z$A}*K$H?vpY>mOr|lb%w8ZH}4Bvu{iAYLH
zqCIQt&SWglyt}Be)=Szpyj$+l!b*!dhLxu?-bjGYqrICVM~`T13~Ow&n`vNS=%Fjl
zz(944jErpa#Ew@4u~v!cUBfWtp8o!RTleNiM>c(~Jo5Io(>FxcZ6ax4_bz=JcO54>
zv(&6y#@o|E^VyW!hLEK#^x;2w!{VcGOzd%D3VPcLVX64Zyz7I};xW@@)MS)Mgu+^2
zAsfW2!*#D$Bt86eBfF9Ng|KV(Xg*o+?@KAR1^iHek}s}T))&~}A=joWJe-}KVe`!#
z?Bp3*A648w>{~f36sO|rPH<+$QgPr{L$A8Ik^0U;H1ORv3<+X$ibhSnH;Y#&M8+Fd
zYvT25pXgMzs&FAJr1cK0Tl4|F!_8F?$OkUkK{_Oqj1L}9i(M*LK$EaBeLO+IZDNTZ
z#jdBnF3S6|yG%OSB~t39vSMJ)K3OT?O~?%9;Jerf@Z
z+FCLz!kiVM8f=UhtUiOwF1drXhtBScRBfMCzjXF*P$*$hlnbF0JtJgr@op$krNb)|+aI?u2oRB;(iX@sovp81}wG2{-yA4<3Ikf{fA%C~AK`BgTaZS(~IEHA*3@XXA(k()mjZKnww^`^cJb>p+)v}L&
z|4swd`|~7?8X4b){jnpEw@!7e!j%i*{ruW~4aiHp#@`FLm+r;SE8+{uZiCURY`>vN
z==&?*NYT(JE*?-}R6+9KbouhZ=I?0Wrd%MA*{$a9WjkH79?Oh^Y+`2ikx9;ZiUD)M
zV7u=g<)!y_^KYbs^M@wB!BU^fkyPb|>m#L`E%A&+l$(~<+m(0>B^w0KMbkfrX(U4C
z2CK@oCUN0KD#>N*WdmfxxyWws%&?0DzL1$7H<<))X$dd&rTj>q(HKed7>FtZ$i{4g
zArYoGnpQ^*M{1};mh6&{qU`PLUQHk65K#}*bY{aoa4s$`(nrKJdTlofQy8urE0N4&
zkf|&efd5tf{F1e7JUwA9%OL15yWkbgyz12nQIAUIQp!dL{;;45guFBHN1Ze$*l#J|
zZ+v58=z`zA4E8|>_b2Q6{)JT+#6#-F8pNQo**bwj+uBF`}aueRf?mS8`A!$*{f{hqp;nOY6R8&8tj^5ksk5xcq(
z2%XaoYC1S4B1PL>Ut74LhC!~OfDuEmwl2M58zW(c(iCp{G7PJ1fOOE4>_()xc?6ipM#gF2gN3hs
zxDFd|W-MAocgw;}WDMum?x}$XYfe%jl4(UX;g+y<`Hsz8t5@-)Ci!0KD{@MjR&nny
z4Wu_&w?^&PEickQRszn1-r^A)jnaScTg}4D32^;tNODJEIL(Plxc2t;C~c9Tq8E|N
zBO%w%b@cW1U7UtQ4K`GIR@Nbnl~>%JO_IbB$Prqh07-CPh666KA`1
z`0q;iaNS~$b;@?B(o{E9cna6S%CZO^7Og+bOaCgG4UecY1rLgS{rD|5WkdbHD`Mja
zh?+mv@sGXKZ2wX(r~af0X)_vytv;&|->I)3y}3m6AmrpUp{)cotNA7E<=jG)!^StY^
zSHk!z?aN*n#IT0E%DGsOxWG~|`*vUC2pF=s?`}*}V-2mgU8Gt`ze8I+`Zzy)Tn9e>
z_;FNW;@{JWX>CY182KvS!qx1*){8*~A{L!ODmha|9aq2h1B-pVVu@Z_a
z4W3Z4ndruJXod}3n=gErekLvo>ZKPEU0`Bw=6_@tGvmT;v6;DFiK1XOmorBssJo`W
z%*OYBPa5|BFRbIs@cz#NDF64OXa@f8ocM^CTe^cJj<
z(o*J2X?%zZecJ%NtOu(+;*Dqfgyl)`58Kt(9#n9IWT#)}3K|3d)ZCt?3j^dR6-x;$
zs7;7p6hA+5j~ic#F8!2C?I<_F(tKJLOO+V0T1qDrJ_Z2wK~n;5=B&2R!!VKH(ic@Q
zj!%P3|G6-YGsEk2iWF;Vc3G&(mksxCfaqV7?}oxXRFjlp5#}}Zx%-hnmY_`aHm|gP
zRA*P017uW`gF7;}R0PL*)y@2XHJ+GG>uWH_40{h#EC#yz13rfGj(Tj2daUMhVTU14
z^#Z?b#4jLFI0NO3Pk^1>FlPFV@l(qhT`cunLzS_L-*)Vb3>CKq!X|CSdRMI>Ilc(DKg#!nVe>1yk5>|`>B98n4ed-jcPK&8WBNtV`pRrpZmyn-3oRs&lRamEk<2#caf7n#V~X=APRhA8
zQD(cR=44P3XFz<#NUoYdwg$L!owTG12-6ioXR?f)*OmucyWo0+%DK}7m&xaMTRlA>
zl!1Fc_x^khN!oZCYK$Zrq2*eMiD?%)c%L1Dx?A1t?Dv0V@n&84;aq-w5!1
z8Nqk|;Sj^!&nf_$9KU5bz?dJb^utM2uYBT~2T;X(X$a4R*?&0Wa_rkmCSyk0_q&(h
zAAKL};Tsc>Gi4!fOyYE4@-O}*u%zJIU@pYjl>#x&UM8tRS;bTlN=864Y0JvWO;reW
zLHH{dxry=c8WZ5Phh`u}QlTx8QFN_jJ$OELijTW3<19>w88w(2^2EHq0
zndVi>9U8NL1mmYX{iqSU&5GSEZQY?pF9UR`!Yj}e9}9*;fe+7|y@Nfd*Gvpx{|(pm
zLH6EI>|YZd$jvJA$mHUvtGoO3(hO=Is0$U7VBD^VT@8+Q@C5k?tn;i+TmzWV1M
zWrdddEUBwbsYXX&-93kbmMOiCDSBF3T6Gb-qglC*?a>zUHcmH4v6`McXKEU|X)sYR
zi4Fj#2`zP4WVBDN4o(^DI~nozu8>MNPG`a@sCdY_!h)aZLCTrhV1jj4tW&FKz6~q0
z!??vRQ{VUM!Rsp}47N`$-?o`QOs254+t91Px@i_BLTqy`ojtuTGRkxZw~Q&9<*gd`
zmF0*nc6L`^z0u#=+65@#ZWSNbnQfCl3nftop=P*s>(=WM;$302%pv9$zU-rIhe{_4
znmhsESYBCybZ$?NrLQINt{>EXp73w9PQarXARnI!l8#u?15)6qoYG9?yuXFHwTqjZ
zo}HatSre9R0Y-?d1$+_54+VmZ8$%h&%9~pX*c|$5@bJc8yq|W1prNty$?D&J4XNqZEcQ|
zD|Bdzk+7~-m9AO$Brl=z&7A1oLJD3gEHS5X6rH%eYrl>@Al&_UCL`2MEb+?EF;bQn
z?-qm{E~NQ>r&*lcEur1>myfU^*()7B{+FAGRG!FWA#jGwwP`vMg4c&ds5f`&kIij>
z&5ySSlQfutt!m5tKcCW|k3{1D@VT@!+3SXDX=y=^JO@mQ0zeizQEzVZQloUG8XlTj
z^-{_)?AA|^p{4pmB8O2PUvIV2OIB6t$S|0h_m-|&Xh+$$TVT767zMVKcNlp#0)Xt4
zjfD){=U422B(^#wF#CF{V^D$IT;$ZTTB!hV{LeA0Sf*8hAg1Y8+QY><4S4HY4d%QC
z@s9Rg!8lr8g*$;%;5QR6l(3oB1Mrau^+TmK4eHT-Ux$;_q3QS%FmsK@)+7~j9h6<}
z2nra#n!;SCZXZr?FF&NGrx#>}5UvAFESRx0`YMipjHA@ty(~>;1HRgqHhhVcoCKw3
zwcC213xTpk_L%8F<-5xNbRW%JU~I(CKff%%EuyXpp~y2fQiJlqXxp&4M^!Q5G(a6Y
z(eo@q8jF>mWKCizcJ9i4eGU8=Dz)LmFKLX
z6`e^^mIpMu7hos3pw)%-hLm-k>XkRrqX1!!cl$@GuU@td1R&FgwxY$QWKW>6V4vB)
z-nY>XXpYnZ22{$2A`%i33f;&hs`JfqWq?%RC=X390lZ6ZkF9>tp|6tLvDlqwY0f7C
zsOCVnBNTz5vjANdLKzbwqET%QKmo_rf3951Gq%uI;Lzl`I-WtjZN2_0@KVd=Gf>EH
z8wM!fn^uh6PI-Cx>wcgj+H>VGY=!mf3bz_XGP
z1p!6(W-V^D2M7r-n<^pqEkPt{0rQIkCxM{v01IDoiCgGZ;0}o{IRt_Mgii7bAT(1;
z{9xAWRz$_4N4)~{u#nSxo4`)dB0%|)S_v-Y7zA!qA)^QlA+vW%T`bA#9D;ZpdJ7!r
zZDHyU#FDQ)aU;h9Y@T@K^><1@8uE
znd+GgM@CK{3-I%M0qloG0}A>SA=i2^!ZgpKjLfLJD`d`Bv(TsV7b>YOYaG3p&iT!Z11xezS
z41>;|9{5K2*3QY^!tKKVd;8G5lYw!1eXTW7X+T*xmALDGS&DrvV0|UIR#oLw=e?rE
z3|hh5*h-Ft2uD#uPK)}oc-@g;XSl!wo7DsdDY(9Gvn5Y>(+X&hzln}MC5qj+!KNJa
zi4toq6)`jyL!9)fDZ^2B)d3aIyfj5F1O(4(+(~~c9)6(`!
zYd^dd^tTqE^7CuW*l*fg3Iq-R7?BFeyH82Q!{!f;EPLueZACYVL^rz5E_^V88@RNA
zX+?*|2h9nJgekK#Dn65MfznIpFwm&dB_Q|DQwOx{rRCdO%b%%`ekPY*wS(kg8_wID
zc&VmZDtpU=@^b5}&M9hx$}+1@a2w?UI2Ikg94;sDd2M0MJ10!)%xh?>}zB
zfHg@0p+<;y@}WHf;$I=wI@r&48ii_z9!uAnfiv=awUxK%jx|JOi)$Z4h1_SCFClU{
z9^yuxo2uu?TMI;jX7gY+GXHEfa7ndSfb#$98F
z3i1O8Ngip4RtZW1h(rcx9E14`{Az|D6noL4fR0%sG}7*_(gEOio59USA>s|9@VSrg
zHP#9>ns`ht-YGI`UkbaufAP3!Ezg{oSDVpAJ5Okhuu{ckoogWpkcuXn9~?3&`1MWr
zh7^0MX3UMpX)1nhdlpR;9#n)pJ{j}$qOmjEcrKumg|%{|NTtz7>#&l#T3XaB`_oaU
zPl?j7T}
zl$%H(tfTApVNkr7mAINg@F*eMsl$4YLgbZH&G~LjWT%+^?xrDYCR5z0>Ou_FOq$=Blz;kWx%(kMfE-VX>dEa
zd??r{QRcCXskODn_(pcL2?;|`9GLcf{hindf}dMeW-5iNxY{&!NuB8?82C&+I$!E$5!G4L-27QJ
zRx~bg(JaQU>FiH0@9k}3p&Ss{vXCgjzR>-8UAx=S(`UWwW>LwSj3%Tex0uxeNBukt
z!blC+=K%h(BE-FVwklCYb%0lnf@FaT{7lnr>FP&+QT`4<j0#Rf5bF}?T)
z9{4pfy-uQ-OEvPZJkxz06
z#OTzJ4pH<)X>v-^BQNcjQ$kr%1&+4$<^pp>8HqL!ZtD>Q3U6@st}n-!%WF;N<@Ya+
zKQ^BD6r9zf>!-`y)j>KiN)c_y;~o>1w>v3pRZ;SS1bo_kN>eQN6$ebH!PBrRTS;!o
zTcL7snHfxdC0FP?U=->GrM&$#hzLq%H5pJ-9g_LpUTG|DaP)di%vGI=i1v0_DdOPj
z$-{miJBTyM5kBOqvHl^hY+g?(E%9t#=NtA94^DR_DUqW|AseaJxnwkh@z3r@
z&3cXx9jof@d{FTXX7erIwzzz9?-k4XER&qZdB?8l)&)#+>v)htX>o1y4en~c0TmTj
z>td;_l9lc@nNrz|K(>XjydY5!z-+$Q5R>-T>vsU(|9Xkea(IQX6xwQ)iH`-$Q)_1X
zpS7N8m?`*aeywH?7IN6MB`&|F;G>FTb91)00ng}YtD3CuVDGHbGwfws7g^AIG|mw~
z_gJ3MyGoaA-K!SwI6kcbLby5G1ds?)(h)h|lBPoxSq<^_d#uU>9*qy|--NRq{5Cb*
zDV3Gu-XA-gERPT6OnnME<80LUXJvoQ8_4dqbS3Uy6BpnTQIK*XGCzMz$XO+A!^Q%t
zTG;lTbk$jFKJx3IF(uw%+(Z4gYoBe0509!5`W}>*Si5;U>W5ta@DEN40-m<*(PA~T
zGeRmp&!EzKk(-{jzgE%1->&Al$XGl0MVo@Xm?nL)@q$X7!71ABJ3`%nn%}Lye#fUR
zubJ&-GK{m$t|^##Aro@z1Bf(q`y1%tFk!(Axi-Hq(Uh+fC04Uod$cvh`u%xG&L9hZ
zCg{Loyq|dGe+!&{+CA2mfq?Qv2tfb=u~|S82_DNfFWvZb%j2+^tS1}9(NNw-%PrW0
zJN-?A3*O)mu@Fx`+%!f|+rO6ky=d7)VQXCflfUMgy7Be(C+V(gsuWG3e
zeu07LZw};M0L%L~lKp?t5!jx;84K*c76L}uLje)u3<3aN2fzklp0fWL4&R8s9WYQ~?7_2QhEBpj@u;qUuC2QG^e@;k5@1I{>A&D}0LQ4yL6t5DD+-rC
zeC|J1Q_sjK?(h@BbH9&UNfTQD7xY;DdH<*}eH)f-3MB*=u!}@;@ETA&gZ&ndat5l^
zz|uH=adLR4>T>?{+N2SniRHkt91ICw?a@f
zrC%LZ9w4{_#IX9Bmo4=tY-~4OAPVWjAhx=$1rO~6FptB#qQK6PZ8fos78D;TMn6^I
zwl8{8xi%n$E!FdKs+J-EMSuYxfx>adov5gwz;)#WCM17HAhwRNEDiW^o6DS{C{nou
zuT4?cgfQiNah3D&mNOlb>hBIbG(CbLtP%zc{FYu6U?0!&^4tk)n)@djGp?*@f%*eD
zsD{fXD
zet6oAIY?G2G(HSvu+{h?P4AJ+UQp^CYOSJ3`caB|!o4{KVxG;PG3Qs1AQYJs1W&q=
zyB%GMMTH8_>o!^8xe}SzPaAe*li7ukUP1|q!6;E6c`dkrfA06{&~qcxXta5dxn|T}
zMX)d+y23=QvMT!8qgNgVLy5zij5K
z-DZvm)j(BqneKIAgkm<%l^bXhuhFyqOzcy)4x)%#H{8s#R{=XS
zc^qG8iNoQ_bl>LyrJkcJe;4qfj6~1V1nU{3v;(^F{G$Xlw~N86&2n$br79#^hU>ho
zdSWD%C#a+M|Z;4w*>p@^Lw;pb#?^z0JPfLuiJ!apbUzAFGY#I;1Pb{%@Nqk1%8&IB>aQW=O>(Wrq>smjjHZP{3xv7VL<
z*><$EP_RpTgRPeffiz2b=SXp~lA4Q9Ig*1n=2LvX9asBzgjC&+G2OS=P>N)pim>W73G;<}hZ}0EJ4S4rVE%wSGr7Lc;z6Sgdhp_lDkX_&6
z|5~H7>ar$D(LI<`H5AzS`ca?*k&CtTF^lX04l?)ws8&kufM4uyj+f8r7*=P}NxXGv
zgn&r+Td>2>peFhYiM6qDO6|znuNyOhIt2Ra5C#h3z<|m@Nn-ywKK#N{k*W5pDCYo?
z>ySb)K-Ngnx?IZjpD^0Xmw*BjfR~!jj|`FkMoI_410zKp`m|Ypq}lHsm@Ghgd>vWPHv|HFHr3^yb3XTQ1Qqo4
z2ri+oMC&VO$yinSdlM#DFuvxm_5K53^#T^4rGSF8W#l?*=4wwNK!MB7El49HqYi(o
z@LmUHdO^=3hF!by&z+%qCL+xS67u%u+i6FQ05JD946-UKLIr9*Sov^+d%AlT8Ysjg
z6y5tD0VKIxE(JjPFJI9_Kd00}bL)+{SaR|oMb@q%by%4P32e$bQQ$t28Wz#iQT717lmS7t
zy6$~=f2Ih?tOnHC5Lx>-I+`ir{!u?_1fYzTBq-h#s)1HPDLR!P2aEf9B`wx!qI(u*
zllFaQ{T+cG8jvg5abK;V@zXZ1C{g6k$@m(Ypy%M5~HgxR6c
z79pqvEg+!kf4ziSKFqyKhVYB?v_3ScD9~BIJ$;RJiD$R^=T?L7$jkr0NEd~8V=>P8oSbu
zgt{b`c(ni@+qJ%8mx@JxG~>3a&~V^>{yRwI!fu2PkzrtK|9mu
zIma@qv$)Xb(3|Dli#~-0x@^k<<^q5&!B&Frl$TEut!LU}Cb0!YPA|4jK*nS}1}mRi
zl*u*-5M#@i2Lo_Gc=(R5pyWkH9k`UW@?TOL&m8t;nKS{e^?=@-$%G5wSrn=3dr!x(
z9&Ryg-hJbUb%TP8jD6XJ{QKY;ZXV9@^`IQ~iWdeZ(8LjJcHReDHFtyUCJVg-`{BC-
ztDBJjb{FUi$gNfPsMhP00w74=b{&$$8eMtyjg?`)57*iI>X7i<^ChVF{&73TyI|n`
zbZSb9ZS4krXUEvEdm#LY!mZnEJAUK+zHK+(
zB@&%AuAKV1`DL?J04Y$NG|WmPcK6l9#!Xob6%SSQ@OKoiKNVpdtO3>F>cET#prYb2
z0J~3uXq(#)@bv{fho$ezU
zDWsx+r^n)z5*@Z+ZQD@{3P#(Ff#detHj8=`{uXSCHXwaLB~>d!*H
zcoRhBS!0f;Y&Z?B)G3}XzLctvD_tIP);ZXxzbQup=u8r*Ou{MzQyamq&Vz|MI$ePR
zSS|i!OQc3}T&-74tL1${i1U5Q1Swf~hVLSDmy!C486w3QVt$MrYYUU5R64C{e}kKr
z>BSZ^=rnUu`Sg2uKH$Uo=b;l!&zdN)RIE
z=bEIcrZuZ3D6I$&A_0H5-ZHeVx5I%|{_^Q}TO-b~R^3E2fBKnO^WW~DjA3#9KG7aNSb`q!M=uA?DYC(k$$qp
zk?!{PPfDDfs+?ZiaEQW13_C#MZ9H`Cm>XQKCN}bH3HbELF3Ot*V&kDl
zt*x!$-f2r(9gn(nqMXcP!#rJg>@P@2uUew69|_~O5_`GTLu9E0dLCFkJ!mB77wpnR{vEbn!Qk}qku6S-O+1IA6NtW
zu&zhhs3U!UP~au@R73u;$r2R3>jgL~?2x(@8b9I)K$0QG4z@PQ9@OTW@M`m;`=HP)
z72DRC1!I6sx5G7#b
zZ%eT{pCg*z?YiUX7eY?kOLAm>l)UteifbHPymH4VAj^uQE>JJPaQ5sjEE=@(V
zF>pREXKiHcl};u+ek)BT8k~-_^Qj+EFVqSUvGe9jTQzBYIla?bs>d$fdlUsAJd9o)g*0T1_#W8&uVAtj}x>+2L||uIZET
zzNYj0Z}^Q_U%En$JnGVOcTz&>M|)AB-1_1HEa
z(mGvb+MzRL+)qkZ4!^P;nl9dU*`3d&B$i(ueQZ|Z?C|D&rJpvmdL=KLZN+>@h9j|{
zg8+`U{&3xBi|Wza6+-pC%4#apejENCI-ca@*mJm1N2l)5kYHkhx|&&e(YCED98qk_
znnI6c)8LKCxx^dbM7(}G=*QD
z)gK9H{u3t7NW^a6?Ds?W=3bV#WRfJliwKnZ3f0U4c6aMtk(0_sKO_XoDk|dL5Bta+
zr61FT$PSjpx~!PZkI_buEt6U~ii6rmycA4=m#Yz~byeHN+3^5traa(p90_5h*ds&@BKwOm+0+AYhcpW*(};(Sln0kcQkg&9Y9#bLKUDq=cJdHC;j!UuC6nqc
zA*+itd6$CJNWt08iLQq3Tpl~OLPy6pR1NFU#_;^b7q#J1EH=c;skPtx0epg#GEbVI
ztMcse4M>2xi%Lq+ZeXtr^6M`xYE97q_4NxCi+>Rg2Bg-y$0>SPz1y#8*<0df-bnF$
z=U})Wr2=SleDs8m=cG4B*C+R5Jr#N>o~5NZe4Cvm0@);Ni8g6YulJlt9QJNMpoy7#
z5t`_d2XXp|E@tzn^yYtC;CFPFCDxe$B+D^qL=Z3M$c%?*1hd9#S@-)<-|2qT(s}~0
z-ANWI`()O|26Dk6%#f2nB2`gRVOS&3^mXt_(QA1{MMub#;FGWg*xVrBGAppxc?VD@
zCjXKS04#?Eo?d1lh59AKc~}-ke)!+(ocmdYZvBarSD@kE1^UO^B_$;fnkeD<=gJa+
z3p>u!m0?XV8{W{vWKJK`u#pLWyE(9&(-hB(F;bG{5w=8FxpTsbd1JC6>S*zffh(GS
z$;$mBx*Evd7p~!Q9md<6m1in{JDhE>{Sv8lUsEhEh(QgvwsP}D{4`hMx((~hK4(tO
zyf$?84fKe4Z>+=8uYUNKvHz`q$M)#7U+`ivlt%K&qT*u8liN~_kgHhdyMx;+TkJNE
z7KP=6lFTR^(HdF5I$&x}skIm~`Ql=EkZ~9gXaEuBSnM!tf{UJ^1+RHa8jQJA
zup~k^N^w1%mMf{MQX^Lc;mXx?%r$I%i)a!LqeLm5N&*V5WBI|L6@!+WQ;v=q
z2mISfmuYDYP`r###yfH>2Vam~nqtB!OV1qH
zTs;WmOp&#H>_HMb@_TFGcouVXi_
zcuqE|(!gJT=Tua6K2nq+bQ^j!;!&Nq)wr9uF*iR5Elw%H6!ERb=R-1*Cyp$D2yU0*
z+qQc*_;mQgrM?6Z%=!BNO9bJ#0PtZ?P}}44s5we&;}HTbBdNjjAe6bME46#B>|_+V
zX5~$hUUL784>Bg`a?CN@fh~9(EBN!2Hy{SwW9u*TgBNyBGPO^POr4N(ywsa*!efUz
zjsZYtFRGzxynK7EY4;UqJ~|O=7>%0ZSQxHLP{|=ct=75?<0u-HOi-j&fkMC}GqZfD
zuORJL`nX51oEtFVN4i0uhKQ(e6PBQNni&Q#eRQNBK-w!mxLN^lwM9fCYN_n?s9~TI
zLl80tCs)DvjI(7Htw^7Z3@MI(n01R{F_kDNYLho{z{av-QhrzA@J4E{>;CYWl
z#x9gm)9n;M%(=ES_0UcyUeQAsO~g4z$0&O+yfRow|K}-3o!akColdi*SFybHoaCy1
zc3ohMe%+2SCii~GcZXjEXgUka$ivWK%T>CRM8cc#-inmamIvGOmh3tI?A9$IUf$`M
z3;k_v_W)rfW*09yIJPh-RL`=+pq{*3XSWlVPZv~?U9JHtM^)|omecESAkeQ3Fst*R
z5&H@Ba;M5H1}RGcGgZE*3pNl`i6m%_uQJEEL+xX|@|t%JIvDq$2PXi{*?E@!zFs{B
zd~qr5m#TxX?|?s$EMiLyhGPrtp=A}^v&{p@u2vI8Wa1mjWs2;TY+g*vUXajWUDRYv
z+J+Py_d$WcacvF61tZYR>w|g(H)Y$1l{y|Yx5#AFFk?|lYgXn-ks<~_-{$;BZu~LD
z{wrFDJ_8M&_e&hd!=#k;i=Z@uPTljH?#fc0ev|K>Xp4a2c?o7-{*kGH8Z%%v7XPpb
zdAz6T_#!yrqtM6RH!s4b9mC3&{kMN=d1!T=KlOU6)ExmuDG*IHLibc6(!N-oXb&BIPQ
zpLh(lmv6RegET<`hz5aj{1}$#*?34N3XV>=1V=VN`l+$8g8Jl4a@jN8G6I(hd78d>
zd(#M^poMpvIjq?(nZ~!CGJ$Anrvf)vYazv7Jr`O3$&y0tQSo1zT)$czzGVz#Ry=i)
zA}NKf%Hjx~&_DL^CQ?7q21N%X%?dQXP!#E$Q}(L$&gO=bJ!;gTrOoQi3exRB{WtBI4EL{T0zKA1
zv1-85B&xS~_NwV1wIYb3#E{GyAg%H`R2?=b0%2c%A8s?TDenVeL)Pb28P)HBS3vrT
z`$*t+2lcKaMdez*BTP44gGpr?M_A%o((VDXD*r`y
zW5B!gzs|%hs2;SQtYsMc4)SIxq60AA%mGGoEL90G_y`z$3sBB*Iz~V%eHn+
z$5Y5B@id_hMgkck3$h*_JJ3Y7hfv3>r`(7G3Lk=?@p%)6t3lxAz>Oj9YbFjx$GwF!I8LrkJ5ZQ@^41O(J
zGw;?t*`8Vw1g)0MP-%CuxCH=kPi5CwIi9Yc)JH{%)aMrlyGnwPiUysikq71!?AI0v
zm_7g7zpLMa&`20C`Mj{Tu^zfw6G6|&aIb(uIGh5|*Ktx$P|84=aVH25E1DKvdh&!8
z?6zQ?o9g9F5JL;ZZfZUtO-+0HbRUXafXDlmxhQ|5rq9O5rf
zQ0pdEchQEYK;g*K1i#Kq=N6zE7xn|STs(*`
zRqBNUT&)6U--tG#cK<|(>3%&GgI#uO7r&bTvDk;rhjserVYy?kI7YU7AsODh=Y>#j
zMFem#NIV8<3y+hC!Hm^#3TP*}R*!&e`}jZR4#dEzzz?hO{(Gu(tqLGA)4_OQM6`m5+JPQ!!4**uQJkyCTk7=TKo?rC*Sse<8a0(>Z9L~wp
zWP}z&cSaD~A|KXOB-IcvIwd5e)?ExaK})Vk$`&vda5^8p`slD+rGlLySW_;mVI
zlnV~1$lm^C%a3o=+IGtV(8<$ePIRBj2gQnTJd#|&L+C|S0=Mi3J5&}>SeBNnKzNx-
zkf75?F=y+oz7Nvt3d#@I%Pq?)&0|S9`w{D1{*4W&OkZ9$67QwG;k7;kY4V#3^XYnV
zG90)&>8{F#vJ(43CA8H9B}e
z=(bd9Yp!L5f>B2IvO#`fVd2;hAp@XMetNdbk?`^84+Z0z4KE_TDG*$AFA~J@RTl4l
zHKPRjlM|+fCbGPXb^H5w|9~8LBZavlu~vF@SQdI6tcXF&6+@Q!>nQKl#6g4i>y!B-Sb9Ame}#1*-?)Qfz13UvSIti;S_4NPGL+s
zGFs=02|of+yTM|_aN2F{WQnw|%0XB0}vOuh<;E9{%$;;Y|F^#KgU5+a2M
zg|snA+B`*zNvTTBZbhu{Y^*WyBd$gO&kMR{Ct!3hNmW3
z-$3@<&IUM<=QMeTws8cimLY6xHTk9z;uz0GbMyIHkqsF*yN4V48f76;3wplY8wVZn
zC?N*EiyG&c+<;OcaBjkDNOq18$+aX$=w;hHMr^14vAw9HCTe6;KM#S8g1QG$n|gP5
z49Iv!VZw6XFT-|j0E}4fDAUH~Eo_Ale)%@tDwJO~{-|-nh6Mhd)l|IK1sDRJ*c19u
zF<|HWt((#f$$+zo*Tb39CsV$w>Cr}+?fJ*
zOE~mQprg0fEI+R^_%eT?vW~1IZO-uS)YM+=;5`X;L-L55jj6p-$o=t0s*k+mVx~k=
z5?kE~y}*XvA$U*0v1`KqnIT)Q!#Qx3t>7_|p#5b(+Bm2|03A%mn8u-u3<)xjG#b>$
zSK4qJ8O((gbizX5$L-c{k#bypX3-1Kh|zXk=@xJsiRZG8j`@YWGABv_Wjf9rpm<*du7Ag4S0iS})cRRs4!t7uD
zwlxlloO>;1Qn7^qOZ{gNg<9ZHi3iItx;MdbPcj@^f(~rZLAqMg#G8ox(v6YGdJmf>
zk_3+o`uE*R$#{S({T1=S?e%52(7)kI{*Xg{kH>ykEwWjfR4st~;ypUy3w4*KhTAOk
zbzKd9c8_v$W9Cuc$MsZyXL-e$yr1Tmp2USgrTGw5nnH2VR>eK8%1Sw%#!+|qSZkB#
zKBg}RW4BWobhL
zaQi=4&OhS8Kh@e;wh@Da-tjEBZ>$R~_l=VlRAbZPG}gC5ij>{u>^^9DViZbk?v?@*
zadAL!A&T)z3%C1WAp>&lo+Mfl1OYllT4VypsDx>{N=%fEY9_TmZ((7$D+c)&cscQG
z+v1-Ffc1+*M&Ws>zMQO;Cv~(M^5v{n|ct1Tfr)(v%}u
zXU_e=Gw#lPkEWGI7WwPX2)>(^7-53a2*&%qd_m#|q`hZYf8Vk|6lfKrzZM;9E51bLm7XNH(yA
ziN`(eCyIW3BL{_Q?u8y0_a&%5SU|G+^6efERxCoDoAO??A)H{H&`qBg?=CJXvImv?
zi3eM~hW`ab+XMhnafnX9`(FUS_Q~F@CdMhjRhRS-{Au!OJ>=EFpFEn8sXW5wS$!Dm`1~8%*VTmFyGMx8sjzPXsU1K@s1~~X=$c3F_O?HAnHEh~#x$zC=QxBdu8vIJZnC$A&FX!Z)h&}tbtV{`7Lk&BR(bdazv;cp4C#YKKcbHGs4M(g8qwClCBLKzUJAF~jjfaMRQS0EB)8<@@@!
zW&d8i?%%?{{OgvGvyDp4W9P&-2YSm0RFZ>7?M`-4gPa#9D&dgFHy3Ji2eoSF=*
zqDA(oV3}^Mwg=mVKy6a;$4>D&_3rnNpj}J64#6%h1#l#)Li~_+U!1ShY0qGyw_SAq
zYTr--bPOJXMhx%UtV4+}?R62TYK48n0~-T#9AnndW0
zIaB~sYLc-Bv$+s4x_sYfG-rrth22XtVQgSiGg_kjNRKDLPgO?COc
zSV%iuXO?oKmPFlxDzEBZy@-I@TmG;{JuYUDMS+IGfAa4GHVX@Jk;&r5rXKZD*pkF@
zXZiKy+GFW6h932euhv_`-;O1vor~I#bI3BzzD>2RQq#aDEY$=B!e0Om^h9^;v4xZ`
zqb$cR7_JL_ud4R*`hPXu5(LS3X61ULxR!!8zR|8ubX>zDwmzbvX~0>?J$IbJd^%lo
z10bxN(>z_=Ws!)8aHYOV2h*MbU@qW!4IqfxQ({wme(q(In8!gc)6-S`TjaIY9!=&O
zEFGzyUv!70l#cph?HK43KgJSS8jwdVriJbMLb<7!7!`FZUgF?d1yrQdONxXM@9A1R@z!*t}rN5xjV&1
zGeB>;xZS+0puP#TLoy`J@C8}8xvyN2)enZ5*K6D#0+
z2(X5^+n-34h&rDZ7uz)G;D?hUQM5=R##AjtmpXhqJtw2wO>f-xnd~+5>T%t3Cv@2b{A8
z;4B^Lb>WuHz`6$&CJU`>ANSapWh0v_LlWwg4(+Ti_H`!c&`+8U)}G{DjFm`7&h!~K$qKs(6kjGpg}ZRsDcPZ8s{pC17>
z-5Z%TT$BJzaIG#u5g<$!$|sk?(07^T0YIWSWt)r;U50hDjXud6B}h}*wE_TOr%y<3
zA%Y_U%E>&nCTSqVf#2pK@s+5gEu%rfc9bp5mYD+|8LZGqk5CcXqKK288SL{YSW`<6
zS2i|gG|c=L65X5=zTPk@ChKWQlY=NZGR0UU(X9x>saiK1Ug0fuLuo1eWos{V6`Jk1
zv3W{;PdOP()4LwGoMmCptg3tRYT;O2U0wGSL(GGY&vJYBj7G0>6k%kP7O`k-5zBt@
zViS?2-!GX#9jXUdX|1lwdO6t{D-Mnmj9grb(`60#M>R@VQR}d`ahV@>piOgJ?+%AL
zpYS7)A41L+c75)bmXZPhG|NY#gUyJ)=IPbwLXPHdclMV-eBvO%voKjeocfxjmM|>0
z!}RrmAHS;s3KaPKhoH_4D;DwTOU6!yhF7kL0&0QUUBZ((N`4wS{^OPB{C6z2qT8h(ET<@A>yp^e8zifY^dEsc8%r~AtYF$=(a)Q{_
zu_s2TOF)ALWIItu+R1izd5u_%wbFTLU8FmAu!h;R<#X$1Uo|>rmu&KnwWIMrSAa8(
z8ScvVO4vCVd9qTb!u_~-VuKZR4hms41Xd3
zB&DN_NM$WGg|5@i867|eBLCA(ymiB4F^-w7vYeN|^V3qgER(S${%Wdyoc)-8EnDW?7^!*Pv_kcVC^&
zrKw@JSfUUnIeZ#XaX}*Q&8RZ$C4-y$KS-~9xUD;BV3MzLPgTqvK!8@r?({d!XSYFC
z!AOMmoDi7|F$7BPu5p}naqJp0_=uq<3J2$$)7KdYt4|Lpju7mUh7=la7Sd}
z0p(4FV=~Qm0`V2IEekV_AIr-pP7kjMoRmKy)%_~E1L>KvMB&MxK``Zifdph^-9Nxo
z7X`K;)=PC8L5$}HNUUqN&J>|sF*1{j?Xj-`G%f2JHbwm`{8LSph*d%+2~rie4(J^r
zsEaKllY`9hGwzyj*wQ`ig`Bo{wEJVauUe$~1ISq|e*a$}-bjQ771Z_)zNl(KWZq*o
z?VGVzZKg-N5d~Z7^p(sIV7=;M1oLEk``<{+C_!SdpvR~i8yS6oOueOZ92HoEx>m~d
zFF{|Wzx=gE$d{za?(b)&ymN$7a}mO!{UKp)@`lNpf9FnC;5ibxm^TcsIm7N2hs+->?|3)1olmxCz9gOC4*f|zic{9l>+nXT
z`;G(ImI3=Y-X{}FQ+LhQWA90>B_~L+TMik&#qj9=&;kquy4ho!pigw1=w}!aJWp||
z!0NuJXB@-fDSZ?%TkTwfHxA*HRk=F}7ljt1-fzcJg6KsZ8>FS*s&_=}Rr<|+6s!Kn
ze7)Y0+KJ%>X~d4-Yi0|?sTe^RDow3Uo3H>Yt!zJ8D+B55$pTLEV<@C*+T7MF5`enS
z=Ao;B;;R)Gtf8QPjY&(AxKoiNm6i|w&yVa}l9(}8Gp3NOtHsVj^Rdcsfm~ugFNSx~
zQAgvpDoRP&MqV{BDFd<+uDN>0D%@?u+q27WfvOrs%A%$HWtVYJ&%}MWIz#6I;pFjW;uqiC>p39P
zak9?i+{R+Dde*u6llFps48Gfn!kX>w_uIR#o%-Nl`Y|*C?uP8sD8{xPdvj*#HqU(p
zMOh893o@|)gNKH&40gzvXfb0|VWZ#(^u)|tOY}c6M
z*xcZuej^jT{7rP20^2*MNE%o@eXTzX8phzG7kt1K9&~x|_@m(_$0B3ab>>5X48#6KXage1%G6*5XR-k$)!K1C9KNPP0ubb{29
zco+glyUn32A4ZzL?5*&Xlnbv2fBT(Sl|J&^nEu_>BO#UdWj=w-c}`@RE&UFYIXomN
zK4LxjRG^al@T3AH=$9KQHKcCUd3IDJl}}C6vig2?Lld64!7}cx?N|?4CNz7|cj7Vp
zBq~POC<;1}(6K(rHYgxcJ(d?`=vv+FNBu5;>Ydki%&J#Vz6eyCA^aDr?au<&w4j0?
zRU%!gv}Who0Xg@o7O_5&&K;XSj+8O)Q;j*7D1>Q%Dw`;RRnsIclc5o=@*s3{P(&Q!ygHqDD$)n$ZkIG-T0|EsX7iNPNh{Y3$0XtB58hk|5{bA
zeQFIo9eW2kx>$P%0E>g~@ZFO)dEtLjjB9f^iA4A^yC%p8J4hvvH5wwS8h`A9>)joB
z8@lGs!$tPni3@%JR$EtXeU0->Sf`5y|FJjcMEvAl>?t&>S`jn1>fnK#cisvI!Y8;d
zLEoq#8!vJE@%>MQ4~6rgEu3ATr-R!6iwM;E8U5ExiuL?7eMxo_!Hc}E;RWKZR0Kma
z5Lf~SjRy4*)e%mPv&kZLS#uuU4yF%J&Ac%9(8E`l5LtXcGRbxIlJ)dhwEDd(oaeFD
zZY~zH*#XgOgPbfNv`0~MG*xcepHSsusiQh8ArHO#RD1O-v&?9@hFyOm*#LjUnU!Gi
z-U%lT=k<3>B(dKR5-HW126l7>Ze!T7
zLwe*NJn6^gX06z0gAb37*>At<{z)Yky5X%1TRGgKf9rv$dAA-|1J;C-H6R8
zX?FY0IXO12n^%0J&Yg`NIBp_EaL|Es1O58lM_wv%#c@`Jo-1KjzTRdGXrMzQGe7e&u3FBwm`Ehj|o@r2Tq%z}c{Q{XX?ZNGe
z#u2t#{tNH+JM&`Wo6~?+Ew2>$1PgbCU39A1WpG&X^zc95)&YxCRk=5{FD%o&nc%+H
z0`l`2ch8K_Ii35ww=Nz3(fvR;|Jb&6`+`u}e7DK!-8ATTU-AXDUwYJ54v26c7<{yv
zO~q<#VP$nDQaCaBwQ0^#>7?Q`LqB9;zVvoyqU=cP`(W9?fBc00=??3*qu-I=W!7Ki
zgRF^+_wN5u#!GqWNBi@AKDrP2gtHv3o$D-%QLjN*GML`A%Im7Q{+}R*|CaIo8!@EG
zc&CS<*afmn5pXqsToH0cJZ*M4Wfc*GK3ujegJJ4_>UhV_S=HOtUc6nxxEVsbRnD-0
zVWHe6n6?T!-X*3?bU08TzWM1PUs2ICcRt1MN?vW8W{u63TQ81|e_=5Ha;*%ujhMZ76d9N)dikyWauV)UMS*#;fe;
z=4M(J=C7X=yF&T{RBTZyApTp(crl=Nbnd_gxQiTu0d#M!eJfEen)3Ni9NfMHTvqxC
zK6XVL`duIF>x=xPlCE;K1OiUl!|=d?#H~VUrkbO(i{GhA3b&_|Q6yThKA=L?{^E*+
zwr(dFRdwg$JP2}732|<&t0Mtqg!ht!|1LsI?>58`2Y>WWa?%X1|A|=#`KpFEYY9*A
z#60>D`SC_Z)gZ_kgG=|RL-i4^A1@zpF`Ho;J6_Y+gu%y9#tDnerzoWre*C<=2_LX!
zJSmxR0_6_Ui42387XQ>xP4$-)JrT*&i{Jm>b6J0?DgWIDN>O(=Kd~MYVO1jv0-K>c
zHrn!v{gUvJ;s~-b^j)fIitiJ{7|?CYi$D(q{fZU*{@(Efq5d?1HN@Za?`%H27h5f!
z#JQZ(SM&=~H_Qj&YL>TFO+_!7m|q@?y%aLvpQsw;3?txd8S%cx;lRY*Mm(crGb57V
z@H3K1YyS&yf9GF-G;C4QnTHrCFkE&*AP$TdHi;fBT!?vOa!8Y&BP6aJb2t>J5UHOb
zJE+8sBV`EqB@QS}
z$81FT+cuHf_JlX?&xeWS>lQEKiXDB7qa+WqcaSuiFvrPKC6a{g)8t7s=P#;ae3r%E
zFVA_~LN`Z@^TbW|l|&#MI&_F66SWi37HJGbx?_btGd_av&+SdWX=CmHZZV>>KHD5zf1N1{X)@p{p5ATOwzazf*F$nEJsF<
znIL_{(wiFL32l=~CiY6&!>|FviwJx8q&L(6(&QQFwoSzD>h@zf43hre_B?MPiE>TZ
z#g)wI+-}U0lUBP*;XX45y|1F1ruk{{1t^(eN9be(vz+@I1{c58ccfb6i`Hmz_Pqck
ze3FM#{hq!SsvLxL5KJt7>!}1r4cz(qDX+hOhvFo2Ch->yy#3?<4oahNM5
zSz&x6(Rai*IU^)|Z0YwuS86egcAm#BxGQinm!IS+@G0u+cRG!q$5KENh%p!4VaFs&
zZGbJi{6iN1aL1ojRWjuye@}|^2J#ivZhmR1CG^NDb#QQDesJNJB{lu)(w&W$QkH9_
z2)SKhQR0Zad&gJ)KEyL;U1Y4P2!D#yqE)BlHu5{r)LK|IoepnS>#CJbSPf_RdJ7-fI;d7A!yY?g^
zg2>X)^t2O)UQbsUKl=szW?g@A_To*VDmT*2kr?n`sJ3`{k#f@pA&Cb8OKt?nP2Q4o
z1tf6A(9jJzTDeC>Gg*nO&>EdSXb+zkRb|wc#H(sV%3T*zn@yg(Srxe<5?sq3dtNiEp0
zX^-+Jcpkez(=0sKt$;!FbSsiYPK
zV(S?FH1w#Od3hfe=E82S+gbWNGoTlUOmQmTEbv^WG}7piJf>anQiLQ}#j$4_?gyBj
z6l}6~GBtQ}kL~v%Q}OYRi+{)-S@bCe@f^-{);fEZ9k|FLJiYwZhj$6yLIR$-%Scd2
z!Jo@k@+x{C_U4lXEi}GA@%sx=d?N5lEG*#tW&_{6d4nsYgSrURfqpLfV5Ma)0Oe$1
z)4FkyPjNMd99kl^uGF6VgXawDYx1C`3cArxLvqJKmiPkw%aTw?(^EMp!CX3Qg>
z-6V7D3}o*GNH=MP1K9&$;%-@}0t>*+u6^|Ak%1oqX2}C@y@=j6oftWW&_8cSzxem>
zQ@R-3dVY!)?!z9x_d!doh9VGiD@eDd2Lh;*zq+Zs6eHSgFBM{wVZI&P&tw
z_X((|4|`0AjV_bqpj;_?(tgM;tKQMj>zm^SCapk;a$#ZNnd#qW9`Px{Y#C~1RCL}t
zOVF?I60c5K@7vJO%h~1$Ue5NGox9I?q9T-6ytBzPqljKvYSOP?vth
ze<&*}vpnGwR6lMIQ5UEjY`-m{J)r9@Atq&EW^5?3nTP(+|A6VskcAYJa=Gyfp
zhixy`se9ux-Zh)4HUghoc6Pjy(YJcSz;u77b$`9(yHrGPvXCe+O$kGlj7;;+{bw{Y
z>YIDhEgv;B2no0K+t0W3mA$iW?lwDiRK7r?uCleI%&z^P;g6k+7RUc6DuHUHygwGg
zwjr=;2|3f~8C5PZus0|mBfm(?(sK4|Qo{<{_h2^jNx29twb0CBYTV}T$
z6WJ!WEcGutZ$25rSNwza(l*+5jvQI>{fh&u{Iq>Db>SU@?DcndCgxG3X5Yn$SRr?i
zqm_<<$sOc(9`avSv#`JgW`#^MebI&rSw{rn&i>6ayI;0=u?h?d>>8n;MjKMZYNlHc
zdJ=!w5NcHg`P|Qc-)aL}>UB>fO@L&?GfkXo$+WZHhB$eGbEg1T*1>`qiQN-VI@ni9vq*7!e;gyN_zS
z5jtcD=J;Hs@~npV+ym1(!m0XWAi=RUJ&7JF7|Gg{Y9-IKkqyF!uy`pIsB#JJyi`Tr+G5;RQCj!V
z=r;Yb><(Y`v)Y02M4ahG_>q_*Iwyyjvl^SYj7p8NHw?}DUYDea{CQOpp3oVw{NqQWTlEeM*IuaTFkT
zF5+6)6}@wDA#~xNP)5*f{~XvS9ul`sx}wG+s3QeLa2w7IZYS+~PTQ;Rwhzh5l0~({
z4XLg>-U>f9Q#Q~?u4KUqkBImvB8pugjq{_!d1rL$ya8@e+6dmgolQ0#k6t3gjPy{{
zWMkPaZN=I?S!kEc7#0z6@0m$fYR)*xB}w_C`ZpyvH<{x7f``A)Pj9}tJ@M;amd=Y*
zme#V!6xVqSW^qW5hegH4R`fEiIPCjUZrCn#u-~4e`NYV(C1#iP9S;FSzKfU}e@ho8
zZcdlJ_-KEXzGf~K&CYSRru32Vg%?u1=OWC`yH!_L&(A3(xdl5rK>cxd+Q(Mx
zRAv!<(pXUszs$cp{k??qF&rcd-Rj$-TF1aV9%wnfN=
zHxP||Os5oV!8*ukh{f4l9c5^wR;CncI^P1r?v?2La6$tVAZgNUErqnh?WX87mWw@#
zjkKU#O*#;-RJmA|=-6wBb-V%&&G>8YoR)}P{_xn}lsavgbgj5|uhV=mXFTVqT5~};
zTf32fF*3jZ(^TSRc6lP9Gqi-IBBLy635(d}KvQa2NHbMC)l6^PRQ72TPM#VhB}zJ(
z-qy4M(~o4A43WHyNFId!lSnKkxSMVO$<_gk^1x;=&V)1vZbd(YLan6i^?)Ztd3hm^
zTmulUux^}q6rEbWF00&$-KA1w2`<*v#f$Zam)STP*J^H}BIjR8pkMOg?3FS#b6%Z7
z*@k8|mQ&K48Ey$HNks4Xkkb-6Cflsc&RRQTw~rvR7Nq%KAsW2kxI_~x0yDfIB+Z8t
zV-4tKBO;Y1`b@55OxQ`dE*P{9&?{
zoi2@t$=X2L@0opSg0pjIs?0Gf$4`)W`EK9{ZT8PJsfGz;x8HCXWh2c>M|(y(Jfj1W
z@fGAr-mA#7;wa&5l1XdvCu(e)F7wKnb6fCw3HJK3{W&VBkRLv
zowBoCL`bE9AV-;i&Yp-EPozQb{C*ffH75oL?WW(+0HQ8$#%Syt1|HT*IyZjGB5Q8x
zFuv(mQ;+88G(`gKx|CvYdAUADcfIeGq4<+yXm=+^#>YxFkq-gKv*f@$4S`-Lz*Wy8
zzi74K>Pn7sw}k?=&>zC#LHtb=qZW28-w!{5ve=Z<<+>J1Un9yRp$d~3KDv!mw8ZB?6i4E=s01aJCH5QgS
z`kV5(*o|2nhxr}7zn%_WNmTl014w)939fxNu9H>@WZCRXLu1K2;#NwYrcn0oAgU7!
za%rCnyV2Xq=aQj`%RWTR9}WL0yCgn@Xtq4`^VPvh%r$Nx*N`z6*N>7~b|SSb55KKa
zUf-QclCih(z%DFP?+DgTa{nAc1-zg{{z?ElDPp(uL1+w`eJGeNpRgh)Vo+-`V#r1F
z|2pa1*K~!JL)
zMfv?IXoYZ2c{l22!r78$wT%$9n1bX!1dA5#;oQ6$>PO;^orRXt4G>#CNocZ~vd95M
zZsyCtxPkQ+1M@%yCk#u4XNf$A8F_A$G$QKx{?4x|m@l8mtj6vLp~MDKfTQ%w$6=$D
zplAdX;^oCILaULH({@NzRathK)DUE9?b~o38^7UcKS8qOyvC8YW&fJAjIh;ML!mkn
zNz0FyXt9qD4|ju{?Ug3IO@fJ1Ojua=5a$g2n0;qo5z2wOHh30ke;`h-=2Zpj<<`@I
zr32=HW%d-xd%2%?lWXl9dzVk(rUQ4{w;?U|`>R`*MrX`V_dYi^xpKu7(vc7}eWm!&
z`l`@|fF>+cpWQaS;ei)kMiz??8J>c~o|X1>^#E%lN+s|3SxEiz(cc9(1!LCS7d=)T
z%JDqqyMMm8CON~9W;7XPPxutc6)&2};LJe7o`Wc@$fZgjKr`lh0NgF@JRz~|*v3Ol
z&k@W{&60w=dlYiNPERv6kaS|1;tL3-h=myMpLGoFXgER5Dtw>>`>~P;(O}gAJa-Wt
zlb4SSkMSD6s_E|WpYA1HVSoi%z*eJ{YW`Y$Nqj?>f-yk7*c(HweW=y48(#*^W1+Bm
z%20)Vz~K3YBEr717SkIO|m?`1za(CsxSWJ)>D^%Bw;6RBl(SS
z#RUFHo|psT!HyYS*8$mAKAHGs!%hwtj@2(4)rE~(?6P~dbM*UNtUJ##1??g(5CgyK
zN=0Iaif+Ez?|W-jFuCPu$D2*lHJwgn2YF;#grmEvjpLX?!UZ;Tjm@MMsOnn{Hgl0O
zl&mWRpUEeCKBi(+_nV1AeL!@_eP*hvs-#baO$;-|iH?l!BV`LMf=2TbK_(z6^34A&)%T~4m5OF1R1y0L6uQb;^e+=6;@pZWp
zo#cmTX}t9|Qq_?zOkPXq=r7g4(kn$w#_Ab2Os$$w#I->nD|n@ZI`wUL#OyN`y8+?q
zN1$@@0N+ffaRzyjG1duztJ_o62%S@+gBR=6+7Aot!w}24d$XJLpGKFS=Nt1C&a{8kc0;v;o3X
zSE8Y#XX1Qpb=E>>es(GrT^epoevy%@gwL)O;uLq{U={;Ry6dz>a1|^XF&(MWYIBnauLw`tJ
zlH7a4K{VubaIoF-VBVq$^LIUTk-hgi)9q{DUf~qu)z^gY$C1t_DlX+OyL3F$UU)<{
z^Xed%V+bj|uQgaS!y+TyG1T})a3lQ3aC1G0C)5-^wtNU!o~@~b6nhPUO1TFe7iLy!
zVjr+083eRp^43w$lI)X!L*l%&A&+1cevy7>6@0JI^N@aJL$JB>LvR+?Vv2-WSJF2P
z9Gz5);h*AWm5MCS_9!GIop^6^nw#Z8!ZFYA8#27S`T3#h8~9%<^ET}o&ya;hZd5QX_xQ8#X+5d#M13#n4Xd3>Pm#khJ?$oq1Bfp&;_$3Rkbtuk0-=$k6yoi
z!cDs%bUjCuny2eA%K^zj6kXMJn)$K1n|(!&zo2$))~lhmq?+rG58=i=(AhFtg?*5R
z(7ZVP7GVEnBgZF%#^%??>XNxMbO?!nk`vlXi*iDbvvBN@?^{qiWhuW($Y}rQIlF@a
zk_M-?IYRk&4U~r$)Y)sZ1cELQ^^3A0`+3j5Mxnh9J3FnFY=z<}7AQ7;k^i`x-pr*H
zw)&^tJs3^pb3Y{rJO3k;PCs3lnbj%|e=sg-GDaOMRO(04R|!alOh12-lw|CA&+W$M
zgO)OHUEMt;KB6f9jMs@Cv$PVu?Y>?3fT?$f)2WJp*XPdn@{Skoj7mS`)7a^nSz+d6
zIpzNJT2savi}UBZ9b}(xWwR92(yOLs;{>Uj6A+_*NIn>3dG(H+zg6+qwq*B|T_~8frF+
z&pt|r-!k#*&dZyB$XB?h1vn#dVGwGo0G8}wO>U}kLxyBOZa}`f%*6ZdqLkY8!a4_A
z*nrB~N5;gbyE9UJ(#QAh##W}DI-D%o;0e1OcZ}oi+u?C^bWJ{77mWGYZE#$
zF=9{~h*eq}*&Z^@ufxNIVDedqitR3R9C-lln;0G~UaE3!A0zk)hj&=@1<2vpA+6!T
zXJBwQJnHFJ9v*%Jn~JSWS(;_9+1DSxJRsyIV!rjE
zV9^hvp-axKbrsNEK1G~GzdwzWL*$hWU8S6h2-U{p3LmLuE@T^O!*20P9mAIP2nmhc
z&_yz3Wx25cyFFU`wvu5{xU&NQ_CvV=GhrwF_lh;?EdEG9Q1!M{ISFWqGR0~5E{bI<
zyJnc^6>uyh(`Va^ZNCc#kXAUwKme(0S6v=Z@%ZAz#E1NPddca6uiGAUROW}
zEL$1o#iyS;`S*sd{&8coiOVz2KkuxPI3aC9NLZJMsu}1nXowGgm=W1bH}StW^EMY|G0#i+*W-l9iG1Bec#U
z*mS=Y=fSUaLR{V(;WhbgTLPm`)A;W!-$G_){42$Iq{LgDDf#K&UrL
z&Aj=Ba2U0e+R8I-{X-V9_r9!25a|ku0#;rg$Cth~I*&pZISp4UAynpelSAu?UU-JL@lJzrpSJMy#E_ytq`d3!2NY9!d
zXu|qlrU@SaHkVf+tXq(~ik
z9GiJ@gjO#3@yZLQc7a&-<^$UO{n1t67V13k0X_)Yqf!NA#?4wVYvZOk!2P_~v{9>t
zE;IJ~Xxqe(fl%e6N;gLgFMtE)s$Qd|x!LZ;r}FU*|AVWmKyKbNIUm6ha`%9VS^n2>
zCRpmFnm`0J$wV*qeRrCVxWG6{I-26syw=5gCw7wVF)@5KJ_Js$w)e+s+vT|86V)Wy
z`eC|6!-RfI{;c+Vhju<1qE@y2{FUq#w#=i=y9&~!O)%6E{!9aaNME(-_PsyeXUw0^
zze{pvGy@ASwZcl};VPr=-EuvLu_=qe_P+-+ajz!ZE`IzjZM=nr(bsbJ@2-(^>9}|w
zp>lTrv-op?ls~op$=qpgY4bxk*5!#8Yko34J>A!q!z{Ni@5ti2=!(IzCyQ!3fr!1x
zBPdY#>94`_^cUs+>nmQjcJ8j3b2xR+qS57D80!lV8Keta#t<|fhqgZK9_Pd?M;AMu
zGmFa-RZ=jVei}#bRwQk>0<)3;l*P+yTeWL(h&wgoXk&|bbE3?E4=)ib3GRheL$lXJ
zQ<6l9)%SNtv5mkv1cnSZG*}RYB;W6vCk{e{WON6ii8iepSnM`{W_PePYRQs;c2)UIggrFEx__2lfgn=+cwo(izDw#4DCN3JpX1lwL
z@OmVRC9asbcC>mH=V*{4c`fARVO1TULR-1uxVX5FLlB)aG$N&-lbqzSGTlJh6G5PC
zq89lKr{PDoboRLSbp72}PN8FQY*QD^l5G&JDKYuRmt7{;e77V+YdlP!bkiuO>728B)*PE%Ap2Xh08
zV+QT%ZPLl0wp-VL6WU)nw+2a!mjrHScN2hQy6LrfO`i+orIf?^%w~>Rv7qc7s?jr
zzEkBHa4Mu3;XPNGk|E-H>wcS=mL^5(QDVLi!;uVRBA6nvG@ek;$w8kH-{yiN4b2<%
zcKXLMVh@7*ca|X%%}0}K>ebc6X!F3~BVYQM_&CW^P7=@KwO4~tnX$5$-rrAw&a4fw
zcp~a^9Of
zv>M-Ty1dSyx$yOFP|>kh)k@PsUwXQ!MQ83AET|9ZJScY>9}lqGBw*N@jub-7i6KKE
z1uaTi0nH3v|1?Fyz=*DdY)|%QML0=IjC>>sRFWitpoR=M!-7;$-sKPbX`e`US4jXC
z=s`o^d@?X`CvwT`$cGPz6^0(;NQA3k-C746fE@MTL`R=U|Do78qEfiKyn%7UE&;-d
zSg|jzEIxNZ2rPc>q@xt4A2dkny|@WEY=Mwao{n~%9{{a}3POM#6RZMSLo4=C7B!D0
zPL*Hmk4*`
z8$5IrYMA=l3xtQ&d;hCk_}8-i|LvE&;VVqJc~9{TsRf*VOEDS(=K-CS4f1
zFvM)C8yh)+(i`|GSK@Z816Xl5eHBGOdf3`P=*~^Sz$KKtE{F@=fnC4WJg7yOF~ZlR
z%n^o@G?8{%yCfudBjYG0@4D6e`xXkt=OO;M0Gb%^X&Y6se=NR1
zN*?uRt@rP$k2*nRsN9VO3z<)AF#c?#kfuxQ+n=^vyY^t$?hqAaQ#;d~J6DX1j^Vv_
zCASZ@s&&!cYe*zIGkZ$TeoH9sQUNL>zBaYf{fvqVa$}Fn%g@3*=z(z^s=Pr7*Xsoj
zd4X0WG%2R9>PjU2MRFKC6NS&@`atCx;^x-y0kfz>G}|AarQD>jB`>CY>Xa-UwjlP>sonv!a2joUn#)S^}60s)-tb*pK|2x>~RxK2!X_IPJY8Q
zE6qAFUT~Fxa^zl_oZG`jrctE*`P>DQi;Ly{;(*J5wH?$P&caJ;U(R)|q4=zsde*!F
zwLhA3i1TKgpal6(&c57R*iSVf?bIwg8*UjCT@t6&t9W^f+Ge~rz`9sRm1X_rs#oQ`
z>84yve_X}1q3X-={TN7w3*!xrvIw)tL!BXQ6a#h`@KW&Hl*L8|<0Y)wS+kj^jHVg}
zLz)piQzC18H>=pzt}`CvIe2f}gBnH(OWs1Newe_gGC4?ys$UKfcnF($M>NN7q2}h)
zoq>8CB~WGDb(J}5JZQwmQc+oX$|5zh>ixY@HFR$~PQ<|VTP#TXted14&)a!~D!S+A
z=R=?-RMbf`DfO=oI?oN!LBi3lN;c-Vvi*)T1e+C(P6^JL2AM5+(3A6Qf?Oa|ZrOIH
zRyEfa;XLo`Ijx@O$Y`B)c#$UM@bS=D6UV8@_21uex1*MlvD+8x*}o_6Wl!Fli`G{E
zEM==VXq4=wQ`jGJQ0rB8_~G;)!(M>?XC%?@zrJFDBcX{z7r)m0gVyp9@3+THgM5sx
z13W@h1*L$}tiCw%mKzlx>gd_qm(r}|+)%-*^bCzGcS;!5yE8EpwU2aTpGjF%GK>|P
zj+g3_q6G+HaIt-c6-B^DXQ44(h4rdzBhC(9`GNmal2ELF)@J9o=hmP#`?n{{e;n<`
zi5+-4LjM$|CYG|I>Bx8l(m6mJu8B~K@x(%vhx8L&2=$cJ)Yu5vOvq#cc-H6|BXb7=
zWKNIF#<7d^3!Z!ye3MF@_
zYHDszrppX=KJjU#XvXjGwma@a@&@5XFp>6st_6CK3;VztH1lv=i~6K~KbJK)^Pv)i
zD{~{&^NO+Y0IjMceeCiO+_nbc2(A0MBIHmqJ)dt{@+t&%9){TyD|V%%OOl6OFpmol
zzH7cDDZt}Uz{a1-)wP__f|s||Yre}%2mQC2xLdGPqf#(w#7V%kaEo6qF)j^bvI(=*0qyXOG)Etfg@(Ex&Fw0-U
zdc@jsU~=lWO#b5iUkY1BKVj!a{mZV5>Z)r=flWVkVN>ybn{QKxOuZYNo6D*Eyu2g}
zb1pg%a6CcH4tjBOVW1Ml1L>}0o=>x5l6=S5>tFs^FMjE8vKMIwT|iRY;-j%flL~je
zgs}#E`(h4rz1sSsFObQ>NeFKU87GU>vMO`*)#nM{%fe#*^}}6K2phq8(9Bl0Z{*kp
zy(O0%WtZ3=^1b!X!|d*;i0CvSUS6UsQU`xrRE@30_tx;|5wBK0+(zXGR!dj|3B^o1
zx8M-ohM1GCyJiBXd&2JO|-Jkq^CE7~K%d4C@ldr(0$x0X<@`tp0
z_{D`Mt0fkKSr6c>F*#eFk+yNh_UEro3^dV1k63E0n*887nM6ss9tW}_c4-j8DagcW
zBbHSwO`MiVM1yBjy7ff6AR57L|H(1tDbPK4b)Bq^aac>}e@^ocVolBZU?5CMA)NSG(OW=~>^Z0d*|kM1YbG|@~oB;pxCdo2>+-=Fa@fDNw+MQ^_7lvX!?
zOA=k+H!}x$Wa-)YO1~^t$j16C0-?msCn*yrIuXrFbM4ZY9#%F+3#KCMljgqY3Rc@a
ziI8_;TFWLs25fdjkDBahiKs~h8jVD_8BEmCpmP+;_%Egz8cpg}WVw0x81%7&8E
z-;S>&Kt}ow6IXqdF8cdz!+3n8X^dkOB$H$R+uzgd_#N0@ETtS!5J?tesC8Eik!S+{
z4p4PtR~#u62a-knU4_t6PVEeu1wc}KRu(cJDJ}rgWs97?R5SW8Nj(Q$Ht>rslYmV$
z5kVX>$P1a@uHOWrtf5yPi`5gAaM_*H{Ew`d{8AslM>RM-ZE!Q8@Hutgq%@Ut)4pC
z13lRa`m;Z@0L_JGqobnwD@-T2vT+3E?r$-MteHl7cU5Nqx2692(3RBJ1*8cvnU>!1
z14PJ2nz?EYq<`n$D`F=YduB>&G@-=16f=&CW%=u~1RPCab0&)sfQi5bYXG_27Ew40
z-}7>uZL5G#M^gg2y-9!qEfG`ReBB?1fUh6-?M?sJzad5JT^0@S{I>#ya>s#IzlIBx
zD3O*}&u~N>xU=dmbe5O>urt`H?;QhRpw>@zu=fD6F&~V;YTr`=#@VWJ2eV
z^cOeudmn*IEtjmWvILUBvOM@PPjT0L{7D)*$ynvnHra<~(lF
z=biNJl|9Q;)Bq5U6}cEa-}Iiix~h(iRm`itjAhxB2%3Snf2n{LQGn^7$@36yyM7IM
zGV<|AtsC>zAL{+m;bwE|0)lUL-=xgd;kAEf+5Ru3fGZ|qE{+x}v^bP*z+pYzzB4Ep
z5kBR6uGO=Wq5$%DrUvK+JQ-v8YYuOxI(}YdG2RQctUIE_6uQp
zyf>TNM$JSpdiCSE3!747?d0vNS@*M+OnX)iwLQj)QvS_{!XwF^bh4ql!kTed-maSZS
zjMRw?>~g99ljJtq8KiiDWK0mC`Sna+eWEbUm=-w$EcJQvQ#=hVb)6;&Yei^~ynF_>
zvLTXjPYY_TVPqHQ1LB~a3nRXNjjlan^*u>j#q`B_q*x~sRwCc6xJghJ6;-au5LLJ(
z7ACzG))S~_yqTTk%tL1^ZAyv|hyvnuTW*e&q#)Oz6~e8O=%e0zS!hfzucxQy)w8sy
z;cLQ5fu%cj%Ix2yi5oYj_#a)mGz8kuE{>`9e#7kLYtJ%CCELWGytzUT2D*y!a!WII
zHkQ1{=X&sVo`#+*qm3m3SGu~!qu~IF#PRP(Vz(y=U%jw~XuznRBCQI;-^w|IZ&tiv{cH#bZGH!=Z|
zm^s#YY(Cf?3E6B()*TS)(tW2jHT!qd_xaIFg^{itsL4{0G!`5ZvW1s-0|!SN=}I~{
zNgySJtFP6b>rq>&zU6-C!xL_`34+0imVb;t8|wG=EmU3`1Kzl{mi6f7Y`;XhAaiJv
z+}e*i$zTQ0@Mq2TJTA89Tns166Z@y_DF0^^B$KYC%kX*%-*l!bR#N#@X#<0-`a}l8
zjpTe%S?{-h0>&mX$nuR1s%q
zU7Cb9yB-N>0j9bI^i)~oYlR6~Wcidi>7b^tyIQuH#CkczhH6^s>ABm87PFo=tYg*H
zJ;2Eln>b=hNAKQulk!e$SNUBe@mWoNwckY2-Dz|&4!XK1&|_igy#Z`Vr~Ul{Q`l4s
zP`GTsCW*oGG%)iX2Qa>?mrHYhkt1
zr?B8mASQfM);1&O>7y|gh1DNzqeApl`WmJk$Lm@P14F2nL*%-f)yx-c1#8%xa2Ff{nRD=QFX*fM|P7+jMjzUD>B(!1*lm8{C%~lQ7(Y6R)n`T!bWP3G&yC
z*&)w~^~)%==e0Xu+QYFgaVC298cxIf$2|)s3dQqu4o14(*&A7GT3u~&@Mv*@zh%6B
zAS<)n>~-x&^gcUas2RRI+BI9MR#DRXN=5!Wgc2im*C~`0A}uy%+6EoP;YD9qWuu!#HAdb^||V7XpIV0mZf(Vg7nR
z@hdN%>o4gC@OGIfvw-q1_dWXPE#3>3e|`CdyQ^H3hK!MBWt#M(=TLN#rdN?iK{iSD
zyzvndG8Q$iy$_8?dCb2-uzT3pKkzPaitK~}Sjeo5Jqi{x0IsgJ4=a|W9i`)%cK$br
zJiTm+gW_aXSzEilSldAo#-|mC!e4~VH~#N1E5d|}XyQTOw$?vE-!0%C3=I4z6g|ax
zk>h#yMLTv*?E=Ig$I|^8^UxZo;1iwcO7}>;yjz-~7#~>KFC=
z*U!uW`TyH5<>giDmr*)8NCk{mBB7e}%gTS?r%S>}x)||QVA`g8V@{^Kdn=*dAV^5P9%Tyx8s10XDf1Wf2eaB78{BzmwDtk7%
zxgFfU>F}fGKb)xl*=;{C$h>Eym~W@xt7EIR$_h3!rq394^hH?TNt$Um;XK~DVh;eGhWoF?De
zco&2J4MkQ~7W%eF@i|oXqbE=L<>|!Qlw68^?-MZa%GGIj;*bt+*?Kxk(CW_{UK)O>
zf6>0)dMUNr#>S>Ql%CO_>tI;Fw1bA)PH`hG^Hf_^t%hW2!Qdj{&HFUgnV9IiED74M
z(U~czG10L4$k5PGDHEMuAKXRD=7ZW2?bDxntp7q@q-N{3W6K$s_DaLZXnaFMS7p0~
zv=3+735r1Gvkkk-lMKI|wSE06a3u4x>~*J+H(U&U+mz4se!{a+cfZ^IV^EUM^!xH3
z_B--ld=GxuuE~E({;TO@0J#Y;^kTmoG1X93J-ma5)}q
zS%>@Y&a2Na)L(^EM>sA(a<^*c*5lz$u6RacP7U24@h|mgDec#2`bYHR$Lmp&)K!^U
zn}3{+*N^DyBoSI-ip-tDBli=&IxciJ(oyuhev@(%e-IE5osh7r`i*jqu?+-o%MShK
zqm>pIShNU?
zvSPoZDsmHzr`y%vxG#uU%1oYL=hSlt15&XPRC@6%p7^@R`MDXk8}>jdj~qRE+SbXluQ{U@^fiS*^`}zoSmJIF#o5#N4#->f&1p$
z57Xd>st=v@dKs}oUqg5AdCSy%L5pmaimNVeX?rFkZq*3q#A<1Jp8S09G`p-^(E;Va
zVs-}yH@n{L{PJh4+&EW08fpoNW^gQCs??OS_lG`)3>u0M-Rdsd7vKNye@v(w^irHD
zH->`82bv;4*9Z#=D*B0PpE+^D6LTsg2%>-ErgJdj_}x2NXV0)%SokQKWWDs{k`8_K
zif?LqT4?WHbu5p{s1Kh+F^n^=-w~wJUlVoy_2}p*Z!hlU81yPCy?>gw%i$a&dFn#o
zGwc3ST@O#5xuo7<*wnHRJ?i9yUp#%PYG$facq1=GfIHueI;K)DW~Z97`(U-7-G?(S
zxC~i)x|(0zwc?gF8mWiJF4!b%DzwV}<*@EHmtm!*e3m>e{u`m8;k`bB9Xv{6H?*=e
zf~RLSS5W+2ev{%;ymPS&7VA?tZ)PABAQBCt*VS#^u|o+B?`xpGs{&oyt7UCw7N?`D
zn|O1|EacUz+Nh6Vr}<^jATbSc*cdw*0slH95mmc~g?x%fPM)j*IUaP#KPBN-wpt8#XHUwxg#F^_`Zy`y5?qcDm2
z(kVSR^>8lw>nfcOL%!L&v2+$$KlYVNsCk>6nOqD%MTWh7yJPp2a_VlwDkUqcB=3h0
zk!E4_%;Z%lmLvFXo~>T9W(>}Rb?FNbXW)2nI@t6^s#O0?(H$E4MMqX2r0p*8uT0rfgt%1&eq$_H2F{X
zmt)<(M&FGZ;}e#hwJabpMsu=`#pRYocl|X}
zEczjc8Bqj6n=ELD#7VcrtlC=j=W>qCX^j}w_BfXAxn-d^JUUv1;XjJ%z9C~C!gkhP
z8zn3g5=(Jy5hBK>D~Y&}qT9boMK(4bu{yByC~*+WLTX(bFt_|Gpgc$5B>M>|K_)L#T^-bO1e3lx*%(4~6Zz`sooHn?EuIgz#=
z2X%@M^Xe{vL?gL>
z4W?n9F_9abr@eBx2<-xQ-1ct3_i9yiry7~eD;zqt4
zyA4%>*Kd$8Hj2MF&?t#Ppfx`~zoiGa;qyT(MZa!}>EWQ97jNDO*n?Ba3zxR&QYs0Q
z)7H|`y8Q|CX5!gs&<1yPhR4Pp)ft_!wM`pKgzs*L5}xS23+;+jW7WdWZ*w@PEiE-A
zjMv>$2(%x60-e(+bOBvS4aUN8$LF2Xuow^7W@cnq%+K1m*snVTB<*M;>W(0(
zi?k7DNgF5vaw4daDj}N(_Ej{e4-xPdx2pg>rEX5X9n;56o`xE340!Qrz`TBZnwhq$
z0EtnarIaPI{~-8+i1p-`sUT->HMPjn998!iXsY&ul7zo$Mi0zhdD
z)|GAuo>h+=af^$OA0tW~vOFWMwd?IkZEfvm9SI2us|y}wAD@@K*9LLxZD0N^Te@Lf
z5irfh6Fyp|D@g_hnI#3}QtoA5ZlRHkTXOrazuo}zE&A}mhwu0kaRn%<0?8Vheod&D
zD#OS3Dwp<#*8F?k4j)*Wxtc__++M{w2$n~DFdjr5Zb$`CEWz&j5kFvVVepsMM`>e0
zeAdz4-o83&5cg>reUF2K!xo}C3)&{;D=bTUQ*rp*H_7mQ=ke!nwp+;RHr7*JTz63;
z8BoMoh8}NLHa7m89euKE1o!Q0ygb!a@p8*KwfX5KOO^~zOo%G;;3MkYjnx>U-I_;?
zx~Z+Y11kOVwZ%FA?Q?e%as3!TP&xs1%mHS;zR-BrTW6aKM-!$Tr^BAh9qq0sk5k@_ZN%arZQA-*
zN!-1A_x}9z&%RFxna?stZpR$rWxZeZ^%#7^S7?*ipb@R4x~
zDW5GzO?%I7+v4E4G1_#O6?KYSsyD{r!~Yk1?*Ucixo(YOjX`4)6%`R|3q=HpC`FW_
z26c%ADbhitS5caP2qsY?f@O&i1q218Nf&7XDhdM9dlwPupwguO^C99s+4tV_|L2Z-
z#<=$%mvQ#l*`chpzVH3sXFhYzXTHWBXF~NnR`dY)JWv`gV!s-4F2Ha3&bIMqn^v$X
z9bpt7r}~wkW?341MN>DA;~TbtTbX!R1uA8nv2BlH3gQv%Sg&~A&XKW|	Re
zc#z3tMku;T+5(4L*)0$gz&&^E{IGweXTzT@N3*XEZS&rD-12b|`vKN_zy3J^nr!kS
zpKuBDV73{AwvzQB5!=o3T0=baqH{)&du2Q|IUyTu2F6k}A1(>kH-^^VKbVIHNc-RS
zKn4@4QPb_achw(=szhs+pa9qov>)#>b&mo`2?o^U0(iv_`@_02JT}G;5iOQro^i4h8|Sl(X_bP3TY+>
z0w;w-sB^!1HDR+;^Q9Fw{RLtxQZjHK^O~BPQpaJo4@YgSO|y?VlJ4QLC>=EODhZpg
zf-V4M6-0SlO#r>ZR>AWhi$d#*?ZvwIBoX-QCfH}r+S*oNT?b<(BCn|ESafuBD$w>~
zp50-{{o1?!;$G~mf|aRz_wL0ZN$i6iWd{J1Siy*LKaLM5j}V;-7%#Pwt@gk-
zq70mRqz>mfc0g6$JTZTc42N}g&8+%o>Rq+{rS1wCWVJvgdYUz>o36$)5oL$^H_83k
zcrsdB-`t594_AE*4spxo@b+VcqpUCGbJ>=y_}-@a_N`k%Kn$b=%T^8!4_6_|eh{`R
z%xZEtabxSKK$XG8x8U~Rx_lpD|H_$@qmd1w*m=tUh%D|^%w-dEUu*Q8)toK8f=-&6
zn#-2Gy!Tr_J?-_crx)%ffHppjiiw05uV~e7ji_z=j!ov(`iqO1rTF86SeKpHoJ!DN
zc=`Kl89U#~4Tf%G42pBr&a=U@=gf&QOTT&ZW^Iaf=vSytpvE#CzhH{FsA;HfCdrm}
zG|jFwTv5T!PLK1luA*U{DZd!E52K=N^F+i@;ewypEX8RJ-u}62vCJnt$F8L&p@I@2
zj(Ha(WG<(r9l4XM&#Ide)*b9*F3#96mraxwQhAhy7{+3cu)NUv;&OCgpyAM=L#3_x
zJ_d2BNsK`
zNR?M^Dw|g$-tMUf`cuo^ZOu(PHD@-zO#0M>Q+9>2+K8o3dv9;Fgl(rL+%Naoj84){
z;E`;{w=>&cY$E~l{E6MWfs|1s&GYKDw3ORi0z%H4o;V$Qq7yT%$BMWvn5!hXm6pDVm^3seq9H)j=xq#kC;QII1c;hGL=qIPud4U#5NzszK)Ozg*TOc2!80G78S4`C;
zW^lqvOH0l3o<2FPlJN5ezn{qP?V5KPmOM83pD!poF-xD@`|w5Cn)wO~2?>D}dc&zZ
zE9^z`rT@qg(4F}9<;wU6UvTgpXFD7JGdy56I5S#e*CMYtY`$s&%YCa3FTZ+NSNEv<
z0zMZP#zu)<7`0i8mUdM%_n~WNHepGxxpKu+@rOc@v1@D6t&YGt@OLhNk1)tKxgQsn
zJ>RV8w^W|j`)8is?w{C{mg0kXr6D4r9d>LxyOs2oBEFB_hLUj{pVG_ft!qLS-wN<@
zA&Hhf|98kjK0SmSTus;~GcOlN>eZ<;b}L=Wkh399?>#~UZ(b^zJ8wRSJFD*fV;-30
zOnLhBDM-TGCq;ezxo5lbgFbN;7IB$8Z{$XAFK|`!Dy+g~tsvfP>=v-On*O0Nb9Nqm
zf9*f7<$rpi|Nn0y-s}I<7a_NxW)@q88d@U-MMc}ePGvx9@m)Cf_Jcbmm}fOBOuZnaY+uwC!pZ;efU!Q^l3
zUUBrDsWmHChJhJkwXwOfqnCQ$(h_vz#!p}X52bo56**n|I&;Cc?AO`ce0o4}ag&+w
zLml;)%VrgXX;=0I3u3qGJUv%(rR3{hmubb8&8J65Cz@Aqx1^;1>L1XOw)OM~f@JHM
z3JCi8gKR_C&GY_xb?PP~B{fxjDR=+kLV&q}~Qx$}Rk5BO%-
z5KuTDUKtl0Y?Z_=zeDRJB-D}jcK`l;+?B-wfSQi=JUh?6tZANkXO=(FX8i4In!T6b
zlgu@9H3H+Utx*+RKUbhV;k?}3{fq08E7z`_u(5gkw-`ddT7UAde{8`2CURiM;kQKN
zT4}^DSBg0<5re#CBu8XqWJG8rMX%qmVGvNY=J75dfQ`rQJwm0<%gG^%d6l?TAUs?r
z-b)BBzY3H1gV(PQ;%*A!UpaPO2t?uYW^1x>idq4a4bl|WVH!K*RyPw57y)8bYs-0OP4PnVqxhIVopx2
ziA!2qx&eL^^;8>^n|+>)DD8B0V%*S@wf6Kx!2(5bm0(#z8FYzZJPgT@v^O+-h@tc#
zFgBa{_zu!D1I&ZZn5dt-L`}f^)bQ=U{tAQ5;slY>;7ib2E#u@2r*VK9JMcenf5-RU
zS|wr{h9HwvK=whnJ}BJk(vzmA4hb+5oe??RsELdB`O~Ko*nS>C^yq2IY|>4m4?Nt3
zKoqZ=H}AtFCn3-mZw8Qm@@w&Eycq(zz%qO^o
zeGAxs_@M%B5wL#`_a=!<#UQoxqVgSF&^mge|E>OIQC4+^b6QE@o;hiQct}0ruM*cp
zT0xr!K7O5r@7Pi_>Z^gyU!S8t`p4zV%Z)Rwn)jMb)K}rJdcwC_Wo2!hkaqO+>C-JZ
z+&&?#5W#+x>V0*mGX>?POw0N9u}WZ0(ZlevonGF=l`B`yd}&k_SWY<$i|Z;QRf>?y
z7)d{}kF^ay?&i$Vip#Vg?(O?y%@{HkrW=vu+-T7lZ*cW^=uvLBiE#w1#MK=DE4|I8
zy?C$Q4gJR!b(NN5j~)q-nMB}mR9adZoNv3+dRA=JG>%F#$Y0(<
z1_y;71O+X0)=7wu_W;4b#qxmOQ|EZD_RE(mckbM2SJx)FTzaf~dsbatU2_|`F1m-#e16Xi!m5+CtfNV1
z=e~Tdwv8GgJBxx$PhwO^-ucYEn91rKpi&*>*4=#@*(xewCcb^|p0>Uuv?py46*9lJ
znza|-mjKf+2`_E<>86a&LqnA?z%AqAio_`zokU1s7PCA?M>-@Vqyq*YWuL8D@6l0A
z!360zSfO$o%e^G%fEoJr&*%v{q&ICk0CT1y)DlKGh}BiQ>>l0SVe$anwkgh%R!=m&
zJ=9$j#3Sj*w{G1YJV}dNxP|rqB(fhP*Jkj_fbOZnm8t-_JlnSA-Mazg`?SOPRDtuO
zR?yyMVq&76ayF##!k7ioEy$^S%w>Ou{&CLyMa5WLI~+di4|dlm;;)lHfVjH|2pYzC
z-Oi@WOdOY{7&t-wX`>U;G_Wh}IP>n;#M4dMoSd9!=*y6!aUU^D?a=9}ICP>>!6Kc8
zdnIhzZ!k7PhPb}_^Pj){{=4m$kGkmBs1e;7ScB0CMJxEC(#Hu>Tf}46P&K=Z)IC2Mtp6V*}0#j@v
zhnUQ`v|iXImFTkbGU2R`x%}grXl#RMus7pEBGc6yJXHws0U*{CaXYv(HX&Ux4F0S8
zcwefR%L&9M3WboMX7uvMmA}Z~gH>TJmU_%--Sp+FR~IP!1=Zqz#Xv;`i25iCt{ujK*z}CC+dE=rlEH+3K^{MB
zQTg+dCGV`b`sEHDl*jwJedo?*Y&fr8zBC3wM;j^1%ns*K8*Zuds_>n8U%!4G5rWOJ
zF*v193vW^d>q+Fo_&L1d(aXAgTeluXGKmU+5~dWsx9w}x+Oubms0B0&lb*(O4c3lR
zFBkm4vs?A~^XEjMK-3w;W?_Q<`c>oBuhrJ|*3MnO|ERRT!eIU=E{Z4G@D(G_yG@PU
z92-6twhs*q?4ZNVX4__rf0i@zhn5+H%bN|BioM)d(&)pFkm@JGg?oqtQ`3SVbetcb&z^w9?9lh@gWqkC%jEr!C?-!6a
z!bQ1UV=l_z#QNovzkmnZ%mF{>iI2G=tRDv+tU@WPwAJccH1Pe>jjCgmEOhbg)mNt$
zt&yr%$8hA%OO^@Pk{W8pu>b7`Ww}nvw>iMYw-@7$*dH-VHG6w>f4tbjNfi6UXc+KD
z?CS79bAqxFV{oE?&a1p>fB6Zc1*ZyHt<1tDi5kSz!&0|Fj8&}L$7=5CGSf_Vta#WB
zhBq7Y?%YojRR#6~iLA^0`03L&)a`}kn>$r)9y<>i1}2?uYLwWud+XL{83)kWwuZ@|
zF&kPW?B}OW+J~|^Z4*+9f2rPvz@*sC$RFGe@$;%kR((msPyB}rB*c{GE4TVaCtv8+
zLtiCS*k7?`Y$fQ#$OykShxronvhXXc7|~mKIoPbIq|o`qnZ|(nHY={n$MtoTpTmNu
zVv9ZPSr;yOrCLL7ZZ1y$xC_DcL|`L0<*4n}F7Ug}Zl#6BsEF3mz_AqZ87>a#m@`Zr
z`QXdT#OYEhfX`Xyf`WpsO7=IIi<;qlOgkf=>s(@mqp9i1a_{ggpTII-_>>0{8yI%C
zE<`xR4T?K}1yn}Z;cQzFF2R5O^_RjcAMB49$qpt>?Vn*WXw{%>yi&4EUgG@>FbLD7s+Hn-vqEyG!CF=I!Zl}
z{plpKI5$JTS9q=`jZHyKO^u~LyfQDp@yraM4Lc8_KwcKeEw$RVz%ZeqPC5o6Cw6a0
z;6m}4h}#N$x$VSb>4}bGi}Q)x=9Y9+LXK4PlXDIZPysX{PQ^Cz?5@IkON8HtS=D$I
z+h%o77Tz_4IvRHh#N9FjWt|E%(^FaCa<%)#0CWXQ1$CBbdD#7rAFZ%`M#|Ll>I&Z(
zz+2uaa2gyIQ0_zg;uaARWoQ+Z@qi@Rqfjh%>lyrneIr~K=$e*k|)VKMScNoh+u4L#O~3De7MXlNL{a30ll
z-cSI8?qKTK)6_|Mm7lJ#-m)dBeG!Lg-m?rRv3QXyg599VfN$t`ug{E9?slA
zr3)aOZ`PJC`QkOWoRSpPeT8YNz+vTkp^T
zjsiF=^lB?S9E1H9<3_qk*KwUNU88|=?)
z8q2v^-*OT=Fyyhv8f}g)nZT)o$Za~r!Xed)mo8oEz;scSijH##x}0U{>t-s*A$UVv
zO$m|d-nn{J?xyYM%KTkHCsOSqKWKvK6?}XhN_vfjkB;t@@$qP}UKU@kM0Zph$Zpnq
zEAR7LRaB~$SlC~{g_u2iw!7apzqEVf&J@N2w8W~fueWJ_F;7r4SssVB-JlNuy9DMn
zp7(OkI=l1@FA)T82Cp2B?ZQS=TrAI=ce>@mP`4UJOOBQ%?%G*Bw&YiAy$Gy6iZO^2
zI3)0Dmc9*4SC7~0rD~^)JrExg8WsmWcl7(9a<>T&rlDHj$1|XqCtB7_6(jI36l<#i
zk&B~P7~#Nz%QNtsjCUa~B>Gj6WJ;d~Pb<aKJ;kD
zf@9DQV2BaS{nS%81qZ$AX~{Jl93f~xyMW$P9S0+@CBgKiyVA5e_I~iYDITx=Cc`>v
zz|9X&#Arw9To{q133ALG;YzCj%uM4FJeA9!27USR<*ern0W2?iPJWZTEV++fOJFM@
zC)UkDvcC=YYAx4Q)^Iqjqg@^Kn70F%(XLL}2nsm1ccq&Nk=AF=t}dLt_1gH@{n)yp
z6-Ls2yX=nwuwU93Xq}XMd=038%3$WL9VVp=SmR949gV)fb!-5*bF-wR)-lOJFQCek
z=@sW1>)!hw-q?Uh=*3l9mHRzb-p5hpM<8D#Me`;MB5OOQiVniZq5?d%@NpVC3?wCP
zWny^Kjs4BZqR1U%Ny%r2Ujh4)^wBObP~Y-QU-))s?{~5(IIF-0X^aQ4GKcYoOi|1T
zJ_9ubAsa&HyB~SHe0)?=Ud=rAe3FKNbCkWHk$>J~Z)XUch1prfx0}RLEQRm5cX#dg
z5z8F`CSRs*j}Jr_1S2P247f3uIdYBQk>lI2p(V9Sv2twP_U($;n(V8om0Gf7346~5
zAs|27-g-5{t5V}1_c5ysW!juSoNF|)Yikrue}J}3wJLzC361olgdpw$sU~a6Y0t5-kz^Unp$Qbc1vK<82u
zFP)At7DB8OYSHhjZ
zg!&f8AcZgj!flq+3L)+8SaUE8m7EpWjdwJ&XAA<(#1QQ&rq(<->Hd6_YC^f~&$>PN9;Ac?0-C=Kw
z8MA9dgf8pEBUJ$M#U#L%fO{B3t>XibLh3G_vxLWITK50#`$8-CLRSa94SI5HY3T!)
zEaCgRnXCmFg$N}P|J@D>EIJc84AiWs#er+9!_goZe|-BKneMj$dN-oeM;Bp+4vH`f
zBe$wb1tA5oCcDh5e|h!ll?i4+MzKfI67=NUzF#ZV=D584jcCo32MAB@MBiA9KF154
zDE{$FYGceeI^PPAP!`p(dw^O_n3_f~a*|H_L`x<~;=N(p4Z*>JTgV`X6hEl$o;Dxe
zaAKs$2BlF8yj_3wW_eWlc0L*^9rXK(qyTCuS+4
z&d8S8<^L0@i;KA>yd1tB8N=opFALV!Jr{UV=lrKjh_47F*&KnHk2jk36M6!1)7sUg
z@oA4l2?8Zag2k~k6{Sq_PkEo
z@}R$Z6FHFaG(CL~0al$T0~MIZj0U_(m+{A7ZwHd766+>WX6R;#sND;|7dHCn8#ivu
zHdu51<3(ce0NXp!e^Y%OM6Ipu$;rtHE$-n9N84@&J-zaM3SNee^W9sAtSuG$23sE}
za1XH{amQ=C#7xT%VuDf6)<}h~2Q6T5Y;h(qQ4zA#B;k816F|T6FH~&LcC@3?IJyY$%_jDCmO$
zQ{xBFlqiG3Yz1`$U||bO4VoW?y?cLyxvaZeev4nOere|9pb(+m&W5xkfFgZSUk>zF
zxG*Nz%shyh+g(0q8Z4>L3DRtpAhf`7VJJ$!E&hjiRV6@LcPh&&+kfYiWx^h@daO@
zUoaE8ZyFUI!5vDp?K*+ceQQ%lgg~3*C?%&fj3|c=9*mW$OaJAV)JU)t2tOF0HDCga
zE0oIQf!n7N`~!iB24ta=9E4xSD{ml6uJriF-yxY8q#HP^rD2ccShFU^N9TeXI(!vX
z)xXdT=}Im~iHO45JO+CxXkkI0X_b_2nnAvPwbP$MR=<<$2xyZJPSA5b1w6L;gp46J
z`}+E3wS7NFEaGXVMqlPs29NW=@l6Nj)~^&7tKlzj;8C74-J@UaKR=iwHJ>eTG=jg)
z0XTw<{|#s>{Rx+xYux@T$wfK+C9{X54}*8`ZNLR_RyD(-PBi^<-WFCcp6mz;o#j~d
z?PudOr+z;$i|zG>ojcWj{`qH+^a{Agw%9ImQI$3wyEjdgJLC8d_~>t89f81W?zWmC
zu-Vu#gu%wQ9X3$)C+2t1`xz$HgV{m@ScSWw`26O#pIqh%U>kUN2W-hij^Xhh!~@!i
z@qY<8@bTPotXdU;hN=^f?+G9af9{z?I5t)*HFaK>DzaB#N=1xtK8(
z;3&T5GU>4Cz??t<+IKGNPLJA5zrGB|vqd&+H@yD(i;qc9j$b!dP=l|Phn9_3>&eg(&i%A|bmTrxV@5Hf9BPK!bT_x;RJji;UK
z5ClkogW5$1gJpwq;wv#S_Ik^%T^h8#L#pCfy;=ZHX=0|ZLtJlk>=FduwX2@o#7H*^
z$M7&K7;=#ruovJFsaUYY<07nQ)+V0`61fJ5kJfeai{2o%96}GDKbMk;48o80=FOXB
zGeR+=QHdI33rhz7+O;xxml*0JJM1=Qp9ui(Qd$7oABoIA${F%Elw8P4f=q!Y_`4G`c}
zb~Zicbj4(3TvP3OB7pjq*9?(?D^@qFJeau@Q&zckbs8WivGRh@1O?LAM)43e0ockz
z@gPUwKmmL{1z>v-IM^*3QY&=R#&fQ}I#q&rKSBe*Dk{K%?$@qWLQ67=8b&=@ks?W!
z!Kgv|?rL2{Z!fQ4OcNTvzOjz3C&*{oRe>CSSS>D(U4Ul7>SA*B6)+CWbV+GWffV-o3msuNLY;>}xyVD`DtlR}-&|FJ3Kv@-bel
zShTshdGYnzkpEY*Jm~+U*b8G?gZ;7e`7c%%hnumbi4v{r*0pHi!tE>;Yjs8_tbrnc
zb_+e703P?NtKZF%diU-fJ?qPB4*&+kAfT7zj4dy!Zwj)7;W;O#I^y9fkYRHtOWeH$
zD#m^|q8EDN(2N)2#7GV~T)z&S%Lxo&U0sCbL=+JkaN
zq}h8X#HIhwm9vHZ&3^gjqCj!l96uee$wyJdS^;j5A_)R%Q3*8E0(Qw59g-uSEqc0-
z1W0K?Ypx>Z*&^t{gYsytT}%bs5C)Smu{R&?7$FFB
z4TrHWBy*^BG%Z)j`PK!22+M$=04vPE2-ia0rG5tY$YkVm>%7r}|22*0qw_d!0m1W%
z7#$tLRDkXJD=%a1ks^ehuL}Xq@^F=MEL*13E)}=wzalR_?{&I2Eu;e(4}!*0L-@*Z
zu-60NJ;+*In{+yWNIrV)zNh`yj-;Wp+e^C)qk{xTK>Kho0_4o3rV=>c})Znu2K~}1LUo@h$RPgQ<(4b)n2jkf?
zfsli#pehKv97~t#rH}24M$sg0mv$V62^t=0=Rs$P
zBcvL5K`fD-Tqtuqo(vdCnG;8-g8^DHDYGL03__g2L=Ra8d`o~6av|@gO=jqHM`4gy
z-S}yyHpBuPlXhq1mk|+tqnF
z(N$-IcIS>3nmRlVBq?APZ@
ztk*w}8+iCI{7&u3THDw2MaouZ*!GBiNqCdz=B<^+aneM=dtc34`41J3U$5g3Wd1DT
z_uWrM*0bjp_Wjsfp0xBTb*HEV*wUScj7W>Pi_OkV(GU#E{yUH>
zca59-UGel3sii31f{~FCQ3DK!oc3}+Yh)3vlfL&kIi~Z*!AtKKeF9|efcEG_6^K0Z
zH-b)_Rm-73#qad-^3vD0uJb)$m{8;A<70FIn(Q0a8!YC85&sm&$){MKi^pO?G_Btr
zUhrYc*%H!=Vef~wHs7++($$qBz6Bo;Gx;j)bYD{$00-B^7jUxvPn4DBr*8S++}uqd((2ND^7z9`{433(={QH~M4$J?(`r
z1~;0?U!Asl=gyS!F_03cx_wr|r~u4RW0?Xc4k)KXEUz0k-aamXF(?AW&Z|@x=yW6=
z`}V^_GlkSI28VvHj(>q4C>rASrk^bEF}07Iv0K7>>~Ly`f<^s$2*7N#Nn5g
z)}fXX1A&G~_i&dRU8bBJoSeFF^Ks}(%<9G<>>YP81C`Z<6GyY%e<&QXO?qTyh0;^|
zMhjaDj8@{0ws7cIph;FVOi%--X{-K(p}DOuW(eC&BymP-+AjV9_mz|eF!vNoVs`YdgxR@l{oHzKR%sEp}S+MJa~km>W4!hQB7Za
z@)I7pNPxSLS~n!&9b^HeQPU(LScjRqbPTuA2rRZ_WvmllVT_1%B6T>fPul9OjGNr<
z6+B~T&mToo0)Jk9{w+q@o?owG7bfUVW`I+t9su&eg0huKW|HHDYFwDUqLGS3>$anj
z5-P~>EJasuID8kN4bY{utjr=?VKv%6a{dSZHlh`!E|S&?(RuLY(qrw&elG^4G5{`s
zhN)sTo_t(BOT`SD6tPvJc&f>DIAgv1OWN5O=;eiU3a~c~P
z+XDUoN1T;_$J_ySEvS8Mouk-nAK
z7F~&(G;PbBYVpTnmUD7$Ub80PR8td6(BfG0n+mkc8d`Ep=C*5aeOyzG2Fsnx|C6yv
z7IfuKYx_z9jc3oE?e;z+y1c`sCB;$T)fvf_^wh=*^(4!l$K;VY7|-1gRga&Ea^wYQ
z`4HM@7s!=LtPZ~?J{Ww#5@oV*4jyl)b%DMxr*&D0=B@W1FI{>8YVPFRt{#K?8inW!
z$f;ykhk*p*$;dW?O)?zInVz*zPFx%u29H)Aznhs9{nzj%+5P+T3JN|2s1$zahOoUc
z{O!CyP%q8E$PguY)K9$m{7*Ai&S#zL;y{I6P$3^Yc(4Q82A+(9*9_bDC}bgxp#Ug4
zDw1+*2TVb2Rh3^$cvSJ?VtS->Bq}La4!lWc9j}8lsEab%xp>i{9o}hMBYpq_i!GyC
zP*G`qw83s!NqZ;FZH_`tLn(m
zUN*Q>LgUcWWDBp52csJG8Q#JEz=`4b33~sCNBR!Gqm8Da45fS!7{e5ZaupngjW}5?
z=xFnb@2I`T7)`1B!7m#d1
zJQx)MqVmv4D2E*4*zmd@J5LnA*o!x}OMBg5hs5!VQA8}ll18&N1dX8)lD!E%@?9zC
z)DMOJk=VyFmBwbUjf?W!Vr+#OW4&AA-+;KnsJOEJ&NXY+p!F@_Xy8`_bS*^+ID
zx3l))&NIXGw8QnpR*$Z>gEOBpPi?N+({@n0HjUp=)?#B@>nV3kf4an#s(r$q$ZI6}-cO-5!rATEw2OB76<
zzdk^&nf%^X-|^GVvED_XCKitg@M+RW+}w|cH!9w=Q2rI3j$f{!Vd;zYGiYjK@gF^r
z$k6NS!-H>v_qjF2UX5?{`G&_Px60x%RLElYj@;_E^R&qZ;C}R|f+x7(>%sn?8ZG7X
za?oSA(@~`kVn~^tK-JalhJb!V|AVH!48_F4FG=9~RwfK;BDHEzJj;eO3y`Ql(6%HL
z4-do+CXU0o$-psg`{?mIl1|UDkihQok6DX-wbVK7m6B3myzrYB*8#tREL8JJ&CbxkLjz4bboles`;7JzI(qUkzWLs%g`PkaVr@(imaGxo;G1tj
zeT5Bk<;4PQ4;oshQIWB_1(}GUyYAttr%M|LvTb?8-?m-@3{ACJU3IEF>^O^k_A^LN
zacMb&5av8iPN9q23_a4VM^G!ZoPS^aI}A!7Mm#zAw{05Pr*SB~PCIt@#dQtv|Hk=^
zRNN_=>YJL7-oTZH?T%QkEITZ?9T?l;hJ9c6j#&x#24GS-e$~{5`14NKWN^y2eNEKT
z)vL|RM-Z}#VMskiLx|&df6@J|%;~#%^EhjU3$rzssl0Jg5qBO%Se90&O~Bxb$VFbt
z^JmT)7_U&e=IJm?(+5nSxBZGk)9NFNq?gySg1@%yqof89Y|EA`c>tHN8JYz`f#n&_
zPCf!5jnljW;4@hV;3os)_IiP~sB!T#zA=>WN#R^iBwD`Tn()Dn}If;bJBZ=kNGj}G_6K)%68mLn4@5DhPfKxP99
zkgq5WgoC~3$CC<)yBs?Xi6*GbCNNb%GLAs(_9iA7qw;K;dLunoZrh7XR7P+Kg9JFy
zRvv6tDVgdqFAFz8J8=C?26-YJM_PUSpHxLCZy`>O!k|WndLm&`G(+R#2+y5o>uc=!
zA$68%1rd$p3j2A({&uV~6{H80ia+{J@J85VQ1kOzEH
z-&P07Uq?uP)YSCDS6k#jjKGsIZO^`O
zBM^;@?12OM;8Yc!^R&Kd2(8DX7d$!Mf2;u$<`5<21EieGF@*_4mychCYngHgJkwjU
z;3dE%_Zglcv1Qn}Wq1uyyzvgg;Rlnzl*e)b)swe!xnZUO1XTdL%A28sN68ka9A%(fH7U|V4}<_s(_&pO~`%XDy(~OlWu2bW@?}R
zBufe+vrButtqTv|+pUIU)ToGpW;563`F_!&a%izp
z)b;AJX-(r$65!BIm!idsiaz2eew~@Q?$F1NrC}w0fI+|$V=CdWY|9~k_wTQd
zDzBFGvhgQkwuP7y@Hj*+19-r3SMl%&7Y+47`4aZl7DDA0i_%O{z{&RVxXP_y!vld~iCnBOIs<
z8d(rJEz;W1_>r*y&FyVJzegWGeq3Ki{+0_9gDReDq_x535CN$S6&$+#bJd@pH#XMb
zpiuCL&cR4D?<46B9#{D8ln^mTrFv?-O`0a&@e}3E={R(klX@&-q9~!F0*x!I&!iGS
zHXX#>?|?zM81*3PgdATu^jUR-1TpjKV#^zuu>e@cfDn#wUwSHYV*MbVMvr9HQBaH(
zXz|zgp~4=cNg@z&nRxs>yZgJE-5qq6fH6OG^l0T{lQP264USz=Fgp5RNQWq0yhCAPW$7z&#;GmA$*!T1@H)VD90
zCI4H0GYhwD04H8`cxzbRM5#jKg8#!_tJ$bEhd&Akrp12qwrz1w(`K3$S4E!IhFNT3
z3t;jH2K?znliS+x^%yH$Hm{|^#j|8c9=?xplsxR6#P!k+hcZBHW@l;0RYnKdp+FtL
z*DzwYJUI_W0aJa4Z?DmH03nPAiF#mkT>OrwxW7nRr!akH$3?=+S{xL^AS*Kqr6=ob
z?>Et#EpH)i*bOhtT2gwFK-;}{Mbc_g
zBg|>15>5{gw84RrOhESJxXb}4YYZj`z{mV8=6xN&w&Z>#po>*b^KxjmBfuAr!mkzu
z6~?WF9%EchG~!4klanihK*51YIND*q<
zi59F1ROQw2!<2>rwyhJ~30WsdB!_8(92OYY+7h3^@E1R`m-aT$4&w;h!|W&692(F7
zEU;D=gIx4woR@56WY$75);fG_>>_f62D6NuBo!1Cb{M`{DJv_x8T1>F%yccq@omQ7
z&1rTiD;0mK3?3Z%W%Og%{|8efCCe>bw8>_GP%gi4H%p#jus!a#$
zOfTlg2>p}52(juIiKG5cnh$Gh-|P$bLw2*hf#GqM}5QloUa--B1vtsK;@ckv4f?G4eySOT`F40pSf2tqGIi(jG1;
zDcRb^HS@f@%eVBWpVmTcC%YP!OEJz6
zqy7pIS-gj4Kp+ssfEK|TI6`TE)cN7T?|{}hJ%9#iK~r&<-6#PXrVhW_g5ri~{7EU%
zRC#))1dJ+~o^WCFi;9XKjuYA>ck;AL1TGq&xGmrV&YU?z?i8StAR16bR}>rW1O<`c
z52)8|!q3n&$AC;gzELNpTzqiUtGzR@uVZ&y4v|LHk8h$Tqaoh{01izb@5YPFN5H<6
z=`#($`YY7$o%~u*!ID5r9<;((w0yg(*fGh(mW3
zAo+LV$^o|fkgSQ^JhzGDdu@hyvHqPP>b-Un@RWJqt>#b;3Rh{zPS
z^^o$k**VwO2z}nfV7y%EmJk4A9Ux2?NI+dFVB;(jl|P{Wy32bfD=VuYB_+iUr)ONn
z7=na3Wu$Je-FyfPK9mlSJ|cY$X;qeYfOHGGF(2ive-G_m!65L$stMSuMAqy+SKK^7
znTe4`qX@{W_R1|P60-+eB#_(P+^jpCf{v)lxCI1Ee9Aop4`v`Y+(VdrYe()t+x6wW
zgF%o2+>Y3k3!?)j)^H%;mBB51Ku_o^4Ahb>Nx)aiY3R|BJ1~E*GI)3_p!Z|K8AEI-
z`?-)#?^c##GnhFKebV-{l$1NXcaREanAD4g*HajpUtCIWM4Kpj@gUC!n)y#hA!mEU
zgBRImWMqWo5SaFle*hO@Cy|Zuy71SN_iel?>=g};v-%vA7Vp+W{FdhVqNXqBvN4FB
z%%4BJ+bM9)?Q+;nl-?i1yjxr?x=z0MDEA16qJUKP=Nf=Z;nujN6np+0*y8Zv&Ahy?
zsyFQh@FWSn;VjIBv~n4ooW<90IFvGYvMrSp;MK$7j+jz`wYmDE_a_MY*cbdLkH%5Hp@@chaAK{`t#L|Ix#TcXS>sMQ!)cGqQM$L016vC*jtKFnp4e
z&@Kjl1LK(0+;V69P^w^SWHBVcfmJ;nNDgVjg6nsbnUG;
ziUzE60w+-cbHTo`|DoNP)w}H8Sl)muT!8|{EJMK;FOGquW>rpGQ9x3vH#EQ1Ntng%
zki=kbB&IgI@QWnl!)z2)`|D?Gj}$#5$j
z?K?hQ%LQ^^fRWAEebOWg6fBmbU0;W}KA~@hjbqm|RP8YAhXg4-P9TDN%S|
zRk~k||EP~cmliT@=xo5=?ka
z(t|OY?4-dm>_82}6pZYWbq=>!|$GNoM*k*2_{{!1>H!Lk=
zfo&;!yA~|VA{B!}L!v{@LY)cZDF#QnK6iAxT}bKcpE5B%>S@q=b=R8UQm$|B#b6r7
zz=QmRD5&?xnkTMqxdnvYQe86H%o;$>L-P9zSnboPexa>YvRl3&Z}@C(dj4Ke4;YmL
z_16;%3BzpwNkJ=Z*^TaibZ*FwOAw{r1#UWD34_f@BnB;aivdaK74F@1SlKwc(UhT;3}j(ud{fiXO43FLae8PXiqx-PDN_QbsA
z0ovPTBP|h&dg2Xp*}TatS1q_5HfVSlt9gePe*gVVrPwK`(r|W|HKZm&0@=tUKg)lC
z!NVty?*6_pM==A?n4@|fo_7z|_l
zBr6z4$Gwg~P}tvJN}_1MmDK5mpDn=|9K~pc63uIbLD-xoTO*NLz>ex@8SQf?1Zwio
zqOIC?>JPMMVTcF(+786=6Yt7(@g>c1q1{L$b`(AWV?Z(L9Z{#FK^cSt@sA=EK&v!|
zMV>K_iF7x}xwzHSvlpOkC<~DEd3Bg*7#*m^C$95In%Dk?(u}kaOm2ljJcTHC=Fs88
z<>+~_q6LUp0a5A0Aoim*^}Rh}8%2=F$}A<-8M2NsOg-uehMuHIn^y3{AEZn`?T_!5
zKI+^rD{F#3#|p*=jD$miCif`!nznj0U4E1gjQWE5)`8V1ka$M*!$Sr|UDF?|s26yu
zo|V`Rl#c7PiAf)oc4}agPEc#u6eCF+Cqsq8p8ZoG1?8l^nb7lY9w?DT2pXn5N4pb>
zrj2hHZY3y;U}LEIsKnvL#l=gSXtp&KHzX0mG88ilZ8|B{QqVOlRgOKTfe)q8B>an2NFT+Y+=Nm^WF`7Tl7~^+
zJ$NVN%CI}YOmaFLjdBF8c)pYdnty7Eh>b-&ps{
zz06OaJZ7;apHcSi8~J=}Vul~W-wvFOt#jrzkl%#L{ICDD_6plS4yE|t`kSgd5t>3K
zS5k4H6W&Dh5#}>sOvz6(`*uzE4F0C=`2MeGyE&lk-e!_3fCk<`2$SZMsGBt(QKi$0
z3>v`!M5wIf>;Q`A>+5|dv=N65z*$6L3m8^(u%Dq#UAdMZlJFxki=%*XA$B>!RF|J3
z=9-8$?g*?mHyJ2EseVo~x9pkc6-)?xi9vlj#KL5ZYN6yTbIkWJS)kAGn5zAY`+J-H
zD;qc4B=z4c1YN1oPvQVm0A#qyK}z{ppPu>5$=jwI0OaPD5F3g1Ws88oVUY3g0V(2v
z;Bo5SW>KG9VwwJ8?5+lev~tCbgzqc^zx6kcd%>p&pGBlF7(g&|c6~Qt>I5^WfQ{7)
z(h{TyvS||K11@@T&H$7`^+8ISA$A-r_6GQg0Q#wsUYrl(hzYh3Q)tK7pC97v27070
z*{F;spsZ*+J`Xx{gg~b)18juFeR?dZx4$9SD-jbu>1wR_L8nVtNI!S`1
zs&=Z!|HP|`8o=4IY}qo3NAuBk%(dJ_UI_&JEL*xX2odbCwM9Z?qER!147A3&K;xiW
z2Q7-RSx~URuy?SpX-Z=Mx2GyP`)_h?OGkCwgZzqUL5muN(TUB1xES<4%?sOLyoalj
z)DH41q~my3c%cs68QcOM2`g
zZQk!<^{oRS*Y^&hg6Q>~VUiX{Q(4M}@SqU0fxU)C95uA^5&Mnkvrzn?uMZ9;77v8o
zV7knGC{P5~_b3`YW*4|^*=x)ewjBI?XX4$-*55{bu$s+x69bZ4gjvmwZ@pvph8;3$)
zLLmdV`Jm7!^4oF*gEYoaA}KKnqxVsyGvodM1Et#a)`x>(I|2M%3d!FRq#Zxt*5Y%-+LRNj+Nmm8G&io3r`r_NP3kUATCwwni&O
z1!8tLE=|Y=e}g7jP(bAfo&&1|lQg!VdUR~8t~{;dp)bxG*&b<)p}-RFy(Xi0;2*Qw
zX!p=e$8T+PeV{o!il(>^dVh{X-QhIRiWRR6-1xp(cPgM2T*GwQ2Pk(>Yw)f%=Un6r
z?WonDL~IftFw(pf$$o$|jT_r{W8iSZM@iimb18i0#IO@W2X+9c5LFCc3UMhS1kRJ1
z-Q$+|w2mOB@i;ukraPwy+~ChD09;{T!|0bN!b-9uCSmIuz#Bfq!f}|Bpe$&ygA}LX
zWB{p3Ts{li2~bg$v_f|EChwzDWiZ##h{UfJM@SHK4Iry>wGnW@AtG;6v)O01RzJNT
zZ30=T9St)$jEKa+)`)tN;Uct@03S40jk7CQJX;Max{N(`c9QXc+l5
zQTCh*4a+~bz6#h#tS{>^DqNa@fb|og=u{-kM_ahge8|9RET!aTeIp~{n~_jM#|A(s
z!N{y4Y%>_4hDPZ)=_sPP9xy6{96^&fvF!R>ms-wTE{|Q#3Fr5DuhW6&`
z*c{ny!AOzLvvFfKpA(J}R_Kvy4rzq3V(qQm_X9bl0a(X7&HJ(Gr9G6|VOsvWIT5==
zOS087anOk3ptGaOh8F4wX%=x^Xn3k?SleH9sybw_t4T|Vo82_0|xK2sz*TM$s5y(-`~Y2?H+^dR-9flaO0
zFPB^JlD4|L`o^c?0r$7m@-4h1Jq%Cx!BL%|5p=;cam!>KnLmo99EulEpI3wHONW${
z;K<~FQ^UOC;?*Dg3`Xo|meL`vZibd;tGdPppMTAjgc_?YA2$AAY5B@5W<9Qj4_NL^
z;U|karum^ZD5|t$!DK53L21Cd#t&k;ll>UXyc$qTIqj`
z;dcJKqNQP6`b7P|OS(1S{Wi5T`Pnuhd?Vp2#L;1mzncL`U-Hq{0
zg*ud`GxeD+E^e*teR3Dl-Sfhp@I7rBMRAuq-xRMPUYthVqroi}tGm)#mdl_3jScZM
z?6ccxRuefSkFX3hqIl%R=S<-7DF%3AFlhq9q%8i3>lW?$*lw2#5KC80ARzkhuaMtK
z(~8v6h)@h|57?`5Ttxy!8cdGlOWKE)G(}o{6yCL~PB#SwCk&<}R&bH@lqTq3{!gHT
zFw5XM@r>+!R>wY}>7`&YRxpYL22fvySa^LW7jO)QE=(UWD{I_d+l^GZm~wurc6mf|0@8uvTI3ba7LFvsyTlx{&8Sp0gqGEn8jKGat1vAYT)
zaa9gDD4S7pA#56W6XH)NG0l1?{({=K3+uIb2m*RISnM#kUXhnr0)S_OK%^g5?KlW7
zl!$YJtFZ4JLanBWDY^4P*z=)DcA%(x!>oah{QXc0Lmy}&-7eNp-yHFJbKeZiKhs6m?bM;vZLwuVqpK1!|yCQaVjBfs!K1
z5&Lfl4ZHA2s-s-rfHMNiFq8)JFrz{Y6*=ociq%?lVT^<#0|ro0=~OS`q2gBU#SAKz
z>WO!sW_ldg*+hAnqM4x6a!6h!%O9n6#-6;WdAtulIV%n
zMMG-}(}W;5H??Gfs+^WcNudP4AC8CA3>&#!-4IUx!GS4PM%&oI0+pew2j%x883{?_
z#^F!qc@)8kT{N-INagYIO6=SUdjfHQ$C{+gSPw}!57*lDvtN~`%9Ax{$%+M80@wnO#o3S^0*q94~
ziDJAn&`Oxv!|UL>3sSBz5nMD;0&T#2^2_Em3abtE;O^+A76dAWVyV5zuxi5e9%4k98PX4#@rhvqe{|KkNZChe^4<
z6rdq|hrz^;!^KpSxcHk7MqC#o-5);p&Xq4g^zN^qaBedwdlN79D%#iE`ytqAOl_<7
z6#W1@OLJPTIy0l~_|A{hbjK%2mzEaJIc!!f5duy9`v0IdX1C8=X8iZT_5UD282`=R
z^!TCi{EQc1`d$2=zP{M*gc{g~zP?S@L*VA$p2B@)kz@wv-E@oYVhfl+qelaoeS9-N
z>WoKZML#GEr5io=*mlqMB5ypV7qDTl&5==lF5?pW$cv`iuB48C<-7{jVEDll<$8`Tx{684h0d
z-?_T;K^GFo@o{`PN9Y_wVi|_K32)!obuND-X4E>+yE^SjN0<-
z=c4hyL_YdMJ8>N|49{WAX3
zjoqKjoQ!ZNovrm;UYB1K4T9!Aj;%aDPRvy>W7F}Q+oX?|d`hbKEc!gv%Am|m9-eWb
zZq~adJ0<-$$CPZ|9It}iP~?Q~6?b?3wnp)6oi(o9Kq{u_E+`$t4-(CBn3i`Av0HiI
zMd?3pkQwwe%~e7EOA0WUkIV1!_diMg{jF=6t(J$qr-a-}pJ(=jz5Xj8to^T`=;OzI
zEQCD1oq-I#i@%5|K6C~WfzW$pQ(7*jz)&U25wy~k8PxPk<e?gh@3cRzTmk#X?wGLZ*s-*L?2_~bs}
zt}?QoT{XEg@%cR2baxJ=xsP1k|9J4n^5A)Y%CyftxR<$itwGzcoAj_@{N}XH3ArBz
z&+i`k^mcXO9qYp0P1c5O-ESy7oQ~B5chicbCUkQ2rV#W5|F_Gf#GJTOgGROH=DHayERRG>%bZF9umeSHPEy!5XmNfpx2I4@I8>6
zw_wpCfhSRbtF>Q%PV*|R{g3v({GsN&4V%H33C&|jCGyOSt&SqJmzo(dT5(RBN|cH=
z?fYWP*xEdq7VQl>)oG!9p&}{@(P@>UIHeVBQq+6hhk53if8c$8dHpgY)j8+${e14v
zeP8!=UH1d)ZBWNqx}hu7>I^c9D7kip$HW**RkQxMfzC=b8tNhFwKpQ?mX_JEPLvo*
zWB_dmQn{7mHi|oddqb(c(cXt%uMAWdx2^Lc?fgJtkcZwe1UQ3`av~y;C^iJcP}v~t
zp1^SHaWKwS0?Kha_xjJZw_93TiqUlmB@0ddGWUdusDgy~7I=vmGN6rxStwmZh+!Rm
z;llpx!pwM+eTo;&8P)+BylYB3>XX!t!x#JEMOnKj8DJ}yab*b(!RsndNNxungK6)%
zUglhkH2H0T+wTYKV3{&4nTUN)*%PXck5BYXAC|b5JdwNP6JJBWz!B-SJ9zty3fO!Z{
zdCt6^o|S~Q!S$cVQU;~88kpTn7g|3_tRJYKz`jJBj+uW|Vu4a;98&hXg=&
zks{qF><1L<<|JVH+U+|Sy#_4to|cjNt7$3*RlVq2g`g!NN)@epyHL&vUB61E3Kq<2
zM|1d*5OLKf9Y3qfcBS_+yJ93qcZ>7ddNsGAnaHq@PxCr>`mo3sw#7K-M8AVxdkc^J
z-*8a$(IYLuj0id)3ucdiFFyt^V))rcFoE%Zw;j+gBt|J3O>P_zffvgEQtY%;1@%g7xCj3Sr2kQle5L?7f(
zxeACCTa1f-^^v?bZ5}l+;gwhFW-?O0V_F+)wYBOL3LCP@36?JXg*?6h8d5#SECYTG
zY`|Ez=noJS!KT~~{ucWKy1f}t(Sc;vf|7&)Ztm=v-+w=fKu8Xukz!08>V}epD>0jy
z8U}egovM{M6t@7~qSgIHJ+5XsuN?j;^7z;*Owj?V_7b3!OSiVmmJ)r^`W
zLK*K&sQ7_X3;OsrwZ}2ljeI_Een@#lQ46KW&mRc^B_CL(gLCF9(af;N1P>uxc!sx^
z4CdCo92m#|$A!qW?5!w88gNSqiHQx2wbIcmxoBP|UvbIBSSSg+zQT~uG`U&;FfTnS20m!$EHEOGVzWW
z#nphELvfcbH=aQLRE<8;s_tTJT-|5Zt{@kl#!Z?&gAIIi6&RQ-evS^l{HiD9yUw)~
z1B>;x&?+i*!|_}sh1aY(PV+rjg?LNVU@CB)UbMZ0Vo(IC>}r$aWmfiWxm8`G4BAW8
zpqa1ke1R0Lq|pO3rciL%nPq5NiJ}PG775wSHtIK>G5_`%$N{?N`NF#NY~<83kOcpL
zAk1>aVcG>WJ3SLpr5p&4Q=jCxXdC3ZRJebbbUvd`x2)>c)htRFj>G2WI%hlF?
z6~pIChNT;}*J4u2Xi#`j$!O83H)iDEuLvN2I4w}h?B+k>py0=s4x0WN)_v_n*vjL;bp-#Qq99l*5PG#U
z%P!N#KBc-#gD-Xli*P*A?@@DyrV!JQ&+s7#}rOO@-X3T@BNB5FN|v{g~&pf%i{
z`w9wjEr*&~uR>dGS$(t@LeZjom|~Lg5YrR7)G;W{TenHd&&3XdP}U{>%lAd7yfzP$
zhmXWVTi578N?uMcOgOtmfR?T<_k*cYnPzA@8-d6<-4Fb8n(M;#HAP4C1!zd?nh|bd
z(;u#jHnq5tk|JahfE5#*zJ&&FR^tq;l%8}=xI*}0eeQ)QrV^$5X=`hz4;d-0&oC}|
zCh6z0<=_9lIf;IsR;DY)17C*K0NV#oo>3qbCZu{MQs#idmo!48-(fAN#O7@Oph#DS
zbSFCZfrH>0Fi4qN!9t76NPoi$))(G)sIa
z5`n}OS`&6{$kz{PWYkg-k)uB6qc<7vw||fCS5#3}fNxc!hd`17xEp)Z-ASz;JrJ+r
z-Eb6fAia~&)l5s?#Y}5nzyL0gElu9I4-;%DdI_2*L-%n0+X1J{SBjZIk1I!`aFUrx
zv(TTc$`&*)_w!C4&x|$M=PpxmNzzqz$CBF{-#(NQDxn*ZyS!8Hs!>j5huO(9Bmc9C
z=${%L{IPN;4{cw(aC
z8$?gP5PFS!Q!?iUwj-x7c1Oh}Ti10vw3y-i-L_=Wt26;l`+JtxdPcy@!y=oKeU*;S
zm2D;0X&iIkQdrheAB_C!bkrA4=Z3$Zt9w8lgjVqF34(CCOUqvq3c9Pz1N*M}IWTVz;l-}T4T
z0_V)H?v0D60{xaC3|G0s09g$cdA?H!-=OU2{`oG_Ub#3qu!
zS=2>ua9lfJk&mpCt&xolgpp|#mF?kYAJ;~JxPoi9jf+nPtFlAkR#X9%j>zK@>|68G
zFU?if`*^{2b+t138?uLsuBZv8E`mFN6DJF2`511tiG19xNXiZDy~BjQD-pkpG==R3
zk*U^@kTm+Wg%))O@VIUN`?xjfaht{a&fhCU7hcx$uUUIaUtZ8MDqWQkal9fpY-|v`<5$|i3BugHeyIR%&iRuW8YH2Y>jI%yRG2|@c&t_*@B0SN}i!@5Tkz`~1
zf(_;l+oaLJ+GMc30~L`TCEP&z$N01DpA>9U)hkEAKSh7F|2Ps5#&>V*k-qUD$hMBt
z>Sm?gq+vW^a)IWAktlv*j3o0Iq9F+gu~muBWYCR2s)zXb3NP-N!&5Z)omD6TH#?qv;uTl42MsWtPvJUngD6gn3f_|hdhza0x
zKIt%zXBA_Zk=b0rt0-i@P1^&2vaiQ;HP2{4nfmYns4?d3rjjwSwplfQsRK
zE=fnDO8eI44H{J;53F5_U9xY;FpYEG8zwX$rbSp)j7O8zHt;(iL_&BWk_9aAAx+$n
zOhjNJlUdo=FrYys=@zQ9wV2%bQBl|*hTsU{W;yM>Moag%S>)qLvU@*P*VMq7sy2NB
zT=7GgEz0@eAGFQkmj6Zb*)x-wK9Wj1vVQWr#J&iwm4rWLjsVo8@g2n6B^^DLZ@^4E
zr;HX{`=VET-)h8Uv`*S0Jb{{$v=fvo8o)wh+_8T8GuxHAD9{>10>fi
zBu33taw19{!?k{(*y&Hq48iwfd$MV|7#OAN9Fyu7S%p5Xte^OZ;T(f%of{ue`Tn1p
z9Sc&@|L2l;qXr%Y3tScyurJ5n3hZrDcz7xmMreFNF)H+92#&+Uf||F4UZv#-=-t7b
z5b5KW-|RGPjNZ~{FC=8M;P^HcE8bYN-XQerhwY$z(0CT+LHtUv{%7_NM7G-`x=RBzV9$|so4aKTEBDSAVZIVT(0y|
z`T7ankY-a^{{W4q9!01@LEB557KlnxrN
z2dbgq`BmyoSjC&Oy_lo8XLX~!K~+rT+xySLeCelPwvx~ntd7!|NS{U{ZLBlJIVdeU
zhzL+&MkDUwCPADI8o*#^<_<<1SbwCeLpGQO%e_Zka#icz-np2bK3yO5n4O^rN^pZN`v*T9-`#_TNV7~sf+c`d%!vyWqceP8g>p|b2#sHmroxAOyao&_2-B0V5)X87N3u__t>F?SH~Ns%M7QRY*jLVf{-!>#^q_yAhr$+
zf@pv+30L5s!W|mN>ND1J?usnINLWgGfpfJ+%+f(c5G0uo>6+mMWI!H1(88=GW2RC!yGqF59*|<}2%OZ0
z*qzDSsXiq$5D9Aw5^xU5O1N04pC=gR<=!PLXlP(x+F+p144;PAn@1m<1?71DFnEH4{O)t(WTCQP(Ta=NxgOBw4F76fpksxk_Kxua8oWSmrNoCw1P2bEwK
zO7EXEF__Qa3)JlFx+tO3HG+UD=*%z{qkBOEkoP0m7Sqe$t^g*&CtrZpB0~o>
zv*i5uU2C%!Ov-vWS$=)x=2La@nB$C8l9p*EZ)*)ppH&@T2YwZM3}MHNgJEO5Yh~Sz
zFCNa^8P(qvvVu2Kx48w>Q6jGPV+0xX%#l{4;N*Z_Vcg$wy{K#uu}H_=cf4Q!rCb)U
z*MAgMlSu>>mTvOgFjw;1Uh%GbX#cGiAFm21vbDe}JOK*hs42X|SpNbUbeJDm(FtPN
zdN?2VH$?tefk_7E+8KSbl8i(Yi8C8eZ=;n|hF7+%2ksqKK0D)!$5nK{T}tmMs-{<9
zO=%k{H>ERo*NthHzqAb2(<_`nT&kh~EFEcH@TQeqOdOjl-DFfX7JA)opr*F{_Z
zN2{SC)DH$vhq8j1#W(oqxKFs1qQRIwvRYm^V0V7W2KGypbh1|SgRC@h+x`zh{kLK!
z^S?#V&;w~)!Yry+XP(eG*Yj`O20b$tu6VTXvQW9kJ`mEl?m!Zw
zB7O-M2*oDV1YqGxdrV?0WxR(9T2`1e^V?^WTnG#SYuov}MpKmhD_&vl(WaI{m?ZoO
zp^xtrOmuaVb{4eRlT=RPYOY1TQyWHbS|n*X@_@piYX@FyQ-opT3>!6@prJtY25ZxE
z4^Y>{I4(VS0TF|==!uTt;p7yVVBZZJ#7t*Z)@Qqpm%OCF<$WsYw<%$OfQDApN4@VS
zsXbfH1(?6_t?n`dtihD+2m)t)+vhdts;EbnS(?7#W|G3Yuk>mHihQ28%h=`<8g$ja
zIok}9P!C0YPZ+te$4D_BZuZ+kiB)1%s&)m}@J}(>>H;XZ-Q3(zp;Iemz669AB$CHq
zS{q-Q4MdHW5r^Pl%CW>8i*H5r4)(rB)aysPcESySw2rxx0ArOrNYx%wZD+FYq){(cu1%C+kb1WdwU=6NnS
zcw5ugf`W3oaPqgRp5F^zj2J@B*0LHr3fCMS_?A|Um@>}V!3RFs+A$WKZNNJB55Qln
zji-cn(h(Y?@
zdt;#8%Q<8K^xoy+;gQ3`;5vKB*I3#?KsJpYnf#s}lW5J@`CH0tkkD15-^55M%g8RR
zZEQwACKRnii#ifRvqvCL*!~b!M7GLE^*zr1hQBX2*#Sk*QE*ezA0n^HXhgFGoK6uC
z*4@)}G1c_8u<;%;j@QbRcX+a$`5p=GonJ69w(tWIADu|
zD7CKsqRj2at3cdEF^0{9lTDDm@TlE;z!1`m7kO*>81ollYGF76D|&7BN;kdrleAso
zDV+@acoAw$7l5T~RQKeJoss&&#kZ*+b}W@_dn|!oR0F`)=AkB8t$UU{8F`z-(+;2*
zII?!b2IU(_C~c-PVK)-IDLOvhv_JAVa*?e-LbcoBqLP#Y%V=f^qL&!N$W99T)W|ur
z<};B03e0vlg~wyKoP@=GTUVwzsCS6NJfz`C&@3)S2YuyJBtMcAsQhr~n55QBX
z7@I^HcsywH0BIhAOGj+&6lP(=h@kZo#wF
z;}$xY{2(u1)R@C>hK4+bS>$DWjB3gW(Ml@0yHzgk!P)cY<+DqAR-W;%2O4U&GEV;=
z4|JQd^7Yn_$Gep8(ZE&Sl=x1x)cw$i*aaDdmOHg^&%C|`@wDE$@qZ;&b04N&>Pb7h
zU<{v6^qJpisFpj3G1hLcJ3AT+5u9?+MvrbkBYR`*y>FQTKyBQs
zgbZl}4zX-dKnTp-n}h=oc!A9)=FZC;-YU5lL?J?VvmL0L-=%o;6x2w>FI7}joKV4v
z;kwlU1k?bZa@gA^!@MlE-z@OsIJq`)>shGdvQ{#6Uh;VQhug`W2}z!o)H{%JAu!v=
zRsk3=KG?<-HggMyV2Om2uy<`{^+2QF0s>>gu%3ZI6pG*HZ=QBjyB#VI>bxMd0z>ln
z-#&U4*TGnp9m_^ON=jbUA-M8W0gAC)@Cc$G!gF;)io0(8dN?+<92TQ21@P&=!5G}%
zj;;!eMt5N(ATt1H2{|Dq!1A1I({XlnW}kuMnguUJgkfhNd#oLG6zysk!FderF8H6$
zBiD0sZG<`1u@9(BxF90;yYZ^#>zo
zo~CBvMF7}tDp32>RoxB?3Z#P*gd9_%3ykM3tR;zJgh%a$#w@c8*9^HwqK_J{^)biA
zQG<6lKx+0wP_3nvUo`~Fu%_k>=jA>74L$Xfckli}JT3x+$&(d-1=V+ju?Ier4@wds
zv)?lV5WdMa00qW@$5t%YM2{UpiMgjDyLI^kt3_7jb<$M*0{z@SH?j?T_C1A$9
zg@(?hs%4y{!F;8DRU$P(O-Ye5vEkueBXZDXI(~ri&q_Rrg7?BvyQ=$F(^eT&+jKE*
zOOAR~ZoyMI4GLClcOzIhM=o)gXhWmghG<+R{xTjW>|-n@M);nI%Bnm4iJ*E&Rhf%k~Up}1^?
zuX7jUxA7f4P!$djOG=)NTK@&prEaQL8DzlAEQl6<@;lBj4h@+A&7ecFl^I1;WcQDF=6D=
z={41t8VXIN2xfTcRvm`oHUvGzhYILN#I!%aZXgHT%gDN`3c2u1w}i^!^aE=~iwM+q
zj*G=rT8T)On*f28C89_*G6m4kA}u-g!%vQrHd#c~$1NxD*#dLwc9P9%wD3iNZqS-?
zx9$S@I5hCXi#5COs+t`JXvl|<98}0f%$@2)m7bQMIP-mPGeh(;gdfcUe^z4LI5Avn
zhpoel-Mx34_Xs+bU*g{q{K@@?d;toMe_8vxN%6d@U*Ir@Fd8PV%0Z<>coj0iy};8z
zgc%qfzBLaZ(JO?VCOmAga4+El05vAW#1@7BsX{)TWId}KKa6v=i{`FExdLf3l>b{O
zPQrK~<53|&h=_hf!A2MsShR?V&KcS3LaSwH#J?c#K6ucC3lW&;4)BV0;;4X!OH(dk
zR-me}b7vY}6MWJ1YjO^9aWtZrz7f9nx1HPSA@4ffN-U`Bakm8QKjXy#_kayTI)ZWmc|zb&UfUSVkdK0XLTf-jM+FG2ra}{fpGU
z{pYKoM*sUC=j{3WL*lFdKKK7$6w7d8?f<>yP5T~dE(W@b-i@c|CZATCS`^C!(VxWh>^%Y<%mA5gTrv!gCe=dU|`U
zgm$DWVYFX#kqlgPaL(fjxkyL=UFLQxlgR!$lrh-3t>BsK-zck&6%AzkTi9pIDI!>diZdYbK+qU9|)p|MHe?>35QL9%xH-|`|}}khSiQ)
z?_o0d3dl(28?s-E&~R6_DMX$zU13sSn*y+^Bu75d4$%$TmTf#ljRu{vxqHQFD|4;@
ztB^zv-_en_1$`^Q7soJs%U1~svFE!CR#DYUtxg)ZA~B1agFkWg{p50gr7LKBae&E5
zYzEq%TMDz==ehlE5ASbT><_DniTqS^fAG6pJw4~DFQBQw^yj7}2!cPNN$zQ+5(ux%
z?Epb6gzNF1_vQe4Oa)Eq2{*C)gy2orYoE`r%1`Kuon9<@31*UUBM_Bys7Z>{!?9l!
ztN4KV1|VYW_IX~5|J(Hz|l74B%-~A3a0>s1JI%+VY)?Gb#`gL
zWa1fo_PrXUr~+0G+Gu_+3|U_xqiK9LGa)ZOfen=0zX!bzWTEIEUxM|)=X)t7af7P{
z1c6An2m3f~(-S1WgEbY27EwKCK!(-_#MJ=AK`JRdJ-@-+QpB#CKPqYh?XJ;Wg1VP_79fPyjs=3QOAebeA72L!04Ec42Q`Btc0W3HN4`U3yBSK(
zqtpe0t6fp=0yOH;*=odKgWmEmJQ&t@Afu3wq>?2aRr45;
zv3jNgSPXq9uJrA=^=*NSk(tNf>?}~~qd;|plmzGKFMC&q^RyP%$n@}aJ#3=QE^kdQ
zEWZ5+w5VipRUaq=ZY3rqCT8RM?}vJj{&~vv*N-Gv#8N@McR<1tTSNUGuvHEMLY!_-
zGX?+x`?Y+#3fp&98A=DBL43z@4e3O!z{CwWK#?@O6?KVAk+IWUe=z4DTbFt#H@g*f
z5U^vaEcY6Q5CKJR=AEihV3iptSH4bHd1Im-ZzMxWnUw|_}Pj-MI+>+RW5`j)Q9Om_M
z#>*$W7m?cxN?-N|psRhkb<#{U;weeP!6V_!6PPlREAY*E->`nEn9EY5xTEqAt-yQ;
z;P*5jhZw_)y}Y2j-3nnd7~35Rk#4(=9AnGQp=bGqVObix2(q|;6%+%SIlmcN}QgY
zJPNY0PxrYE@ToZ>ISrWK7A9Xr^A?AR0=Q%u+72sjOg)9X2anRBP03sx+#6EgOe?DG
zv3*uv1`K}s+oX;F=zuyftC3wbihRco=ZaiG((RG3(PS%{9r*;K)3K@s!vXWs5{pkM
z&$;>HgS$`z`CyRi^*OPCB!7#&2PEy_s^^s)@E)@Dj
zST_EQ;UvG2a|p9u_CM_Xjk2#dzX1LO#{Y-rCN8pPNJQ=?lGiRu_4Rs*nK-3UM(_SS
zBfcNoMYYWD{m6(rIl}3l1B9bmlnYpps3?`UZ+EmZkYjLc(DTS`v!56va}Y#~NTUtF
zR>+kh^}%eBn?M&8PL#&Lvn(QV<@|FGFF
zt{ycSIfPqrd;wUb_ERVr6i`dF!A3!Ix&h?Kr;R`G7ZGU6#3`G;dXs2i_)q($sE@(C
zzTo(?PV5KbL-8C$PXCFzs2?Z3@R|2U+;l*$$y6@B^C8~7wdVD$Y!Xmldk$+O_bTGHgB6P%k`=Q2G(JUw@oD?V6KV>waA<6L1d1*0}
z2ztejs3e`W5yPvpzCmDY!M4}-2Dj%rWe8`M!6tc*>;w4b>-53N8;&`HmV&eUk-7jO
zF~Gq!b96-k#2!PRak;qo@QBCSQQt#VlXJZU4L(f~P%jt*#zBgD_6>ymeojVq14+{w
zaWv+<0|dS;#nr{dwDUB>9ari&SnTaws}f9-?QpuiggpkGz!<_w^f8kK&wi^FSNrtT
z@S}6m3K5Uka2{`tfWNMsnR}ByS?kC7EWnL;@YEV-ul`1f=>B$lzJ*WU2u>E8QC?Si
zVh8F$?qwem83MthVPY=c^xKib+_latVX46fj3+nO>&wrzfqH=)GHgK94FVA=_`e{4xKh^}r7(lI^5z(MZfN?=RKAke0Rj2c4A=kD~2WqL4&%Ny4;L!Z_zF?-aOFMyMVmoeUK(W3MAz#MqIwz31>7L5XP8KB4sEkhI~jjdf{{9
z|CRxV5$Xuw154I(7+k&I+z=q-$Dx(JiS|Mh6D~}HeF!lziTe(IdwUD@Ta-GQ0e8{3
zXy4NBS3FwBH?Y&o*z;kH4^GCm=c%YYQKl_eE
zJEiV^8o;?4MtF53zpS4|v?J>o;x$(_kHS%ef2!!B#GoQ``ve?eqVzyW!F#Q6!Z!G?
zH{?t+<6SH>*3Ox;ux-!IA6fjCB#vNRQlrCke?CtL
zvHI>0WLv1Zn7BcnVqmRX<~ZoU#G5ENU?3l^XAG;(dG1~O#V4r5E0Z{w2n5@Cq@56Q{N-`5G7s;_}pFT4ma9Qr~&KrYDs
z_M!9}x$X(?1~v5^3X*g4EXLb;dE%e+QC?Z{NKoY
zY)=D%I7!y7zP|PVi9QCa>P)yAkctbMh4wL
zFw!IQIw)k&7Ez;D!pNzVgUEA{eh*;3gdY*FV2%0Cxw*}GktYg;EsHN}kE5wmXX=T}
zoNr{Uy!mY>&u}6iIDEc!;JasVbV7AK%m^3a@~EP
zXDRh@r#d+iRp6+F_>&lzzYo@Y07ED4sp;VdtPF&Z
zc}?Oq;EzoO2T_ab1P{;QZ2lk2OQmt%H!8{4QnuibtUIXTkk_^y*Q3tGEw>_ODTcSV
zf7tmP3J~&Nj(pr9Py~Zqe;_(U<#WTO5p5&~R~po`Ey%ftya8xD2GxiGK#TLQf4mMF
zAlvbl3H%0Ql!>IF34PH83l~zQMeaxF^N*t+un|HqRuQ`Sf`*oc^5~&FLWY|H2XC@k
zLLe&b4q^-zm8)SMFF6aqgpUZVEO-f|&wwYJ42(j4T>ONxm+0xk#n<7E@%c}h7a?2-
zm!CEb`c6NBC>HsMF>nVkT;Jm$(xDQ=a41lH&Q15u6yZ;S3_q^!PRz0B+UfW2`SX)d
zp&S@OQIQTH32kH7bX_zC3y`R!RN~9}O*+9o?0$z?_Ffu}hg>orWMEu*8UQz1kS`Gyfs;odG+M_rE(gUu{EMZ~
zYeh@!MBHDn6aV9$pbG8pKNtSG>-k^$|2XldmHB^zh&Rm6lgoIzS1>MjC%uh58oE2P
Je>!yX{{W;O>e&DQ


From 1ddbc876d3fe4cdf1dbbcd1bc872deab98dfae37 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:09:31 +0100
Subject: [PATCH 110/129] make readme

---
 README.md | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index ad8b3b0..fa727c4 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/cropped-class4gl_small-1.png)](https://class4gl.eu)
+[![CLASS4GL Logo](https://class4gl.eu/wp-content/uploads/2019/01/cropped-class4gl_small-1.png)](https://class4gl.eu)
 
 _CLASS4GL_ (Chemistry Land-surface Atmosphere Soil Slab model for Global Studies) is a fast and easy interface to investigate the dynamics of the atmospheric boundary layer from weather balloons worldwide. General info and tutorials for using CLASS4GL are available at class4gl.eu, and video clips about the atmospheric boundary layer physics can be found on the [website of the original CLASS model](classmodel.github.io/).
 
@@ -29,16 +29,13 @@ In case you experience a problem or a bug, please don’t hesitate to contact us
 
 CLASS4GL employs the balloon soundings from the Integrated Global Radiosonde Archive (IGRA) to initialize and validate the CLASS model. The sounding data is supplemented with ancillary data to further constrain the model. Therefore, a default set of gridded global datasets from satellite imagery, reanalysis and and surveys have been used that span a period of 1981–2015. An complete overview of the datasets can be found in the table. However, the default set can be replaced by alternative datasets as long as they are provided in netCDF format.
 
-Schematic overview of CLASS4GL:
-[![N|Solid](https://class4gl.eu//wp-content/uploads/2019/01/image4-1024x794.png)](https://class4gl.eu)
+[![Schematic overview of CLASS4GL](https://class4gl.eu//wp-content/uploads/2019/01/image4-1024x794.png)](https://class4gl.eu)
 
 A CLASS4GL data package is available that can be directly used to perform and validate ABL model simulations and sensitivity experiments. The locations of the balloon soundings are performed for different climate regions as shown on the map.
 
+[![150 stations from IGRA of the reference dataset to perform and validate the ABL model simulations with CLASS4GL (see Sect. 2.2 of the CLASS4GL manuscript). The different climate classes are indicated with the colors according to the Köppen-Geiger climate classification. The markers indicate the locations of the atmospheric profiles from three observation campaigns (ie., HUMPPA, BLLAST and GOAMAZON)](https://class4gl.eu/wp-content/uploads/2019/01/image-1-480x300.png)](https://class4gl.eu)
 
-These are the 150 stations from IGRA of the reference dataset to perform and validate the ABL model simulations with CLASS4GL (see Sect. 2.2 of the CLASS4GL manuscript). The different climate classes are indicated with the colors according to the Köppen-Geiger climate classification. The markers indicate the locations of the atmospheric profiles from three observation campaigns (ie., HUMPPA, BLLAST and GOAMAZON): [![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/image-1-480x300.png)](https://class4gl.eu)
-
-Data library of CLASS4GL: 
-[![N|Solid](https://class4gl.eu/wp-content/uploads/2019/01/image-5-768x492.png)](https://class4gl.eu)
+[![Data library of CLASS4GL](https://class4gl.eu/wp-content/uploads/2019/01/image-5-768x492.png)](https://class4gl.eu)
 
 ### Reference
 H. Wouters, I. Y. Petrova, C. C. van Heerwaarden, J. Vilà-Guerau de Arellano, A. J. Teuling, J. A. Santanello, V. Meulenberg, D. G. Miralles. A novel framework to investigate atmospheric boundary layer dynamics from balloon soundings worldwide: CLASS4GL v1.0. In preparation.

From a07c2a37c27a4baeb661039ebd3f7e935b693a1f Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:11:20 +0100
Subject: [PATCH 111/129] make readme

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 9dc7e6b..9ced95d 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
 # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56
 setup(
         name='class4gl',
-        version='0.1.18',
+        version='0.1.19',
         license='gpl-3.0',        # https://help.github.com/articles/licensing-a-repository
         description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description
         author = 'Hendrik Wouters',                        # Type in your name

From 1ba4ab41ff43ed9d7a12f65da4c6bbb9c753dd20 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:17:25 +0100
Subject: [PATCH 112/129] make readme

---
 README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index fa727c4..95bafa7 100644
--- a/README.md
+++ b/README.md
@@ -29,13 +29,13 @@ In case you experience a problem or a bug, please don’t hesitate to contact us
 
 CLASS4GL employs the balloon soundings from the Integrated Global Radiosonde Archive (IGRA) to initialize and validate the CLASS model. The sounding data is supplemented with ancillary data to further constrain the model. Therefore, a default set of gridded global datasets from satellite imagery, reanalysis and and surveys have been used that span a period of 1981–2015. An complete overview of the datasets can be found in the table. However, the default set can be replaced by alternative datasets as long as they are provided in netCDF format.
 
-[![Schematic overview of CLASS4GL](https://class4gl.eu//wp-content/uploads/2019/01/image4-1024x794.png)](https://class4gl.eu)
+[Schematic overview of CLASS4GL](https://class4gl.eu//wp-content/uploads/2019/01/image4-1024x794.png)
 
 A CLASS4GL data package is available that can be directly used to perform and validate ABL model simulations and sensitivity experiments. The locations of the balloon soundings are performed for different climate regions as shown on the map.
 
-[![150 stations from IGRA of the reference dataset to perform and validate the ABL model simulations with CLASS4GL (see Sect. 2.2 of the CLASS4GL manuscript). The different climate classes are indicated with the colors according to the Köppen-Geiger climate classification. The markers indicate the locations of the atmospheric profiles from three observation campaigns (ie., HUMPPA, BLLAST and GOAMAZON)](https://class4gl.eu/wp-content/uploads/2019/01/image-1-480x300.png)](https://class4gl.eu)
+[150 stations from IGRA of the reference dataset to perform and validate the ABL model simulations with CLASS4GL (see Sect. 2.2 of the CLASS4GL manuscript). The different climate classes are indicated with the colors according to the Köppen-Geiger climate classification. The markers indicate the locations of the atmospheric profiles from three observation campaigns (ie., HUMPPA, BLLAST and GOAMAZON)](https://class4gl.eu/wp-content/uploads/2019/01/image-1-480x300.png)]
 
-[![Data library of CLASS4GL](https://class4gl.eu/wp-content/uploads/2019/01/image-5-768x492.png)](https://class4gl.eu)
+[Data library of CLASS4GL](https://class4gl.eu/wp-content/uploads/2019/01/image-5-768x492.png)
 
 ### Reference
 H. Wouters, I. Y. Petrova, C. C. van Heerwaarden, J. Vilà-Guerau de Arellano, A. J. Teuling, J. A. Santanello, V. Meulenberg, D. G. Miralles. A novel framework to investigate atmospheric boundary layer dynamics from balloon soundings worldwide: CLASS4GL v1.0. In preparation.

From ffe0ed4e4bd382194d4d899cfa2cc0d64d7f5e50 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:17:46 +0100
Subject: [PATCH 113/129] make readme

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 9ced95d..25784e7 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
 # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56
 setup(
         name='class4gl',
-        version='0.1.19',
+        version='0.1.20',
         license='gpl-3.0',        # https://help.github.com/articles/licensing-a-repository
         description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description
         author = 'Hendrik Wouters',                        # Type in your name

From 39bf91d6c28d852010f5b8486185dc4fff5cecf0 Mon Sep 17 00:00:00 2001
From: hendrikwout 
Date: Thu, 24 Jan 2019 19:19:26 +0100
Subject: [PATCH 114/129] make readme

---
 class4gl/Equirectangular_projection_SW.png | Bin 0 -> 1767055 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 class4gl/Equirectangular_projection_SW.png

diff --git a/class4gl/Equirectangular_projection_SW.png b/class4gl/Equirectangular_projection_SW.png
new file mode 100644
index 0000000000000000000000000000000000000000..047817a5b1772f4026fcf6297a8f4d0b72c1a519
GIT binary patch
literal 1767055
zcmZsiV{|4>6NaBy8{76~W81biwrxDIZETW_ZQHhO+jjEp`~N$0`plf_KXYoj?z+0G
zCR{;I93B=2761UiOG=0+0RYhdp22@WgZ{Im2m9m#02EIa!oms`Mn(XDxqo9ilmgmj
z##KAeF@V?%%W;bH_^(sS-+b^5Qro)P`3xC
z?{=+c+smyo;C2=B=TutNJnyF2(JQYVI{^C1Gbj{FSOfqK6CF`Aw@kvx!4a2E}+()|4g@NHt
z%YO(6e7?uNUw`=q$Z-=bP!0qd=LpX|K_lEm-GtrUes%M(f7=9si+`_w`RIXk$d^<;
zUhh+|s|nyj1B5^biNJ3F5`G{#!aj=>$GrzW@Ja?@y19EgFvzRNsw~iUW
z0PqK|vg%*g-W|IK&nkeMn-+EFfpFh9A9+y#h(3svABH~Y3W)7p8U{oGfKvc_2H*@r
z+JjdEg6D7Khq)uqca1FYBM!O%^i+T%4(tGIPyky2W&?oZ-!4Eq1M&bW?`PIS#Q-7$
z#@xeG16l#L-opnSKotj=^OGTh8i9iiRLTJ@fjkSK$U#4WZVU9$r^14#2q^BAxk35;
zLl^42hg+X}1rEXAXh-V?wdIFx06#HSP^dXETNolrC>oLM2v(iHHZlJR2O46bAZ9MD
z0)#WFrN3vc*$g-fSfzj#10W=Dwx7cwj{)Kj+XUVb_hFCAuNw*?1eN`u2rv*T^U|+Te;@Ii-9W+s~>>ak=v`<
zvE7Gs$hHT%TXrDn;$8>D4(San?*VV@Znt5;K?wx|cSOQSw8<8b36S}aT&c~e2B>~e
zcTg))Nl|H1=_+{@4k+9ySt=3aM-~6f=@5Sna7+#(zC&&dCm1j`gsIKskle`2%S|X?
zDu&G+%PT9!$itUlN-C#0#{vf^#~nu-XII>b
zM0$b#9J&&}MP_31ydQJ=J=ZbxF&`#>D%~=5m6rB(YwZT?M%RYNhWExf4<1huPdE=e
zk8%f=E`+Y{me3Z+7CxU3AF!+5tI|vQBkCjMT^BWkU(VWpwo
zpvOSwfYTt<03x|Dc>sACSrK_Dc{G{4BwyS{5=z2d0!=(lDuu+kWU)lEq}>Sg_{&Jr
zSVw$eJZmy9?K&lph>{+Yc$uvE=QQc{FJDz3C4aSEA!2C*1?1e4!asQ{xh=(H#b|}j
z>ZciXC5~~9op%Ya-4U@;$zv5Xsb!&OE{oW#3F+;ct!q(S#ciRU!Ea{pw(xe*IMJfA
zf|CN18R>eBWDO(@S5}wiv=)>W&nG?34=zm3V$P*5HZGAa{3i~lucxdhmFMGUlqb(v
zL_eb79pNe9SATT=IRCL6eGr`x^%i|Qv@|q0lr0-1Ln1@)N2o}wC{=bl?KV9)y)hl^
zDC8)0a$D<03$NMTJiMWy=4Y*TO+tfI15Qm*t!sT%^H$Sr-Dp!<6M9W_(`9{rgHvsJ
z{d(eygStef1{Mb7CDSFB6MK!P54zBkmGf8+!yzeURR_mN@oIsAnrlhm!xqScZX5O+WlhhO4tEZ?h
z&BG;NR%vszod6xkw0^a-K&7Gn3kP-H=OOYk>DyFO`V3v$QPM+`gU=DSaYC9nYAV_R
z3^B|pZN}C)wE^WM<%jZ>ZudNq@`EMSrSjwIEA9KPr#hAVhSI3YpY<1J9s;i!=Woke
zG%qx;H1WxlG?--Z+9-DZq=`yFLSRSFR_1)?9RG(n|zNL
z?GFdaKV%Xj{yw_5s4S+BCRBiKkhl6ulKimqB^Mh
zqPnPBu~O%g=BDvAW2kx8CG_z%IxISWvf^51+sb$3<7B;V<*f$4uCqI_cDw;-ByV81
zQ~WtS`1wb#L{A+79FYJIhp>wqmXiziIqc>o5R}0&(90V{za9es9Rm;$J`%N_=J$GK
z*EfFg8#zDL2RVR+^RpsRiD#nHWq}dyOR)cNw*@5(CMv{JOqL2+Mfza`nt$-!t+=Zgk34HoOME+w(wUH{
zbU8Q4BwOd=h<>Mzeuq|{G?$#HDpwLx_FC3%#bK^#j%{IeT5>XWDtQKndxKqvgB%UA
zUrnK0^l1uz)QZ8FGNp#I#KT6FfxeF14tx?tppNm8Q=2OL)mK$9T(VJfMTHneITd*_
zxzm3%eoc8S)Rg<#l?fj`YSlGShOCk`vLb&L58US$19G{!Y1@
z=#eD(hM6dh<*>^IQo&>HW2;s3stR$(?*@ymkD8O(mk-gYE`g7k@NRZXY`QjMt;w$3
zM{+wlslcMivuAQmiOPYc{HDdHQ7CW3bM9X*LoTf@=MT^id!F4-(R^*|+0Ptr;hTOR
zMEg5iGYj)EX3HVUS-_~14kWC>SlcMst_vSk?kwi`n>D++r|2uKSL$Ai6t|@7+-EJq
z6n>vS9*-6`PRrK!8)mhroujzM}2XCd(%o*6*PCnYJZC#r+
z^I}8tpf8YRe%^tI#9hR(Gk>ulD&T&h_$JQ?-ugr5lPFrv2h9}CFU}(}PB8bTt!gCw
zu3Yh2P2~KQSjKVfz`IMZ?=jpy&^+2r4-HKmd`sf42+J72AqYwv9tT?JBWXA8m7JLf
zr2LMk5W|;pOR`d_Dlsi1FAcSFw^%bjwW2)Oz>SB?)
z=-t24+TOm_WKOkL4PR5wttd*bJXAmO@akZ+OGTiQ5NuPiE
zM8s9|!*jrG`t$lvlUgVQnA6H&H9;>&_{t0BBvEoH7AkKSy^4h9W189<%b1qw`wZ2M
zg&1KT*APKC>F$&rTpdMJCK{ZWORf}M<=nwFH2l8*i-yD^O9
z`)ILJaw(L}&L-pX*UCFr3D+EF6&J7TFQ;&)MqY}2vE!y|zT?|5r=IfZ_wZ@y?O304
zkT8KJqF4R(xRRNGn$X>B5{CFWMcY|yN=-x4eX9Lr)DIM=#I-~-IT9rZxzp??IcdJc
zR9#@xqxSY|Zq#wwbUh^fgRb?c?FjYsWV`c|qqeSomy^Fk?j3qANs*z-t=i~uIeiuH
z&rZb@^(;r;lD;aXdZ~i+F31XvQg844;;JsbZsJ2z{d}9mH@e()XSW{ia<}b`bGt65
z?gsL5)~?C!%`g8i;a3EIxuRb(%ev1>)5h0{T}R^RpPaFYS>W&-Kh6*&`N?@ndDaPa
zJ-<8)z6U#JBWk7TmG$rmpVMnRD)~zv#4hNb(dJoh@k2f$`Cn?D9n4J6=bxuD>T#WL
z?{VR9BQvD=Q<}Rk5?BvUXg_dWGh2FFB;S6$2zyF0V|0a$gMiOl22&|V!~e&LSO)EPr!&dqMvm+KqCK#U+?#9tS{(C
zy}$v47+W6nT>!WOApJtj0YE?$$4iZzL-DS}<>!APs{vs11KR<02k;3XAwdoRUpi~@!F+{5%u|mRy+M=NM%#Q{z9YI0{
z@F)`CA`1iD1XQ*EqCV;htZQJPaWNOfR6zL-hC6sJv1J&<9{Ib9s01!kFdmUS6;_15
zoS=pR@oFKP(4_+Z188~(Mvm+WQV2Ov<<}o98JHr(djHmbA^qE$`Wl`Umle9-P#4tJ
zC~S!D2vEC8_G~TC+Ay}z&7tpo`MVZ3<2O<-I_~sdQ5SOTNJIg!z2R{p=EN09x?xlU
z*mdlcq7B4ZBt^(pVWec}3Aoge3*ubFf$`KM(j%6}xOdohV2)tHMDI$-NswcZhp|#Z
zhk2F?hzj%yP9?l75b02rVQk{S{QX(Z6C7u#H*`;IF#Qw8JS_YuW(t%<|JE3b;V6TY
zTA~yf+HlqZPeat2)LQmYp9e5kOz~jUzSEtUT|DP=Kd*o*3xB#kVM_%3*ltY-vgoH#7e
zRMF&%rcNb7+92Yj1LuXWH;KPom
znCGhJ{+rC3?c3rTEo1^DBV=ZXy)bFNc0aoqo>-Syw3yZq&5*@VKScw@FN#G9t0Kpu
z(IPNY5oXGIsd`o?&ilk8c{+2t7>xxDuV$5c9y?CkK?8L2MMHRv3{6ap>2--!vUQ$%
zlbXk-P9vbXnW4IIvpKz$@0!nqe?Rd;Za?zz$-(dAmUH7Bm6L{(pVwYXuhEfy(tpR}
zXzQ)wtd1EYjIM^ST6#CEH*H%!o!MRJ-L@ZQ?y2s6KQ7!%+-u({-HzR+KdaqMJ!Rc1
zKD3@NAD16w?6>a}-wgvTUohW%;b!2d;8$Q-;45Hwe$*khp+_M+LHl3{;iBL$VU3}u
zV{u_qV5gurVO62);&i!fJiTN>M}+l+`Ab}lEQ}y3jwy;Na?du+KFcX&L-XH~i2Xb=
zoY1VWur|UY{zYa>rpv=e=|kE}lgk;;$H?YDBgtC&6I<#@l0XVsa#ZrwVeZ!0jK=M(
zb)mZ0T8@`v(&KI8^pWMgA&VK*%yIfd=2^(;&KKE-@ovw$$>k{@@APk(cH73o#-=n#Y7u>g&edq*RQagcRJQ^ELj?0D
zCIPLN%ixZbS-i?@wBgfU2a2_%<>=JnO!@t?c6D@WdeT#BX%cH{WQw|;S-0+QK{8U@
z3Z`1ecAAbe=|;E5VVvRgTh02lTwE^pw)3R3t7Yk>ZEGa+lD1KY`hxh}TSrtAY=V|^YuaDdC@x)S)cmHs~
z^WIwG``lys=xjc3(;I93-)=46txxw|$dmC~m^k!p;)~+#!wVyP%9kd~w2kUI`)B)`
zs5q#+)vS8YvX-)Zp85Ajw_2UefyyDu^2M@6+1<>(>~C9VpD$4Bh>3*w{BqMRQ(hkj
zw-#6R*T;pcJ*$naHLa7cC9j#mEx~$`qtZJ{YfO%>ADADh7wBcl9jYOVd#p{3chwtu
z<=rq|R=+W|y6cJSXWL2}R$q1Yf!m0ign?Q^TBPeK>(#Cou3J}J*FG!vD;b}s?@ku{
z=DwHeFV&NQC4rg!E&ZSQ1=}AtdPsl|dXNuOz=R%T4v{io(KiQR>W8imCE?2{;5Px1
z=f^<=F#@>zCIP`0AY=fQ0g>)GqKEsJ+Wq*EeguIj3QTmH6P?RLv_n%t(D-TY9O)69
zK)(4G_rU4n{Q>0zz3)loN59bX4f6vnfY1<-QvjfWnFyjQz_EbS2>@r{AIO;n9|$Q%
z!07|Y^~qNNTVUKhKK59D0sMY&JM8wDpu`v3XN}+m523IZ#E4YTTp*c2OgUa9cx9ni
z!n`vKEHK_7T|&Trc^`HN7S(UGkHw(Kmu5M>Cb0&t#_%^?1@;mY5Cn**<$u|$XwSvL
zu8mR!!XDb*7q|;^^TVC717Qn+FsR#~zSj-e92`{$DL05gh=Vi$Su!j#%y9sJfOG)d
z5VbG
zyt2Mhyux`w90Nlc&D{UGFSjq{aM=R0!9@^zJZ!bUc?0tz3$L
z0-d6K(nJ9&9mY+fO$=VlP6%(NPuVvM$e)2Ef&9Y6{iq=}VtiuUL(W4OL!uOr6ktUh
zMV>_#rabjDPE=0V_f&8LB$IoS3DZnHb>lQ6X9MY@YrSnlLsOZgFmgZq)8$?t5PfUU*&{U)o-~?&|L|?kMlJ53(_O
zV5nfgVbS1t&{t5=(45gGki3XqM8h;jep3ma6wPoO1k|M4m)NJ>lt7b_(CyJB&@&O$
z(bABek$4I=XgLc0;&`a4seHA&
zu35>QeT%IDAv)yI;?#Kum?yp1o_)xs4m)K*o{)!`Lw
z6m-<~)$diy)iRWi6`z%=RkG)IW^Z1VJuqf|%v3CEFQP2M&bQ@1>S(EPi?!>#r^ckq
zmC1Ggl3JE{4!9~IsLblr4c`*?w)N)!g#5IFWP+?UehmHb2w(qWI0`FDJBlVMC2BA#
zKZ@`VfZ~k;Q)Wn3LxxxOTxNOlU{YxUa)Nr|azc6%lWu^{@Tccb#uT*_@)RevJT*o&
z-lgAGxaUge}#BdA@lRc<=DodI@*m_+0tS`%wQ-{B
zhi0?IDYY!&Y&g#6yN{shYoyO0Sk8O#b=(+*cGU2ej8U3`qxH&vk|D%pMY%v^3$w6t`fdSlPD&=~8i9XE;
zZ?3Y#Eb1$AEmq#%-8$^1?5zGKde1%19~~aWu1OZM}XMUgg_UL`aLZJ93en)>He$IY38-HfMmw$h|LS{NH4+yTVIQent@0YStX67WoZKZ(sopkUvD
z&u!3ULFqZJC&(s==kumgR2Mm`+NtkQL&)sz~O`#c6oBsXT)D5
zUPOCkh~*80kp<(0XC-c<4AW3VN5xC;L&3(V+NrjczewtklEbG4!1q)QDR05h1qMY6
zr4QwkB^N~=6D&j1gEa%!Qn?b}5FT;QgmH9nP}8Z>%UM2ItQwpfEE~RzQQRfk1;h)K
zi|z}@3(pI*@`?+Wa}aas^L&NAGyMnjhvIq^h8ae~^pi{cU7@CtyfKK~Za9T9xj6oqjy^y=;Tg*g(hlz0j1QL2gpa>#+>6(%{9kRf?PK^=
z`ZfFZ8u|wMH!K;<0-OwN9|So>EQ~2^6U-FcEVKo5Fmy6BJgg1eJ+u}~F*G}@e5Af8
znuwR^od`j=bwq;5q-dfjuIP67?m*HY=wQTP$iUM8#3244^8jI(Z@hjyL%c+SdBO{o
zeoDd-k<}wg9(uJvVfreuj1_V;*xkcK(_@hsB<)gmHyMg?WPEFUuYqEc+SD
z5H>f~GS)Z4EYm3Ct2vEnx-pmakRgWg(+r;3m9@|OM^E2G-h|<%(m2z!W3N;%P5)VE
zW6-0;`7&F8F$z}wVPkYXe(io!WFyda+Jw;Pd&7U7c*D
zSpJvPuT*U{Z5Qq8R$D@5ezjvrCnpyxmtt2$SHhc}I|-8W?kKaE7ME#!mI1UZCn9Z378k85)5~`8=?``66Qa29Mmt~VVV*eR7hvY
zYnVg$t|1qn@O|g8s4=1VgpqOKR8cFD&fym^dofb6Z$e8S+>|GB1DWoG`Pj;+klax<7Nxae@NB6no1N$;(%Tel;(j4V#
zx$?hNa`_UGlJCw#FB6~6F%S2r{8Me2-kx$-8;|!v_YTLHOchLR%%V(@b#WSs8rk%}
zwNKip9BavNGw@S8llJ47<7l(BfZ?}uteAG^(0fLz*dCF*Nc&w>z$UKZS#32j`_G$ChYn
z%g!uQ&ZJpa(ihS<*KXDZac$Wt&L_^Y7Rnc!*}~Wtw#YU+<|G!~yI?G(%-kW3T5yTRF
z^C2_VesNtqE|12IF0u>Sb#=hIxAPqGH*#FEciv1rZ}tJRpYI91_$GWGUOD*pnVG*w
zkLN$kvZvwdyXxVbe4HZhzwV{sK;XtB{Uh~K0{)Rg1IPFGtkM(xw5
z)xk31c3!tlW%lpw)v(Q#&8YQ{SKhDpuh;>??xut$gr>x1L^mbZ7+2*dzs^K{|LoHD
z9iKD$8}%~~_c%{zyeuBj7$u)`--7Cf1^zdAcR0XEMSgy?4?tiT&?9&=JKNPWJL``L
z=xGGW`TXRc{`G}Gh79;6DcCbtxz(TdeTm)r4dDQ$LZK?c$o8)hj>t$tNfrR`qyPZ?
zf&hTGez_|HUPly;6inclyj;dpJwQt
z;E=?t;|@H3=Lzk;yS;;ZM}(D5)vFTG#8M+E;`Tz8yL~tK-3_4+<^DaZqB2&R|8rr5wOFjR%
z&2o2LDFReWi#&m3;8%!gqveBG!+Tf1U3ssDY(&qt4<
zlYBG70t)fFj>;`_=L?dvK*}8msQl@mD}1D&r1;;1fq2SlzO!Hrd-y_7Cfz!5q<;}#
z(GjC{A8IUxM40x$g>Ip}dZ;$04WE8`Dm`Lx0F@
z21ZMtCqa36!@UUTZa6|Mblxg7CBVACvrnGV_;Rn0mW=zK(R8z$azEJBE7{u{f%FwR
z1Vi$UP-Dgb%EWXCc)Y`;uSE9aT+a54L^fscg@YExEN|pGTSBeGKtpXPSp&S`Qv^%(
z?3jC%VRaeTPx4>#MER#P#f4sWM-JqYvZ(RDqcEOBKYUIRyN==sFopIk
z%l3=;Dp}IDNJymN=Z`xuP>Dof|drp31iLCM1_s6*SxNvg_$7QJTvx_6P_|&pC
zW?<_nYlmmq7ef?^r^lkKqU0}VjUw29|~{W4pv?1Akhg(DsU@;
zvHD!TT69~1AJpU$q*$xffjfvATYhHhq0v)tsDFd1R9rd~2%4*zL;jLsis)wTODweg
z(|<~e_iIvj`EIA#Vn>tUgAG2t{`8zhBCjirUdMw2Ki(C(DkqTuE`YxI=}Tnb#J?eY
zPFI32uJY{7TUZ34ZJufrxu+pX?PWs@Ui}5?EFP*joE$Y6#dtMwoUF%nta3M^ipszd
z2V0h9ULtz!#Q?-xJR5FJ!~NJd$G6wLAU?g?Q%AXl3zuK25@xMxI|a`{OIq_l_?wsb
zJXX3BZ>w-?fR&ZsKaGE>Y?q;v-Eo;Ji$6sW_&6q*JwDKz_TDCNX
z!pZSo!Tj}rCyaW5skHFd6?u&cx0n0MapBmbC+`uC)*PWvH<99BHgZi}oinL5*P$ms
zTSz&wWcQ}8C2?`Y{e9gl004sjf0e|<-NF^7qLW0sXtJVZ-6Cln?B;^p
zqsiAs9)vCZ%7TU;zQ!bKZQ%u1CAA>sY6m$+HBUiqxJJ~!#g3A~|C4C>^z{n8
z0jKR(Wi58)PIL)Ye8_ElF0-yO;`DteRR>7^M*|K}&I1QQMfn3{|-jj7vXNeX{(uFX?jS@c}kFgku-?@OtZ>udf8I6`Xjcx{s^
z9oZDw3&TPhN+Vje%4SKiBa1HWU(C!yKxIN|)rgn`=L$)Py3EG{9ef%LjM6|kQFwVk
zgg$cp1d~I8)unMsra^9W)dUmex2<|c*$ZAPWQ}4*X6O%_y
z4AGez+C~zv@(J$X*<>N?-DXiH$cm*tx9Is%uTihq@%)Ptiza*IB2dYCfZ*cy
z;u!k`74beD&%|d1YAo!W*fWuQhpL3I5X6ZkR7F_KURY2+>eg^idd~mY5gvS~
zIuflNF1OQ40ZJtMj2(yt@fkU;FSA+K;EMrz%AT52WX7lb!WKSOijnUKAS;vkUu?b3
zmuI2ak|m`_n)mb&UYE4rOh?!wtVD4FXCE5b99GEL>2C{eyK*&=3gl>z#939^T*~({(-)(rTD@n8a>hECMr;Xg@qpqJ
zK25soQfA|fOBXqdpi>-~pV<{aUOMh)Z2IXQ4uzIF%*33k+IA%3JkYQsDDBT0bq#f@
z<}d9Tvs5eYzWR);Cplj9&~pK7FbT59U2hyhw0GBg1D6u(<-Nc2vVZdV%jKY+fOGwy
zn=gRtW4n4~!RMx=@zX^78xSfxhAZ;fGO7(;BVmO&8rXgmU%=2`o&gF$_*{3E&g?wC<
zv+)aO6r~2S3&IXy?22T9aVv1DB|?|UKvUz3TQ)cLdf;)OxQN&xC<=+6vFOV6Lb&Jc
zUT5m`m8|hXu6((vD35c3wr$*#Mb5%@#tww
zwthRKD7Mw3h!YmUk&{YAHQZIxGZ?nTW8ysm5^$2!1QwMU67$5*vGV?Ddk7E!@0o^?{uRJEt&;${q^d)-XpAzu;O+o+#ZW~0R
zU5{02abws1##?gO}gHV4Flu!SN9Z_zB{>h|Ls|cxe(9%D-ZO;jF3QD^Y%X^Z8dJ#G$(R(dw!&_xgwD-M5`cNJ-bY;W(3Z!I
zY_gs|nCGW3iZH{IX|)`)+)3Qk^LnTlL7ih5x2n%EmBPT0zp7hx_gb?^PKg2}t%^$e
zlb&pSouw2=zfg09aSypG`8*{J@b&WmvP(e
z&HZ-M`2Ak(`=kd;aN~;Dvn4vU+pz_<={N`{U>L-yy(tX5G)R&Vk@+E@5{BR|T(e#&
zSXNf%)#Azx-OxOo0oUDm>iqmx?yWV5RapxV`Z0tB8ZM7r^;H{L!G
zT$E0>sQH2fMBdHdW-=1sE+fgeI@RzNB-!1{G5%XZd8MvyC0bC8c(&`;+^uQ|XHQ$f
zcRDyp{ptwO{R!4)W(b#)vT~2#vIrWS8@Yf7xqcbMsLeOu-!aeCS_nW~!rFBYpPBL9
zr47sG=yuK(gw01?n$V%H{Z5Ndgkz8Im}hTKzwzB)&#k}=;P7e?RP5H_vM>fgixvO0
z;O4bkeX8-X{nB?4IMw2}4ph@;cVxfUXT35g;A+?B`v}k7V4Gm0n>l*O+jr#~v&-ZO
z@I-+Eg6556j*5kZ%;0g&oO-H*kP+y4en8HaPiZ?!Z-8*|x@-GHvAz|RLvx~EgdxNn
zdUmAkxXJE%q+s#n$(*GB&66vv^-XGSC!(q_iD2H339`xV8SvET6DsB9RNwvl!oULF
zoPGXG^R7$r7HD~GXzx28+F!eEM<3dGN?NwY0{zcl>6e8WiNOM}@tN>G_Y!Yf*_;x4
zIyfKyq_I23^nl2<`cou04Dk$a?x?L98*gafJ!OMK8~BxS|K)G@d{xBxl`mEQ3JXsX
zf_Q-!kptQ@Gl`C2H(KT{G1A4`<%Y2Wyt~PvunAc`1+`2$=;5uorba^NGB)$eaPtD#5`fk~v5YDaG44>+ru`gjXBE7j1tEI4Mk((6=bN)(?dp%Cng~WrDCIf!C!DRZ>k!t{TDERD8}2IrTg|8ku4xVq@iAudX~82
zTQgPUKpP>a!pi8U7OGJJm?ohL2|$<|&T$Ycd_)r$!P0j{r^YhMq56ouxD|_ZH4q1D
zGxx1%f(96y0T-OvZf+!ja1t#XH5oL9)F-DYs%kW0FwP{AAx${ip!b6vz(ro`hx@tFzr(R
zByfwAzUZd)ojg-@ct(m4zy5~p=Tu=`G*u1InyFw4O)r6QgX_B`7C3n-fbo&B6qZI*7tb2dpYj&N^_}(QjQcv8MeF))m8)!}t<_y>Y>9iX;F|
z5&_k)-EXYW>ER1^prO0C*vYk(eANN|pv(VZ6KQY}bop!u#@+9E<$f#_)J}#;l5AS+
zdzYB~f$O_(*YVtWZ8Id!=G(Z$iB8dLA3lu)32$j
zTE3W`nYp>KQE}duMneqc>^QmkbLy$Q3@0^lN3=vEJNjvwol{@Ln^}fvp)2`t(^ftI
zN2su`fOzy~GNkI|qo+m+JS5n4XCqsXz5Tq@=BhVkjBdA&x3(OUIEqrCzPmZ
z45L)ords>_Y0#RTjZNwJ)g$}+(_Ea1M){)B;;#M5g0GQGnD0%j51hH!46_q0{m{OH
zhWexelmC@W30KZ#E^Eno+v8d9TanKp=!60|Pa!z0;#|zAv;T-h&9Hgh#7ZoGwxBAiegaR3
z2yewPj1i?*BN7X(cKzz%{d0WW>iG@m4YV--<(%o%sasuj-m7n)bN-zTE>z11I(-yjXkoeD`T(zu~m~HYQ}EfrR%yTHy*-x-9(+t
z-fp_60S5uMtF?!Rhf;E-YkMn2MZk1P%d+03R)CDP8p-6}d(}*nRp)B{G?|dYU0B4L
zaM}IsQ)fVwSpLzH{GtBV^~R(#C1(8EFp`i$e5S^OvjS8IbwJo}D|iRitbeaws_ScF
za;W?Geyg*@Q{(CM)bo`+;3=)2^O~kupaX103M(N+ZeM-^@`Kn{-1BrhpUxHYJ(f0i
zUfNs(x8DI5m16q2QjiMncXIFn3=YR&zRq8pgq@Uw&ehQ2WsEu9_q9;p8QUI4M%t+q
zN4-A#(o)@xpDWyQyiP_BgX8S!N!EFCA{DnYfgY_J4?f1o{!umGSM_i@8Q~d)J>+v}riWZ7|e?G~fHrUHkN(d&~rlxdFo1GP`I(1W+t8RAi81
zVWNxF-`m~$1K$t9xR_%5!b=w*WZ?JLyq=EURuARsOT0o9F-Q-7a!lkBWYT%F;kQH%
zvy0cfY~L%hSN!jNCX7;o3z{P$ttNr6gfmZZWMvM1P;xS>-20b+CXqMDEc#mGyAv^d;k10pR}WNxB-x!JoRdK6NrfbLA9*
zO4;{$S+DwUv!{KciKT3c7IvXZ)vhpROuIoY!ksg%4#lUJ;ZbR`fvMpS#ZFD0g$qP@p`@X097|-wDq_F0nSdYu(CM
zLM!_Sk4(QAq`5z1lxp#WO5r!^-FDr~H@7J5YYNk|$)0D}t!$XrF56eSbBF-!Ki0;^
zT-oZdhz|NC-742M(yb?>FinA5!$oi-=|5z}NI{=Mp`KDI?M$%=+n)A*9S8i|BDy)~
zj~i?!;5WYxw^{$BP$$Ldvm>pKsJVD7EZaeA{&8xh#r
zs5ea9{^hHzC2rSt*8WdBbkT;o>c4yWwl^1v%fGqlK5VqS^_!n<|3}+fMzz^>-NL~s
z8mzblEiT2~p-?<%aV^0L#kIIgarXkjp@HHa916vyv}n;3cR1Y7d*0_8?{m(-GrlqM
zYiC@0=Nfyjwbz<+u9ZRQmWW7v$U}Jm<>ME>-~a5Wf^*GdWYdFGF2kqKEivtLQ|-21
zg*k}Tr;mrW@@g_pfC|>UDr&I8v5=|YBq2W?!JBch0F#z5j7zFML6FGT%XNm|KvR-6
zJz}$`EEJa~@M%$}tMJxJLJmtWidckw5bd9zhsk~qWydC3i~wgbN*c$OEA*d%d+lcf
z^sKm4%Lzqt+(#cuM}vl6PuxyKH(>~1J>IcJ<|N`fJ#?Q=>!J2w{1ApBoinPR@6J7-b6tuvL=_kLmH6*Hb`;%ro|;WO&HnNKG~2IDmFiC
zf5d}tUH5b+N6borh+}e}E|F*RL(j17Qo{30Em
zh@Z=mauLDur1<3b0Pc1|D~`My@DeVQNDwLjrl26w$2#QN*K>&0e{RZ|;B_zXV@#Po
zNZ~B}^OEyxMmg<*vR;0V>g>Cf4ryKrAPpVg;Q-e&)Lm
z--XsusUzSgZ<(Un6enx4MBb6Ap^tk%zH58S
zQ{Z(v|D)-|dONdt&$g@TT~HG%-p)=Sb>zWqP>^x?-STt((b?^#_2<$1H`4RTTQboh
zr(JaWE~eOve|#C(>1Lnski%3MSxa-Betat&{L3~jSJ|s6e$C?O)2t$6g3u(
z?`2>2gx}QfzvmaL`-E@YpXm_znF?^S+jleEX%i-7p}YcUpHe
zQWRmOtO6K}290!SeBT5-`E8gDexdm5MKzrQk1O)0pN+cIYaF?97A_uLpB~Ztf;uv6Le=qWnJ$>DjKf
zk{X`lv;T7#pOPLkHG4_BWjk7>3FH5G3g94%tVQxGZ#&^?5Oe16(|=efqezkg_=;10
z3UE13_2&QfJP5?P-b^gfy%+R9ZTop5DxPO}0}vJWGglE%`d9it*QTOrybCV7i8q&d
z!*lzk?8Se6&*;O>$J;pVGzxI#m;=(~e#U0{k0nNm()v*TV@$>E&;M;L(O2S0|M_l_
z>R9;yIW|(1^8Yd=MiQS5K|V1c%HS2Dvgc#=|I1weKm5G^o^oApboL%Y8chENOK!Zc
z6o)8F4Rihi~y`(6PZpdeuI(Tv(k1E(<(7BdNq>w&=r;V%Te
z1#)F@d*3j!Q-ifj2rPeNIq9IXVy!+t6)*2}$5b*C3d$4_a41SRzJ93}!j_}_4yjKO
zQ)~sZ0vZJ{w*_f~Er_DS-|8kH0c{Q9iHk;wi~OFB?%(8dybnW{`&?yULqxuu`VtUq
zU#@$V?{hHIPSBj&Kvt4I7zK!cXeYMWa*HGyTJAVly3(LjjVz_aGZR+Xe2y~E*R}gh
z$tp9;-{{{*vONHchA$j;mJw(giV+YXE2oa|C+&JrV|`0E(36j-(xPBavEhv!a2!xJ
zSF9p$=RsY@KUT-ZtLOlbFV#fW6{kXk9-`6ig{Rf8ZxFip!sP%x~puXUpHN=yEAqz*5QA@N1&C;?mjOHiL<
zBaRGnv84$A%aAvgDt{w73bn0@OkvKVS2;g+1Dz&hF&!;%t4b<#lYL+Aq7aZ*(37Ff
z>ga+9Q>vh{GC=7xdHJ-~+AMn(qhFYs(}j$T(wLkFVL$~`s0V9Y9&pqkYt^uycXk?s
zorjIC)zH
zj=jMhEHy#nVJ37^*#8!?r`olBuu9SejsLAC1Jh6UOjk%WKnHvHq;EHKsD6M#4Yz&)
ztNEgt+ze4rx_+CbPFs7u54(Gv=ixsUzjI>@H
z*`sV%tI1!K3PuG}wXPfkqR}KtXpd{AF@u5l%59TX7UHZ9L5*&rmL_N;Y$rBv`LKEixJv
z%uCE?i~hP;j|e$Dij0xW>Ulp6-fDum;W3;Q3WTy~6Y&k?RUg3ix9nF>sD(!4n6G1?
zSmuijeC{8|$6@m5phdZj@YOxLiKtbW!cD;h5uYg8j%
z>2H`g{ZOXv7{yDW&25BXhoo$%H||(tGNS0NpzqM#)zzy|u5Dv8^rgBvkIn@qJEpG4
zprf?w=29hyg$mR%RN6L}PveCfS@JhH8vL&IaI8#-aLnH(`dJOp>gxe(^;baob^y3+
zlOkh!R)u8l2UsZbse#ti@|}sv<{WnOTLApgqYg0Y>8AnDccsF~Ga^d)nC40JfC+!(
zB&?5K1;^^1OOQ`y-40JvG*qxnbA&ewLy|)r5r7yBuA$ei
z56P043!?z<#Z8dV5lX}Rd9gyQapr!>X);;nY(E&ykzM>miNH)3xdvL5iW>n;$(b28
zapmQx7PPIsb#%?k&rFs=mPGXR)6gdt4FNLIfO>ph3PQUORsg~KrhaGdLKWWG^IyC6
zNPUDznkf>Bb7Y&G9F$?0%$_)M)r|DQ`l!*epE#1-N&4!c?|!n$Um=s-F`!=gN(KE`
zZRBJiVhv?VkPQ2^WSeW8V)Ma(ErlYz&leO=FDnoMOeHa#55Yv^e5aQIR?XWLW##X$
zy4J#(nVcBUl%>hLnr>4=TkFtCBeI%P{F&N=DhJ0@3#T(Oj8-_7sC<8;=~JTuA;Kx8
zNfcE4LkhnkTj_7y_4%@9W^8;?A$Z%_8&7$5WGU!{Pn|R7zt^E}2D96(OyhK^9&tTi
zoC!#j@^;qL==$5|?(sBRW@qMY?tI)y`?$F3zb-oG0NVUewH9B}827mB>PNUgxEC(%
z`v?~h9E6+SZEUL^Ep2Qf?!K$F`+3~#dx|8?(&}zQ^wV4Ry{&{zY1A7Z$LBAS8yQz+5woBzHHeAtzL{}a7!t&EzNleUli3|-^B
z7@g2Yt%hoWIt1B?zR5hO8LF4k>jFX+$Vy-xdI`3z-JlOl1M!-P=yEyRSq@~n6D^R_r^A4!SN%WrKldkV
zeR>gxsU3IrA~YUMmtDcgLkF4>{oYlHvMP@y%|HW8wg7?p)wGm}iD9MV+jJEcdSMmh
zaaUzwNXC2#Au>1$tMin3*1nHzK<3_4lYp||z)z}|XAcJ%tFzgjG5#3JmK891eE
z$s&Z(hOrXKuat
z^CL^T?}sEk_hb;E(r|b>dgsQ$g3$RtVoAs|b*sDi6Y#@D2S-_n=$qp^gtb
z80$e7u-Njc+zre#o@f=FW09_Q(W@HWi
z?L)5K{MW6quGhw$F>G2c65QMpj7cvW4y?f{l;^@EP>s^+$wFam46aG^NEVsq;TV!9
z9%%#xw{6b57UxXkLt~uQj=okaTlm1C+$~7Lb}wyrE#qP;w2pP8)^kKxyp)!?rVa~N
z^>$B12h{7x54l0~Vb95$mV7O~O~6+Zu-xRCG@F3@Kx4l!Uxf)V1;N3n5yUCo#*hT#
znehB*rtLw}z?=xe(hf(59cDV=XsJmwY8mL3C?WxC(h>+ZU)yt~may}Zk>#xPx(++D
z=s%iT(8PU}^}=@fs7P%sUc_cVYFM4kF-E+>OJ^xC%5^4#{1s0N(|MIkZf^99=&Z>@
zfa^h-SF)LB%2BWdee+~d<#pid=No>CDT&h_EBCdOp+p>voG~H2^vK8f`2DOii1DYX
znVWbgsg-POekHwr>tlnb=+>-O=f~=>y#}#qNdX+nP5+P9Uk>J(Ln#+%OmmNRY9{wm
z5W-#c4UHgCpQAp?RY+d0v&-#KOXt&}mqY-(4M*&~r@kRGDcG#z8DV@~_^P738{A(QSf@Mz11`ON!7c&c8&F&U4+0
z*48@L)>cnVapp_hnNJ@-^*jvD0c{Nf|F%oMbH)hHd&T^tCjX6}1&x|<=(H=;n47EG
z#j)18dc9&ozmB`x^Ed8FzFV^iKJ!E6aYeS~^bb~ZZQe;6UPUl2y(XqLSp*u6G%-CU
zXjlXgX8RfUat8?fd1_Jf@7$>+iJ0b}%(JWJQnc<$drOIgfOi@xDv6FaDluvRtN3iD
z3AK`h_a!}FNK`#;zr`Momr+*C%Njw-mS%}YLx}sqHj4�D>q{62{}+c(CJZVzb#e
zZlng^)N98D5IVo>78MXkYgu+sGij(yXRmkSwr2L_%K|x^F*S7O7?BjsA5^iIkU7k-
z$#cIcj!+Xx2KLRa9?sjJFQ>(lI&*aU|Gv1G;H2_dHL8OCyaOT=2%JPT5)<y|#5v4yGVQK(AXKka
zAej2)q#<-P8umABJ<}29t5g5Q<`&bmN=r!HR$2WSBB=52TI+g}K-~8=nbA1ca9#{3
zOd9cr=iL2zJ{ReBJbF&=X!V*et>>lw{=%Qw*^Cv^qwNTac2zr<0U0Y9t}Gs&f9V)r
zzEL$vahmO454@-niZJ$cKU-}0F(kXI8d}wqw^w4ZU;fN@y32KPBgVPw&LfQ#W=*Rk
zDwL|KgiO7UP93|Yns|bu_xJYpex5h1Zo?AQESG`-=$JT;jtR}p&0p+p6}0};)4WA5
zqsBFeYT8y^U|va?o8bJ~xP7om%4Gs@-5GAkx(UZUWj}zl8s)s*X}k=;9*U|Lq1o{X
zJRZo(noMOfgB)^woXU4pkfGi%=P@u@THNZL(@JU`e7f0vydYvJ_*UP{S0k
zG@Lu|t+g$iH%}tK!P6J|0})WVaI;D44}vF7dM#rDEQU|k1GmqDws%dD%;T`>GdX<2
zN2nZm#f(@i2q~k};&|a?Rn-Jy?f
zCi0qQun9FgISGw@X#Fo_<@KNjVrOEy+B$_%?4}BDD*&NuMbW($uCdEGk}5z(1~%)
za!39+mUo0x28GO%g&tvOTKwN`Y}JFn6LI-Qxgj1{351-Km~=ok3}a&*eu=`X=_c&l
z){4Ju9Q)NMbZmfZ=kKi34#CLtxNJe&@O`*gsNQ^Ot2
z%`u|)BbDop8PD%8Z}WkAh}s~XFwGih_H
zlvwWyyxEUety+;2=?0svQanbNzt}vjMw7hC)o9C
z(>6zgO`G!O(qNR3s-~tUEm?~fMf+ckgYF_}%|zY4t{;EJ#y}(H?mm5!zgwQsHtdU(
zSX25sO%)(`y4m70@Zy}#uLzJA%UJ@93{xF$-RN@!t`O^Oo-t%$o`
zl?QD$bzi{FZVqRj9*g$c)*i;IT;8qbFX~Oxi)Pz2Q(0NuY6SiXMTmY_4RaXTFd53*
z7%vZeP*oDvtcpU_YI1L|0m{^v>Vrq5bzehyBQSArFgE?QAJU|5%6E6|FoT217iu{(
z0{`YmYG@2425vFX^5GD?zc|%54ZQi2`~zEI%m9@%>qCbacD*7JMlfJ;98fGtaS+8y
zNLE~mA+RxtRIf`Y3fB4x3iB#4D=lRMR4ddKNVz>dr2PRJOwFt3y`Q-7
zT~BFovJD915AFfb&C>e$`&b7tG232B)>2sXHJ1iw0{r=%j;VFL>7MgE&T_*U9TEVd{gnBAH3xK*1As=0
zOR7fRv=vT?FSOK*q4oABjUJ_wQc9eEGVa)sb0*;hY~5L8bIb{A|Ep{BgpuMD+*K76
z#qO*GVY#r?@_`_qE%HpkZZ`tXH(71`Y%Sb{+depjH>OiyOco|YiQbel>!g4~@buO@
z7jGx;H13xYCDe2Qm>$D?kla^eMkR4Wn_~;LdKX*KEcwGt9ZNHf99HN0b*6|5-!?7G!h9<|8s+N9VQYo6r`$sa~?9hNT2*?1v0ODQ-$=8A8XdCL&s4b+9iPD
zUZ@aA7tR5tuVf%iCmYp{j7~D@M+JR=)irVO4`}syziymQh%CWz%}QykE^=?uQF>|H
z(u|%kA*OctktVod-Wr}HD}de~)dv)qMKQw&rPjD8a^$aD6J-4n3{6s@oY
zZV@3KlCP5&xT_z|cMEQ2;wf_n!##rneQ(Ps^@SL(QqLSs@z4*9twvX%Y9e&#K-XJl_&oNuL6uGdMS@MA?EWfEuC=TGtB$
z&35ihx|X~=0A6*BrslAZ
z5pj!4@XI`ovg>~>zR8+sQt%UWnef`J#X&C+dlL2X`+M3f8MLv;uzs^hRGxT_Nq;Nq
zegrV!p7Osat1?~@O7ieMva2(FxS`aa*Q*V>{-lHOyX#jn7B#ECo^&<;V!Qi?5s7DJ
zmL70FTkw#t(cN*s{uoVyJdzN%f2;~|q>nShtpHIt)6kE)0{v8yF
zlaq5|Qk_<;nKc+0y9r^ZNX+9-eZR+lWjAi$wuzFw`Q-(miVlSiOOGtVeHAqer|8ll
z?gb9G`Rhsyk2sICfoP+(bZHbSb^f3!^k*tOs9~X+uWr@umPi={!aIzbpLIg`ssQrB9&Bh-c;GPI%<;M
zZ&brZozxO=l%Ukaeq=sC8EaoXt6tK_W>e(4&%$75Wa^=2B{<-~mov0%XZX^;0({yL
zJX2Z=$mB?riPeMXCK@@)3%DDSx7Mz_k+6!BwZvo&b*tWFt+$%QdG1|KkL(b~d72)!
zZ(8u;Go#3_%ORA@d`1w_;<4#IFTWxBug)noc)|fs5t)%-6BU%FIg5P`nDW9Q6bj{0
z&@0PJ2!8mD0jhwWkw0fBG?GRd=~V`32(iyd#ZXzS;YpRq)WNlb;x}EqZP<6|sCTv&
zJDo!qbPG108As@o|3yf-@J^W1_H~ICS6&L{+Qlp(~L
zKKfehU~`+k%WcHQ#;&ObAP0-1R63F9JY?;uW^hLh&tIm)n$t*St>Z-8cjDafq%n)0
zsb0JRqcNU0GpM}^sDP$$5R}iINVR@mkR8bYawe`1-p%EW$jWH}CKrN=#W$(4Dx#Dp
z)2aKhH07dZg&hbAkwC$rArM*xYjGNU^w7b*$HIvL7sHm`P_DlTXKp$7B4b
zzxY;u24CIKFv>6i)^XTcBbrTI4HiT)0CpPyu_cxoi4Lk90V|T7g$D;3j;)=q)Z<8E
zYu6dN)_%x)gAWU@Kp5-GiUhnONO|5&uAv(BUk6nRL3+nwU*3;^52pX^6jM
zZNIdM*rF(REGrm1SFQa<)iHn9c*U60I5p+-;BK7hal%*QPQ9v>mf<)IIWMafvr0X`
zBXwIxlHdH}pY&RU6sHp+QFc$`HcuFIjN<3tkD@6hf3Fq%ZLF}%9k{VaMO4Cx_AG!a
z38qUx2@Sj$9d+@4bSJH=!*kfP=`6XK-JbSly){nBD(Jk8d)(-LIF3B_yZVR-z|U(9
z`cP{wDY!c+c{99XsN2f?U5vI%v_kTZs$2BiDecqQ6pp`S&{8#yJz^pV)(aR_vNsyHVEy;MQ4P8=<{G@ON6Ecz|Cleq~n;Er`h*ek(kILH*D-7X6m~6EGVoY(5!+9tU}b)*C@|x-ucdJJ
zLU{%WOF1s;z}#Q;YqP6-f86YsY+UFD0!m^ZXE+YNewdw6O%mS
zJ3eD7|ALrXGQ~{4R6#vfIkejOnKQ`&MDb%P)051i+RBTmoG&X=h1$;o4q!l(hVoWH?QWI3_HAMM^-@&V~EspN7*yu+gMx?=z(jrJtK
zaNe4|z1F?=08Gno;eatf5&`$i9|hhEcXr=M?8Hi-)d$}Z9wn@xMM47d9$+jMqxHOU
zbsTT8EHgX%VlGnfCn|GP`y8~GYK>7XT#@<`93%xvir#>W!Pu1&p6Z9RldZFb1?~j4
znFI>xuc4w*f;oM*Ik@XY!rjAowuPhW;Y73DeNZzc;_F{@5q{xZ1RkVTU85#4hD43SI%Q7=OTzcBlxG(g#E0V{t52s{bU}q;6oAxeQ
z?fUSr>|Y+_@7z3BAc(2Q@S1d5Sr#2tcr>SQ;;KOGu`}_WtR30L=}eDbmlo+rFWJTu
z;=Z=MG@_&0f8s4pgBQ7iIqfGUAt50ku-$2A`^+t;!?;gzf|=ZE#8K(zzYuj@+U#KQ
z_#{IXTR8vPi|La)RcBR1#MQeWsaNS(iB56g_-rui|{>9GVazvl7OPqQq
z66Chy-7e+jLpWbmv6RZ-6zVWQ_SW{Sk;({Ho;fV;=;MS(1%#8`Dzh4#X0X5uR}Qy;
z$t>=?*}z#yQ-X+YiT3WWx~+%^5SH*z?#7*l^+HQ6JNvx*3*%yz)-@nx|;Oo5bZ6sGd
z9@h=_1-}Fl!H{`AVGkC6fb=B1QAs7*)JjLpFVD1nu`p%uJz
z*OO|)`keIOcmHqjWICY5U5J*@!i>4K6U{`y8mp6FLtggFSAk3%G^%%1j5}cc%y%C*
zZ!cw&Ta;rRysMk5QpTk2^SuJDySyY8r;eopal%eYyPDe!(X(>)1CIO3g8s&KPdmV#
z5sydwj-#S&7$;|9;Bi&m)0SPAK|*t)j^|hV_j(--4W3_J5^0}Rdia6nUw(M@%-=$9
zeVqeoP~ChY-TJ4$9=0}hy06~WdW9&nnPL2f^|}V|n3@`QH+GyJVsqmz`|ot=exw&Aqnjp-s==1!o*oZ?XEIlq$GvY^@kg%F5@3LbDK03`m3i=~jmgg_uQdJyrVY5T2!G%?bE
zfc23SclWa>vcxJ)jOgBv=F;|`yKaF^=GoFnOKo_QJ3yXaZ=K@l>8`W$NHF_Bw!s+v
z!VHCl9bFQV-5r-{LZ*IB0jDs^xeV^=w0$_+aAO>_>y=Zw;WZE=JU#IlRzH8VO3cml
z8#*aDvprCfy6ojp3TF##+Lu93*&qGkVkYW?jOC4Vb|uw{YFx#j-Y2
zRTs3~Xa9Ez_S=0dMQ2E(%RAs;O8n_;_$WZ<2MdSP$B(z;8|O2Q2sw?8nyL!Sa7enp
zrm91UZPz=tL$B`5A|(x4A(
zy-%~&Eky_2W`rLdXj#gd)sS4>@b+HiQ{|(syJO7Ngg6Oc0ySjB`ZTT1-^Tgm&qMhX
zfG&cq_b;NEiS9W$?$o<6{@VJM<0$yz
z-=zXyuA%yJF00Ih`Mm8X7*(;$hm-ev
zHN;V%auiBK$xcV_3Y9S4Cv+q@E310y(i+jzy8q`m}tiN3xE2)PL_IKazL-6xDo(@kK_K|xZd+u8i-uIuT?eF_C^2Gw~
zhqHbt#r+zYU|7rgNjDl!38^XevTbe(un90x)aN#HP(WC|hM)U(w$-9&)tH8U9zkRF
zesA+ns1RE{w^RJ*tDB;o?d9w$hdQ@_7YP=+GgtA#-p6bkSsSm9pP_;W7+XHhH8oW)
z8U(U*f=$rTs-hI1t3ozVp@3X>(99O80|+-hC#6ygPDWAuY{_IoOGQEFfSvn;(2-D&qe0^c9VeNvkoRXFlW_C!(a*iPcPfp^X2%twrX`6
zv1P;#@cK_qy;AzF?7U`*ob-Vvxt;&{w)aLrF25~R8E7nB;9B1_z(Xy;m%kv$;$jI_
z#l8A%A0_;cAiUv}Yu8funp|mDeL_lnrQHSU?)?7b`aCuI_w{rj{A5V1H-2%3xgM%6w?cd;^!y5rT1FR
zq}-+yX(`(mHS9uvI`O3zMfm(|&J@dkIHP5;$K=bt2nT%6eih6JcW~zs5^~@At)!0e
zZi(v2rTZKk5wJ1RB6Uq?Q;~FW+az`S`eBJBA-~)EA!59m-Gm}z3a@GxPAPSh!`^+e
z59_|Olk_B)Q70#gleX|EqY|=tBz|TWTB%89IFo6KQ9X
z!bE*A6Q4ZI#`wg$@nSLZs_fT#6De=Yn2PnPF7mHBSg_{j=bcx-eK_4xNjmonK+bJI
zcAEFY1P?bJKBxI43Jrmq@SsO-sV3a6*RR$W7n|oVh*@GM8`Q;x90)qQ=~<3dSVrJU10F8@kDL;d{e9^>ba8u_z|
z?_Z>%k;3^{BSx~UPR;KBu)GZXa-q?63GXpj&l1*^=hbc?nb7EXeT`w(`ZHyh_HZXL
zKT~AqOXAXR2PVmD=L&V$L{=Gt;`Pkv?O4C4kYb6tF2>wq`~nTvc?!b+dO+8LY?sF0
zesB2D)lZ&x{Jgx75BgEtd8);Z5#>X!=Grk=yg$jW74pEJPN$
z^c`xL!Q`pcV`m+CHVK?`ycdy7DyN+DY9aDOl=d`0Kx(6NquyV}c``n@Fp5yNn%j0%
z#oRnOMDqF%3O*+_<=t<(INCoemwg+MoK3TbQ*7>ct$gFl*3X0x-dgLKkgLXMvJPkC
zIFSZ(%If!N&n|^koWbAO+@Bh+q3hRg-r
z9q>3>E_t=_Bs@QT*@VdATfa&6nt0i{$98{{81QYv%H>^LY>Fm+N4299aT7LaON*_M
zLM{Cpq%Z1U@;LBa{OT_)WjWPX_(dv?4Fk`wlV>8Mz8mj*@c2ESHjF5x=N46I8K%e;
zYYzRgx*>IPM@;UkW33tOZD^5X7TGHM@&It!l)JO&$@JDeFM4r|vOJvZ^#ST(3~yUC
zrdH@7$7csjdV6rXPd=hX>fHkDxsrGOd{NNFdWX4Hvq_tmzA1gq+^59X)f(2&|jA*m#K2pK6
z)t2QSoq^r!W`>=Ia7d=xk>XOlsviH}RSzSr!u-cu~JBCW4A6d*5w8=6C+
zUabS!@e_*+d76a_2PAJEulqjB(S&FbHYspB)jiFy*S*TRlK4A(pX_r{Y)H(B`^;XHGX1)?6%xk$qKoFWV;7nHZZ=gXc-8c+y~!fggXbuKsmmp?$Znt^qSLt*VejVbrhCLF2d7
zwRD(Y;@L2=!3)*7qAutNR2gxtEweUV&dFI(f~INYX9oP8{h)iAw%x1IdCO)qSh3R@
z_p)^Pr;+>SJZ7fHl6GB=@zcz6m}sIw{_s=hOzQL#ViGzLNz+jWK7VF&L)5OYlM4@D
zSQq}t6O>PFl?Crttea8k)jYEU)6N(@s6Z4BEPJ}ZMh?X`V!|yoB(su7-z6;a8Qa2>lh#UoX1gP-WHAg
zEGqux{P7A6ZnI)auXkfx+y2-{bcqs82SSZVeKE)3xePnq=54MyBL3}sMxI(V^DT7r
zclwu@GB1fs74erZXpDkogJA$VWCcgQzihzZ-}bZ4EiBr3-L9co8R_Xpj>l5Xn6lm1
zJ?5`F+#ZZDCBy1PBc$urGgD(e6?C|@nv&xRVq|dP&y$kwOi@WK8;eSA|4BjIez&Y^
z;aP)_rdVo1RLo4*9%&`dZek6E7JfRuO&^u6pDfm+OCxtK)cl
zl6+8_x(vKm3M@?XU>{W@Zd|oiiNV5uWgar>*}so(6Lm$KN1
z35JEEv;w$VHreEhHut3X&C&iH8I274gGGfEIq44_WYs2Nq4sN`srwI~2rH`Jj7mG0
zni!hwPc`$c5Z?S+Qu@FQf;*)aJYCd@-w!abXyoFRs4z>9=Z?e;VdVRsF0Rh{YauVQ
z8`*Us{7{CmW-a~donB#n)Q;Gzys0#e)fyC^8dJB3v*#I&LX{(--{ln!~c4mo26}8Bd?ZKA4sA6rE-PR^_QZ~XzY{kf@lt?2GL45mN4xfA@4=AfM
zhNd;ggNEQlf!~{TJ&8r(B!lT#;V>^Wow}R_JY;0d@0bg5i=^Z-wPxLckM}pGRbI!d
ztkvDX9)*+0zl}ZmpwZ9&N(2vS0jbX!h=D
z`j{U>Nm)XNEFA@e!E55&(tmsrb(%d{|9#u}j#szqUiZ^P#z5=N>1Xe9{v`v8IPG$T
zu{yuLw3ii%7(vKN2(sS-+xqv&j4nYskR{0W%UX|)QLXSl2vyU+5ULwyod$WRA~6kM
zSh^;Pf;P_`Ii5EJMqU)Y_F6^@TsS^YhEnv4f!x-t*-b1O)cjY5Py5x7VxoHEgnQFY
zo#ex;_80xi+ta}V_q-BX(FRwa04KS=3{$c0iJP*cku(bdJztMZ4|s#|c{8evWBb~a
z@dc!R|5V@Qek(R3q4ja@x2gtru7Q8s4~n(xPvx{CE#DU1@U|d!uer9yHPIF@MKmk7
zG2M9RUL=KZmO?w$2#uzUGfZ1~j8=%Z33vJ~_uC7+I~GPz4jtQ73E-sJ?q5mFLo}-;
z7%CiqB-6r0mCnuv2Pz7+-+VnT7N->$7ORKy)cw9m5GOBXRRT-wmdFwlU!pN$Rfnb>fqt8bk(xj%Te=Vetsu1??WytKQ6hsc91YGCt%LW5BI^jGNL$H{t}
z(aEfE%pH#b0~MNKz|&Q7Cj0tyw7)>x%P!{+dT&SPCTec>MDo37-c-4X(?~rn-|UY~
zanocuf7luD=Ch_D6I8J*%%!3H{I81T+qmf9b
zmC;kI)Wh!N#fwz55#0yaHsw$vEiEp_n&%J?CEffNOEq@$A_oK5dO*>#GN
z`!vt)W+bHgCFeEjRM&amKLFIh
zg`l8dl>JuK^S^>*ZTFxSrKs9H9?p;-#SbcUji
z6c2T3!}IN66KUp@58PWMV)+wyU&Zg&7No9j#ufyvcJ1nJv2)mGJ>3V*UlrUhifQB@
zQQdq>3|Cq?IA@6sKf}dMMj@4lP5=f6aJDoUs
zw0E4^KtxAsfotzT?}#&RfUCy*H>$!F6BdGFcR|tk3T-zy;qMZ*e&FWz0(a{D(pg4E
zqyT?4?_}$$5yX8cj=5#U$%I;dt06_%p-#W-{qI*1#)J}x)S6RHZs*6;WO*j~gti}2
z!N50X_JU4SR>=*LN7ExJ-FOZ1AMwV#;74q^tNB7Sy
zh^L8}s}4Qyk905Unqpm+3_l1R3?FgZPY%ER+7`cr^qUSV
zoB`#F-h`Ib%T!LMs;Nef#3*7V@}k<^k|n_|0p8DEkUEFg>~W6xU(K=eF)Pc`=iFX5
zA5{VeRYYFv9Q6X{6W{)EYad?jZxaN4nr*cBGx;+Pgk{U6vOE)?Da>}z(X`!P-76mv
zMLz6|C)YigJS}&jL~HC?pOP)>vQwsScAFo5J9Xa^$(`7;?rn3B2HRhnzU7uWD+{k2
zWDh;DO}-h2UySVioPKtjC$1Kzz7J()t!@^JVuJODAwiD5TIX*F3Px1l*otX5amyoeoJ=~sEn)9OL~L$_SIVk!G!
z2r;o~gxZ3uMH&LGf(B!4IL|Rj7vZf(b-ql?MmO|ArpHkdquyXkvL{ySLtVQ0-0~H!
z7fON6mmCNE%t{oc3j+zhp?a1>D+37xhy80AExj72D%oZ!lBQjpCIixp(lR;7v86*;
z5*%6osOxLE7ZMN%VJp?g_rHh%AxP<^we-N9-}jtexzU4ks=gJ5yB&R8gLerPv!QNs
z5U6bP@Aj2y?<$$g?z|zwU8@lCi+DNe4N4jOc^I}`2KiqA>p&F0*MiHcyd_zyC6*mv
zNh7*yvWTEkQIL#Wa1sN9z#3JQ$$&^%sW~-75djHy*Bi5xy0DTYNxu0~+nOXv@=cZ_
zh8RU)j8lSmC6%0ARp=xH0M;3mW_33!0$7Ep0j!F6iB%vXXAB_G7F)GM0!b*#A%XhV
zZMK&uo0BhJh=2K&pMRjApXg6bG#}^kzkTty-}k`7fARZ1^8HUe-gGM}5WW8|KlK|w
z^X!GI!`@7HI;Tm+qW~h93Fgs6_!FN_~}cU;@P7R%xKHkA^k9#b{h
zE^Rb!y6s%od%3)|G90f_=?4>RjHQ=%$_~Er+WFP>^{4K?f6>b6#o5{EUaaakHTU^f
zF8Rskpd1v&^|Q{^OP6aa$9l(NE8p^!RB@VJKg*9T9a*~nc%GqF{LV99Ieqp-$5vbn
zUA{1`WP7sS?wD4Ky^l(P9b3@#ix>apum1YgvCkLpd-3x1<>hOC{9_;b%O8H%>8ZAB
zZhi!gL6By_U&IJ5R1igW*B*vMt-Uvnn-yZ4F50FGY2T2&o&E*yD{I*pBe3el5F2JF
z5N!m8rJtQU+H!2Iy>Mgg%69qHm#@rsd&fGhsdm3J-M1~*a(1@cDWkO^^|LR0c6EDs!Wri>so0qke%sD=g)%Y0kLPlCvSmN|f%iZ6;#ZIM
z27l{M{b&)xh0*q;DtmQ&V{`qtS8lv|fnHr5U)>I`S^RfD^()`=wo{*c_qz%QW8I0R
z6F&GRyS=KCva`7llMq8w6xdC9tf~@Xp65sMe12|zX=$l&nTSA?t=UWZw7tE3{rdGh
z&yOEJUKGVG-7(Gm_UTinMTK`#(t`G7Ro?3(=yto!7g$o4}ZJj+v*YryqIbk!DX#
zPfv?TRaJZYeJ|_L-ciux6iw$GLD-)1ecQ
zo9Pe{6`2Kj50)|FzkKQPfAyJHpC3spkKa9){~zA-zE3=O>detj%)KB3m=xGaBNo|F
zR6)iO7*}O0wzy3SQTS-!DPF#G{leAdQ^(FMO-()b>hs*?sv-eV$e7y;Sye4)C#|#S?vqdzB7N`l
z&Flw=00f2}%ry`0I68GNTC%Uib^p|pg$F?3sH(cN{=78#
zsFXyBy0vTc`*F)xV!aq4
z!-@z{%^EFbGS7a)n#nMD%TsNdCNGjiq_vbqU~S56uPa`gFysE>-7CK4sMmlH1u-f?nUpX>$zWvzI94(-
zV;Bb;>_m4@m$x^ww?_hGccA*48ZOn{6TN8?%i_%RMok=+!`Ko2JsbFf@jVRHzCvH6BN~X$_sU$N~Im&uyOxscz54qD3
z*BMW0j>OcSi`T#B+{t5e_Did)U%4S$RUq-LqRwL;V?|^H6n!wpI_qN8j7Jo2RHkh)
z)t+p{f+kho3cX(2JT{HVTzepVX;>{!I=}QP`dWG(R^^sN`i(4n>hJ&T%20!p?>squ
zf48&r;F)!kU%zoRL-#~y&ds#L@W$VJcB3<*ZbYv*(xE7$ab?VRSr*u`Y(Op!$0)&
zV;ZUuC(_BjczOB%`Mn0vgE48g1_sL``>xC
z``+&4{XhDB^D*X~Y<)2J?CY0*;nk~O7=&%(6fT!A>!YJ6?8YIEC%QEpF>{QQ%$eS#
zZZXaGvcgTKJ7!^S=J7MfA3b(#)(;-I|NcomdFjHHNuB@O*RFi(jmxW*pKLiY;xera
zCbhTLW_d9x2Z2r3Wu&MyPAr2o57bk$NtIz|V97wELc!V;rT>@2D6
zcH2;%?qyZbWtmK_-N-!oP#vFbPxmo1H9uf?VPo^RUbwckIX*Uj^!{TfmI}Lcq_5&#
zzt@>QuA>o8&3AjQWzjVmo_uG>$v_y4xRdzqHkY1+HnQ4yt6p%fjp;E
zM;D_+)5`N!hXfE*!1b~J&z=FcZv4pO(|_~3J~7?HV6=Vh{H154wlx!k6}?uG8CLHZ
zqA{F%;-QU=jn&oF(P-4HW88F=K6&OW?~X`{F?#QtdspY&-fa~#x7+RJF%j4?1OP%+
zXRQnbYc27xs%-AsGi^IH*L}|;AI`;4ZDK(bz~W(o*KuufLv09%!}7+)=bnAx!q(Q%
z6fa)gTAt+BH~osjkj>o8Y~QMq&szdD_2fgn!G|As@CV=dWEZzwTyl0N0X}gPL1AX4o#maIGMS#9o|&21I|Od;Dq>~@_QwWx|F-jjCIGPa
zfqSz$nwcC;R;ZnMHc^eC5TxBMPMe~WLw|FF3wJn(x)L7D<&=BD4jC~oHF
z+6#=~wz&d(ugS8^Ij5@K-qdI`+S=NB_<^(Sc6()IWnp0ffU#zEeWR`>bG=^PYN0G`
zZY=?U|C`_b%uj#uwODstR=n-#vH$duGaoy5`bfV3TMeG9b1{P-qi{-LEbQ2jg+f3G
z5)5bUygeRHzWD6xH`ceyv0htQ?K$_rv88Nwq0_HAGsSzKe!8x;$S6WZ00p9SjoIm(
zHxR8su{LF^>IaG$8yewgHZ|pX&%JSR)7HQ6%IGWWH>lO8U`%Im(Qrqcaaqkl5jBcx
zq$mIkNX^Ysy>*YsPQESO9Ue=7sF1U_yk#dOApYhI1KEkt#2Aes0)uwCb4i7obsqW7
zvr`U++sl(gvA@N@?sAuO=h3(}X20hrW8W+U(MSr?W<0kN2uh@|l1YqF)Y;q`j))qR
zm?f*Lv9~DdN!jT-)j*U(jng`7>&SaWFAhT2MiEnq8?=OBDGDi9F_0=ly2xd6S=#Md
zI=O0AQ{53iu5E=#6K^$;SYog6)<(8W9WuLd3}}P`VpstQG=i#S&By>+1s_1vwoY~r
z8xh2nT8Rom@bap&%1y34ErADu*oA=cb`4jw98RlcT5ydpQ=pp120
z`buePexWHO``Ip?T3DLyOyvfJ8BGQ!`u*>F`l*_vAVw5p?9%juH?FK+
zS$kzu2dy{n@%18#oU~opM7AycYL}vo#z`&udi%8e|7cz=4c$4
z4ClfeC47r7PhvH0N{qD0kEu}6pu~=dSV6pdSTR(!3~tNh|DUhE{3~x%f9buC{bxV-
z(?!2q4~JqTic{03yw~E)gu2!kjI66Q<#@s9qrKw0zw&)jD0jMG30B?IxUPO
zIvdqOyVi*zgsKd&mv!qZG#4jqiJ3t{MDbuwqDzt_xyMp?AW4$so6LzZFc6M`L>1O3
z$H-AgjIqp2LQTeSQd49tuQdo75LC$+SmP0@vg*l{iPD~I9bsO8{qzf$f8mAKc=7lT
zo}PL5=!r+Bie6W3w`0i}RU(bXYE(L?mZT=~VR-v8-)KJwQ-QUBr>U;K$L
zys>Pi*EToD-LCkyHGX+QvEJ}}^7(ZZd-S>2uRONCd92?(*Lv`w?&9&qE{s>>L|_zP
z1oEG~c=3OF=EAdYY%I<%e(1s0U;Exad0)Q=1#ApbTf+(j5YJCV1wkA|ME=^!=C8f<
z;^_6Y>Dkr;4;(!*)vAZ%^=*G?yS%hHp6a@Ho;>-XbN8VJ0!)N83XLyru5PSvuWgLK
zdU^S)%j>Ug3`#0uzz0reS$49R>HVu`pA{*OPPYpuQ}aEt)G9iKH5nNVo}9@|PO8%`
zpITTvJ%5CDbD@fgh}hkvs7+5t^Pt(4O-Jh9IC^7DRaKTvRM{SnMw9XS*5<zP2
znO--Bi79MEWpg{+_O8f@fDC>(FomIv2tZ92E?#}+wTqY5x4y6)FP>jp*LupDR$BUW&=zE`4i>Iw3|WVDohNbsN%Bh`t=(wGeM$d
zW8OS0x~}Ui%M=JPIJ0xvu>U6imS1o0`e}++cDmmPK$DWFDb-uk3y3r;Ozi!sDgV94
z*06{;%P~f0cY3CF)~vqGL7h7A@35Ks2d#VWZ$^4{m*#G2_+F6U_FcTQ7Zkl-x8LuE
zAlCBS+?=(B2vIOIH$ytJvlNA)AiOf%`Zr%*|9||ePd|D8seklmzyI`NM;#(SQi8pQ
zkjbn75isolj3DQ~{LI6TJW{k;0H81m)H>YSD0=Od@h7M3xhL*--FI}IGo)JxQ(_~q
zx;psMwe|n{-~4K?KlQQu&UUGc+gZ!mnc4QjV&D17L|U21Z@&1-|9W`^MeeGyULE&y
z8*>M=KT;fTO<&5#Hn%QQ5~DEQ$3Z2ABqLFf5THc7QxYc1tv$|7p$T^Cc#{Rl>_pmh
zFSE@p0=c~)S@R*vs-(f(%sI{qB~ej8_~3Sn+_5}K?@+x}H8|e?uD$o>ckMTNQ`hre
z4JTrvoo3$%vx11hOl)?>Ei)=akq{xIq}!(3>XI+pt$b3cSSu>VNX1+YYnQb~al)C1
zz!?cN?ohfGiC2Fe5vNhGqB556kP+2&`G8x&-
zTk_(?uoVTfB{)(8VF}vJD5})rD^jp3!vKY0AL^!KT$_wx1XjMSc=!L#0s`I4nx#>I
z=3QzH10u21D270+Hf!(CTHb^nnR#zX(z{IpxaCiG2(Z2RmCfhhj4L#ga^4(-03dfs
zDn)wdep%O&NmYh0$!*V=5d65VV~^b=Oqh!q*E-g19c`OaJ$hoc_2k_2Z(Z8_wJR65
zx%y-8dDnfNCoYeN|LBV^ygFE`jX72iKY3#A+}!ku`%b?7+=E%DuC6Tq@|Q1cZE?5N
z?acN!{bbtpRP%rH;`7gztfE0k)DkO`gNm=CPP)h}WtBJ?X>3t-io|hN-MF|=Pd)I^
z>}30L*XzpW^%u4;otiy4S^bPVnjKR`xATMF@%Wz`iC5_cTFdL_&R_lF%NMS%443AP
zO!3sIQ`7nU)R{^6-8Z&==i=5_;p&Yaf8y-N-gcrMUb_~~4XWzp^)gfwvE|%i?<0?$
z{S!ytb6=-zi!sRm((^a|&M!Q3!&A<1yV}I=3?}Y#aQS9VxGZMKfw)0o7UjLof
z&VS*?jjwKOuLmtU)BQ^HE@7Mf;h+8InyfYegaD=hQGzg3!#CPtv3I05v#&&VXR(k5
z4I-jspt{%gdbpiT`Fn3{D)zp1*npa8NJ?sDprm5r*qJLDHa0hD<*LNmPfY3Kz@v@~
z<_Mh-)RFxbx7)!M1_ZeoZV{;0SSVE9w8TX7*CIEuuDr3fXc?b%yGOQy@3^^bn>!2Q
z2&su`W0}N2_?YuYXWjQd`S3|owaQCRJ@|xs?6eYBA?9RA02MGpi8!lbGo1lNKpj0Y
zRt^;;pO?S;!|zn&V_{|5SYN$#`TBCrpMCApuV3#5LM5sqT&p1!<6G&OxZ*g4}fXaZoLPqPtUrCA9~6=+wIYEMW1?pH5F^>iHYP(e=?_wVbSv{IJrEaVD`|n(t3b`Cz~E
zzN1Uiy=?BtOuv({8y|k+k#@IdcY3j#VZ@Qpq+;Nm1zXXK4pRdH
z+r5FQd+BSMUt^3-7ix1iRo68$XIVyw;)9_$H`||In9gk$s>+Lh@%*Y*e8wZqHe~x2N|0(PnzV
zoBp}O5g7P}G{Jzq-L%(9+N{XkJbC*b+~1?Bs=OP9Sy@?`nVBhy3_y%Zh8RK=hg(~(
zy?%LW>4Bg5^ygk#S^wRyz4{+Ly7VJI`2J%@GYu1pQG_#Xm$o*{7C|URR%>AcQ@Avg(Q
z2btkkmIo!TEGkfTwjBkE$d-xhPE{0DAq5nA$8KxkW_&Yt7I`E9Dg?SqHz-TJ_g(XE&
zSy^0m65=aOhIwwuaOJ&7WXm?Q05a_j{kXPj;M*?4q$I;+coIk2$=fa;T^~r1*O|5B
zYLSc3Ha0L<(SnB6I1q{2+!bSAZ-a}g%!`XQr(L1xKiYck{?$FS*bywbvOZO@eQL
znGF=wWFM+TVo)Zw(Kr}W5n|MquXff>XL~cEurF!MzMVw?_STTti*GgGhxdhb_rHss
z-3dRaHd6z3&PQlY%5Ms95Ru+(bopI!l$&wck$7aP)n|UX6SAkzhUuxTOIMy+=souK
z$A{b7ljzq(f9q>6|H>P$|Fhp*`vUqd#w-i+%(Js~U1U?!*|Cw?e9-4nCS$h8m*!3%
z>DdTy-s;bN{GIJ$KA$tT-X4AWyWDCUcs9;$-W~
zo7*pSv~MVcHs-%du+wPyWh}ou0Q7_!jQ3
zfNJ%sDWZi+uy6IS9|Y!V_ygmAPIoth}tCw
zM~h{RWGOshDdY*wIL6~qOpL=ZijhL2(I{kTyo79WBvO(k%9Kff8vvrQqr1^-b$3;D
z?Qef~%k8(EGk@Hw>PDkma*F^#(C_odi+cBE-TdV*e|hufS-xMC#Q5CB`niMpAAjq$
zF`6v$m7Vl|yY&43^@}erfj(Xg0NJzn1bfq_L48>5fGt?%7P1V68L^TWtw-*>`OMzc
z3Y#Zd=j-8ao(Tn7Wkm#l5e0yegyz-n@3Q>@1b%C#e?HqKP~Ub22^ITs4=Zdtq}f3)l_-W=07g)H!oCWXq!5(8d28>H
z)5o5C@`23^O|Q$GL{SC`2m&yYCiGf)Vj8=-le^2J^h$D_T;)pkMlpZ+Z
zh$D{p9#e=GjV(!?Dv4s0R)P|M^^gcaybsY5XSq%(##91qnMy0%@95
zp-AZxumY`pw5#O)`k(s!00sm=_~zC=75(z?xu5#epZw<6p8n6j@Xc#kuM63q|L`ME
zoL>TCb`HkV+2J!+N5A~?H7HhAP##>-tWUXcE3>sab33)+aOj${C_0(e8w<<#tgIYg8aP9P
zLGPI1RM!ukJ!dRNWKARxwV;3^fk-Jr(jjS+2LTfR2z)DT+dAir!4z(XBiMb1rpG)K
zb-S$hwwlKqqQ)^sV@wDkga82FQOpj?*kZvHkXVE{0E^NtyZ_9>Z@#$y|M;)|;hFWb
zfAOdPKvs{Y;b0`qKYOkD;~zE
zGiNqe7Xer(RSc6hq+AsMiaUiwKovqV$}xi|umBh&7)?eYK&}OWfk{LsrRP%RD{HHZ
zt3o<1EEQJuyLpa@+qS)O<;v!%ll@*lu!tZjgUSRJjp*z=H!!a^hq|fD>1;S0T4O=r
zt{y$YcZkx=g^5B4cLjppg4}7vxEnwEZlQ@c=NsOv?DWQ2dvl5bah^Z;H}Cg|Ir9}h
zU)b|k=ANH-m;IYNFz%*d-aOlP|2>zV8V-7Sc3a3rE4;D2b!&U;kw+iDc2HhFEH92;
zcw#J2&lDVA^U`}@mGXYWI+l6%y
z=hEm50Hi=fSmJ!?D)|n(={Lp8=)?8S=bS`}Ke~z5BrC;v!99khM+RJbnH*E?ocWZlgo#Y_5n>0`%S(7ma4JLz*qy
z;)$~>Uwr|@aAmYRBaJy;m%_#>&jQrLk
zKtvGXcf2g#{tw<7Wje1_Gj7_ijoi;ZedE>1RcOM8&(-|=7oUFZ^_xBvd2hw#gWljO
zS1Yi$+0yC7^~C}oed3-Uee%7xF5UR*wX1_(kq;ky&)Q}u#(#a`#h?B9mtNXS*M&)_
zmyr&YkD*qEl*uikNkcZq7T(7c#31>@X>QehC(rb&SgLTVX@G|}NBr`zzW?}odVSP8
zSwAsY{nsyCJ3TByiI?@_Q?-8mtFNB0ue=^++mrT?&0?oN(6qdw&NcKWf8tzcxa>NM
zuIQzv;goiFcV~5TaaLTuapS`F!35Ob{_Gb%xYAi!8oaVI{^~1N|G)z$JKbB!=%tmy
zntXpNe)j2Se(hGh9ZJsJLf#`FlbOobox#!!{0*P3H?`JUtOQ6-;gD&#w61)@Ac_)b
z!$91)jJl0(-u4>2o1B(A?Lh?KdkyxY%4wZ_429hANN~BOJU`Bu>5(F857?DLu
zAp3n5KV(1vF89-$nU9_U3hz1E!fSIB))FtXr
z)3^TN)us2II_a=yLEiW1skuTE0Ez-HkgF(2%+aDnQIRDgqb4T+30pLX#`5i(GXNMB
zFygu68`EcB{ng8dfl{vblxYfAVALIShPCG=3b0}(RB1s(SQ1K53bjE5VLr+Y9C5@E
zM|@v6N)H@y#1Th)kC|l$X{`*Rw4umsW($rjkU}*#zYQw*
zMa8R`SmB1_4Z;p1h&YwL~^s?j2E5i=5R7~`KhzR;{M}H3Qq)q=Wa}&`tN^b+mPdS<}rBpnfuQCzy9p|i!_KKIj6?^
zql_J1=OXxZT^}4AEG#U{t#Pfj%>44pFYo!38N0r`+V6I)HIu3u
zS2LutZp`vv<4^s*Pw!Oz|NgIj=?fRGPfVU%*2}H63meB55YZS$P!d6sp1A=TjfZ<5
zI(6(n`pgGc7mc6_KrMqygo!%DT6qMIU=blkEeJ-F2;7=Z{@b7bxj*wq|Gmv(Xp}%J
zU=bApq{KuT7?LJTKxnK5(7;(I50j}<2*wQigAac2g96Zm_~MH%zxvv1S(cqSee%^;
zUp;>O_{rnPSfFj%Ja;HingG_tEst!cB82o4c>P
z`N@195_%^o>UmIm9;Cjz-?zOO<@c^N@w+{=+clC(!JSmWyg%cvap>Jt!JN0Zj-0C&
zFe|0*`a{ffGsg!jTiXZUym0w*pZ~&pAA9VlKJfS-`osqiB8V1b@IYiqb59xoTPw;L
z5Meqg#bTX8NK#K~&6Fs1vTS{A<%x$jhbGv(V1XOkUI
zFH#~90%hkVj@ppdvE|rc@`kzz00dFV-02a3P$A8&g5O3Cg`&UPlK}vU)9uB~Z%m-D
z++FS|>%Vc0s5eYd4Cw@bl30X+m<%8Zkcy)p^wRr2^4;^9?v$~AXM#}R9jeYj^gF#X
z%Wat&MG6sxS%f3X?bZPxAp`+Nki@gl2m%N~AWg=ACP7YYmAR+q9`BppvCaSC>#t4G
zExY)~KXe~A{wpuP)Hr%%ssHc&!Jn{2_LqMA$>(2s`Zr!xzj^WHX~v67oeyuGd;P-2
zSKI2@+0C1Bd^ueETR-)YzxMn7(vlti<1at|{H4pCsN3@JGnN~!`!%6LB=EQmpo~G&(bix_(gFwoF*747k$C_S
z?#N>yAiP;>iPPLOgC(VyP}nG?NzEwcIW1&`14vb66%D#wXYHl3uIsvL+LY3~Kzp9%
zg75sezI#s7-D++DnCAxOufIc8^_%m@^Stuybk3dkd_z{~_PzU^{0!dyI#qR}GXNG)
z%36&m5tDP~*KgLk9(9Z1hMV2j*MIdVfApb^!LNPcUq3ZIwSBldZYIC~7IFVb5^F*+07iztCJ8Naw>Q4VFCy
zTR^cb{$BO!FaNPWw0vS+YzG)H!L`HLpZ|OR_5B;~S=~6z2nylL7oLA{Xa9@W4sKzW
z?XU=bR`Aw1{=y6U{~+xqnVqqwwejBAed(p`FI>EuLZg*3W-!*gZq+e0G(gPIV1y(J
zRU*64>aNu-gj_)gQ_r!JFUA^;526icz!(*`5K~I%F{Z?CYdGu*VWwCm=PE_0>HfdKQTxIAVJbZ1c-vb0ztC5xDecNMCW;D5;BMu
zOaKN*37Q1JKx9e*VHNkTBj1jsVzX+Ovl(vw_?-8%Ln7AbQo2ElN2N&l-Xc4Ah%82
z-#hEn-mIRrRJx)rt8Ou95=ihGbz*MHmQ@O>HA-s)LU2G%X%a~64g6mA;5p)mBaZk%
zag-i7;)o-T_#VRv`#ya9^u{0m@MGtfdiy&!DsG2sYtJ3}t2b`dpldYGzIN@^n>#VF
zI9pesb0?TMR6}R(Us=9q*c)hIj89)ZtbkRIIt%zzIsWBu>{PXXU~%!($y1%Kq&OQ_
z-9ab6v9pKdm%78orxv8++{wig-K@|WnTS9rx`>bjlmac3V$_bjjwBl_t3
zqxQkU*?;oq*LDtfzIFZj=b!o7-}%OF*M$9@X}zn~&f!NW-&}i`2J2JdgIBLUp5;$H
zb8A0bJ-5(1e{AX2{xyqq7P357jBQmvgLQ2XszwOhBDeQ<>qj=Xe_~9}`TdIr{Eh3k
zZoabf!H3R0w9z{@w9mdiJ=AgtelfG>3VZLcN8+QxQP~!*SQ;!i=akklu~r!f^dZf|
z9e_|FSkzi{K)~p=)>Ie)PIu&lBCv&d=q<$ZNI*F`I63De>g}fUAg?aBwkxtS;+9h?(C&W80p?P2rVqFIX9~-
z+msfdAVgm`<)L7pjB4;pYy))5Z17JmUHZlU@~aosk<~%(7SGW)qAt3nBQVt36%l_7FI-WF71Rn0iCxETi@2TKur9GWE0$xySb}PF^^W?%^BRS9=)4gxVzb#
z`waL^f!eo?LHw_Qx0pbDH+TKF@4AV4@11jZPsH8az+Hh6cstW$5t%1rwAS;`-u&0#
z)Z>pnm=b*Wo-@Dz5fX-^kg;{i0RgA#^{Y3hi?^Lmd6Nz}+zA#V
z;uz9=t=|3XeBGMf;GN!|ndg};t@S+1l~U@*AcUX@6(P{v_E!jh=FEwO<>g5X&t1H9
z>+moi3{75?buf^PXz@DsKAx@p-5*fip0KVum|kw=IClKf*2
zJn{Gg@jr$~1%Sj8r1rzd_bb_+w%@!l2Dg6CN)HW``&HNY|I;6M@??KlCcbC&nC;qx
z!Y%8l004jhNkl<_lPD001sd&q9lOGUSjH4E5d{*pl*(;r!T%fjVP|OeK5Lu
z{n{(dFo*=kMkFPM#7YF;q4h*WNbXE0M&s6byZ7&W?WMo*?JJW67e~9NPn?Kdop*Z+Ei6%QFotMf*_NH~0XEBMW$sWJ-uOhl3$5iwWM}9!u!R#y7eBECfy2!N2S%o8bOVtw_5eUu=k*yqL3IV*rW=!hD~7WL5Yj*J5K9eu_;KBIL62!
zGIP44fzzMti_&PE*eq%nQ3WzA2=7u%=5E?sI1)$U$eaM61w}$pBWfc=j35XJBs+Ln
zDP@d#iyx0BMB&@tb4Y=*0gWKx^ty2w=}gdq=mbClTI*F4n*dc(ptH>7);LNn2S55g
ze|Q$73#suC11WTxbV5uoT-!o&uS{mY@ydnm
z?foZKPW5xkJ?nbvcWPGlNwoB{52)yt~JSTD(zR;Ap8)5|dX*7L0%cXKzx$<|VoivU>&8!$(<(EQs{O*N*PiKQ@7Xy0e|~Uz=|hi!0xbX%fxv^F
zYt4tMZep5psGCuW<0TNN}TuQMO5ELK5koL%i
zfpxc!=@5&p*}?SIv_Zak^>Q=b|EZHp=KPv*E+AdFcp#651;wzGu`!N3)T_H*FXR3*+ABh9lv`0^8SrirrWPqF)vombvs7Oo+W!P
z#
z)lI3jZkni-ZBtOJ=23~1Qc%h{7eWvT2$0!aBZ&_rQ1__e7G-`qo1x@dgAP
zn;3YfkDa&t>23xB?pDgv8)7DRq+sr}o4gy|ZD!(~#`%uNg?Ckz-QFR0MhOHToM2z2yj>5+@OFH9U>E^
zX<2RGymsNj3m|D_W%>BpK~WSNd7Oqo;@R@
zNb-P6BWjEmm_mcGRWYJKK3KrIB}1~%Z|q=F#=5B^CZmN&V~UwC`%<}3rw|!X&2}jG
zI6deXRx^kqy3=c-WaC>g8Eb+R%f4|_+c`LXe+_6j(pT
zr&I9dcoag&vceb>lXw(jQJ_F;QJT>h(FO=y=-AXRj$NCDW2Qap50?k}^zp$jJzH&+
zqYs`wHmUZdFu@Vxkbj!E7=MZNLsSEeedFnZ=5l+QwMq=HpA=4*J(8mtmy3rug(D511ks
z01yQLoRTn$Qm6<3lEk?$F(N8X4jmu^)9p`p-yR=iYfs#B;o8j$d!sf}(~T2m6tsx)
z9-9!DqbMOm#&%U>B`?z~h7j3Pgs8a9ke$smRuXH<9O-E}JD6OBe&5tyvoeWeYNJ-R
zYn9Fe4y(Zl9YYTxR~%DYa}q=rNCFy21L~Bxtz#%rA}QA5v^A(qLi7Mk3X~QkH5~yW
zlVV{@&{b3bAg5sRM6vdDs2lPjPttM5z&mOUUcx{e|x6|~S5WxL&l*2GK-pF+=8iNmDIq;Pq!?x!*N!HjMa
zUOS92-w=+(sn=N_?X^*3X7aq>DMfY;XD{w~d-XM~6&S9POiS3x3hhPRs5@2_*6tFd
zeor%Vi0s8C4M|cW4d79wgHca`pvF#Dj8zJD^
zE}slpx6{_9Ip}Cw0MgzpM_z?#YPJrm(YKf0E7
z5;7ZACsqk8kRU}s7S=2#DV6bzee2yIQD*8%H%!xPv@rg5INWvw!3^tS!!E21YCnuO
zXQMh>J8=NTc1es55t#xjmdh_K9Y5=eC9|j<+x$|d#?4H2_ah7+=
zZd@vQeN`^o)kNAXuR>(7X6oyM>$i3q{Kc1_d;Zq0DEi>$=^wrS-UrSsccsk>P1Y%M
zKF+InVvwq}fRH;My>S|7F|jCv?>pCf-^2B&$Nylw9|=$!ai^d(rSYY
z5+Z4przBCeP8_6uG)>eY_f*>H(C7Xr5_ZH9NBrIf-@abtsC(duBaZl;<1N)*;6Vt9
z3@H{3BAf(8B*aW>LSl{pi4`X+#(@qDBvG+;5K*BbPAQUv1nON4qeoVAVwQ$=t8o0u3cO%(zP}`H|hVevkxpUPkLNEb$#!JXLdbg=hn`Rr;7*UaH>)t
z>!0}8d(LzRtNRgKjDy7`6$*4_cW*CoID7VVjPcd$HxBo9xtyJT^uE&@n~N*QZcW=?
z{l>G;T)OmXcR{Pl)cbW^udg4o)@o97O91cveT#Xg)0x`?%>8V$EYn&~LbEtr%2Ml$
z9gSzqtc@KE1~{qFAS#*Gby+n%7$Uos;mNE!=5i#uW{!_9ZocP#^AiFQ+v)Dj-IX&Z
z%IWI%*8b(a&R4GP{-dv6XE)r~8S9G`Xmi*!x5kqxL&Z2M_oq`uV7o4J4ly>Zm?k^L
z*-1AXr2rDanj&GC&Uzx==d_U%&dw
z7_W^rZk&@a7s
z{crusQ@7xi1_rJ!GgH}aheMB>YQMgf`n>{emrRD1V~QvhCLPO;HuXD9^W2!hC$I{N
zqR4tfMV5V2xJB=G+u0uYmH>K46us0{xwvkPmcY~57^Cl6tCfqsIc%#uwc>qSj)B|&
z>~abz*4N#N(qustaxxvSyND4bjFy*)C~^!%p#Zt@4wWzKYUQvJ4WOvIQi(2`T4i0M
zsc9mDCCHrMhG9U4gzD@2zU+9pK?v{4$n3_$@aP1+zRArxc0)%{4PT=@M9
zAgj&TnNObl_(Q9UsT2Ri!_&`x^VyAlcV&HZJoA@cyY%wj9uL>_`{n}+Y_r#tW9_aX|-}hwIUsOm5;F#@!hJdk-5xaV2
z>)PS8IbWoDKznKFpI*nm^USN_E)Q8s-(*JTu^EQu+{&;&=#_2rLb+3v&FOCc#Nu#u
zVYs@yky{r-%CQ_SEW4r)KpFuBgn+vRlZt?lB+Uh=0;NFq((a9K
zzViBXFJ7Oi*BQoDlSNL3_B6-TLx}`YDKwb{=GHg9tvoj@K4GB9R@g@6tX+~kSJOJV
z#5z}8fRG!uRL6FdFC3E)B_sjxNUd?C9ccm+Sz;>?5E=yu2oq}$9(^K8TDO{65I|9i
z2B|IKo@FCVDzB3$DAxt@??qIOmh18a9UB|t$w!Yi(-OL;9$ZxDIWeMV|mtPtkZVm6>
z8@1!TIs@grcd_h5tc+3(T33W((AEvb+9;`2%6krh1eF(L0JH>JHHxNc#F{#WNs>2_
zod6|n+ddw|Kfj+a|*Xg==oT
zQi@EcZG(ttU`vEZ^OGtl&F%Y4{R2d<+LLsK}nzO;FA@!?|~n{yk?-~jBadk4Gw2dOElYBmK`scg|tg}a&2$`-Dy
zaC$37lNp)Wummc2Idg35UIy+SR4;Y?sly;mVFX(JDVFW&fL(my}){4kn{<
zHM+PHo0a=c>1io-v#p|d!4k$w`ih@8b>jcG!$-+(0A0!vPRFr{4GIy&qv{12_bTEkIiV8EzPqOD5Fl
zRsGTP>!b1P*R~IKrUxle4|7DA#fj%+T|Q&(`uzU0_p*WgYV7OEwo#g7btk3NvdYZR
zD2UAr*bCL7jV5Sh%&N?jgcKDN1$QB2V5=YWE%AsWjyU26z)^bOh$D{p0dPAImXSb;
zDFO4mLRkSJk|t09iIYME3!=zlY-8woD=bk`P(X+V)omG!cPZ(aFxGWVdB0Mf5Q3SF
zo5p)BwUVHlK
zr$bw=p1eP)I&yP;R9@RHoy?AvS3=-Vp8pWm)f8u++*nqWEqAjS=U;l|`Om((`%KY0
zySDoHaBs~W_Eg-zhTO({adUNL4XmCW8~{+)b^6EqkKFs%o|0QHerxCTXHA=aWIem*
zf%6$lW#C)S-FW%+3w!$)?>Tq=;fEhSckVoMA~KB!XPw;EiG3q#i4XuFF>?Z3=v~{o
z`T5`YtjV)uD~sdFjPg!mY1`IWg`rtpSzPaSzxC`h_dam{xU8Oj_2sf{&tn)zf0oR->cNccV|J~oE7*E
zt!fZ>=eEl4_q4oYq5|J>`@0FD?~vg5&U5-bA2#CMMAUb@-o~huQizFJn7X~;y$>v$
zx#zws*S7#bYi*PYq4nNdYrEZU*2yxhwE_qX+6n3e;2G-4v`rypT4}dCg@5&p*Dr<6
zEXo?}`NidNgvF`_(mJCJSv)XqtvRfX4n;S%r~`CuFK}p)0p7mKUTbZw1=Nt5rfKSU
zNN6B>vl5KsqL*`PiIhTyIF*8I7?Tf>s)Ab~6+kA?@pjN1?HTbZ&X}8ETCIH9
z8>0h2lvdd>bRrpns3df&YY0jUA#);RNNuJ=X~Z4CI*WtqP-&B$=a
z)l-0fS6rOG4Ge-L7C`4*5)1-~G%3TR8HEYaSgkZFVj|WgEtL!XUEKK#@4dIXxR#Oy
zh1{m)rOqGv)Q|CuFJHZKe0A;I#`;IY?763(|JHbaIl0;HO?7zlC(o@su(7nZzTVDU
z({>itA2sBE+Q0a@uV2`GB_PKYi&6
zm_PE7k9_8lvjBBn-uJmn*Z)7y9V{D-exkVT^?JR|@XW&C%-Yh=bn=5+H&=|lXJZXI
z7AQp&g&N`pB82g*X=ih{9hYauSOo+O0+>X>Ts#jE2vnMDsVflyMIh$Njjhjp;i+Gq
z^#fQ>8CXZo%;F?fEgEg}DxztjlfXzw*p@lOrA)_c=(uB`F^MU9)i9(6tJ#WB)B4sK
z?}j0v;#y@kxWzVjU_fOfAOjGhP7!>}jZgwZDv}}sq|}j=LGW3Z5fUke#6(EwQsw~a
zP)Q&`NJwI|vQ9LKNUM6ltQZs$35z7eNC2P}_y9^rgHs8jK_jJxI;m=nX;Xr>HG*1{
z*a8)Jn)fiVLS#q+z>JKb>r#;+Fd#~l10jrxSoy+w=9si4+o-%0J4+G-K}CrXl_pJz
zIkiDEWNd|cOjtQe2m(nn+r$}Q%Yh(LY+^l0DJiYdSXMn!1f-(EAP_NWs{y!as$uOz
zNIEHtVn9U$Voh?ELM|dmEfJDJ6d~fk$w$v*T96t5lrdSZ1SJZqL?9VP0|^impy6Zy
zyoy}cRJW8G)r|FIg5?1?9Cc1!*Ue2Sx}f{uZtRQ~hS}NoKcX*P?)N&ylshovCWq4COf@7m#ve+2PJCqA@D#pv{s%NE7Y~_~4akmv)DP+`7%)RyQ;6)qu-9EA^J?yxzpC(^=i!Z0gvM;+6B=
zg-zF_#`_TnraX9^C{bca5Vc~{2b){!iwH)6$dH0?h(rPm04(Tz1Ym`N0)(iD>ZmlO
z3>YwaMLmK%<|gC?#-*0E`GajfitTG
zq#D=Fj)i7%c;57BcIdBWG@Bd*Dsod8?Nl3SVWq#kks^vk4Z#as9v^;v`%;TcTCZhG
zgO)Qe&^>yB&0E4tC
zR-r)!gQ6_!*|a`XO1Z8J)RxVmQCjH^Dr}LG!vv7P5T}fkfaaJt3|V2vdk262fBMRA
z9yFyQt@^okjgna?a|O{yKui;OS#LGl^|^k&G#gQJ
z3J{I*1|lRPQi3*tOGYwHNkrHRphS#OFcH$b?H(YfAR?2Z^5DZu^5;ME@amxV;?0BO
z3k!euPyfCF&zywe@!s&rb^F-)hdI%U+oR84czOoV
z&6ank)5CK6$4)GNYN`LglMjUu>avv}jGo;h>o
z#*J(Jem~2yPN!$HE+Gzj{qFgDiD-9s8~n7M;mYF5!;e0(ymD@9cIzu&`X#%2IL`Zd
zXQh|r{naJ%G9=fXO#i^gKK1WoeE#a}A6zW|>X&yPa_+DH$o)?|x-gbBn@k%*2oX|+
zLc_&{POm2j!_ofrMxOclh4UxYj-5SsC7PvTz}4u12j26(_kEm*IKg;4Zdx59Xobp9
z(>5aFy+1fO0Dz(>>bedI$D_%poKBi1qOO6rLHCP+^Tn-$1CquSozkb>a<*4`xXK#(;<}04RcF
zK$B=l$%6_=VuJ!DQ8=V33Zw`rq9Hd)$=cL5&D_y3rI6+HJ~xA|R?E%=^;KcL6L
zciOG|Hm3Re;mpkMAYTq|bYRSPefwo=bW9vXw9=v=B|${3^E0QTGn~t>e+Pc@xfEaDOW1bdgA||fAGv-XY=Zf>PwgQ|JAEoKqi=&
zP}+v8wtS@Dr{#^;4)%N#JDlYR%&m_z*HPZvw;g^}RWU}i28m(}iM`QemGX6wXJ9m@
zG|?y;gisOXqo|_Fy<6}Wgv2ONsj+@)b8Z6$+ppD9D~KK-He%G?QpXY@izHOU1W`j&
z0Df?i08;WoENvrAnNP!n
z9>4eWn$sOX18QaQN7mn8?JPO>YhV8QKmX@XU2iV0bUwU$d28(u?tSq1#n*#p`oO&p
z$BP%9ef5`j2W$WR2xr&+n};8NV9+bhZ{Cv?i`x_^&(HOirmL%e;q*O&<$(s7Rimh>
zli7Q+c9@#O$;{;)WAf>2vgIZ_b@h+G_UzN+uFGMgZ#If6xuV0W9v#*;*|pVn&>hyZ
z7(pV3g@Qiz*n{^U+gPxnAjpI*B9I#UaPj8$r5oFqt89C6sJb~4)n&c2vp25WmPN1j
zYd{}U=4Tlt%poa+kmR6LB1#$E=|WaiRtZsP+f+slfTA<9Q7b~FQj8>MM1+$|+;sAUc}Xci
z6cGbtgs~~RNeN-#*rH}pK@d?zL{hm^+6X#8MIew8N^+2d6aynvi4!Iu)(Dy*8y}{+
z%7`Gw!#Z&kGu)(3aiY1tGh%OgO#QqZ0s6;tLd>p%>2*RnzgSrA3Q
zC`y6?kOUBU{wIQDB-Rm=XyZVGaG-J$_Uf5}4rsE@i)lnyXkv<9Kl9gbQECJg43F*8
z>0L4u09&aJqY0yqpL98LM7BBhdS#JOGonxzVp6h{610s`C`B<$6{SN6D5{G^CYma3
zD%Ha>kRqiJK^O&7Qgb;tWr3_95Q>o?F{G3LRi~?!K#W|miOK8fw%vQx>|a@4SkWYk
zxD=g+$CK$bqib5Fw7l-yhIqDp^_i_}&s*p6fdiBr7PZO97KQ6qdH1SYwhKMKz$?m|
zGDTDLG9aD}#@B+Gj?$=yJ)dV|lkI8OSy&TtJxsBks;b>fv4(^;Z((q43g6ss|Iu?V
zDT%d7fBW)I)20s$vkyM{*y)WP0tujiprAGYg4URxCQt~So)1lvf(M?=rZ=~DK+^KU
zVm_LU$K#aJ>WLF>aYYfukO*R(npsoTC0v_SUwHl2H-id!20AT-e9BWDqn_1Zvqw9{
zpM3wl_Z`olx>e=l{Xg;}Pg>0o)hHFe@Y3~v_~oyK(O%H`$VY8V*O?H4(S@zEaGQPS9e>_!z}y66Ze1gp|il@?B?lf+gBc3
zKelWjTJQixfL<3OH^bv6?>!ZOA^_F${$F|N+JF7oFJC^~d-C}4wO+rSO}1v^NnhV;
zn!{P$&j$)oTdY!@nyO=raTF5!7}s>COr>XNjSU=JYL%BP>qUZC07`H%F?#^8bDays
zCbb4+0koHFKcxTp*4{5(+GX-K0U|DCT{o*|-2B{?QEbZf#qJ+}-xDV(w5jQuO*=c!
zDn7maGQaRbjIrPCb-MXrFj!hx1c2di_>uR%cYlBXl~-PQ?z!iRq8JW`qtU3}@0-qo
z$uf;lIMX!c!DLEVZ?K+8@)utDpJUn|A8zD&@y2Vf-F)TxAOG}E53M?=;-#G%a=3+1
zsU+8S{f#MKtkqY_U|Ic@`xYNRrXy{A?WO(Zu=?PWXFHuj66Bo%6eYlEoCZ#-9rM2T
zKl%8R@1Mjlp6p9C9!<7;E*DF)@km6vo&MhTR#VQ5F^D)^UYuKcjz*&!H*TbqhQlE<
z&t{_&Cr&tI9qYkhShmTmE|ztrv|3mi+&UcXO~(YVn%mDj_UOas&yTBd0jP9R_pbc8
zkAC2(tNZ`m*PegvumO^_ULOD|u9Jq4zyfttcQ)S3vJACOkqH1%A)tt`(afxgP*A+6
zq=l4-L_@*1ZEUTP#ct8JrfW=w`uNgjYMN?%aR2Ge^Cwr)&#Lk6>z7|!8?1TX+}gR=
zUChs(KDDv5bZTv5xUf{I8lmxEzz_oiGn4V?vbU`7JJH~GN)Nn^)<3*eo9Oov|Aw^3
zyHXJPL8U|9NjK)-e13oDVZLK>@SU?tif+3i3y1&`0)kJVFmXzX3^V(-MNnF66gYo+
zGqgTNZev46_q&;JXshw#=kM*WW_h>s#b+-3*2P0{OFA|rlP_$?5ZhKK)#=u?I+j&`
z?`J;sp);$O_T7K_PyWZRl_MH<>rt>km<x-7{He^@U118bcRu5qD-vTp~>34UYzB~*Qbl1jesJF
z1}e)QgaJS*55Pte07NFHEZT0;B!GB01EX^Es*%RX)JU|38x)u~3(T)vRLt-@FNXEK
z=58u0rNlCKcEH4(kyAz)AY%+Db;l+H?U={e>?*sz@x`mZa{20mtDP4v>_6S!{B!F|
zA9>&7E1AQf#Hkh_Ll|cHusBs#qmTaRCqDGiAH8|;Rp0Xdq@1-kP);5@zI-Tgd*{qr
z9q;+_=N|7gFCPy7`PS}O7muAhS=HBvE2-P*30)S)`FAw?xXfR^S4zY+kL(AG1hlrba%
zB1!JJNt>z-oR~mp-q*%V5Qy~6S3oNu0cMIEqff$tP^nJbZ`ntc=iD!nQVLa9J0ECn
ztPF$-Km>?LA(h1<7{dTT5)h#x5n+VT03s%tDYB%kfoOe&L8|01G9&uXvSM=aWk8SR)OR$*Lh}}hGK94m9rX9VpM@C2|*-IE@5p@3)@C?BCSv<
z6p_eKDN%}&G6+fpN!T!2P$|!VFmM$1N?1ax01IWfu9z?oXh4$~1rkA{SiDb`j4_#V
zV1fGzwV}Wv%~l{M#3tji(5|s|Ho>j^M$O8r$a`(yA9^Lz$ccS!+^~$iP_&jPiDn&9
zJR(t)=}ekBRt9)yxneH@ENoSyh&9Kv&{HAROHhU=VrjV{+HLmN-nk8~ZHy(z10=lPOuV;Vm
zpM3H0HT9Q1_`r$bFoBea^e1ln4ynxw1Nuqyh?0od6L0D9QdRu{Pu
zVi8bID}gKwut!NXqrLrWlhHxrf$X5gLmZ`wJHqzO=YRgoFJKDExxexIPSs;t
zK70IyYq!4g(rfpx^gsOgV+N$LtUx!jz{%$w`rtC1`{;@LPnRSWavVaD)Nj_E8EEz4@2m)3b7UIEW0+%*d}VHX3~~MNkl8QF)BK;
zRwoWc9t;~tx{a|$u?9C1+6+7v9u>1B2yO}gw+}w@Gw*+R&?AKbMA`^9Z*K43+J5lS
zdvsw}U&q(4?DzWp)z#Je?!PC6=$w1>p8Ji`py>4JvkyG*(5FBBlZY5XIDULH%d%-?
z-L@{bz}{I4K;L}knde@4t*6sUzui1}PYN9e)v1N-OKRokUKnpRv+>UE=0bO=N1Gc%
zqg@keW98VfbotayXD8VvRhMr}21PepI%(Ecf8z>=v@etvmjS64d5TnD>b
z?s)3GFosTOKXW`^TsoF5p6LyUh(L&t);5m|0s+mcnIlU?A%qjh`Jo3N0RW{GGe2|d
z#;dPguV>=`&Qr5D|4QcGVPRg9u1OdbTEa#yQ2|NNoh)JUutt6!b)ixYRsU~<1
zv76;aP(p0ToL}sH;_L$-KYi@66N_C32_XVoj)*kg-@AC><^HU$CZpf_;^&R
zZcC~=i_tSnu+}P-MIR88Hng#}0$|z%$jKT35DzBbAMG}WzTy=DF-yO-{kmN!p0w)6
zPM`hy{`H$-B;8>%rH<97SGu45`U`*MUq4k%S`j>!E#YAZ(d)tjgp8nhhvS-?oJT?h
zNvS(_9U_gz6lSUpghn{}ywi4;8!=Lid$}G$OS2slTMLkySe6wyEvgqf%PsZ*P-UGu
z>X|~SZ-N^{QzQT)Zb3qn2r6Z#bZkwmP4dh!SsShOq=s=H{8+8Is6V%M5G?oi3z0mC0_fljX&4
zl(VfX%m*x&l+U;FhLr!L@+KXU)Ewe`GMWUUuf2QmXO_A+Gft*KU3BYJBd(v-h3fbeVAuRaZG}yF^nF58x+l
z(&S8zDRK@#>7lfot!)B8N^s_p4j13%mQz@2K^(FbSAYwRPFnbt<~k
zx+d!=MvT~rzTUY8F+pGU4rreY>Tb|^Fga!2?&3ltwK0ZMGJ!0psxBMm1|SntCNHFN
zrG(QyPub+8^H)`t8NUGSy5aQRDaaH*Oss
zZbkM*-USD(Nzxk@-SyPO$RL(j;TQm=$BT=&aSuAjq*N1a8*FH}jbe<_0}fvK2;86m
zBB9o(Nhcv;L1AH@=e0Cx)R|>fhuAd4LPSg$5MqFKb)`*FDP!ptgpBauX~1Z4;`q5>2Mbpju?fzmJ}P^?e{
znH1^_buJ$+@{)x@BCMQ44b1fs=bp*fu8v*g>3
zvwa`Ry|G);qgfS9CZZ>VkgF^-`sN5R`VhU!T)#g99Pey_Z!}1~x(OW@rD(ZT0w$71
z)uuQ}{kGqu)n&Iuv=)WNr%*m_AU8NaH4}xQHy4qqbN#|XS1m_
zi3$MGsg+cvD5`B;s4CYXtwbqOhOA>GWCi0G*-6iN~hn;PCk6*Lnqe%FQ5C`7k8(zk?pA0@bIsG
z^6{Vfz(dyZ{{A$t^TUw#C%4vJ#|dq<(`3rWH_Xrf?>@aW7`AA%ao+ole)i}__UC{6
z!5d}z)o(oa3$I^4Kt`#i)inoM2okwm)IMZQRSv5Fm~GbU=*uaEq~{?F8EEN3>)
zoj7)1=G=5T4Xw{z7Fw^BLIA!K|5Zv227~SG?bX%Qx~@%ZZ`{23dfS91tgNrCpV-{o
z+Z@dqVT
z?G!aM5uKmaQ|R~mgy3V)S|cC>cZ*y^A%ag4kQ7l}*W-izvMl#+UVHqWa}V8fFA3m0
z(#@Pw3RbmklWQnUzOuORQ};cPSwa!Xv2L63_5EuZsb;tBHm5yZpQJ6lq!tzuQXOLQ
zBFwqP#r37SfrHU_G%H1sa#>12k|zbZE;feUM^2qwfVisSeH(+EXU$}fc3YO#@$2@oovkaQfESzm*Pg%jts7-G_YEML;g6p^{Z~G8Vxv$`-
z4;SOiRO5=6SPLr7mqJX*H)PtZVQ>FQNQ6Sb
zL?ptL5}_yo5@AjfLQ;Be8==sVrlorF$BZnQ@hh(!)8~$T>f=9A
z=F6=oB{S+M3*@YXFg7X+J{}&9eT0L9o1?93uU)y6`R0Qk{lxCx^r>%t<(|_wN4ty5
z3#<1%`S2%y>Qk>?xiV?S7Y@S%eOgJ*noeZwY4Tod*M&B=v(U+h^|U2Iu|^~!0&57M
zRd=w_={vw3qn>o`y~i7^5Xs&e)t`O#g)d&XzCG#GzMVp{B!#tty6NPkl}!ybrHgfj
z73ogMhavClqAyi3Y1-5_snHH0Mu)i-XDaRq@13U6hX!W3u+P`>>?qajnrfoOi;4~5
zPBb?>j`n9&m~yUl%p{{s<*gtKn4}=|U&^|Y&CL2nK3oDInO10B%zG%r%*aB-DPaUa
zrB%^qDHVW(Cis?;q$DW?4k0Hm!TYww6mPeFR
zpp-VoDx}B~x3<80r4#_i5SS3Gr5Nqr-LYA;3bmmWfmx7o?%f;{0vJ;L
zSPCE*Mw=$SF`$@hy?6kJ;j$uI&yE54EF5*r4n8xstpnu;ESMnVk(
z>#~e+);1)GLIZ73{bj$nyc(t>iz;`sN;1Q_
zgUpy*VFamy_!xOaPBd6TvB-fViy{H9EX0Hi3L|0)V7*=fn;X~0@{n2&Og?6Uy_^a-
zIGjlVdzbotmwjl>fb#`~Ixkv5i8F4J(zZinee%ApGNGV)3A30``dM69IXw9&k_;`4
zfux+}7%?=l6^(fcS>v6!1+WP@wUd1^Rx1&cmjKJhF$=AM`Ep
zh$D_T;s?M{df^c^!u-dW^A9aiy?lHx^DV_g1>ywrzbg>sxhxW9jkp
zr&l|cLPZ(@HNYms2SB@0;N`8YS9Z3p?C$)J&(7Y}NCrentOPO9TaknrpslN>CG);V
zRv)10jlJP`zw=AqdUbexdx`xg&K&=#kDl!rWG-u@Sf`q8iHdl)a9LsrfZD3W)w0D?
z_n&Glw{?|TzuIvhJ^x_Jx-V|+eEEeJ|A)&WvxyYh6s!hw>u~nsmDfIWW_@|sCF?TM
zXx$UYCU^v@Q{~WHp7P)S)vtW@+MYATx+rhpYP}Piyr#C3Co+P-3sz0N|Cg6u`jxBs
zearcKHu~3Y?NwK{Z|>}`b_T0fuXI|g^ycPfoK3o%0DzbfQAh2ZG1`^0`s$VIDWz*y
zZ=5={*)8na;_$}K-oa${!fThm@YSz9aQ{93;eYU_ceZx_=2I`<{I%DUPy1l5Ak|6C8fKzIN00Ieb!9s)G69!oIHQ+
zM#XV?bzixITLE}S+%5!s%o2AQ%(*vN~o)S=7r(f!|(a@M?Ugn+ICoQWo`M!
z)~)@6+0tN9DF6mw5CTz}mCk`d0F<qlEyXD`_6XGqd65qq~`)}|bc*{WNLl6PxgouKGh}LRC
z5=fq5e0b0_O(%DQK~Defl!2%#7sk8IM$m%a?;wEsF?vR=d01!
zO0-eP2_i!j!UTD2GYkPCCP1-~g^5H{G9ghwIdzcbUDaYj8%lNaDjy!2UNXi7vcYzf
z>&L7wx=2a`!aG?m|FDoiL|8yHh=2%31kj{sIzUL+A}RycD4iKju1Od51OGYKkE@%1
zCa0ggdHH$RAIspG^^d(?HJ|&nZ=YB!9$i?zcd4I3J|4F>cP_6kFD(w%mxg)XcNi#4
z4qx2ge(l!Y3!*N*`09NdC+lM;_T%Wmvk%|1`QD4K-}=T2zxDcTx;-l&I(f3aaZn%M
zE|!+K)7uzy7ka&lf~PuJp&PYh2Py$y?>zjUxGeP67!<;-;gDxdPz80z5bI<$!c1ITpBO`FY_Bou?r
zq})$QdRg91CT$Gm;xzjnuy4BibTBW8=P#TU-A~r3tH-tn688KR|F{Vmlt$`&8G>K-|
zWQ9N^0+oY8LKch>qmKarP{g28NE87=2;Mhp>yR>|ofgG4B#a(J0Z_Aa!XPOIRAekg
z=Ek=aS*j6|djA|fVCLPSWt1QI!bB0&-is2m}QCY=>Z
z>XcLrt@xQs4GG8Ypsh-28zaex=)@2?ge1yo0tJG?1fWSV877i)uWFRI4)m67Oyuh<
z)=N(Lm<$?0G60%1Ggr0uZRNDdiacnlVoV7^DF7t^qdbeu-U9&}Ou$}6=C)04LAx9V
zHbHRKDi$f5G_FkJ6A3u&thRweXrshh60i0lu+^Zn1gg}$x?8of;vmBWuBQL
zpowC&mE-|}K%zDV5|5h4>^ldowaUlR`{+cH#Ara)`@Q@DA)JPKyP6_kcjZ(ek{YjU
z7>y<^w4E%s%F1+>HHkGfax)g4y1N7bDeJ=Gq9RnG_H~BJ>LeJcLI~Cwlj+!N4@Z&`
zWXc%TlxdctLTj^3BP&6QNk9OFNkk)q##=9eQ93i)T2urPC?trmq#H^#SG!n`D7HSv
z1~g_lnN25%q!MVziX`5(oOM-h+Byo^PFL#?f^V9D3hgkSPAUa|cmVsiIt+zK%<|}r
zgn*%mWhpM_YylhPn>2|YrQ53~dUhDwMhs>}19?B{p_nQ)WqYv43^B{d>Oh1<1y(6(
zxx
z({jgP96?>J+D^kU%ZkQkzVV@)8cBtQNUBYv!l4Qryvy@JouWoWXNiSaDP~dtD)&|?
zLp@O`B@~5<5=a%p#qyA62im8kyL%iK37U5IbZ;;UIBPXqOro4hHnk=%c)b6<9bD9QN0imvpWjtrY#`GeZDgT^kT74^WNwIj}~@F?8|_
z5Xa4|(;f6|N)vhV^fIXY)w2C>o_b~EWJpr|pl^sr9C5@EKM?et_ALOI`Fq|5%vg)ga|UB6nGwIBtQ{?d6%?qEs#kV1iAfkH)Un!4;}D?pLCAyNSP
z#`X>?Tv=Ypn1BLn3k)4<-m|{^p5=8g@!JPaU%hd?@{P7OB>@1dG)Dn?`;W57F##*B
z04eyCFcATfMpyo+552E2*brsJV|XZsp%#x#7)8D?n(glIEM$0gZDn8#=nPs58bMRB+V;o1#9Z^LUCw}`<*R8y=6@T_Zm`+Ay)DZGc3P?Ob_43Z)V<$I{4YClS
zjxkj&r_dlKCg1Ym_|;xM{Et6ykNenzu4`PLNmS*ieSNh1{5Jo>%a?CVM`ese#VQSk
zSXN9gZZ*$tx1ql9$ZG#jeDL9i7P=2@tf>ra@~AQvO9T*F&@^uFARMgh<$h5c6+>?F
z8D1-Azy4cKKe7yuojS8J=nX@2_L0+HySVm+mu`GUr5m@>i%q@P%>IM>PW{;rJ-#t;
z+mn)9hw3_SnXKME*k9;(D->tVs_G^cU%9pS%C(!D$4-IH*H_lMgM3m?GP|PsY4z-r
zZ8I%GnQ6VXwRPZ
z>o%DFv3pLhoqb|72?S&!LO=rmfIe
zC17s9-@6a5!w*Bg;cdh2xAmF+7jOSPJW}N-aNP8J)XC`{+5luYm~P)
zBSaQK0bn6TaAys?6YrRoxRao|yQlfu6QU&gwzmZWAk5?Qnj2}WSe4^y+RJTd{oc(j
z5jlU)*^^n0lWDVMe&fdEzj*4Ek;>Q7UKR2Q${J(pPX5D>J-XVKX;3_P?%~s>`TL*y
z>fe0!wSW2Q^SMo{He*e78acbr_);=caBj;gx#+da>q6nBr+FLK7SKx?T*NB#;
zg}^B=0A_@?-NmMC#MKRJjrm}uWD{!Nt!}8i%O*zxWsF$@1tLfS@0|Adp}>mF*T?PSxGiiZm=q}sR--ncNVG}XHd>+h
zJlf~#wf}T)bRt_lrDns?^NX)tz0p1O)e|51)x)?JpMAX3Y*5%-Iq{)$C$dv#7i@%>
zK={^;=YQ#+{fnI@-bxe6-+w*7XV5uz_5Po}wzu`n%bm50m&&?n?XW&Sy`b)HR0)k&vpZ`+xVdU%hOO&9cS)a$*ZHjqk-`)1>7N
zfAYzPA3M3Rx_(k9lLQ0+)BxzmKmQy5;9vaQt#VXNTfKM!x{HgcTN~GA9j~;rdAHlE
z+bV${7mJSTwkkoJIaFS1_L5W
zB`p%7cWou4KoOz^W0F>$BncOFtF=b05Rn;DN;b1y5Dg~~#I^J8AC1J?2NCYm^*80c<%r+a(reQQz
zL{@1;)pT=B~7LrDBDIX
zI=xl}5|1KE2ucquT0|+(MhXR?9+wBDGA{29GPl4;sST(ouz~;?Wkev50GVlOh?t$X
zIde>HtLsBc!~v4-)|wI|D}w_I8Ge{uKba$9BEw4>S
zhZPp4SnABKLVAUD2H{-h+gX;kNIF-(t%D^HMi*f^%~M+G*u`N!nKZkHBhAF^VOC41gwg-DI+wBNm#NRvKw}S2Tuhw>6G|jJ7d|_xq3ll~#EthR`ru
zP)=K=)R?`onKHvzVB;5s5?Nho$~~Jj(K=&Us>|T~h*Dj#c8yJ;HKZb7>a7-?kY@Wf
z!=7G@$?Kku9AYygECE;%HN~prmN~e%W2Q07^5krC86*gfp<`4?eUo>RFS#@nppob>
zkafzcLLH)@uNDz~oE2FX%SozhNNlYY5fEgqJ777oVMUs14k{*VZQy`JfPy3fKuA_;
zNz$T1jgo@GAi|l!u6DkSDt7=p00jVG)?0yGL#`o`&lOFoFp9Q+IQvVFoc=Q(y&uc%
zFJ0O(nc6&g?4d_bX*jDuVKnWyF8zKv4pAwKiXs$678Iz$#o1ILxh&;<2T*$-a}5xx
zF448ryYTwWU;Os#U#=Rj<3Qvf)ZQ0JWm&S^9+rEwEO&7C=pGtvzxMEp+wO-R9c0NZ$(AWu
zk_RPGl$a$J$s#MDA~JA+o5P<@++nRX=j?}b0iucp7AeLm;(NH*IR80k@4fauYp)r;
z?>lAEMQ-QTQ{m4x@<*UUq*!&W&hidr2hLQa_SkfZcNeD>DxbN5!K>-GQsBTpafj{e58uRVXW
z-^K|9*&)*uwK#Zv4gb~8yjp17AB7KI-1*33XLo~uPeyf~jK{li
z+#mJa8R<;M%~zhkcH#V$@pQ^g_p2s&-=_ZOuAQ{Y<)c?F*O&uCmrifKekWqsE?3Lr
zd&~JzGn$+^e|g>N#j}?#>~8g;_~LWVDVPT;kHxRbyFXn9HYK+xSAiXl1g(h*_u{msXz6F>(4Kj
zmmaUCfMbYN?wx1m+~cxFQDlHJ`Fe@{SuXzY!MVTsM=qCz*Hy&oYz&YTDgw5Z>c~`?
z5fBBz07M}Uebv;Or3#fJ@0hP#1h51IU|tQ7oB$+MMkQ2{=%_Y>oPqtrY$}rhDu7p0
zGb1De%aRe1iQhJz4PVFCd9zgT(0kwwg8|sc5dHSa>;L-c^0!NBf7{`A?Xcgvqqp1s
zdi-lLLVnxN=%LfB0iqG&Ci|O3Pft&$(N&wl!q<3DzBt35s1Ue9jm(cLVUn>}BYch{>b
zOpZ0~;7-iBDE&g(HHT(8!kAG6LZqk;4ABx(a-fL5*U$?W7D6FdrbsA+Xu?B;zEs~q
zCs{2X&hN&_o|BeQ2jv;X{X{`Jl;kucu(}e7(fJN0ph2^Zw*s-6
zOWOqAxdk7npi90+wQd;V+=`-{Y)L&uLu03m{isw(loHbN^x#7YU7}K5t<0vGG$SY=
z|CX=V??)8Q0f3rHP9V7P^%SM#v>0R*Wzgz1w{zX7s4n}n?|t{(pS^ki+VLabcZbeB
z=kwMN`a`H7I89NaJbLdK6`_hHAMi0d32uxI@i?&81KFarWswze8OHt;1jQ)6yj*u1|H$p&g!jSvA1aYM4qrV=G*
zBS1B;N{Au}3g^H)5}Kr(L5(3L!J;V`3Df|T5!rik%*+}S5oM&zK)yr*6ycp$ssmg?+MvjXKLTEcs=c;0xnR803ZU7bU(a5m@g66=rfG!ttjZp9Lc$-3W
zZtfOGGV9gAl@$|fRtP0Abq60^%iU~Q&DDaVX@%7|FOF`WbPah7E(1nCR=Ctv+dO_+
zAsuWT?Cx$S?fb>*wdLH3I=AMwcUPyU*B%_ccK<=n8JsJdEiOvz(5r<2ZWJV-b{+LY
z6%$X&zVC;az;Y1hJP#gb;HrCTBqv5i-LMaB9FWKXrUIE6k;8^5MJ^HnKn-VUXw%7a
z(6^plHqtD?k#IoMg4prRTsCnsCcgze2&|FRyTA&jjLD*s4Qc5*4vd~OqmeVw6cYec
zu*|I_83LiU-2jL_6yq@rD1}#FrtMk=Kde+Rf%XMr>69qb&>Mn;K)!U`P+6K;N-5`z
zYD5G8W>(jAS(Y)zcHN5zG7(Tp$vJ0cBBH8q_9%#mLv_w6fQcD0C}kt25-oqz`M-CQ
z<@)&X$@qVH>Cz89b@AzoHRfKzm49QW_f-Q$WeINIf3WPM0`+m2grJ_5dETy;4HXy9
zTw1UC!{hlq1W{e5e7NdwpDgbz+S5UzVTiZ>+MoI1zw-Bf#$dD9V;d2gn3S=)gclg&i6aAZ7(b2c85~Hl
zbFCeetD?o(klWOy@$UA8Nw_jD9~shC*nae2ryNyv#e0GOJ;-Cc8meH9E)g$7v)znw
zaby)9_4<>q&i>YyUc=n7cg$f^7}zXZiQe|1tt2_2rQ||Ns>4t)S?wZw%uxsJf8qZ1
zxv*+1hLsi0>O%VX>DL#Zxi`FYa(u6iNPd{NH1+G##XopuK}2o8c9zam<%h3c{lEWP
z?>-ne2iC0w;&@a^;SJt>u>Z+33w0mOi?!L#9E`RLy@P{v+
z$=2SOEdz4tQcUvQPrrL_wAU|>`vSYvk4Ke{&@QeIL%+9wwh3jUai!gZuaNX{h-_93)fE8Kl008_}Sato{?TkdU~V2cFk=~y0d5M9vQt)
zsC6}rCjf=-X$`|y)9^RGvO2RFgeq*zZw@cPZ_g3F9Y^7<1*c#8
zDKq;9x(S*%Vt7N9s9h~yzxK+`&d%1>)}1?dZr{FLRn^l^KfS#@C8YIweYiY%=9hk9
zSe;Qu9Iq+OYZkTn353Jw#*
za$EXc(|%Y;*Yh~`*;vLEkXee{E}QX!wpffTBgB+3wMQ_!hiT<~!9@v(hG2QCD_pBx
zm8cM65v|-^jZGLCb7lm!l;RNA!NghFgX9RPF4U8v9*GVzEYhq!-F^I_dKdwLq5&Ep
zz;|lA#ipCtiYyC&<}BDFqQjh#1`2>t&;fJPt?uj2fu`TXT3XC%7P+z$9^t*2L
z^Dmsx@bmYd!sG8ccXsQ_^x0>A>D+@G^{PDo$kwRbeUF@6JNd_t?d(q9_lZ|u?P6NI
z_T06{E?pgO?d=_0ID7d+MNu6c9XX`k-Cbk8UoGhk0JSe&fA+<%d7`u~w^c)a+DSg)e)JO~q-oCiJU8Fa6Aq
z{`ePPxcOjts&2C75EA>!%F3k
zYAq{sg@usWX>S&XaJnLL-XRo}gGXXxm!_d(RQ3;l*Kg&b#h!r>!OT=lM9c&cT^0dQ
z)uaM|L=*+l)OnB2kpUtTI?v9zKtx8lU9Ay}avp|`z`XY;+NO=$geia_njtyH5G+(`
zStSFw4HYhni3oeAS-=FDH;ss3UkvoCCM935euQIq^NP&Jmv#B1Ykzbm{g#R7Bi`ssAgd_BqWD1XGipK
zdO=7cx$hRMrNctOqKR5Axc~!6iL)Xu%nU)8!83UzOq!!Mi#%+bV
zj*hCKTbX8N?|jI>No;3~>-93HewfW2h=VLTWJj@0*lD4Gibk6nywP&$PfxR;^R72@
zb?M4VTkwQrRuij$f*EXr6C`u!SUN)>lR%7B;ZwIt>sjrvEyu|_?2gODgMc2-T(zx>
zrNW>HniA$g)Hrz0IBuxKrtn0B1}eya#d$8ybKxC3je%V`8E=yxG1XbyzVBmS{#AjSQu<1KH4w(jX#<
za?V-aXi^7&@64k8w-0(s$b`6Af|}{WYr}w~ga!lo9z^P?%Cf@P2hghEaM
zP4a4f^4VL5FW)~ox^n`qx*jE}+`D}bsSZ_5L?WVD2$6}*4Crm2K)py+nymXYON1(Wvoir0_)4>Y@vH~Saj>l(=+GJjshjW%7Znos9%2lR(E#y
zwd0fj=K3$+%{HyK{`jTszw!g`{?M6A%Voc8XS<{Es-IOvDK8hpdU4nmo-2gIlcPgl
znXm3XxV^JO=XUq*+`D_>%=x-$PUZ{tt|$Wl7&ruAM9%>L#LQTzz^>28#qX_S50rNplj()M`q|Ud7w#Q#EJ{C~$5#$dk52F3->WMP
zKFXd=C&QDEn;SFMNQXlUvja7hppfY5xA0RYyV9C!qRT!MiS07F)2Pz->8
zY9Rm=CN(Go5@g5Dv4c050dJe1earS+)pCCK6ZF5Ed;Fai|M~s4+<)7|{cq+teVEDMLO`P!V?ski
z@)bx+0Az})1ZY5jY~a4#y$9a*cFM^R7?=h<
zO1tIExOU>u6in8Tq4t1bSSKA#U9)$6dGc!XKXJ>hzWV8NcYk4W{>sPR_o=7e^`S7`
zj(J#}9`Ek%OwVoq+yCBw@Uf47`K0eZdwn_Uovu%ptJy-^`!rdv*Nb)1MsaIGMWsA
z-LQXD_2K`LnkpC|fsr^vQzUX~Y|54`DVoL*ob!$ufO6Je1ax3#6f++jvtty~*mn{W
zBRE0=0(@BQ%m66HVn=ZaMu0$?pv|TVq70NvRS{Ja%=Q+LN2ke6B|2KO?gCB>Xg*t;CF
zs(~tGN-_X|a}Ll8IWtYFs$f9K!I%vq0gwX{WK~eWgqYDLBSvE@YftQp44`UE%w30w
z3{GmBYLMlxphMTv+}kR66ilFUFP<)le->6QkViGaLPnTZuSp+DaJ
zux|=8>xbx&vL`~cY%GbuM>9r}m;xxoj!|}oKxGm0Fi2uc>wYlhP>uU$)Mo}l$ORyJ
z@6oXg!+cUuo&$*w1hE}v51g&zVwKs2$@bFuq1fKO@~AJUWh^HpICt&-q4cene&^gH
zRyr=qZfK8@xNgKdk?fosm{SHZ)cBweo%hD;h|8AaU=n>@&L;cCn4Kpe%eu}zD0!Wp
z3C(e+n<1s%(EvFUpebfuSOx&+LMV&iSQHcqiHhJ+8#T7ojPg+U(w%>-+}bbZ!xFP9
znjRTu%fpb@18COT6A&1oDX1lrK5Ey}3dFQ5D^FyGjsunjBKJs+gR{~jB2+YnXktX1
z(5t}Eg-&=i!_(Ug9rjCb5yoTQ+AWGIA-CO9R}FQ0V(x1af%1!@X{W6Xz`lV_VNtPDKDNwO5bH%*5XC
z0DFVHZuyNWl-D7nv>*aW3aQi?EOWE(6S
zF$W@JRgKaka0W6!dZ_UF4t_mkQvl$v3md-8buj+`;GOcoJ9r2008po7<`WiK%bb&#
zm}YWg$3xeOnJ)r*$~h%4bFLsZ$MSa5;P!j>7ewND&DU6c-7xsyVsWd|NhL2
zf8$r4d1Qa`r1wwn@4B*@wfRK(a}Q>m;-#j#a%nGH$~{C(VtMEe&1^$&2l6>(02e|q
z-gB9d^NcL#W#OB09MCOer!@}4puSMSovQMbAH>eDQXIc{{gFop|f&#hxPPLksI
zFm=7U)UMWJA5$n~!J}s7fFHL}f(=1EmPXhj+)#-q0?O
z^6;b2-ueH1`RMOGd;8~aKKOrpV)sMu|A4PBI2k1u%Ifs!NLqO8>IaO0YCk_dymjwB
ztaN{8_tM_}#YZoleeCk9ufBHc-u;7vgOoL7BbAZ}l0{T?UIAmw&Zu$>GzVlvX73#o
zama|+jOxGiJx>qc^}b(x@$TRG>{q@pA6Ai`Uz|vPonTrpY;R8|qw2xwX`HoO)%CeY
zmO9v|)@IkIAS~-X#vEf)Lh4E5s5dug?gd6us%uWv6n+rbQSnpPPcu656L;;^tpX0F
zmMtunLnU%`8lFCwUYgdV^CsSC+tqj!_V*^uxG9?&nNDZ(^M1tpdxoYC3?NCrI$4|?
z9^HaH7VTH_bLVysrehk~?MagzIj+=n!wKLwV~@9IE
z^&J2V!ytypz8<;4djRPBp2#^D?lVo#UFehBD)+aW?UmkdPhK1PTfvWRy?$*TCiTVh
zlXCmXtB<`aul~XlSN^~M^0~kF)%#^$cK}4;FI_$R=RS76N&NElmuDxZ?6(|)m?X`V
zmHTzk4_#rAvmpf2AOM18)*>&9)0C~GO)$PppofVsy3
zd8imErP%gH+c2gu@#sj}7W?SE4@C`>34uhwObrcK5f$h=wM!8Z2@wduhyjF9fz52b
zqQcf5B09{zL{~d-Bh#!+lfbDc?VHcG-Bl=40Se%r#PL=Cp**So%h9SpFBH$
z_ox0h=g(f)9gWz*7hm|f&wl_uhM-KsTJxjdg(nApkKWxoj$ZR2U4Ih?qSQsfvjLBopg351I!RF(E+9
zVg!~22#I|)3WPY!mP7<1NI*pBsC2bw>Os^BI0%E5el?@$1X6ZTSXe1)+g8{LUGm>>rBGq^-V9d?|Y8Dx?su~e$i(IK=9Zh>v
zWXj~%iMI~+uwB`()o)$JvK*$n>P9X(uoFh|sK`OlnVI+^4z5O=Ef-0VLNSPz?8~Mm
z7ceae6r(XX_0H8LI7ZLI;jy$QDza7XCeX|wqI09&W_Pb=wDE+UAJ)sEUu-qu>X~z|
z4ZLBjnpu{I(H1i!ZfFDe@Yjc=2-y^!S2NjAzW|X4iHI4KnQ2y3lpG^7a*bg2c2eY{
zVb-&J<$610Z-#);`ReS}ezsE*b0Q^c8C;YwhyhU_8dOPL>orG<3L+o{q_f*ax0*G8
zPh39x^rMeVLa{hGIX-#-JLjGE%logd>Vh6LHJqZA?YIs{U9-DIMG7dMt>X-n5rwLD
zxyuzK1WBczYdXq!)9g9FPKMfY4GMjVF)Fby%5M9Swr#s18LIPzF=Rsk^!e*<+;8Zoc?TQ3
zbxd%xN$>m?e*fZ~^1wTI2k!tJq-ZGQq$pB3>I8?B$^5Wfo1qz4-va=kqMF$tIPxyn
zm@ZAr_n$34^7tbccE(NPzxv{>*JkT4+`RRiWTX)#EUH`;Fm%(G@6+>r_xE3THBUGX
z!vwtd9v1n({*^EMsrk7lFC0vQn@$?T3XK5QVg;CE0-z
zLel#57)F!qa#LuPWUv%$+l!Xc!&;HG!^ulW@TEI<{`!!|bckI9hM^#3oA{E}`Ro*P
zQT25&4$D5(M2>LK&A
zs9Q-*T>sTizW3k##JeTlU#9T#mySx_8K%>wl-Xk!{>Rtu&sI{6cJrd^Yj%S}?>&cq
z@YUCT=7pn=zH9oIzwiBz8=RiZq^c0gL=}uIsz4nkUH9idc;)1`!3i%y-*7U&%vU5ep9Ki=%jj1%HTzMY3$P!~@vf7rHw)RYM
zaHGz6-KG?H=8=w9yW8ddw2rFJu2x@s?&f~O6Q7@M_MbencjaKZKPe))SC;y-uiW{G
z&wu$td!u(>y-GQ+7wbio*Nzr9=IuPg>A6eAYBjak=(CU3^wFK^#Yr)lR=Z7wh~x2i
ze}BKOY5-8vjSslyB~V}
z?i4(}>0Z8N7ymcamHMWC{g$Wj4FS)u^X6}IkN`uHvS4JNL_%2&LuBun8AK3KRmDtm
zp3P>={KzAZY+rh0D-^5wz1#Pm`-KGr(qsB%rSz2%F(9~2C_nTM!y>TNsbj45JHK?WBX
zp{U78GtR_$6*UbA&|{sd5mb}Ldv*09fngY29TSz4k?45m_2aTV#o@H8gHCq~t{4Wi
zJ_rhG8Yre-Rf#r#eE8b6pZLWu=w$2Ml}GL#ch8>o!*s7*
zS#6wiUZXhY3D5~^N>MX74B+-xhaX(u_~15Ped^tAy!ZOy?8U=h%33|>>GkC-^7-&c
zbCcnxpZ&!j%Cvv!>`MAy_~O@o<>lKqV)5Fd?Gc8Qin8>@NDx&s_KBl8)PbE6DtJ*d
zu%PVK7+7Pko`4aX0tiV7L=%v60ZkbZ87!e7X2J+sV)J#z>~H)xGMbqp7(Nv9F;p|H
zBAN+pB<$)E0y3&2?$JxOsL>UHi?VQKHb^!ln1xu=+SyaR}HUiEw
z;gZmUH^VH7pp1m3sv0Sw2@?g!ju_FIdIoTah}h>ufaohwO#tH9OHy{3NSJ}y002bM
z0f8efLhy)r=u=9fDd#NCOOh<+0Eb}!rkWZ7D64&k$R=JyB>@2DY#^i54T}Ym;{-%b
z5u+Lcpkp;KiQQ`Ey;tYJmu^H|*OlY_H1zFaT~>)Kc2Yo5>76^>dQCnQW$BPC3IGNs
zwW69#i?YtCv(PX=Mh0?4>_XS|nmpv4F-yYOud<|F({35bZFzX=7$S
z2SjA&vg$ew%Qj86w#%|fDvD%|lQGo=)MOZGxppVV7CSAyRh6M9E}bsZIs=Lss9K>=
zD%PQDjaU^`Ni!R;8apnWzu-<Tt
zp`QWq=q#r0@ZYa%tmXSZ~&ZL>oO#ctgrnH>80m8GxK~%q$|C7dP*D
zlS7bmHYTdQArc}FLOIvyE#-mzH=Y02RmsD0T$UBoopBj}tZ#?gV<+fQoMeQE!G{pC
zx2&u6-9T8Rc&4c@Z#7TvZ@qVK`^mi>Dzk_L!n!H~I{-qr?frZDB#ZlpfA=r^;JviI
z{a`e%7ni2H|NQk^KmW>dEp1uCx&7VTN9a09X+4{Xn^5I_-~`yqbfvXdYFx9
zb)0~NeP1-Irm4HRM`uN?sA$nt25>^**WR`7T;jKa$N>PVJPacAVJv~aO(Ll8;CBM=
zln36yJ9r0Rv{VW)jFx6c%LS!;Fb@LS
ztY+`Ovh$v^dmniGQd1(3{m@ev4cH{7+&**t?%(_67e04mp7XSJKD)k{VjOsIqpafs
zVBo<`ihp|T&W}ESy)K$P4^QvAOJ|BFw)PINI+A>A-5oCa`$x-J8&ef>8D@uz2Yo+C
zD2m#1*bTvnB#jUpsxcrMh-3qMs6YF5ZM43wQ+qn3|MI6l|K$hA|H=1!@bLzr2(yO&
z(SP(OmiKP|VC>7^v}9
ziBZ|K1T9FiDFZ4H8r18af=>o<4HD`|CP_*VX2_vmNw4I+Ymj`F&O>V$K688hi#NU!
z)tYJ5ce5rC$Y4^p2+}QlP(}z)s^znewecCN*-8q3CJa5hh(%bc2J(nxse@rbMKeqK
zlV5#p;^=*sMqyfi^!tBMZJ6B`w8dZe_|=P5`rFrLFP$D2t|&wU(NR1hbUjtGxjg@c
z7gvk)p@TE$ck2&c90k@=tzEWc)F(YLENT63UKoG>-ba?J_Re~***|#c_TlXZr=zB6
z3fJtOLCOowwmg{{1af^l0q3>v+T%N?_wPF{7HyiGJEvvwg_mFa+;d+!v%URg^Ott_
zo<4i&Oxbu5SGWP?!R(~8ayp%c5ROkzmdkZL9s$7Jd-pzb0$+J>G{+FO&Iclp=-Bn^
z!R9j*Faf}FondVh0C3fNl=%=1!ZOrdL+j3IF8bI$m3~!?m*T6HnvRU7pnM
zx$;!td}B6jZ%xA9Xg9|Ft`a3*~9y<^s{^C_V-4U51*t5>B!t@XaC&J_1mwUt(vviFWul-+WW(gj{b}HAG~^a
z$I?zZf8pm}IXpTZ9WL?EnJL056N*U(p3yg-#WmDBj0D%LW%N?X{#NL;6p+hWa&9rW+H1^Jd0%#`!
z8JG=N!4W7SFlIEc?@*7t3}^s`4;!>MBEq0X#ZY7yU<=8FdvQ^y&@duReKBHPimx<_
zk8M?-fjHy`*Xo_U9J>-+-}}Sa>yKTDf9jFxcYX5T4BDJ@`Ty`Az8loH-B4q3{qU9R
z53W-JS9v0D0@9^Yak*;$$(LUF+1FQTG|Ja+_JvF3jvKm>4TGzZR6!$i69n#p;^TcI`VUr1Vk}IH^+^Rt53$c(>rK%YO(Ud^)KsgoWPz?l2EQBB;4vxTp
z7y^Q`B4{kod*!7M_$Jf-=t}ja
z=PHFURK|-#TinCdDOcmpPib61-EdJtD98su?o&kKusD%ZS&maQs0c&pmpNmys&bS{
z6b?nzY>h~;o9Sw`UaQ9Ws&<93mQ8WeuU11fUT8cyA)-xBJh6bPOG!jx8O#9+G!6>`
z(KL8yZP31RqX5+z>N`kTBxQ&am|Gxr!FkUire0G@0Ecnyx5kLLQP$FwQ%s~9b9~ud
zM(AT+Wi-bg*yX~(blSuRQc?uW{R)>Sme-}nx*4a*8MG{x2-O5+NB~)fHjTH!WDD3w
z1$M!Er)D0>q#{BsA(p`)dqUr?eb;SStb(`DjJU*J_L^~*x=B-x+2%QQUDvL=A1YBLPXRp?oPLN+CH%>%TS>RX0{~u
zHr>P%K$bwgOPHE?GLfaSg~@i)b>ND`=x3oSN+;AO%n8Uhh?t2yG60}j+qU1<%iV8xs0W3tl0}kg
zE^?XK{33#T0Y)Y3Wg!
znkBg3I1l>|EX)jn00a$?2+0UAzeBP3eT;X?1MlD+yaPx^t3j6QNLinlHXk_C{L%Ma
z{Qh@8fyqo&EkDdcGJ|GN{o$*S+|>wLx^jMeDlt9i)q*=ZJo@jS`Px?(11t24)zY{f
zjev5U`vKUrW*ExubkEZh2j@;#>97dnp?S4E9oThi|MRzEy1kCW($Oko)k`RYt6U~a
z6hUTI)AqDEB~*s2stIZ4%t~=>CNp66Kt#ldh7eQoZzH!5@-h>ZMH5r~V_!V^>h)j#
z{;ON>s{2n|d8)vETESoY!B6~=%eVi@y_+{qn`dv{L!TsbhL9j*phQ`_uHaIvBPU2c
z8su!u9Dq@@b4Hw56_pT4Lx>g)2ISHphM<%XGd5Tm=r_bl~cb`cVxTkIET0zIdaYhGglNT_Z6Xw
zzGH+?mP*>jvG>~-w&zWM{WLst^Va8Xo;>N7O>_2pFYNy9=b!)F?bR~21(vI*mTBH~
z?>TdxU_EWha@T7rzI?oR?X>-4m-fz%^3J$QYXl~$Ag5elJ%BvuY}A4oqf!}4$PqN>U~UcMFX*~yFozt
zvoGELrLSJM>_Z3~EHf!hir~@K)$GodaroGKo;g9i^TUc7YU#*OEmdu}?NK7RSq*4EZ|
zJdQD%2_rh^wzs$Yqu?j|hl_h#=PuXB{h5pJos7l}u8-TBa`W`Hd$%9GR2z4gUj9G+
z=|6d9|MB&ZFC1*e^yHWRA;)p%U@zSlh3JF|sld3d^r$4l>!Cw=Y#
zxeQ?JO@cCpfhenK@K|alS+6l=hna>|Ud^h>rI?coxn9i-H7-+b$GdeIaSGczyuF1I
z`eHg1MUjTHeMfe(KD@QQf0u~-)pS78JKFsUwm@5QTPdb(b=X$4+GAdlgh&%W@_ug~?|)xu4O)y#?j
zQ0SloaF^RH^F_>EpOq-}k<(B+JW(r)TFU+V>*eBbE$dTP7FdkhVL9=WGVpS}UNgOn
z^=H3)FY*I*dua$9rZDxRda`Vner$c}8XZX@Q(rnq&XP)}LncgMd5DsXgbU}Hok%Y!
zsjmrD$OaO%+yFR&KF3nFIf-SBl>3Ys6#{mF>oUCgvsK+F*#M%601+V?A%ZA?I%Y&%
zk{}ad0Ax{+Y(z}VRwiej?O`K4WMyQ>E{~O&9hD-|sWJmIsj8#e>|xD8m^RanbDT6W
zAyvU|Qu^c+EU7rJX0|B_fOAC5>^w0u3z+0=rY5#&5n;q33xS#$nj(PNrj60q5RAwx
z8bVT3Ljf{WCNl#w5Z!c{Y@~sFOp1^sV^BqqGL*q1VwYD#PApnFh9r5l2Iv}BbchjD
z7(J1xAEulkQ0RO8oe^wb}BH2Lmt|hQ)eW7Lz^@Y*bwxE9bIwxIVgT<=6op96fl%hhLh&
z%eWdU3PdDUyR!7^qqNR#=mHWGnBhh&NsS!8LmK!!1e;h+0c@wP_a~!qC@>8v#uewB
zW$5E+b|*Z2rQ9EK!9$PJT*bn#eatacm7DIPpIG)=c+{iC7@=Qpxx6>3Tdb|+{A)a)
zz9HWE1CBR}hyVba34sZSD97|${N{J?I|s*rKmZCE%^AqXsQcS(7W)R@MKjyj-M}}<
zT_Ljm>hE@gO#=!^j*dMd0Y)@2b>Fc>?RW5jN3P}VNj
z9n9*m9ozXjXDWBLVe#+3`^oRRF#e10|9)tETEzSF6?aHJXUZ?RVy{{(W-pzcs{L5jt@pMI<&kt)0he@P!v>*-Lg8Vb)-G50Yl?r>VNdvSO38)
z#W+CVt~6fpkW|L1@4?|lB1`%4|^AXxC_gXvTIqwm|Be9se?rj09svaDPYz9%0mD#Fb`BmpU(HYM?B+!=s;r#&rg?gQyAA%N?aOE%n=~$9Ij)DHE6FYL@X~A7
zkFUM9Jsmyf{dnU4&|^=0aJ%{3gOgvrKYM-FuYDd^Yxd1jby%yZl)=Dd6-x<*LE_+w
z2@yGP7N+g)V`u}Vm4hlVNlvAz6EJh8rT~6xGDzpJ#43WQWAel!@2lYd^AFcgp1rW!
zd54`9nK92a+s!W*`@bJBw$B*T4AM
z>HPAgE8OSL{n+1vZUzU{&fa^L{fS>aIJ0%-iBJBKY2VIHXSce~73KJqS^eO~?JwQ?
zrQ&pD@s^K|MyMbFP0u95lcRgx?H5)1W_pktp9fMOAL${5H>CDf)AHy5IG<
zP-mLm>=aigr2Vw1^6_Qb*axXb|o<6K+qS%UGVp
z(Fj7R^A^&I$>x*uNv-687lOW1YXbnl_QrC2!%YVO03rzB9~P;2<7+Eq5UAXwXgHl1VIoR|R89?X
zAah{k8c}VsD%3BSf9@zRAH4G6i_<^Z5C80i*FSpZ+QHsB|@ToId;dgCBeS*y;oC{d_o;0*S2ejzv;k*V{1Clm||l5+nfzVKlSG)j35r85PcH
zRTXDBcX53Rc@6$JSEI7pir75Jz8j4NX)AVh;SH>_)q1rE&QV8|qQx$FpIqvYzyO~ZZTe>#%m|6lF#>ODHC{j-Dg_`QkOKe})0hQBQ53}(k&%!D0316p6Um!70u(or
z<)C5+M6^+n5|K@kx)K%DWM+-SfB^OY&qqOYS1f!
zI;54lh$vtqU!&udCGulpTCUe@sp02e7nQ;@PKwszL@h3NoZ>%ohuKRF?lwILT}z|d*-wr$%50o8F}z{CbQmE_P*jL-{{
z)wI7mT`#Jp&MFGn_>kKkhtB%0i>)gIdsi&x!+ODS*vzoRrT8iVl@=jq$i3v=I^?p!
zq8u2T3~{xxG!R;enFbnG^MI5wp($v#96gaMD@BSqJI97$LBnKJw5MyuV(__h6hs!N$+xs_{Pmasm_lDN$kmGS*iQi&pGz@{LZX61fVyvrn
zwWzA?q1@?{5wFpya40*!GqoI#`_mhxun&{us<5?Ht?y;c`;*bT&+I<2)x7`8xVRJ1w_I
zbLiJ)F5++rY>(r|zw3z~{(J
ziE(I}#>}=S)pEIZvf91(>gDOybbt5kxeNF1-T%}6%#}ypomdCyNyW@&5ud#`|Llv`
ze*X1%j_kWHAI#Rntyu;hyP^y`7_#(zPt2RT$a>wWYR)+^izM#|4f=J<>Iz>oQ1)HdU1aK`_eYKT2${TLCyB~Z6=~#0g{#zc1N5j3
zBFL
zI2zSdj~1tk7H5T&Z0b=(WHof61mq#pC@7GrDv`4Q!$3U0Oojjj4HBIcsNx+1Xi6!g
zK?sHv24O@-6EG!J0NN~e6rCX`s&kQ1%^WahWz8GHoLDvlBQ#VIsW9kAl;3P1duuAyz~JjMt8eB`on>LpsvcYbf-DI#1;24F+U#O;1DKij
z-sD%yCkke!B7kbjh#RunCX}#;yU^dDdD8i@~sNNp&>CQ>i;P2kr9GRQIO)f8lERiT8h^
zM!vN=nho7rduQaLVK7IoH-+Fy@mf2#dpAepVtduT--~af<
zfBwR~87#_Ts%jlBJ12knsmDHk>D<+wa$I9wmuA>$xU-CZ^RqAi(zDMEAvg#=TY=Q$
zI`5EM6h3e!6BKnIl1(I_M~O}ysA2HEuOF<|Tfx5jZ1txe-=g9Ixn?PopL*_vFP=Q;
zDYJyK1eM+a7R;Nl6AePZ4gwee2s((E!E~oCN(>Z>t#D4d+2YWi;rX1_CAm?UG-WSC
znuLkZGIVQGEV**Y45HKI6(96U=q}U+_kF=TGdMMHtdETAxLO@q$dL9tO)tM@&wb{{?Si@AS%Ts~D#K6UBbm2t86(J7P+<~I2cK;7T+AEy!oB9o-#
z`c6bnVuy~W-k(VN^2@h=?ppup+w=R&quPxOMyZdzu%|)RL&e_HR&3$4ob6)q
zmp-_A@$C4?gZIBLjAlRZ;UB&6;6HuW`R{q`@!E#*WMnbjnw=b*JE&`q`^%VT>op^e
z$@!x4m1TkvH^vanf*}A1F-=*&M;jIGg(wTFC
z^Mlj*O6BT>JyJ<&vFz9P+tt~oer$JtZ+y_<;L&fDK`n!zA`i(aj1PUP*YM=I%kR2$
z9uae!x28MFkfr;?V`u)%0RQ=q|J*CRR(YIOt7BH5D2isujOvgOoU*0>juF^N5lqb0
zWkAl*r$HeqfuU+P5inzinsjK%u^}M^WOLqYmf5;HUabDzpYC?`-W5)tymIz^k6m1L
z{mT#T^~xV{hdH&WTUJ%GfBq3QY{mg6TS?bcyWWyR-^aY{`uTjBa~_A$4}a=?i0=pc
zw*de|0Ku3es*&cL*)f1PfB+dX1m}&xfGU8XkhI8%K8pm7qK>G*3IIgR5IA5j;He;>
zXwVV_W}@uYoi$|=0057U08D|-v3vh`NdEcpE{^t`pPrc@4k5LSEL0x;>PML_ZHz+>)&(!J@@$PY>t2K
zvk#zq<%N?jxUGxhymRTy?ve6IxQXG#d)w=Ssrg>l%7{vZb0dG@Y1Y9p;gH(?xYRgZ
z9qMvmZZc8ivMm}}t!NnoCUyv(X}lc`p&o~19((64#a0JFuAOxoMLFx^>5CIXJ(ZRM~N@Fj_6=P3_q`@5!+u03r|q6Sf@y
zY!r1A;f+ebJNOoOQvqWmZ~+FJJVlaZW<=p9CPGt7Ij5w}zL}9}
zP{Sl8Q9XBGw-H0wa7U?xHUMu>>2
zh6V;sbKZQ3sJ@{Wf^Qw?jn>ec2bmBs07MW(R5phQ
z2#mBbF;zfgVn$#BWHU@z)Repj00lrb6Ga09MSu*j@e3d#0I!-=HD)PX*o+JUI>(G)
zgn*c{fEuW<=_XV(BmnW2L`6j84I#eGr;REzBS4h|L;yg5Z$1h)eg*~x#AJYok}@+F
zMd4JL837EC0DBfj=a7(@i2zXu07!;R8)sKABqT&6VJ1TNFsdg+L?Z+DP%olj
z8NeKpivq}0jvBVuc0E}DaJ}U0ybBBk4X!GRg3T-@1|(BTVybyu2B5kar<8_vJ{{H<
z8h-Di=RdaXx2bsVrN@HDM6s8lJzgB$x__fPIlg)KXz|J+?H^ArA6M0B8`jO^l&=rH
z0HLMS%}*(7D10ZwdS}9>YT^wX0D=I!Ar3`*!h=`fjLy|fNuwTV*7rpXL`bM8Dfg1&
zD$<~p1B>DTLIQ==FQ#e04Dn_oE^?W#vQ
zmv(L)uZ~vhlWurQhn~!=P;oWRvw$})#lSda9yF_Q=O$Z)-%@F
z>}+3p;!MMCz`S((uirgd99(XkvJmhXZub*nqpPxDO%YMtqN*LmeU4JslTR05w
zf9%nZJo&+=9=ZJTSD!mPIz2czyLD!I`LRn)(>=T!~K&%V%NBmz1!X0Ma0E=y;!U!lgXrM#w9qYK-J7TkH_t5l~1WG`$k%d2nB$!{yd`6l3veH}th_+TYB
zp}Bh$VR@WQTj6|q+CKL)VYFm5bhWiwM)Z*?obxC_b)hTHt}eg-!r99uh5gA$Q@VFN
zm({|$-W0HK2C!XJ?>Tc&7H*x_9SkqsO$?k;4`f_v9wN2SaFN{_h{enV5nVwBmQ=)r
zDnaf|y@F4KN=l`&bKGUnBw`>W7&f|iXsxRmVdKVxU=Gks*l-{PhE3@iK#dF$1;j16
zc0L3j%0S*3LN~-t`!_GdH?&hb=W@=f50lHAMU-8@*o-%WMxTE}PYceq9^;o9i)n%cUd({WTH-
z55*Nl5m4VI$oozx5B%D5e(Q^GhPV&O1cdg+`!++Vs+O~Xne)y$@BKr$Nkr$ISpK~}
zzjyEse$zlHOl?qtP_{!$rk0bWh~eyjhZvWCc<;&o=u;Ojmt1Ub7uIyc5g6TD^iYh4
z7`fmKWUfRRat`Eh(u6O(eEJvv@sIz|#f$%!Kl%Y(jgBRu>#49gQ8WibOlcXln&*6N
zdGhM=D8mR7XQjdyfBVa?|979i7RVrVj~|>Zzz>?My~)j!*_uODkGgJP?^;Polp>q=
zJ?3c9p)}&yK!DU#L%6rlxiMJUf|HM*8~xdjKKbY0^Qfs4#(i39K{MpX#=2`yrlw?85$2XSOcx?>w=+B|2C-C#z2T*u}0@X)QnX
z)KhnF-TvFppN64-@}Pd=%=x{Y?YZ!mUcYm1ovtr#&C>WZ?P{0R($RW#v%j|s<@qzO
zz392nwPw3jce*GGrz%a`lF@S%5~advqB)jxWE|E1aS
z`SM_=9yuNfz-+mS9TZG{G+F~J+IjB!;0vxflx(Om_A>a>*aE=B`B1pZ1yRx1-|G5#
zY|B9VRmo)cPfz-u8aElaVMtwjc-*oEqMfbnOZ(fmZr%CC7jEuNM$@V|d;ak!uYR})
z(5)AT^INBg)*Q3r2#P2GKwxWbqQ80!jZ0^NF<31eDV6Nug&4OO-alo
zN9eisJP;ueB?j|g$eg&&OnpF&L+hYLTXtRS7EChoV1V*{P1Y
zcDY0J(Tf*~)UEo{)%x^ZmoHuY)F%KCfO<*Jc?WC+0{~}0XN_kNjbbTO2*3aW3$`ZZ
z$ORx`2_*nnp=|7lz`z7VfEgWu0XWEp76F*a3y@?($e4Q@n2nJQ904RvfjyY=LsP?t
zYj=2?eDtrMP5!Mnv;Y3b?@*oKZAr@bYhQTC9Qf_r%iG|aStTP91Hyc{9#X$rt$bO$
z{PN2Ld3SpoFrUuusipCxym;}VCS9-lrVQ;m%}*C6r>8GJ`_&h}@>yg0(s=i3T}~f;
z*X1)8fBI{$zmk2x9WJXw-T(5+R=L_6*%#*5?+>F}#ba|9{Mlu93G00g5000#6>x`-h
zK;9XVh~|t*NCI}OoHHXv6A^i+
z!h}HViCEb)A~1zAn4uCHBuyl$o{Zea(?JCtAt|A$86Y`lNtt=m7-(pS4gkz7W!;$J
zN{(h0n4EKjkcib(L=!xGSQvr<_)S)pHWArIu3Q+9-q4MD*t7VC$w5>^VgN!Q12Ql{
zL?nm_0Mz7RBI%7Vjj|#lI(`El$y7B$1~teEK%4aXrpoPHFjYV`(1$fS06^YY0PE(>
z_%I$sB4AG&A$3(WeIu6vCJ7$SpR}2j6~NRG%&{vS+h!0pWDX*zf|;S3s%FE7y6phY
z1>4v@gBff_f(^+rZW;$4YP*Y?qFH|E&J4uh1kel#$dHHw07S{dFqk5HM`UIwp-Fj2
zvCTP#AfXx|I|SRyD;E<;7)=id6GbjHts$ZfAp{E`nv)I6D5R~eCc5Q{Af5(7GAk9R
z?^|(XZH7bwjl5m^aE&*#Mwj(rQh{v0qtA9uy$;-O_T(YX?;ss1lKs
zEQylmV$yJMLrx_J1#6`@UpLcj*Pc9MI7Ok8C4
zNv%q^7J$?3J#yTykHcxJo$A(3b>@;Dpd*g+gE|G!zOP!vIy|Us|7j*QE;|K67b#(w(1_&K)Vm
zlok`7EHy^rOhClI1UX^0v%^qgzcp5-Zs0za*2WdD(~PO7k;kpQ5rw1c-}lrsTDs=f
z({Xt3mHpFOud~}8@1F-(eDC|73Ksw6S8slPIUM%oaB4$2I$PP}gkn%j#kghRBwzU2b8)#`&u0~LIbvS{kgv-L
zAbJPHfgN|fmE{NqtICnvuJ^Xj_kAxS!!QiPfar%oMx(0l`?4(icHPGT0B``v9di5f
z%>LFhum0ngZX|HKKCJ2F44wYb5550Sz5BbzOf#@nLAqgQT#HCu*E#2t)A@WpAB{$J
zU0-M>W|D`9j?K}Hqr)5b?+embm-QE4c<%D$N5|Fnja&CW_xjECWURSRSoJY7LS2+>
zj(W=k7E1g!$wxWoHl5?}mUw}PRKNN0znP}n{JW{P;--8HK!^b9Hf2!)MNO6-+RH0u
z%@F{JnYnMcP$*ihh{S0emQ5a}LpNFdd_|)>)z0%}*(&;s*@Z>HOP(}-Ym_x}(QP%i
z_TG0VtrqLHXtIsRemwE9&9`2}`6&iSuxfA0t=Sw%RDbIFzDj9-
zva@^kpbPcsJ-mA_nPJKrXktKZ9LQHxS~6*c2#idaz;B)*0%*PkK{00x-nA8oCX{t)
z6bFu;Bru|R0aD#OP#pG~>dB@my;0L+zzBp0sAiyOMr34WrdvB_KlYwSFEn|~eOXsu
zx_0khJ$o3bc{4})=8Aq3?QJgNTZ=avE?UOd%TS^Q5rnE7Kw`fhd9OnF29p@x%t1eN
zFaPFO{OewP$Ex`2AtK)U&AMyTxxRTl>oECc|Muo3
z_)aJfycKVrKePX)lF9<|hJD_fdRVqe6~1{Zr<7uh0s@Hdum|43w~y>r{0Lw*%w5y`;meo*(kDL>QHBMs;}Fvcgz-E@V)eaLO_lpNsp!-sKfa*`MxXLm-g!OTh&L-?O!=Jjg;>%mfkZ)AH=T!dUX1OANg`>%T`Ey@-p+7w%<3ijs*YB^|4nB{$
z|I!B^+53reHN@Vy5GVj3genpAUHr*c
zZvBI=JpVnHu0A^U|Kis5FW#$e
zLvm`q8ro6g0Z0r~HGtdnpr}1u;=johhF>?EjBjIO`)e7j-+2{+Z+fP`Q_yej4e+l~
z9QZBDq;B$bNHkxr$K$4~tGr70?%g>!clO-5GpC35@7%pr1;4em)pzTA_wK#+>Cc=w
zbLJ2Iz@I&L?wpyaiuvV-FP=I3y`OyPg=ZnG4_E0kC$m?NR$hkx)u%oXZykN@bDw$F
z!KH(8`s1hD*H8D3>odEq8rwRr?wot<@eh4UKYQ=a&ouAlIgV>g;h?e3CY3kfXhhi*
zIp(Z@rFZB!_h>`Hs*B526mS-DG6&UY+zvWurgECQli}pB$a=JQkQCOl*$9PKC*9(9
z*xHuScGF6qPB1GnB!|*74w*1d7bn`KsAg47Wkn8ChW|f%{~c^wmfnY9Us!AHaMI29
z-IqFt>7Izm0Ej?GK6lhCGq_0GZx(p6nEuAAZ~pa9z3IxTBj&2;
z+i7&$SXW`UfU3@jrr^B~pnyz78v?zESQaxiGg)RPaxNgTctCRG*a&+|8`T;E6Hyh<
zZWsXBSr_Al5h#&~f*J!paPxuE&;q&8RoqgcfF*NI1m4Jyh>RFu;~$LPks64CAqbKo
z0I_4&w?Z44AOxp`#9jc_-+oXbO-~~Vu004Tb3?DM&kN`js
z6j4x_nE-Rns`=5p+$Y&s+5#aM0l)?{85{$Xd+4J42t}Kcf}t9yA^;#e+04cR!ySUZNaDzl}Wl~iwy>l_fIA57YW)I>}1Eq5k=dwTxj2N6N
zJQpTyxv%S0){X%o3#geo%4BueWH?4ZfZ#BawICj;>5_Af*!yB2F2%m>){T?S4+cD$
zKqv$tqPUbW_KPhSd<9jNdYR>nnoy!Q-1aWE9ie&WBrA!lU5)J9cFoLG3^Y_GnoSU-
z09%fqC~eU{IFV!qlw#*y&Ss$54`+g;qf
zxwpODQ1Pjke(lX^b98WYWoPRY>sXY-B;}}Frv1-EesjT&A7k#fXJ5ElJXo9DSnsw_
zJhp%RpZop~w~0>|>vvk1YE_C{FHtR>qh8Ma!Or&HXt*=19N~lGd&EAYFKIvqgmij-
zG8zrKuIXc^n%UfJHd`*&N0%?}?d>shOr7&g=#Vg_#LQjSAz~4nh^&JWmZ$UC`Fiz7
z9xML2$6vfO*(ru)&oOX*>gqu-%HX!iUs5TDYQ-?;3;^hHubSjbg20Gq-JBdNup%JZ
zZYYO)hogG%BLmkwQ=Tp7jn1F`$VdOv-@yO#OJ96?=RMC&_TGu{joE76$^g*?1^~)n
z-#PuLM@Z)l_I$G>ezSD=QBOHv(9|w!jV=fou>dQgp=|&Y2v(THY-%O@PzEB(n)}=Z
zKla(Q>(r+l=d^k->6+d3dRf4U;VOlMLv)MtFe`etCabkAOT4>WI~BuJfG_lQx%ks}Z<2UcPa6
z+D&tv!fEX@B~6KXcM$O@gXg
z7Ca~m&)-x?eM1*`c<%7gc^tqIBuS9jQ_2Y>jhwH@w+~O;hWxRy8Te+E#b#`9@z;lW
zuYjS!cc%oKc()u7(S@j>h^Sm>c0I}~zM+=>qn~4pkBax-CdE&<5wH4&XXp>Yc;I(J
z*v!mKzTu0F7!grTHJc~^FmrH3O3^G!7Lg4bR6tn2%O3b{d}oH>N@aUk{jsNZ|MK_0
z`1rO5!=|WfU{C?U<+5*>4u{Y{4mzMUh5>6Kt9f_Uha!f&o?gDT-Mc8jFWrqV=i>G0
ziirNhpZ(ll`}EfLKj9ufIP#vSDShz_9~6hTnz-e>_WeOw{JHl%{x5yt>4TwU=$#AX
zL)k;Dt0-*&lOJ}&KINZ&^{#~axvelR>+yJeGN0YJf6@uBj%8Hs&Z`a-I2uDF0%Wh~
z8LL%QyV^f}ZS>;RqwCceKtqB9wwY}uxgUKm(f#ENyu_RoL({qIQlo4@cIZ=N*&=6kOGsUQ60XuIGPk!0F;FTFbbFMjz;|N5=dO4<)y
z+j)9OKlVNEuQ4GJG9FzSKK
ze9zM#J35FCmw~I(RTx%@VU|_0FR;o5MhE*{@5X!o;
zoef{QbNrwF^l#057z*{PMJs7{d@^=tMa1neO33fb&gQY*9*y?v@qL~=IEja&^P^&^
z>)CkVuI_Ff4hGlE#>4H)gU4QZ?cP5;eel+NeOs#YvOizEy<6THVpZ0|X|wFxJ4c^+
zN6)96bG5ZMjYdBIbkl(JgR;)1`@=jMjrR6-0P|_roSn~@eHQuk)3)iFySHb5`;D8e
z>Gop!)i+)z3TfwsL3LF3>mWIbB8P$<*t&-vQpc$?Q(-hV1XoCI6isr@nH8k%m#Z^_
zIIgx9oX`M~gA0siZI;-oDhCEakB*TXDG?=6QORYUV_WuZX}~Ima+lzCOlLkFIeq`A
zc%lvmgfBdHWw&O2|1$^IpO=`N_X$)`JwWYUZ}MCB&bI?UacPnPng;KmG!JBT%KabO
zvAy~LQsJC~oWbZzch7(B##^6weDu+$p2e~_G5Flgw{E|4b2(q_?O*=0&p$_l;rV*G
z=(@>dvR##h+J>d*0x=0{3##5AQ=lToE&w=CU;`!+gb0vOYrscL#z*p?-w+k@D8B2>
z^^ZCJ5so5$k7Jv6BQQ4Iz;@kDr_(pzypd9>2IaxQ!QET8V~j`p2Um}-ymRaA*h^Jx
zz4wJH&ph+o{{DUz%DL}j@4X*SCd2XI($Uf0{!sw>+0XuFdw+TJt-DWO+WPgM`-Ls?
z7}V#J+xPCod39~;iDTGamY3?u0EzySSBHQ0$?fNN&mX__`PcQu32NpPwQ;UnMl$n)
zu8>^BCUk3ErE2e5Ty*__Q#03s^fQHaUkZC?W9jFxL
zz}I_u`$Ks+c4g76wZlwZr~O)F$#GG*Xu&t#!YNZIwJ0sr0EKtOAh?CGtBOEmz@$ZL
z#jAPm6E%5bVLdEW9?}#52$5NIBidtV
zmh;8sr0ERi*B0|0oj{OZ>*o0>NV8~_j*HY!7a7G?ATtoU
z;E4#ym4t@0QA#p}0@k^2`jiz5Lp05StVB1rTJji)%bnnSj6Dzg_G-T@@BU&v9A#sn
zprOjaCrV9|EF8s{J6UgU)!VtdI;fu98dqZXSI0+JcAhwT;?ktvEAXHSyR`?iB@S4j
z1_Km8ShmgdbUvRhV>la@+spZSc5ibG>}(AG=MOUJ;fL?lB7<1Dmcykr0z1UCuLNt;{e$})J9tfEBdz+&z@
z=}YGXEUIkCL94DweJqNi^nr}J)a0)5j*7xpD6D>7mE>c2cHhqLhT$-z9cK@gs|M;}
zy?41oTdUM{NS4IF5jzjT2XZU~W~yefUsj=@zSpROP_8vCq86h8%hocju08&&b&#S~
zTjjc6><#vj{M&bC3D=`~l&X_l_{tXn`vIqV`K8rt`sQT+c9ZLJ@caM21BMi3L;Mm`=NuL+rW?a^6#?%{_DK9-iOxnG;9yv#`TX(i
z;ZtLG*~L#@-l9{vb8ZO(5+vx(Z0(7JP9Qt#!q7Fq}(u8{|V+@4KU2EmAo}Hhb%vVU`{VVVH
z_4w`M2j@%Ga_6AAp=jMHy?p#euddv^vUjx86W>0@$9H5Wp2gb>R-0-%?mSz<6UW@Z9FQ
ziWcxR#|rpS8~Oq;^iArB3;2|N{UBh|=l(8>U=!~?j73otMIj<>+p;g<;W5+=m}qkn
zAL54IjUPe-@BQ!G_{FK&2<(5O_=7MW_?_Pw5!1UEG4QU+Id~V?F|$faDM?IfSu@6%
z#D_rc(R?ObHihr92fiEMiLrL{0^|SqlP`Yq+IBUhHpX%|DFj!|GE_iNjt1z9^4Grb
z`Y*ll=6~=bKd7Yu@L2}Wv^VhQlK$E+e`fgBV;>$4|IVxT{x2_mu`X&?gn_SY;NQD9
zpnm;#zHsB`-+0i4ffqRP_HX^jd;joLTf0>`R$ixm!Oer-OrR=w5OMBWeK6lH2h-Kr
zVAA~uKl;J{`S(8Oy(WQED{8#^1FQ5E(w}0Y)`A5I+gHK&qw>@V3;4@pFcXuIb+?c2z81p4w38pZ
zcJ;q_>E|%+wBb%Oi}GMGo?LQTN;4nkGdlUgslVK-B|H-_0<932=V$llQ{KB8s;b3w
zW2O-~3C9NyWYMf)=0nBRIL5fGRP@=6vfyGcxh%wI%ch)7Niy{`XH3;rL$0hw3Da&7
zh76oFlx)7Dn*ZKQU-_#)eRku#Io7HLA5hlw`>V54F5L@PkN(B?JrBUY`tn=nZRa&s
zcV6||6Rd_YQ5fwc={kwTF6XuMJ;Z2C>VhI+&XU$+7>OFUDK6HUQ(cu2w1pfknWASZ
zyeA@)$Y`E{t3p)`AjY^}uMy2ThlurksmqWHm3Hmb)GQ`v!@*&$x<3EXH2xe)-}jeq
zzOyZ>$F|FVeojvw9&Ob`&WgY>#-!bGGynA))3d@oZT-|T+deLUq1f(UwP0-
zoV6|2#tY(lV1Nz}7AN$DVBQm-%zH^!%+NqCdG78{Vl
zCPCa_5PmPM|F3)5*CmU8V5kHCq-G)S5>ffbeI&o3R8(DrCm*{iSFa#%L{Ig~(bb$Y
zDdrqscy2VEPDSM4;L>=s+pGn-4D_P6q
z7*Frrx^w@nu#^SNp1=7eUVipwKKhxQ=c}XO!&2V)&tCKY!u9w5`Gc4KtF%6seV5h=
zN0#d>O1=Px9dxNwV?wf3+ZJI0rBCW|M_L*NOjYFy8jg9nJk!~op%+r^*10G~6fK(h
z!DwY6b5#^|JibTJ%fK}76Cyq
zB@{8UBsrOyqe{Gphygjrg+s@H&|`w$)ZoHLH)d0-Lf}yD(_SvPv7~KVG~GBGl0_7&
z0>B2cXPXu_G7)Vg?A=DkpEff0fMN=P)gWq0sTU(IU2v2Mf-f0NHe4|G|6FS(m
za)QqZHuDBm6VoW70t!Yj6cI&2XG{R7Xep*{J=yi{{L1Q!kH7YXjHrk2+uq(iEc@@<
z3C}nax=
z0H}4AO7GM0ov-}HuVNL>SKZliertMm+S{Cl_Zp1V-cn=~-`5xQR%=_wU+vcItf*66
zWP?eYfE>}Nna@_|_ja#b3R{PgO+vSu+AhR(5BWLY_E=~42T9zAHV|Fl>5A1
zPZxx>$0{W6HH+(-vhBCYM&s?W8WK^I*mYemNywZdzfNkZ1kSmFopq}7R&pwQ!bnZLIvx(E*Vf!
zRKYnH$Xi;k6VjU4*Hv!TNO-brNQ$_gI-woIQnmsX8N?`ttc-}{mPk3nn99*`t0=wC
z>lTV8TTZhQ$=251^n5v8-TwH;eqc6V?(FP^P;`Ai7>tI)VG+u{@7dvOHrw2(_wLTjygrh2dFSB%@fUvf^qujgO9Mq%w{rL6!6m}01vFblT5b~z_UY^+H0
zcF)vzK4t5vZ`^v;&D-2HO+6wsHp-CHNW{R<98RGhfNiTEG_i{udp`UPU`gR*_HDv+?P@KBreR`+Ku}a)0vs-~af*(a|&e6&E1z&iU-q@7(+Jt&`VI
zPuA*7uQ$8NJsy|IRn4u-AwG3&RO9$)Yc$?I5Gr4}J^jk<pp0FeSFDXW
z%3+{75G{A5ZJI_AJ%bw#s%n5K`KTpMvtSlZu1r+)K(cpSS1PLJw27^EUzzx9{Hz
zqw&WtJ@(OO#y8Gx{e>5vW#|9Y-~7eD@@JmAzV!sHgfz`^{`Bp8KYf4omEU@)l~J{`
z`tc7x^W!^L4z}RwtN!`j?G&n;ce+=9^$UY~3(dtid-mwyL*wyw)6T^gbw!28xE?yH
z21S?Cs%wsq=Y>KOO}OeC9J?&UCnCengXduwm18H|Uh(CB^e2Dt-}wIb&KCWb?w)?;
z_Su(@*ALb`dm0Rot*y2kC*1d=t2^aKuOD9D9y~b-AH6hYK3^r*_>v$O4nFeUr$6}o
z)xGW7t?BQ6=A}>Fo}RdRI-P#(+LeFpM?ZNf0GL?cb!hjO-RtM;Pu;ltvoF7O-fAm5
ziovY?fB)vofBkbEd-$=-JOAnjpScXhh{9|!N6OZAWcnkIjeh)@Km7U(Ub=PsU;oxu
zzjW{JkG}62Mpx>fANcPZ;U}+NZ$GMU-oAY{o!>vccj@5p^rYJ_+7K$wC7U0Hp%K@f
zfp7rYPp6-K=lECOdL8y3AFTS-dam`j#UT}e;`(x-&tAUz>gnu_vvaKc-7kIinP;B+
z_kQRHc7FJez4XT0|Mh2Hy01Q#K_p-0Wv3o};j#U~t>gbF&;FUKM+YOg>gZ??%AEmk
z4XqEbTw`nxhJIA-9}FDn=)r0+DEX74%m3v|ub-|L7MsLrQq4ndSftaks4h!U8Gx!e
zfkLuVk>LH_WUx31Hh^yF%9@DhnC%APitQ
zJTsfmH7@JG1Jo{ykafyfl~xsQYs973Hpi|9HRqu!YljGCscTnu;>$_zeazciJ2p9?Gs=SdGoqwT5~mE{Bv_MRNSotHoP<$I@X
zH8iFw{O4c2{a?H>)A3+=ws>)di<>u3;<_noUu^w@JMv4%Z?;Vn4W+!I9-&Z=?p#$-
z^Dr{P>4HpDbBp)x4RhagG$8-!*KhLV&i(atqVfALAO84rhsU@+*gLF>Kx%{1SB9V|
zTi(n+nEBl)-PiSqznLMyKXK>Ye?0NqXq(@jEq`5nLkXjQ%u|YY%aUF&$~7~40FZ1u
zlby4(Gh+0Ka&X*^{byN0Fl~4_L+iCj`r*SoTaR9WS${|6H
zLYKhx0Ldh)pY+8f=k=&`gXt|tC3X#^&d4cP%59Hs8R*!$SV$=u+r@;r9u!J_q>*Q|
z!eJ&uN@}?nnHy2bNNcYVea^#OD5}gZ7nCl&2yN{9)S5A3sm$g$L)jIP`wpWLc?fKQ
zKvb7i>ws|6Aq7Gq07OMZ`E3n>?`jJD-s2l6+nPB~hzLdx1(7ef?E+=fL(^pdaEwV~
zY1pl{gFrHrmM>O<1)uGN_Ab^~k*a(T4&6n4+llJ(&`b
z0CZ}`jz?}&U=SIaLKG2E)uNgt6;a7KXUB|~h|ySsp+!~!wlu(%L5b{v)s?WpV3N=V
zHNTmHu;1WSV@{OQg|nKfYGQOn;ft!wS`?lgC?hd5s)FW}l4^DcyfF?af~qDJ1T{kh
zRZ%k|W>pol4Y`4wBR~LP@5D5zsOHUI)c^rp%4(Vg4Z#=z$pB52Nddtwx<|I5ESPDM
z3x!;VL_|hp01D=tPI1!#$86dg0wDs6BnBijLNm=MLS|@^yPh`M{e+I3GApA2#XbXo
zfo#NPlZvV`fTpAf8ygh>FdzUiBM+pKzjk)@V)+#yKsHb@Q!zks1Ykx!ATk;dpsFGh
zGNH<*Dp$z{56Kt?2&j-j1+yZ6iHfF;0sw;{0s<*L#A6a7VFolqGXNq)RaH?iB?Luc
zL@))%?5hgNscPTBE@_ol*xbi%9>_xQAY{ogC+~f;T7UH`Um1opJiPj|zy8J3
z==sXE-@NtKgZmlgb2D3<+;$eDxiA_sh2hEZq@5ROx=hPdjPJmfBTsp5E7_TB=k-F0
zA%+kLq3=?&P~r>&AJ4&s-b$ca@>-@Rp_x5;X}b>x-u=ra#m-i
zY%M__n3~wDYTveqNQ?|jCYrKkV9nMvv1xnFh=|VnvLNPaSYwR3Jj=0<>$Ql45OUcu
zGclM4t}Cg_wF_x9N!iF(YT2~d9s0vb&2hOndyuZymkwUstw-_rguA#xe);75r0iPc
z;c$X|Y?~H`^<;lnj8Uzx9Qhy!#Dc0I8ANkTAxqmeR8}}FMX_`=a3t$SoB0mMt9#pT
zwCyZ`B^cMVzg@>^v#6?8e2av(7}#`;MUw+j4>^?ywuifu!>#?nAjWRm&D%xUw#^~R
zvpzn5Ww5(fyR_Pt)t}orI+WEePpuK_=MR5dI
z=CKv!^NzC)sv?}s&YE`R%VJ`~uIr1U7>!0bXJ*%@6q8mSjE%tza@$1(;lO@Cxr-)*
z?E0Af&d#-+?Mw4T)2x2&%bzWH_@}P#efa6?pSpeW+TD{-4io5iO7_gmDJ4X7&cQ>}
zxpPia{D!Z8gg1JGPrpzlGBjrNt`4sBuJ%^36`*G+gK^ty+lc}pbF11&a<;Z*BsCH+
zpj3=`a8T@*h{?ysnz?E&>Om+6f)pW-d+hsO?Ow4?-j~DS5DSHP`_@~pt$M*QsJFIx
z9A<>4^9I(l%MhQsdg*(gxcvOVc*Ha;s>?^aCKCDU_7xv8l_K!UC*mB){<-y4-
z$H!lIKkjzJ7f=|H7L$-@r+?
zvQ@VpiyNuAb&QaEYTV9<`t180R
zVdkXryPT9i2;+fAgOtse-y8*f*VSq73a5}PLKY$*z`}>Z2Qo-1>y(yFw^%INw%y33
z5s=t-#{=Ju@4RruK!r{k`EO3|{H<4SjH^B@xk|g`2X~CKe>*ss0Kgup=;lJnf}#hukY_vR1UFeQrX_QumAaH
zZ~V2FZ*p=q$&Wv``$yjU-u+=6#Lic$*t}{0e?Wl0?X}&bvJKt3tHQAMt{e@H4)?(#
z5hp47zBlYJ#ceGKT+I38-rcTUmyWvCx^jV;C&TWadv@zDeBuLdF8crY=YQ^JZ{A+^
z6JDB>WtCTnWj=Ff&HPF^yE4|#R5wRXpWfej?9x^T{Q!$W
z4>IIce`RO9ggwLD^~pS^gi@bfX*WSMI%3^sAx1V_C{)evYUwUd(y;bzHo}C-}CX{>FDkNd9yu;h)
z)BpW+v74uV@q-`wQD5oFjH5spuyTn*-YWLZo-D34t^cs!zdwER4}Ie3PyX;x*H^dh
zE&s-^fBrY_9?w1XLw_=DLZdq_WaOxn6la1GwG4ydXfF+@BWRW@UaO>4Pd4Nr5wLPi
z!0PJuh)UPRgW7-S>g5-YCKDUpUoLN)oFA`RAl@1j*ABL>9qwNr62YKpa{^gI*B8aj
zv+k>Jp8mqkRquv-j~_X4t2NB(0J+@W*}1oM>Hqo4`txu7!;%XDI4`S#;%%>f>*Jv?
z>&|N*C0hZmUvLEZ#eU`ohFc&yNc4)~|ER(}>Fz`WZ3W+oz3Ktw|BhCgE
zRdcp#09ZIMHj@-hgg^~lxqT2p$tVQKswP54+^1!EF8~D3B^JqCN;0Wg;`wM$+m`Fr
z-GB#um+ne_<9>(ykU1rJN3CZ~}a{fYm!1b^$_XCG@6{>t5d%?mN
zUOa>Kgv`CI)x5@b1nM-!7{Plk>!_OQj<@!s3}27EHoU^gL0+AA{Hb>qH*VkRgA;^<
z@$iS<`@ScK(x@}
z3vHYEG}Jp*OwOU?y&c7FXFhLzz)**Po)hFAlsSyzOoCvE-US3QPFbu?DT60g8bTli
zf!@r>2$W;_?!rtT8nk}xI@$~d9`Oe3pRW7qTaIrwFf&ouAkY=yVOIhGs_wjmOGLwz
znEI9XJ$lRHN~8I`+oRD0Fmw}CL%OQDW#N36A%Lc_N8Fmg7!qt
zxk2B;LlJ-rT$rke^eL-qj9u({A9y$%`m%J)PQcVGi`T3AYnqYS
zF)?6d%{i+W(Z%$^JOe;)7ehhsioh2bH&fL<8Ula;5}BzeN;Xk(merDql2>CORxwpA
zHd^@zYDPrPIRpSxsb$CsL`Vftk%Fj#
zAu?fboA#5aWXX(Vpo$l$Iv^yX-WZ99JhIOKSw$is=8`R?v?&jn;8B?^hD?so$SIm3
zWB^3dzVAWQ5d)Hl5W+?-nA(JdL}-Mn0-};sRW+cAl8Q$H1VmB{Vju0YET0VAY&b3ZVc40wLh6Vv?Qnqky(v-Msl0c|2cE&u3?W$Cs~N
zwW9i^&wloEUw(P#(j{==v!DC?!tKcRmJN4MLi)<*8MQ2mY;k9KW4t|CTNsv1{V6Vv
z({iqAmc5mT!*Z`4q}gIg#c=;hTHY_UHCIFKCHBLr+~1zGz)c(1)eg9F0fqBLE-0sV
zP>nIUMyu2m87caKuZ-6#TAf%htVWpEbT9fotE%O$xx6(CgXy|0ilT|FNoE7fQhMi@
zfr24svfkz3*;ln9Mc_1F41&wudcB$jM_~}a4JZWZvZ2L30cs#vpWPROYA~qPy7@hk
z6hi2&zTGzH1J_r1)qY`h>fof($W@(b$XidmaI}4RaP_M5)8kWQIA3KzAGT!g@L+fMO4sVa(b0>;;dnd-
zBj>4a+S0q!^|9}oejaj|PZvZKV>cKKz*k#adzDu;G3uKrP2cT~4v3s;N^L^4d-v`P
z1|w$f+UT6y-r8-k>}O|n0i(gFYrKl%BCOJM#C5KjnO3LAi{&bzqPNA#1Gi^Q(|GSE
zacj9;d+&>)AR>*lYP*Q&%L)J}=PF_4-O5#U($;~mLR>YcQ_(g#EJE;h@=MHmM{EW8MT3lR}4
zk#(_O^|R9|lv-YU;3u!e)1B4)iSG1IzxUdwUw{3L+n>Xm%bm&o)vfEv4Y+hiTa};R
z-8-Eg>^-%A^|`2nuIok>taK{MS%;b-W*yYyCl5o}rT2WeT(pO+*|LS#Uc0p&E|(DVz{HuV!tj@
zU!-KyxoS?qIV9o&Tp`L`vM8b%9n@+qWhpG8^hJuv1U#Em4j>eT0WJGJs$o%lT`T{Q
z0-l*&y?V8-YY~~x=Y8LgMkD83&Uw0;Nz&j#R*9^X)>Ura|KyWzbwO48zJF99KC&W!
zZ!{tJhIyKRIsB%gwf|H_2w{`>Ip@lrO4=owQmO_D}Z3}GLrtEy8-WPo#PTFys
zjEiX~W*F;~trsuiT?B~9S4HfF3lSmPSYkb#Lm=umbKCe1FWa0LS!WOpOHl)eV9FNc
z52)C!3bI4NXlj)a07XnS6ljupD#iC?@u>uTh
z+OUxe%UeTSYxxgeKKt*!@+%ztWUG3mi8rGEo?Z97-8rMLtM231cK?NEu3g_#!z?10
zny*e5fB(yG|HIerzjD@cRjAy3Zc_f>!OngT^OIFshLmNww8d&QJzhD)0yTTv+8!6c
zjFH8Q&{~#QXvs7f?&J)L5y2000NWSS*>Wb3LS1$b&hPZ+tGC|Vy875Z|Kb%pxo?B5
z!BaAKB8O^k1zE?$|s7Om_1_ZIK`Rnx!tQX0}@1Y=%2q-5PJ*`qEDA4-fX1F%Lt1
zZ8Tgh?u#n{pie!ZI`Ygigve^O)OSrDZ>||{o*X3m3u3dS~yZ`kc`efRv
z|L)7Tf91y8>(%tJ?X7EdIgs?&<4@jPEMB^E|E0HYot&>SS4|(EJDfZ}p##p>F6|`^
z&+O>l&Ti#+6vAXQ4xY<0AQ6c5ykDdzKoyX{hlmu!KoJOF8-T%JlCysQ&UkfH|MB;?
zL;v)M(R+V$+WgkZ`X@d&yO+x2({XO?;8UT
zg-O=s)W|%M3o2%+veUK&l`d_&Radl&SQT+{B{GB<0l-wVb|4o`I#mqk^G9NSo0Y;g
zS_4Q92n685`{u$(v`mJGfZDZlB60vKNJy9~6dQ#RqA*2U%?7!5#0Q=FvT)v8NS-Mr
z&1l}S%}$6sxPqb+)rBq{NWIcnR)m>AEm{w#mL5@7RSHy_rJ&e`JJ2nI)3-YSnPMs
zfqs|mt11#_6(&wOdG8GXSR6au;Nc+{fMg*LeHQQ719W{u4zEA)%qM>I&(4<1wsW7q
zb^rWJ)2&H0o1YAz|I>fukJXcFU-;FZ`syp6O}>);?50hwcXy)d;Uqp$-n*-hX)KBbN)eeB!VEGq6O7hNw459El;s$-V{s4xN}=m%1hS#+QQq-d2;8|TM6+c8VbM#C$8lALq7H|VQ!#bBb|
zkxPQBR0&|r^H7OuD2r?gXs&Q6^=cYYnMqPv86g;nh$>=AJy&9eWayYf;e#i3h*lJY
ziYBUk+$i=FA$w}OwI_Clkc>@@-3@xvKQB7iMk~s1K
zJehId^~8+8B8IA{qB%+IwGRT;qZ(yIF)>Ku09USNLIO2Qh>&6iCL}lRvLOH?5G0M#
zM>Au>Or8KG^(iG_V&}lID+0ASKqj<|K30?D3bib$p$r68unUAJId`p>9MI;)s79xd
zA@!7H*h*9@RtcR*?6M?KQ!q7TcH~2@D}#h0phmucP-Nx=RGZ~?9aWjVbEOfcl%m7}
z6EXwhhKrzP8k3ntaE{p!%#4Xtvsxs?0iY6QU`0Yia>!^1nsDH3!`uKUnSv(+NRoWk
zUTQ>=jt9dPkajdEap?Bi*u@-oc89W>4$jXf?X-@|Pd<0;BhNp#xBtTK-cbPxL>Zyh
z#7XW$acPIPQ31Uo2tW^>K`XL=5fo03&!T}!+!}?!-%pPsC_B6K`ob`Scha3LI}
z?cH=Xbz4_p7*gQVHk;e!!+j}7{R5nLcSnyf|O$x-RNbF>V~I4$9%4KiGnh=J)P{poOx_-N5_onZWDvcE2DL7R7pN#6i31
zl*k}d2(cIyw)0qyJvOJ5x-4W1Ybgze0ms7#$kiyAQBF%3d0EzjGG
zcI_J#vEgKS@7$MNUw9gzbrnwE7}FHX8r+k^gZ$~pwriF1gxlAFXli5V1Jv^*E2Cz|4n5221PRc>@esyxUS1K~J85Uqni7}d{
z{o&=|?w(KSXc!0K$sa%b&63STCwl6E0^RH9=gs^C(;B3y15RktZ9Mt_qNLp3zH{&Pt<%9|^7z%O_wSq?
zyXm4?ZcRo(?H7OMXOc|ki?iu;>b-yZnHR1<{@$(Kt4}=k0@j0V&P_KBl^=y_f3okS
z=-b@Ulv1dPOxoda47o<&1T9gg1M*$By}h?6widlUSiNIY;B0<=GViGO>~^+ywkDIlOAlt#8*P&y
z*Hot44cnSTEM+w?(Z1`l8~Hr5W^Q76uy@k3D_rd6>vh|e6&jk5lcW&EtfGiYas0xoi_n!02
zZ{M3On`uF9y?cAx&8#LDD42e0m2=&NMRt$2IrTVR490>}^mz!?}4I&`35w(+b00!)%)
zoO$xuIrGk$HEIn{05h17DuNLZ5&|;a%M>4Y5*Wm(p<_KOn@diE226^_N8UXsm}_4TskXD
z&N(z~*Uj%jIV^@-^~C2kk>;VAWL%<)hAG=YFOZGgDAPFST6%San#G_;L_-j;q$;Ke
zOo;qV+U4>c&=UFGBKuNZ3Fr`&O3wn;SnKAT={
zM28U(>5)gKj
z0RP_Y`}Y=220tvvMDWsE^6%XHP4xLZEz0whO4osq!h`}FbYxbC%X#&qPhNlF$xByv
zDvpwnjcH_~C8fG959*1T1C(!(q!++{UJkoVyAv=c=`Xfc=<2=
zcR%$fKJv*w@xpt5^s&p2KYg{)e$Z+nBF85u52nZGpEw%*OF#OF+l$3ZZ@%$g-dO$C
z$-J}z-3n5dWyBc&NAG*@zyCwm8BhrkM`oz&uBcp(xsuX@^NLZ>J2><6QCXy<6v`c!
z{`F_C+UVJ7B7x>7>Bv=
zYnB=elLzM@Jd_sT3Iv>ik$$I;NjJILyTw3$U+wv~Ln-LpU-oz6ckH+OHa&{p3-Cup
z95Vm}=X1_P1Zv2eBoR`IJ_KS=Q%3**YN;%}0G!NEo9XfLbo%tseoU)_`71y7i=Tb{
z{&}R`m+$elJzw@yUTHtOJxwQ!kFC2E4v)Ay`*^qh^4?KXjhyAjZF#lt?m@W?(--ZP
z&+a~UV}XOg0CuicKIEo##MJi%yO0sEO5GY(r#Rqhcpz2146d!X8~9W(F&08bkXGxa
z?}rsBL`NyRs9wxvAxxS|=NSPlnF5*-BAOy-#_teft^y$$vMHo2rk2q;$N8T!QO!yO
z;u^XFy2g3#*UK!D6X0+ws}X=#V_;HIz#&N~
z3MOz!h)m`c3UHKHCmUV`qM$JmI(kbAQMs6WTpm21U?`m^;RmH
zG8qsu5g>pffEHO5oFStcZ#*d!vjjNoEvCSIsG%r=!3auTFJkj1WF1zO+&9DeS~>Zp
zmtOzn&wr81k{RdMpI!HhW_mU|SyQoXOe2Vsc2(s?>TmY_IXPc%zsJI3aXrgLZHsqY
z?w0MjlXc~QaxP$GvE}M8L?vh%*QBQH&XNS84AQpj&CeU(v_?2Te`_73R4i(f^FHHH
zsZ9029OXJ0SO9cMK$^a9wIo-WrbNxMbsnlZBSW(yqU3OHoj+fZ)VVQFLn?!lOqRjM
znyRu75px*0$`MOfn5TM_!EEtBQ-fGShy(~~f2N|rIwUKK{&=Up}Aw@|R}!@v-5~Ua1-H--D(L
z>>D{Pd_f13ITVlyM*%toK37b?BCfROy=`*T(vJ=gO|VZw^Hs+irdlz32J&k4L+~*W25>ZNEOB
zpU&FzE>8_}w`}WbSIE^EVtUZD=lA=&KlM|eZd})93>@!c_9*^$c
zzcb$5p3P4u;FF|RKKBc+QciJ|rI(nP`N^aA9?ws5(D8T(KxXLcVL2XON$>01_GV7!
z=jU}j+}_zM2g968U{=MJ)TW`Pl)`vg#gO5G=;@SP>XMXi(7U#G39((#dANT-*BMj_IED8wl
z`t4VzufMXpcZrH{{fTG#C@uo4CaQ`+7z`+>S<$5=1-jA#2~I*7bJ=iC
z+hgxNJJ*TkI7Qj(II?jCA($ketYM}-fehgb|%Wm46<
z<4J7eoA2B?TP_M;j%l2t%w;{Fbvb8CIT%$?Ag@Rzb^WSs!DLv~uH+
zmIHrVd1q^X|MI2VTB$Y7gXy!E4@Scgc6}`_HvQdoo}J%+`_8RvFT8)ppg({0Xm>ki
z*un0#mp@w%MpYYY3D)zVdy@!{2o<^7v?wre{B0p$Hj`QnS$zxTu6^YXouUw`v<
zbk)&rap30V^d+me`iiM^+x6b+?D)q0dCS{_D@QRaK}IwI+bQc^KiGHUmZ;nwR9id6
zcuU&!%9}S<=L=RWh+MyRju`b|JlaWbwp!0%G^as}0R<`dlp1rKMM>FVp#)-*KnNk2
zm?aF1-E9U#I5j~PtCBm=m!2^K&sZ?|=!M
zHzP1n#8Oa!GdV#Pa0YA`KqYtrX)&KZSCV%&Mq>hCTZN^m%%-SLW
z04fq76A~eV0{}7tfGl>QRPd1IoBeV`140UbTocqrxeD-l{?x(!FB3=hN`N%zSXu3RRMt1cX6En2Y23j
zDEJkl-(6;o6oAS4H#
z2o2e7oSjIFvyy?&pwY;TLn%UPL>q+%#AK4PV%e&SW>w7_V1@IA=b}?3&%kIVQB74g
zA6%(02_eKRkTN+VFj!8T^0FUJJVrDmS0$rXG9pY0zI=GSq08^g;hDWqql#vwQay-5
zQV0Yo^$-f--oTZ=&)8SLOKft`3)F&h@BZ)ahxm5v$?x1|&iN5qXCrV4zlX`hqnCZn
zDpL#|3RQ1pXCJX&opYO?i8lBVP*YJqoiFQw55(X5J@31H>*hcF$^SOS#rr?>L;viL
z9=!RvU)a8K`71YGzVn*i;cHetcBlHl$*3%#`>~V%=8C;BYv|bTuG6T;@nmu1JvaVN
z-QHSGj)sF>D4{H`m%EP}J5^XpcJ;8==n;}8Z5Gipxw=T6w86F+)3|Cfq4%Ng$>r4f
zjUu>ZVzJdK_idNDl=@^)kH+O-G#Lyu7szDH0Gp1cA^?doeP^`SRWkxGkpVJ$1w$k-
z3-Q?WQi<
zI7eNhm?XvAbp{0FJD}cV2oA_Lx;sWp2F+yP2+r9#B#*>MXdn^*
zzy;rBQ!-Bl2Q0u5vT6oJmP~AB30I5UHk1>Qsk3!wJQ!wkqUBU1mw^%eup}+f3c6sI
znTQFf0tPQkz(@o+YswMLKN4v2u-zY+gsi7-TSv+ymsyA^8T$i
zPX6ZK`!ofA-gGBXPK9{9zvv>Wm9M?!&mXvUR-qMJotiVuC)k%F8Y)j)W#5$d-&9q@
zoF}`8TU1nY9ss#KsN#A(0+c~7x#MLHW~uO*%vqpfV4-xbUP;$47ifr++V{W?Y*?0l
znAa<8PMw;pi=KiK8(W|%JI2Cmn4m8c`JMosvp6rQN}AWH4*-aahOX;VnmWlwkdrq+
zbVw9kQ8>SrltMIYRhvE|Aelmz=^3WTuJENQ7;0-K9vB?-8KPR*_uhM_*}8^JlRHrC
zv&gzV$F?EJfRF$3004jhNklKNNlqpVcmCr`^uskbkL6Y_|ops>*x%8F3Kem
zw0XNO-v8`VJA)c6eg2M&^Yoy5dux89UflNmTyqk!o36OSn4<0M?;J1Ng}UA(DVMoY
zVn?H`txM%_V(j{ufUr5ARm0tSFc}O+d%OE>>}Rv-&;I<+pPhZVs;bj7r-p}zhfT9O
zpPrUwK~b`Fb?HYAUFm1DSs&XJWvPsm%L-GM@6FDE91nNRLlp&9qYI)^T8Kl%JK$HymEcCJ46{CifCFCATLsl0b~i;6+&
zSFe5L*Q2x?hJ^quW~1@p)>F@)=Q`1dw(t6J#im3t&bBGevN=JX+pF(Rs1QIUnWog4}%`UG~{6
zQ&|lNE;R~xd^SSntpY_jCyAo1P|h2yV=tnsg@K?!Fl9h%mZrwBD}0Cs!n!>k=H;sC
zx^=_F_V#dlFXs$qAevG#Cg(jPZt+!Mi21xJFizY0?&;d{|KXM6>ep|VUJkEc{=kQS
zY~YKw9{R#Ph269DgRnKA!S<6^o_dlrq4xgp;QaLd%U^tH
zx;!53>~Rr@!j-GfT-x2g@#gDy*Rv+xlM%x0fETU;&12PN-L#t6e@
zJ6Wz)&g-D$O17*V^`eBuomJo2*3}PwWC!4_U;PPfXB{+YXJ>lv?$7=Gzu!XoGfy2(
zlg#hG-5uY~vGaZaxe!0D9)G@zX9Hc7EW-e0?J7@bnsXlS(QuI8|H9QpY;)>6oX*>I
z#oL3y6=6p|U<87u6n#sM00_YHhH-BM49+$Hx<@c3xVYS721K_J)iKByL-!QEMK}%uU>gy9
z@MRM_ETI)cbK%?E_S<*{AR;{cLJDSP9cAa7A%Yq+lbDE@S~c0{xb&Q{ELwGa$|d%@
zL;v(vLZ?C3ws=$Dxxbh$q~JIS_lBVy?-jd8d-d*?n?TsgShRh*(^*G3QD?C;rrHp!
z>pJD-+<73%1{b=G3>O|k0Dz(<0_fBk0MK9l%m4ac`@jEZ|Lnj0AN|Qc{wIpuR;ayb
z1SJN|Y%~yV839btl!(kwOu;B^EYz5g7=RSO3=P3SCPW0M3Zi<^tU*w47p_k#V1gzD
z$Y>P&Mk3M>1j!H-2$@_jrWqxp$%UE~keLyvbB@trR_QxQ4N@kD84JuAyvIpSCSb=(+vUo`997lY-hjTw;KXfk%|8RRsqeMvg3oYTxA
z&0=2d93AZKE0`||UkB-X_wY>lr0gR!ZJH_?NIaj)R
zwQjjw1z(6*N`i>wT*`e2&MANjsHssBB=$uJo)|!jqUic2=Nv-V2;{jCOr0uF>72Bh
zMSV@Z0juLeFbZ_K;rUU4R*E31MQsI|XmQcBisji^E8^#{Je{^CW$<-RF%`n#u+1H`
zW(H&e%(T(j5wkXZ0g=!S@hvO~lTQN}TwnPmY0sYDerAQCe(A!trnBK3rbbWs+_*T|Rx
z%w03*r)i#IP*17pxtiW(iCb7f_5=`t8lLes`JMf+}^EFvPZnTg2ez052kIcN5?In@{DCIr(yA)u7I)^Mh>(Yk>vuG!qWDGJ0~Qm{L58#Q>5km*?h}WvtGv|JYNz
zCo{XXR2hsRwY8XGFk-UYts7$Wo{30-&^aaFiw?AdP1Owq0BnE;WRU1PV?36bNl-|c
zd3gGcTE94aE~mc!&Q4>$o`QAk?
z2mMBt*UZp>7?B779Ky!KcXQK$nu$oto{^aeFI)kVUOXDSvM-7lTi{_Kxh-s|{f}Rx
zpT7wJK+G3eE!R4cxj7r+wx#YRdQR+}^7>iv@Tv8_Tiqxf|
z8m*-hNsGm5emX1B9NBTZT#Ho2-n^&R=8Gz8HLRA)xuHtR9)M#)$!H+V;Jh06x=w)T
z5JdyK?1Gp|j*t`d4Vw6>rc4E3hrKqf_eGO(ZNWlm*g=eBF|))NNf=znMnc)M!zARL
z7C|%mP{^oO8%SI`NaiSIu+){gDN>_ur?Z&Q9I*qGq(B|17g7ns%J7CV(XZFbQAEle2!U1)IQ4}ePkecA{iUe23%lM*)*et=L@R44
zwsYIE$e%F
zJCGJl%~3W=Vr5yUlpwQn?)v2qzW&yy$&=?Ta=4*483?L@3NeNdGUvAGi98sMMk6z8
zn#MpT^_FBSLS0oA7d)LE$FxqV9aiI<6%m`&+Iu#$EYUeK8noSlSU@9!NA*m#k
z>VXR(gY}xz@q^j0ngln8$G_0AvxL8Vm)nV?awe#YUZpv(T9@>b*->
z6$@gAVA6N1<+N*-0>a*95FkD|KQjv3+uK!!=brfF=fCumbum=w&{%Vih^-F=lLJ!%
zb(B;vTObzgBxPnFIK&u%sVK`nr!3%{i(M*;N@5})5HPb*T@?eDfd|9G{ew$=Ue0Id
zxzQBc<@_F#IOi%~BNsW;<@Uh?**b07)5WT&>Ww-Zh&aWd{O^MpB4HK&|f)WnR1^OSr->m?UVl$Bf=T}YvXes-az0H~@8
z53|$2l-7COJ-Ig&DyLnf=A_NVl_9$A%Py2CwC>fKdlb(WvAA5pWOt_;)CC3WQ|x*y
zIkJ_;<@TYNN?K~lr_=R%J_`~jyIY`C7d88$tP7ELe(#pk_&`e$sY6ysiHpLLc4~ZZ
zuE!h`rR=s(WWA>cK?U;O16TZtF~ByDEbvhdHIxD{?vP
zvJPyCzyh08Up(_BNwM{cyhF!pf|f#p5^{-|6Lr8&&9W%~FdBg|8b;itH)0zqz(s3I
zE*J&B4W+ap?r-AVyk{a3`PTY%3DJ{f;A~2272_X$y!hL%Yf0Z43*lQ0d{;g7Zl%zK
zeQ<1&EGGv5IZ29+i1QjaWGaCQ;{fCh$dC$u4#p~xwc9JIii>sMovcoaycg<<>wz!}
zSU~m&q+&U*&3fm(_koa&TESIkJZe+#e(`Vr*FW`F{*V8Izw~eX)xYweWQ+jhzyW|m
z1iFAUY-kP&X8n5g*+pAuXS4g*p9>=t3AMu!lEMY
zn1`&>#)`AF)zrz>xl~vJ87{rdUG
zgf;fPa4k(9C4Lx#g9x02a=@$rPJcYMyWcMmjd7II$S``L$zUz%y&)r{ANEK4Hn7V3
zv*ZUIqU{m@k%*K~_q{6-5pc%BetH#ERX{;R16Uh7#6c~IrI^K$w^W3FQNGecfDnZfWJN%+gHhJpAQ9qDHvrZK_|jQl$(0wVW|P6vs^|Lg
za`xuflNp9?)BqSO7kxavsN!#$7L$IHVj5UiI6r+}U(uRw35~oisG9TqLs^(im-3maQ)f
zvWVEHaJgDuu9l&&Y-dh<+M#w
zDZb!#Z=K%%!o7>qCq&)ZLvWL?_RF7_^!)K`aaiwk1XV}Pkbnngfhi}2GD)Ef&N_s)
zDw2k^Epp3H7tMLsWjvRu7%Ve`H>Js3mZe&V;?;sAsJ+`KC|1@J*%TrWSO+6_qW*o=;bIMv<*R`UA5UjQPbgX@!z*?&eSpwR&nH_+s5zVX5JVOva_tx8A|Kevk
zVO!0Pk7~}Bw7q+AW}Z2qlX-mMD!e80)Ar=`>e-?fG4B#rZ^7gl{_v&-WRdP97_OgO
z<|?8A=V%zZl5+~#kVV0%EiU6j(4uudM}f!pBEZLQ-nu+HI~ySeOldHrj3yI+5oa8L
zn3NGf7=TF8h(L)dA`H6Uy3kYCFiK|pVSm+sLJ%o2iZQ0`u2jOhy-IN`{)~?Q?*-LD
z)ih4-_bFO3D3Bsi(yaR4EgqdZOa{F);vL-16&R~fW5GKd~d)^qty$eqkuqFsf7RC>g&@4
zribzS(Xa2&1CDEs7e0BZ~)p{R(UPk;i#003kNEgAt5mvwKhyPxs)
z-?FfPdAe>#{nk|Zo)GwHF%JL`fB;cBgNkC_v*Hkm_Sav75Q=(4>ye4F2q{@b$_)rt
zX;H$|z_xql
z=!RT9dwBiU^XpCDZ{u4Jp1l3!4$cmu^K~@~=NlyT4y-2WHoMF7Nm0R=k|(Ijl)$sv@Tiv;#E}g+foM_2k*sspYv3%5O=)e2F#=5w
ztl5nHN(QHhBcrorI~jr~kIo}GKdQ6FRLT^(5foFzlx$g0Q$}k95W6V2G0QQ7Qd8lW
zL_nG%##klmEO1O)Tcb76_-x8pjMvb*^%LZtFDn#JadrFH)WPuZc*iM4%n4B<<|`wc
zA(#8Qg(8qKB-~X
z7nYf=wcV!PY_?@FEsJItLf>y^^CqQmiOm>$BsNB4jNaEds47|N3SSq^bT&UsLQ%k$
z)$^~sI*ubqe*Eb1{d@1$r62ljj1fhvLSs(j5Mzw(bcTo_gtD#&&1KUTzQ#-^lY{B(
za7b}}a?ljhdNPk-`ynk34ke{-eRbGQ!*10N+s7BD>+RLL-=f7Krnm3C(TIKdrB5$+
z>yQFyiUI4=#nks?`_5QqrLR=zlY^n`56IU)i`
zR@JI1Qc7su=_S;q5NJ_O-+eQW{SwiDsSk=1w^&rypVPbBuNG^RJ)&uYKLMwHTE%s0Gf3nfW5oa!8N5
zkpM`t=75|?lX3=OE2V~HL4!m#2*R@KTXI}aGeLHitx;=<(Fi%THf*y98USYyHF4gL
zqHh2e<-(xwxYC@kJT%r4vA?TaQNkGdHFNT{cPh~Bz#26uWYmx%NZz|!;21Bfsv@Fs
z99g+23g?_^v~`Rz8Z?-xGzH6=ZDxwLcl*d(04R!LPhoM+)jCLgoXYn4!N+t6?>^jh
zEXjn^-P$dOl^*m^wX&Xt3xg~re{+?FOW8)O;_&fV@$!M6ur25F-KHbHBQg4BvKw}1
z?|yYV^w><>1Dj&rb;~Zwvg<<3niKbZ=W4V(#$d+XH4Ak$%><-xux%4#Bp1YP``mSX
z9s?>2B*st@Bd)G%Z__UA9vWk~@Q0%-D~qeIAiTV~h~v<-1w$HhQf{1c
z`*5<9GwgTSL=1%`0FWe%F8d;4y5wDFnz^4Iu&LK$-iDC#Xec}T^g*hM-)h(>FtelY
zUhdEEv@7s_Qwji{x|n`HKs1(w!HNh1R0bb}@<9-Q{s+9ye7aW>c+W102oY<#W-O+Z
zQV0qO6&3aiIfu5)Fz)O}7g|x
zCR*`LF53`F7dc<7?D`K%=aCe_kn;?f1&#G)&%tEQIV*s3uBs~UeKb=smK_O#=3wGr
zLT_^_IWjW~i1q4gE~;#bh}clrixMoFoMaqFM1zPfZN!vp>ukIr84-)wp;!ne1n9Rp
zN2#W`xFPMeOee$ob(P@4su;ILQIt21shV`dc8u!Dmz?ox?7DR-+U{K)P-y-v*vMsweY=62{LGmN%Gx?FaCg8lRs#Dbbjldi`AF!ZE`{qR;x^4
z0Mnn*?V@8;WP}V*8if+Bl^(zF_Eo?iE%g8x`k)!~o~ER#798x8KR{UpVDC##eGZ69
zM7aOqyjLD@NL7}k5Y&h>SQTaIE3kK-!`=Gn>fuibr+vLwnic@fW&MWT{|0Y*?j
zK_cT&4I~B5*@B{&Or?}THpW^YJ=`6vJ6
z_g`OKaMQ*Fp%+j?){z?xdP~kAhq%j{!8IY9i2@_AN)#pq@0`jntleFlP
ziN$Q5t&7`W!;rRHjZzhLDm4rF!emLI-!3Y9(AwO~a%&&jt%dYrv)FETy`-oSi^=cU
z@kCbFEBjV=Ij!uc4rc!7+8^9s|H{1k`0Zz2S}fX=ddz9d>}+DiF5q@E{$IZN@Gf6F
z*z8hi`%$O-_+9P4Chm8a;WN)2{nCxaZIQKfpSwKs317P1-a0%QOfhPlZIvh#k8b5a9X0_%z6;}@*oNoWKyPNrR_U}GhpV;xDKAPRQ>8jcm
zHN5wfvLdu+_=mnO>8NS1<_G_~fAHDgc-)^Cy&E^idSI-LbyoZF3OG-|C}0v6P$eW|
zluTx3W|s#vWQ^PIcCtLhG$|@Qd-%eW&Grx8dK^F$sW2GfTU4F*p{o1*q(3>5`4+q9
zgYVmH0ANoPMKl7+`!@KVI+z&%-si3&BFGLA_e$aYC)vNub+eb#?f;G{1OfmBW6mjS
zAUhHoP}R)L?0^KV5fGaxAz_(aNQWhR{QSd9cG(*rwIH`+;cM
zSvP&#evREZQ)w)y|Y&gS!*W+kGQ}7lH6^N)N*?wa{iHN}LZ8<autgRW1mmn|LPWN!VT?spBM>2Y!q{(x^9CkpjBy$Ep-@Qs
zyr?QI(RsCHR2rDGW+HF~A#@^H37vCwQCedpoh$!^@NBxy3aiK;+IdKygP*2m|YGC@AY61ErJ0h&}d4Lyr0&LF+>*82q0_P
zw=v$6dYJtM*F5NZxqJXX@6mg$VpiFE3+$PX1c;)MyeMP^1wv~aAsHaR##NA5Q_PYB
z2x~?Vuc+<8qC3;&y*z9WDm&qA$N3YKAFW?}=6HV6HVmxtO#c!K;SXI^s|Qt
zGwDv(5AOKw6KW3V_*P&x0f)_FkcY;XBYG&RqAUs3+d&u!OIz06kdtx2I5z#FyYq4#
zrg2$hs!>i#?sjLd-}9toiv!kbyFAU^{o3vTaz-@j&{`xf49rTbpbVmgZ(~lG>FCDE
zM{a-Oo3DL7rXHlo5WM$$Mby65V~oq0^O(o6SCGTQ#df<+iM=nqD_r44WN*DBBFw32
zDrP3l0Dxqti+Q)}7mLMwI=#HSeCEYZx6Kq-)0o$rvyt$!?RS6c^vUOrj*iBZ
z)~oGoeo{4a!UD-Z*<=L!@sE9KK0g%6UB7z!oj1Pu=GPuyy<@Dub>r5J*|qMpDr^nT
z-M!8k
zJ8auTtdHH
zV~Giy{R8VegP(n=;yDq%ADp(g9@>*@ebz+K8#a_OcN0UucyjR8JC8o!
zY2huU7`rv3z94}d#-UI1LzhNfue9Gf=aOp3G@TtVUm4>hvm&{oHogd)`(F^MCbMP*
zB07$j05oPE5<6cP&D55qAy@m^`SuD*G8PMk;rzUkIB$!gnxLUE1XZ=xh7e*1-a2cV
zW_c;{EmDG`;`@hd{u@OjO$TP+=2zTn(d7?{)m6h@4U35}F~0G;Ha#eue07MYyd
zG*${kB;%%6pNhp`s%T1ZwfY8~FT14yu4dC=EZ}m}#8c>=cuSpWOQSYuDvozY&tYLt
zWSr9OHF~AM;KF$uA0P7NByNiwOY6$!=Cy0rR;wr7)&0tuVF-z;gIg~mG_%RUljN@M
zUG&RX&JJ!YQXYpfHw0wF*@4#3fU*YRZXJEXIACA*GaZfcMC+F4Oy!^bc+^OyTxV(x=gz
z#_V?--|u?QF|Y(cS2#b$aAVf~!gDvj@b)|VY>yutp01|e^BCN(;)rNmeuC!#3vPa$RkAU;QG3Tf(A%u{k0aKz{Bp^a-K{BhZyA5yH^Aog%sTy0>
z###gercf~Bjh}BB%yN*yZw|s4i4Uc;2+-1fZr4sqdM>qs*C#
zc~czNb`i#9TS3uZR-=oaI=0~jl@qI!iW*!3sa3zpM1Hdc_
zLOx5gNA
zvR4wM2%x3+lgVs0X{)OG$>Z+&?ASyd`n&`G#fN9V{m$LflYGC5C$S^^9tf?&X=8p|C=AY+UzNey5YHibPB2u=C5<<in3%ujrJ-1Vg?o@`f7#%}WQ|3>*t|7ETI
z-j}-5@U=&~UdA!!JgM7~txch=j454GDT0oa)saysurvUwPRb&}h7heHDygbKMng(y
zq9oIyfyCa{W;>qN<-`oOX)TdA1XYoo=_2M7ZIc_L!i*VEMVJ&ooKaudFhoJjqzGVG
zOFQxzfF
z>|oq({LDx>*FGjQv@$<#=P~-Z#F{fEJtswZqu1B%&kTK?_Xa$
zzI-xlhSTBEH#)q#<+sPsB=ZXgFGW4PSPDsEP)x}f+-}$2`!K{brZ2qvusyuC+zc1z
zk6(V~+Tr2B*H5?i&hP)qCx7A1M>^(Rb9v$G#+Su7z-EK5U+p05zIl;fhv(vHckl6?
zpSW40)z#-uUpP7V*mEZbjUQrq{ru_=-g$U`)w{|SXo?IDEtTyzAFsfbZgZ)wv4wY`
ztA|c2NkrHi19;M4x@
z`?i}A0oZ(h?Zbl0@A`gHc`Am}r;efqP!KW*Ap-5269DfmCIm?LNkLDi1<0%bkXDb$
zII`Z_;%W8~XXOm0RwYnGF=P=H5Xccm%}RRZMgA6Sq6aN7evXe*%)p50R~TIZftsrD{h@8M}~*dN0mXY0IYev**n
z{Nn87_#|^CB0?g93}66;ad6IAV`GX%QEa!{v-A5;&K|${&f7otv%h%#`U{`_sh_xc
zeE1K4?H>&NGFwA|uRT#zdC<)Hy^|PLeW8GPZWkkpX1D
z#1@E5i4qh*S(NrV0J4gpy1{IWv1HMJh-iwE86-RMADBz_{9#bl!c*obFph*;02+~;
zn*u0j<$dlh?CmO+Gvk;N-zAVc3g+VOfTOd>;q9P)kLlA+i
z;)_B=46C!?lqB+)qKJ&EkW#h)1{Jc9Q8bF2lg)slD0|m3P$fhK$yj7XQ2`}EaJF=1
ziHN~TGDRS3m@tn@XnaBC#MBk;I@oP2MzoT2kd7f_XdAxT6qR*QO*I=1fWi@wobk?(
zW0o94PKh9s3YCN)L`Ve2pfQ9%0F+Jf)Xh&g#gy~7uhQA3P*5Qe5JV(+S`H`@iZU1k
zP|=K{h$za=G7Au(0H9GKAV7n%71*ya1(dAyt}YP0;>GT2^6noUg*!(#ZvV>9{mQe?
zU1vwo+rzd5Q%vV`&ca|`^d2MUT*_GyESOIlVyV+!hk%z0xxEY$VEhh`kk9OXRv_kK^Ggsx9l8)Yco3OQZfmm^x*$j0hg;P_o1?dL72%IyQBWsys)NTjX6^oly+f-Bx
zb~*a;dX|Sw;q2F*~
ztLy+ww=%;{Z3&B;qn-A~`_j6$#I{yD9Fp9!-F3%jyLfB7*j?RwGQjoYTO;C~Rrxj?
z-Y%E*>fJf4%QPr?we!GPkpo97SRf;jCj;I9IYguJ$~m-Diyf2cJKa_d4SkPNAvvSQ
z+r8{oF|mYTitV&2mCJ+0jgNfzGv3wS7e!I5*N?h>8$!3+ERDswUQ|`ZNe*v*w4EGG
zrYH0H^_WvmVY|Cpj$e5A;Oh?_-XF(ZQNftHwpq-k&(05TJvaStU%zp3d3CzD-akBj
ze0d&T{qQT(>0*6x_3**ni^umi%e$B7k3!s~IAXY>1jDeoFZr!^-eiUZ?(wRVvi#_L
z#yOF2PDAC3(icThbi-H_andv|zj!2By)7p7+?ul6_8oSHbL9+RYN|@IX(x-Wzc@Pi
zkoUXG)!VzcYfp|Z2iH!nPn+qZd!I{TG{BR#H40tVt>W&+!K|r{hJI7}qVzzhpeCdU
zYH!iC@%Y--9A|(`stfC5zj6t#~?k&BEGjQJfm?6NL6Qe*us721(
zK#1{VdG@Y-at_uD5~2n2*%d%7`qrY8do@G9zs+`XT_nj
z)MH^Jg&k|mA^_goY3=ITDyK1rI0z4_oCc#3#hE$|K}O)QQTEoXC2lj982+DfM_P(S5<|GDo8}O&hdhYXQ3L?A7S
zax$r#!JEy=O$f5p(l)+*CYNbVAj;<R~oQ_iNG
z2It<{tsb17pPIH$P`>@H4_~s%uAHg6IpuMKA&i}IR2c8oXgSkn-nt5~ptzcf)fCr|
ziaa0|r}fK+?s6~-R~=U4da=Dzjd#RSs#-E0tSiVh7=3Mgu^3V`oL%NQkA)xEE;m$FMckfomPt`Ji^FVPM=VI0=H44)FcUHwLbaAG
z(q;Va?EyeY!uzUv&KzR4)>>;tzrPNFsL`r`05Oj2A){EM38MUf>$iIa10+cqTBr2#-uwn|FV}zbz0@+6)1!B!W`%Lqv^#zNT
z0Kgz2DhrI^X#=N9+g>N#!6$>GIz4jzRlnohOt?8>V;~KRgDbcw#SqkWy&XU_OUj&+
z0VNQ~QB(5ap(txIiK=XCY$o8mTG|!XWvTL5#?hsuoS6p&Wosknz?me0b-@!@$IPe`
z5{KD|BC|vE(J!2F@%d+X>>vm9CLyPkO7b&bm*ld>OfEL_#JPl6g4C{p^%GKMv>oJ-
zMzevknw91JUh}N?+;?d^(8?6?@CGymZ#FMneBo-n+eLBhfu9{^SM-co@ao&6Cf~=8
zdo(d%Qj@dcOiT(YtSSY0`{RCI-vd2nW06!C#2UoUth@
z7(-4nCqOWiQ?kYiD{g~nn%`T+&t5#Xzkh#j_2gtauO$!R+I&82c9TWi0bIoJWWB|{
zSf`l0O)+EI9JO_b9Xjx3F|QZ9z8^_x-e}BChI$mPjZ5kx3lJ1eP*UzZj3rxueH=U?
z0P?l
zap$Yqu&vK8`_gj}H{ZmEi(Sis51Uy-m?Cdf%P>w&>8wv;--*T8^BY$ezdU{RKlDzMy6i)4|M8q<^|hzww9n%*IoVh+^@+0o|-u^Tp9H1Xt9Y*KBiwqJ)|G#$(9$
zpqg&w_;0=ayMlmZM_v<=As2=~3W?N>V^vU_&rKM=Z0r}m5rf?oW8!x=^11t63P^^?
zrjVVnMEk3+l6M9!s>0PyXI9H7<2uJYBr;$L4T-~@MmCCwhExSWWKYj`M8W}|W&;Ae
zXa6UVGv%i7h|xeOi6b+iLCA{VGEsT|W)uX=-)H&QWc^mo(zgzUM)#gTssgfiCqW`C
zlOa%171e#|dau4D%kM3l6cH7X091AA8wJZ~6wE$Xh>QgTQ2|})2oxdZm^qQEMY5a$
zi8K?6b;LQRJg5wVaZoB|))dsb%y~e_5HmzZQYg_N8twD$$`ti8H$QJalKb9tbn~+J
z;a1b#DCBo65+>yjQ_{iqdzQs(9<8j-DmSp<>24zcfJ4O{g9h~j%fFNO6>mU
zV~i;!grurp{^A#}U%!6;{(T}^EEdi=b$+wiTs%2@;e{6t4h}BP&Yyku+4Xw;$}6uP
z9G)B=oruWg>HSAzzrK2W@!)G1mjB6r`hTb>7jFLF`x}4rH$MAMC)cjudF^+q}7}SYd{6&2w5Fj>rj(b0NE3N2mncq5|N|?q(F=WPtDhn
z2
zP60)$=h|V4oFoGXfnXL$QF4~1+MDPL
z5HqU?0T9tDPiiVl>sc+LAVK1w;shB;m`ITXlno1E0x3rzA|Npi#2^tGYlvt@_Gy&D
zJ{B2(QHdy|sFIzsCn6A3Fo@n3LX?Hof)K9z&E@4X##onCRXBp|3>i|B0Wk|?l)M--
zu@}Ms$|{0SnXyA|z0t
z9kptmk1}TFT{rAlo}@fgmqBe(a$w1t%9|={3Y?>LQ$aCA`}}pzxR(U@wAqUWNY$!{
z%3dag-P0NJJ7jZftE}prCuda*?e$0rJyu3h6raYk0H}(pFb!nTqN*^W5(gerHw?ej
z|K9ZZ!@uy+f9J!upSAOLwd}`B#p$%$#aRQn+Y+!Lm#~^oCcp3#i_^#7`1RlX_5bqC
zrCq%0(loyEp`BTL~YNar-svOIhYE7G}0+LXp)hSIbFhT~34c^|W2(
z%j%o2JC$P6qzQu?T>`?Ur&6n$^Yx@Hm7es&$j}pfOQ8zvKk}Mh-HLW-#wRKTUba|O`
z6iL;50+k&{^hs)P;qZ_MbbB$z;3^Bd9uTy#YqCh7EP`en2Bz2(OdG#BeqrjJY}OLl
z)zeTNL|3_@jw#!AKBuh9J8^x6(wnO*+MH>m;>kd5aqAPlnv0wE`Rd*~=j9;+
zx-}%^2=JP(8*Dg3;tuHM?eeE&cJ00J-x$_HaKH@It1cOL$)4|q8vr{ETXWDEN
zf#k%hOr*xLN)nN>(3F#iQLI$8w2ot}OzE6Q(vTuEC)SkM`N|ZPH3vWQ)4y>298OZ#qRMkB^BwbS{n8@Fb!KEGZs-+1$C7rs*Tww0zO5I2PbBCXJGVq@pao7yojWd2endH2UTWY39gD>WW7~(oz(vbX+XF^()0gP!URF+T_&B1XqTL8M4
z1uI_|;;|doSG>J2X^7*N!+48#U3HM>x2ox35&HshIi|FwCcd3=o)x~FREU^k$|0u9
zcv!(HJ_mN1$*;9|ER)(+TQAr_asTr-7p=1x!UI!Ix2dt=VKFbmj!V{Q9$_mqE9({wu*c)0K14Bstsw&ddCb!#=
zYg_MaNV}b4JG1314l3DFJwWU=i8aRiB5RI=DEq$f&R~Iss4A!{%F+}luHjf6EjGiJ
z!|K{}qM0sMr`f7VRU5QM3RNRkL_*wk+AVMq873a3?4b`TKBN&RIUx?rcNxrx^9#k_)`_^mdt`2ZQ~yPC)SKnLabF?jTdwk(RW
zsOR(f&%bo*S3gT51dYty*4
zygfyPYwe`08gyELc}MJ86t<-OkCNgy^mo
zTVAKPNvFYWhiO9*ouvtxp+T8kHdIbF*<+>jX=WE6I#jRBJFg2
z?rdSaPf8&NgEFHel05oR*(WrZDt+px
zvZksd4pMsG}S%?Jp>58BN
zWWjuJqJw|YTKso}R{d$={loqXm*c;q)c&mo$)D_i-}Q{1Cd++akneeU|M-=EzeDi;
zYIRS~QB_eOgq*XrHpgWB-UVF&QGqxsaTY79yz@r;%c}&Fo6lVT%rF1aq%2;zb#uM#
zHepm>HZl&Y?aA#UAba}w!Mjh+mr*8T7y2jW_?fCbeE#72dUf^MTVMZ&U;4sVr?1X@
zHOm*(aCy{Bx_#M1m=lMUyM=^Bc)vMl4}g96aMTw%9Et2}uHCHa071wy@_5
zAQ*V>CItfwu+N%O+M7+<_nP?M_xJTff@N=HZ%V_&s-gtWp!sq6{C`?tWdmS`VXVm~
z0#0m@C0ImqKB^7E4nfkOK`iM`z+nvCIY#5_M!-5luAKi4{j3r~1&C-7W=JH;DGB$+
zn8KIdl>or8*OVM+fo0A)=d~C#NJa$`LuN2kTKhE20*P5NsP|rFL{WoUAh4vEMHI-G
zs%mi@V-x@qkt7*R=_@bYV7&n%WkzL3q^g`^o8FC5!%mgM?t-oBx&ePZK0ikeoA!A}U7pL^xy~7|Dt=LMUXf
z{M7GG8|hW7DJmIvXiY}fCh1i|U8%2qcXcDK54x+>{X1vd*PGk7$7f%7_wsOe@6zS)
z(u>a+pPGZBWW2cCyms&2$3Onjmv3ELp1$?}{Ja0j7w&zt(-Xb@$qPCu^utAgb?nmN
z^>nrxws*#`gMJF@jThh)&F1c)&N^yGwcCdj-&W4y&AN4@IF`kU(m{}lciHCzHs{jK
z<}ZZRwyb67&c;ngi<7~KGimHE<_sL-xT&a|RTgx8WvixaZGY9Jo{TRsN>LdtW%u~q
za#{jQ9C{-nfulv|tUF)2?S*UWY&EE&F}|ubWZqnk=YKT6aVy#~FL#cLB0;jnx+)i0
za#`7f$_gM(3Yx;OY|FN>x*CS!;D(q3yWrK?J9aTAH>=8xYwN9UB*Q^{1G1$A73%&wlwt&eIscQocFcN*gJvj+{);Ol(!
zlA5TSp2KJ=mqFp(Z;a#6jZ){$b+a}NoS7rQ;y{+U7?;!8cycqx)Dn!Fop)xn>A3KA
zTA%R5VkCj<(_3L!8Xp5nSrwOqN9bpiP2LuUViNVWk^$VYYK{pBt@oL;CN7F%=$(K!
zUQtVh39|<%vWhbeAdVv=dLWukrZ2wmp}KCq_VusqcB`e=(Kr18_x4ut%D6mzvyLbVUV8skA30cm4Ry<)(G%`IFhJ
zi{`Yu(tw7e2|~6)B2^6|i)Jv4&Zz3X)sLtusWC>qP2kD`pbOvp`kNd-PmEalJ_t>U
zPLmonFlr3`aN=;P-I+;&NL2EWQfzrG49nK~TGyz##5#3WT?
zvZ@jg012Z4DWWK@IHE{O*%+5u0a2ApXMM@-&KoFZCv`c?!_WY5PGNnv
zLs6DS=qP1z7sb$TPdBNVS~Pds(e}%4#T}qT)X-6f8kf&Iy?Q%
z*Y2H$V$I{RyFR4UTGtI5=y2L?C~ud-s%o~|H1u$=lB%H&fj1D=zCIomEv&146UN7{
z%#UBZc~mwv5frx2>gcntedB6=YkSt$EtgfB>BibKkUEE)fO0B8gK=Sg{jS7jazM)`
zv-5{p&)+(|nwJvVaXM|UedN=2_sw;jQ9Uaawqae=lcC(kz5_g_s?E@JGN$U}
zjPXq5MgugR<_!~?NxCTWYV39whO)hOt9;Q3UY=eiPIcSXM>iXcAoygpZ4GdGd^TKO
z?4FoMhGH~E1qNfw5U?AO{G@JY3YtO}4_@k%o`sl{crw*$Za{}Tz}Skd2$n
z7*jPnYbj>}0Am$lx$DonK>%2#Y|Y%2+=oEw%K~Cn&RDw8RaSO+*j{QHGlwu{HA*eu
zPGxWEX9#s4Tdm3;%<+C7sw%)!Hkvt+sWW2XN_DVT=zTlh-^YnOT@>w8H1;oqEQu&*
z>stYDw2xTh~YZs3ZC{!^7>m#PqT22QQY|
zo0E4pyRV&H-d}HSG|lzH`WZtbr}M7ty77X8QL#vX$^hm)PMFmXG&E?xKU5Hr#qozi
z0mF79M2Ds@+g*2Y6|T;flba49SpCw0QifUYbo&|E>RigSxg>yc3BMnF@Dx$jL?xmnGEfs;_I$d+gX%xGzB
zY80oslYUKzbx|OT*z`|Ve~r&+#kmeowWGp_@U#2=>i{o-p
zaI1dp>%TQ>XkiL8d%V2kY;kh@+%VXeUi#2v)?Qt12%6pSAjWuldODp>FGD^)KHh9L
zmzS5u*!g^(g=e!_*Z0?M&7E_J^S@o+)aP$qzjln8eBmTVjdJLGE^nAac*36p$*bv$~3s`e7&vBr7IvK*{(fYVshJF{O8e-V`KAj6?;HCKUnBlr&0a1hhuIwKgeWk}_?js<$E~
z0W+_HTHDk+jxmSQcLI$)j4@sgp(;ur1?^-ki_GH@6+;I_dD!O>D#Z~wlV+@_up*>j
z9QHXT?*YYv03bOpgmD-*t0kFYph4yQ)x(36$z-ei&C}K6UHruL$&G3AiJy4slecdy
zD78jzUOT7?!H31+?a-NhWaTbtGj00;@RFh5rSQs-H
zXSlKic0RfrcGkJ8bv^Xdlom{xoW){C!JyRE#E^)nFgCn4)Q6e5?Qm;(%sBNw)?mU2wiT2#qH5bLr
zQX(ipDN#%ce@4l&He;zyWKTjxLJ~qIRQ5!uV%aX@IR16wL<&@PM
zjRSC27BvP40Duf2+Az7j3FGz&Y5D2^{s5o+$G`qx{5!w=uYUYfANB|qM8pfB0-%7!
z2>5MY_W$x#_#avPKotOg^3U)m|H<#jw%NA_APG2=LL7#{6@InaT`sR&SzKStB9(!a
zH_=%L874>9%h(ipEQPSa5^Q7I~J))s&fdZ%`(g>)Qj44aw0gx+$S#sE|`z}}q-ht;akVj+o
ziV+bd1wl|w|22O>e~up!D13|1BcOmF2!uEgBFHEyf_Okv8slo?$g?pPW6p^=j)9Ry
ziIHk+_DVGXMbxLEw#K7TAS5!Xs=$Z{hEzp1oiRjvJAP&rwSWdG8_KNAQAGA(eTc{@
zg7Khi5DWqUX4y~bw24Z#vc4e&08!PPGpFp2JQ6+K-7=@lIY%a%x`vZYfH8B{OjdwV
zC8H#L>Y)F$rq2ohkQIuevet@-Ff;Sh$pwX3KWP5D|83swql3W!vDqsfgjF;pV~le}
zX3j%k)zbOJVu2x$wM?2tcSGL|V*>W2U+p??tQt&-Nz@hIdY4(sr=cbasz%hX8WdF>
z#vw~G#vmAAK~QU~wRpgmsmQD>Nx}QlT5AbRFAO=ykfK70qf7ZfeG7$D_S?gQ1yq$(
zU6T@HG==vnlsGaOUzV&fKQ;c`8w)?}uo5dEAj&?nLR1wQ5tTsqPD2R$b^;neMVHr=
zvkn)zi!emXtVJ2z%w*|@ZFTYB`oll`M11_0f9_M|3x~T;exz)EF%JC~zW9ePzIZV(
z2C?nU`K|f1p_y^%`mOl2v+jTMxBvERe(k8xuRj=m=jh+MX$quV4Qtq)anF*YthX?E
z9&J%LJB>lsE2zrGdAZn88VkOn1nkOGod6Fy#vpOlw4$gySc4Warrvb&chP*>njlC!(@g;(c&
zD{xc2BS#@@A{fy{THMh2k?qu0g*c{uJFTF%({AzXF!cW9q|-5vUAYiT)gbda%i15i4fKg?iQP?+W9{5+BUcAtl?}}K9q4&*%813v_7?!*)*+duD@`xym)+d
zx0oL_hc`#o6h~HA%nr%bNy*oB5i_eS+M|Bg0c5Jm=}WH;!+3aj5Mzva&D>E#F>lS5
zhHY13mekvWTScf6A}k8X|9uLF$u5
zLrv+X7#(k8?8_w69aT3J$51O*=+}OFe_EYo$-15y7a2$4psIohV1y7z%~M7ApK!zY
z&)KGZaDVhI7B3|AE{n5d%d8=UUWZ(f&n5#!U?$8c#!A9ut+_%qBNSHG$O1eyMI|-~
zms0JJW)0k)6n#u(*S8Ywf*!AqCj~+16SA#b1>pv-PI$=q{CudF#kEbUulgNWH(ku{
zj5J&KUC*BIsOINrD6JjG=8e$!x;t9bf9cvHh`o2d{mR439ZM7jGdi?`D;RB7P(=lR
zr!gg91;FfGCRGuE6o|lCn^SU%ZA5`YHb;Wp^u$bWFLqs4;+V)92cS6tL{{9K+;&I>
zI2&^ug$=}lu3WmI9dj}kp(@7&DFz>ciNo3SCe90E%rHh#)!KJxG$diyrC>^1mlbCA
zO2+yOXuFleLMe&C!Md#AGZh;v6N}C9Xr~bRiHQY@wvDvY%f#DVFV?nY+jSgcNb0b>
z=2XVSNfIJ@HKJO2vy(q>J!5^a*!XM)p{yF{}-+}$uNSQ$q6pZ!G6{1S4
z*1AK#Re$uq{r&C#@nCo{G(YLao5$4n`u6po+;(Ro2~B+bQ=`047O@MoEMduok}Lz16Jn)14<8G6nz&v6d);juOEpFuBDrRenSXjdhX@@+p;&%M_;lZzb`e$A&hr`9h
zr19oqb3B{)y7}B2cfNAxp%2Sj*P7#b;q6vbSHtl3YmZjL@UzdqdT==TU;WjOh4Au?
zgV}%nM_>FquU{rPC=wXhgaEmk4q%Tp*N3x%uv?VVlj&qWsZeE*0GSwaRJPINK5A9~
zFlY@Dk@d#>rB`qKC0L*#qM0EpVTa`jleZ8vc+2>uJCFX)UwPvzUDfZJn08I2>wY&a
z7bRy+yV&@c1b21`F{y~~Vp=|XI6dHWV)%u_gWFS!P*CZ8it8brkxd|d+Q@L-o0s1>
zUw(CUWy;zm9Cw}Ca+4gYq!d&o=a@OxZQG~h@|CaJjKwf!W%gwe45BNVaXp|+OzR_M
zN2lYs+9&`y2dY8@Fr=J9U6-6Q5#?cRd_iIylVv5uWHo!Zne9jz6--2n$PR*i>KF27
z5;>C?WCT?;0tO8eGC)O%G)2&yBj%u1A!Z&mOT#KLrL0nUtIDDo!5fY!LIA+544@#0
zL^M_hI`ic_pMN2~>-ya99^Yg7kACy_e{s3`>6f4V#4{%ZF$M>gyaAA`;66C5{r|;}
zejDJQgYS}P{lok1fAXJyKi)=FFJlK0ERwO76)RueI=Ojub#-?CLA$65!g@B{jF14A
z{l&0+a(ee&XlCm${>Oj&|MBd#>z{n-xtDG&#?8f5d%Xn+&9$8`>oRTMd>vvQdo%6U
z#m$qG>(4>a{_S7?d&iLe`m;w7%^!W`j|kg~
z_SHC@);#zj4ju+JMO+i2sER=_){p^X)!GCsk|BvTL`~t0LJWBbw3reVWD+70@KcKr
z_z|-d{B!)*1>N@s05YmkMS#5m*pUUYr8NdKDqvzs5_29!3g-xr$e>`7G^8YBZ>y%T
z&RT1UkoJ^tWR*%0#Rv=Y0IDLQISHrB*gNly@oEiNLM2DWlHp<4$L<+tErDiMRclS3
z1OOF*Nf8mqg0W;wjL{h52o=#9G^oa6Mi_G5XZ`KZ5-gD|2_Sbpn|+S3bqGXA$S7ig
zL0OnU3?m4Th^PWO*k=GTv$a&Zy*xlN^WKHb7y}?V=ad8hNHc&KW55_hBxeBtI5UeX
z?tRJjS-t>@%2~vyFajbOGTu58b!2*iaqpvDSQOyMh++{(83BOyhF?gi20&5O)un|J
z5k+&7pv(kfkPI0#7X1W>*w|<=6A|i6A%N{o#%$49RpmbLsBuxFf?-8b6&BD83WQ!s
zjRkLZqzVaDuYNwClQ|hxwvv
z7hoj8lEW#=Ly?=kY`0oqs5OLT&d?Zib?CMxbsoo2%!42!dsk!Ae$KZYTAW
zd~{5L8B*_kX`PyJLn+qAV&0lzxz5iI!xJ*?$U{s|tUB<|pIha-Pii2H
zy|LNyXhSj*DqoJf?%qZOv@#raS8qNx%&BtHuiwm$E^|Nin7ZYpFiq)F7zZePsv%0z
z7t@j^>J30Cq_$JhkvXA@klM;aOoSPw%<*Y!FDC@Zx+lXLW>GQ>ss^Df3RhVKW*rf+
zD2k#0fSfq(`rSB$va-%0ktWWPzz`bpXw9x0LrNDHmpMmcObDSU3SSgg>pK@$Sd|k?
z&LAzf7mpvmO$4JnoX^^6d$iqlam?0w704VdNKPq-n2f{s+M?Fl)zw)$n>uSJRWWTF
z5XBs0xcDE~;{gUa|p+
zx`6_9eP%b?Hm(v~`q*P$6>=iERI*p&Q@r7R19xjF0lwCf)B>G7)vb8#?sj~%CIYO6L}oo1$@o_d%H50lC-
z*U3tpR&ETPZ>RIAm+sufuFC5{TlNPWI|EPV2fmtI-}M+5x45+9c2}V<1Og%Ozz=s}
zJT0Qwz{Y!fILCT|QDw*%*KR45Wpy|Wo086r4kTbgo=8zwH^f12XPvQp<5eCovHq?9
z>h76kOXLhfFTdT4}He
zl6s*xQ}wlXmihI&!`Zz>O+WqI(QzhMl(y(I+$HM>@KP=>$IUylJmNdJeK1*G)p{lOmME5HpX6)ToIE-sLfAIJDtL5w7%jL=}V}5Jizq
zfySc^9O2sq)BCo)z@Kz4_8t0z{kCY&{X#@#wumS~*1ERHMj)mlV;B_*gT|5flp`ox
zmeSadcoz!Oaol!2fK@iTtO@OeM(W#(Od|}3ylV_|J>%3ISEg+&x7_F0F%KDfuJ1?l
z;^M`{w8fORo35Rdjkj|Qv(!P}{ZzeE3E$}3FX{}8ce+dCdJF!f8CppObT{;({M>wE
zbGlCxU?o6ML?t9b*qa;!?9~JNm;0;)M08|I=kc(4bhT90T$DsIaR8i}xb5OVred@l
zgQzOAwN?#zXpQ$!yDTvey#uK^@oqQhRBTOtf_0TNgfQgV6jY-1>_8cy+}>lwX?Bk$rlq;n|h6X4#9b2U{tTZZOLGWgGd7Pra}~m
zMxrM02!sM`O;sva*^@A3;S>k%vv64%j-x^6fp`?ogk_bP?B+@xvl4V$QLvL@THtKG
zy#yy@#(wQIQhOq%&CqCGWfcPmswhfNjWR(1vOXn)??dE2IQaf}PT&uPY53>(k-~Qj
z3~SV)0b>{`2#|A75|8mkgmX8RncNU>ot)I&_Tqd!eevUxucniOEZaBV{^G40FW$WV
zq356ZaLC)Ln5m)&n7>M08N
zw(cdYmdnU;wcecGd8kz%cI(o@W_{VkF=e=T{NUorsdw~0{LqcR`BNW#G}_<0zy9rS
zo;^%47!1%AJ=Ew0G2zM<2Eb21x&(ky7%&D
zr@C%AWyx_eX#fF2dU*EafBCC_@hf-kzPZ}Ix!X;RX?$UzRPI#SITca`CK99M5VO6?
zN?B0EwiZ8hbNZKGYJTN~cy#<+>*}mR&Tvk^_%pA(_!A#~G52v8hB1w3sA`)VXhpOt
z3sT9u-SzA3kf(Lf|MAcM)DwB**WS33%)(G4OEGl@ivRV$`KeF6a_raf&HJZmle*Ls
z5gG#)6+JpbnhYT9YX^H<1I%b_tcx;LOt8(pCOZVB&DG5kb$+;cy!<=gy!&?^UR|!_
zS6?k!zd71q%(o{^sO%`+3QgGs2*cx7CX@fxbL~f;edePlN6$>k!^*)phAJINFli<5`iFyR(%27|FG})9|)2H
zIj4KCH0!}xHOK}?EpuWtDUO^1_=(L~<}DEc2pF%aC9F*iTX0
z+`W58$MsKq_$Qd?!)^7e|H@|`-+%D>YhV5KfAaUMY4fZLzkTL@k6zSS`)^&tk4Q0I
zT?}EkFX#O5^=%&xpLu3?dcSDt-Da9)(k!koG5vo&_q&t4sVvB)-l!LsXJ36Zd-+SZ
zKGci^+k0##(Kp83G|Zf?NE8UgJG9O*sUjQ0RB8E?Jg6MTFc>xE^f3Dhz$h^)p)%>-
z(Cf#z7xm}(mjQzUq7ovaAP|56AgGEGsx!tCA)=rN0IK*JEfR^Mkg>KTvULU4#GDnA
zfG{H|DNxFiRCvtv9z*Eq2GwE~^j)GL6vS1f_oX4Hj1VQb5)JHQy+M?bC9_C2RN)5z
zgoFTyrQwvuENqAvG)h8MV~jNo35rNcNtO3Geu#)=?b%6<9t(wp3IqieLa@eiOp-Ei-b)tt<-EcghNo=_CB!}b9?`jC-($$kA}krk
zaop^-Ac}_eF-680W|pjwxGKw(5-WpuM3g~9g^)~9Jx%FBHDrKD5fwo4Q$KD6CUPXa
zx3&x8$Y{maR8~qRazbSU<`l(vp@O{CrsYYS6(b-RG>RU9k}z@xKtp0_%)JAPStUgP
zaD_G2f)yIg{%b`*6%}CsRw6VC3IuyI6G0IICDkeKodokfT8I%9K@mBd;FVy^dDVgM
z*c@)I4llm;96f&Rt?BQ*J80b;3Ts^$==(E51sZafa93F!|tSjDZ
z&i~&3>mR@R!pooe)XzS8=dFMGg*SeCtj=!!P(3S$4W3_&BMiDbID4H7LD<&ZmvrEoXQ6r76q6@bEN2=!yE-RMhJ0?D0ln)t
zX`U-|$yIQ2f}xA2Pu#(zzr293Vb#t~N7a&&*KW*RRhK*jAreD&NT#Z=DT|bOT$8s}
zx$ZV5fhmm-JHO57OkrTsiW(&wfUHOwR3~S;LCdGEuSL-wC
z&at@G*9%@uvL_1Hl1mx~+Kpn^^;^mV&JG7^&>uSJCs;cDuv+c%a3h6_cu^BNV3Al_
ztrm!kiTabgqh_ukY4d0phT7DC+4tL-@21lhNNZcUL#llNIVFvwv8FZF=(IJGnbWTC
zV;&%rC5vsT$))78vl6ld&a-GNxoJ~|I}NMHEcXP0K`uDzn|?L+U2xRrdr7`L-~
zwfy1adny;ufK4Nzt`+&=#)*^np8r03T5J6VHPBBoyP_y&?SwdB+-$E-ha}tbgrEGL
zj@#jKk>_Xt*rUcCx(zPRBzt3R0+T6c4k$5gvi9o?j*M}S-V*0x&fC!&D13b=hy^)+
zI1`y*=s~J|!W(U?E@`|9?-P8Ls!MW*hFs&HXZ^%H`6i*qbBN-RuRKKeNEBJe$P@
zZ7)WgPU6~7?sF>&d(cf7LmC?YNYf(byiG3mW=p)T4u+<_8H&)
zi)J4|0+CEOw{6-?=9m;kEE`7%DMCu5T5jL9-uLKB!hMgOGixbd?-TXt@|DdaSZ<2C
zDWFD@TzaBDgdwI3>`cjpMbQxAPRsT?BwlwG$N$*)C*_jgkrsW>Bp?hb>N768E|0wf
z+%NdBtYL;hWHXF81@WOaW?oi@^Vxjvw%hd!lWQROjl1{m`QT==IX6!pLbd1~bmOHT
zFmAlzjX%e$w~f*J{oyb~+>G6!BUUeJ4SdUs(-l0M3$_bB?lkGX6-GZEh5GW`B{)XM0*$v5V
zTy~G1RHEc~6=P(v&T~PCPEw+^#~v{#a?+%>s>;^5xe#_K6q*24f)-!%&n}e(p1vqs`+i$C@zZoU>Y`^;HH{6-*$?Bt(j2tsQQhP>fC7
zwEa?)0+-vW@pf9*$c4q^V@TUf*-MB_NR&Kdfs!F+W*{O>nG{5pD9FG9lz{SM+fw$V
z%8bmTD|}G~#OxFzK4Giv#1$pD?b>y|s^?mj-PqaA6d1F&g3j4&{TYTHk(HSNK@?I3
z1q2X45KZq-y9~fc-!|?374fk9Zv0D$@9-Y@0L2L=D`X)TRDjFLe9i)!%d21N?e*~R
z_c!Ys)uY2|f?7@vp5B|5_s>R)v|6wC=KK45*XFZp>(yAesV&%huc}ScR8=jatJSJ)
z+kS|~HdRU1)VFUwqfdbM!?#|4bIQDYc)CjcAmg}N6=fMfv$gD^v2>>8l@k_T#_frB8h7_RRxJfBNUYcF#=rs9kp};T%dRFph+sF^&-c8IZt8
zfEc4OMl5%JC@cbK-ifR0P8vlJ0UHV-FtTGvNko~96Ho$$L}DxzrB}1@gORwr+Wyq#
z`Bq>Nd9zK=?#+JeBTsz(iG%&@VbZkN#BqZfnz9^1R4fGxC>DR>Q@7z$&;8k7f8)>m
zgRiS5axrh$SY7kO#t9Gx(d^iQ04cJy2&EmzPaie^!{7Dt54?DtMF0r$c0(J_21l7u
zHH0x>&@2|vm{=PnLZdN`J`4d(=~<^P#b{wO{O$YtAN>3`PA|@$w)5PICevqM73HD}
zKCjwrQdK|op(g-9at44Ml|JT_k{2;5fQF5=0ODgvIcK7xmn%t-B)jZ+*smrOX7|MZ
zXTR{qPk!s}Tjzb7=~}z59XMxvds=d#(Qh|HN0lq;&)+=wV;{Z#`5Uz(W>Qeq(Axj=YHyozxsc?G*j^Pc){cRnaRym
zimB+OTSq^dgbadc7<&Y@Wz{V&3+vR>V;&3(D_mUluRhxR%-z$UfA{WbJt>5(8WR#x
zHkL$r3`qR{*RuUB8-7x{q5wh`EfhElM==D+fT}24vW^+iI7?KN&N7JzGb5A6ERZp)
z?;>Xfmh)L;wOMS}sz<_3e1;+E^eOGUxA(N
z*5Y?|$A7a&=wE#NH`F2cZ!|t=+vxkm$K6w`R-4&u)(!op8=}ZJ-+VJk?#D1I>F2)w
z>SX`;#e>7kx8FIKw$B_K&u@Lt-lVwdy0feG+2y_0UVr2M!_$SUmy6YZrS-IG9pp7N
zfBtSfu}?m`cla!wo3qp8??3&-wZ!R#C-xuRd&@^Jn=y
zY4ky3GKyqa2}NbFY;b4qk%$Z$05Az*a_vw+g;=dgwh%HoOU~Jn6%o-)39=;FaVQln
zBtk+q0FOQEB?IIjDdwD!c50?1KngjFpaK#?h8(g;V&L6l<&raT%n8#F(?IB)#Sq5i
z#|*eL9RUK|3E&VK00w13P>jLH7$HkpIZ#l|s-j8=KnO(0gaBZW(qqLWL?!^pB3Xr*
zt+m9wBZeA7&KVjbl8FQf!~>uRGFe;rn7CjN$uW^=ZLCpDDUAt{2zH1WqC~0)Kxn}!
zfb7z&!sVQHU5&zK$cO?a6J*R$S*5TpRwbwv$eI|`WLw4xBN_4=2FfCuL~|4sH2^{l
z7CHt5DUlSi^dk@%XY($NBSIn-(VPK55ZMqS5`bpW6cK;`QDiI_MIQs4Wv5L^$kc&Y
zBEz+<_II-4-TeCa><_*1=-rden}6s@n;yLRsUQ6K%RlgAI+$Y2&))jboo{~m@|~|v
z+D|`q)6@Spr=f8jNA^RhpwQnJ1G2ImL7SkIlsV%$)_a*xhv^lA@e
zfRRY!C?ZfaxPQY|y<;l+OQ|gzoO4l?Q*55=>@^R-VeM$bnh{2ot;Fbo;}%1ZdC}y_
zJYwyW$r`G#7R)f;`}n6ncmEr|e)i7Wg{x+55&BD?R_fFb!U;_6sA#but!k|*@Hrf?
z7A9=BYKxdhs7y1N=GBHQIYZaX0H|~J>bL3H#SSJnWR1_X4kIN?l)Ei
ziyVBvE%u*>x~YuncEWqTo3@_wdA}HQKQkj})UI#u{?b!rx-oTaRjuMf9N+cZRUyNo
zd}>v=(Y52Gtd2fZG{-q@*AGsy;S4Et+sak0t=JmKhW8F<-4@1sxZXNbLeU$@H{j~I
z>dw{;g{m>+CHP?|3t)su*&ucCzK9eDkw`MGnXzOT#yz%XJ&ev3AxSwqT3;=>oy?<;
zIc0(1Wz!AYeux>{X5x@b=BDA`H{yzr5|-uY?J%y5acl?)1QHrVQ7@xo1~g@3FMpuBBl((;8oRF3ji5(2%Gih93(SyQ4+JYmPJ!a$rvOwBD*!1
z0992>SM_~w&EC*&0DavwjGB@cNOfy`7#gAEqcD#+_e$bm3!o~
zn4_XBc~qOP%Kay`xfZgCIrPIeiQ1+i*$c}H-Ykn|&(^ncZ8`ORef4x+eeUU-x36En
zyh@31b$PKdzBB7$43kMUZOcu+vZH^h5u6vqDV2<1nc#l699E}Xnv>(%!Qs4)Ziw=!
z&wcLQ`}f{`@2y9d7n%8DY3t_6!uCcVHS7QmQ#UG%$7VgT2Ytk$En^yZ*3|n4SKD<&
zbY)H5kS;D{yFmqPC|eo^SIy976|32rF!Fj)bdR|2#(LED!)lP6RoP>EMfk8D{Yf<+
zP^0(cb)TDwt7G>Nhx4`fY%CXbQPwFL@QF1U3eX(vyMtp>W@H|
z&AE2~N3jq|V}utbN7R(3!}{K-RikQ3OD1qGmg|$8niI3I&#F=)n&qL2!P3~;4VSuS
z_foo$;UQ*mh|_6l)sEd-is8;>b=K=*zzq~z5sO#-nD>tlX4l(h>ev0M_ly0ucwt}5
zKAmM+hhU@^ib^S~q9O?FcS
zHnXzLXeBdRXX>&>^s6go1~PS_WkL(V=E03?>$A`0iYjRi%HyMioZkDIEZ)BP)&QI-
ztn=dGI9%BhXI0s-xdyFg^0pAJvY63pMC77`O5>oY6<2$S4N*8@Kj-RgA1Jr1#A8o>
zOTjyKRWrS44_F`$qflaJbB+MHB+GT7Df^hm9J3^gP$N}<%b2aLR8dnd%TkOHj+4@!
z#gInDY&uiL)|BjuA^OX!nL(Uf*XH^tNEWlHQo)7ErK?E+V)nxjV@#42A%ogXA56&%
zN{{tPK}5czwe=ma@$bfe+v3|@H$TvM$SPUU7N)IRF)pQm0!?LK7{`C__q_b^S@+e~
z-YD&Kt>@d&AKQc5H>a0Z?@lK;_O!U{7MH(P*R3_RfH+qi931q0-*w%3vl+&5^q!e>
z9HwopNQ=et=;&G>f+kql*fBp^Y;WAUcCdF)mSsN5nsP{15~E^b1e3B=9G$W?StI#mMJD~`M_#=3
z`@ZL;LHgWvcbD70_pLj2g0x@$`EPvV*WY~eBeT69{^a+3{ORj;VTuqqXvjVnmCqv(
zyt6$2nODCO8CwSfC9NGmXDh}ms*VfSrQG{i6y>_VEI3=ozj(F%!hiV-H*5Ud?fvh2
z_QY~q7jScb;;$BVen7^Fpb`zrRat8ce%xltY>a8`2yxoQUwSb9;=PB_Z=XH6_6M$Y
z-*o(oi!0mfWWJ}8q7NzLpZe9eKY9I$!niT3F^16w@5dY~$Ig_5$(S6Ys0aa~L8G85
zNmIhcDENLTxtNJo9DeC?{EvU`ORrqni43<2X4jxAioz6i)8MTHy1m%$S^wit-~2-#
ze(~c+`+#9U9eN8sWK2WqV)TWvm2GM>2>?xY$?MlnAO6L+@BfYa!*D@3gxj<6kACv`
z&);mNyGUbtVtRb-cpoPpy4uLu>1nv!UYUM2pV-1oOuO8y7PeN%mL+O8*bp*ox0}Y*
z2Uxum(of$%KU%H#CgZ+r=Ov}RC#JSCiL4`6PKF_{ShjmYyKS$+AQA3}u?eO>{Mjcej>(=M$AAXHH07&!xnEHSM13yJ^$fB;EEK~xfx
z7*)t@OJ;Hsvlj}U*D;s#6r)iD)EG0Ux4Fn^G)kyMzzBe%0E&p98jW#PaVA_$n`-&C
zd*^G@ej{9dt=qo$D_{7+tFM0Yx##}KkN)V<-aKUpg3Rw*s_ZUYyB!byxevVG;z#6n
zh*bU-iSOuJ{F9gjAKXAhWJGDIsxW5VkK4_rDVw9CYdPm<4=2}8ZvFHxe?e8hd3yGX
zKlKwAL;T$5f8^=XUvy!e@fUyTS3dmg(;vQd{7k~D%hmUyqxk6I@^$ISgdw}52j=)^
zmvMRc`Ql{tOx3{o-JA8rAN+tt4Lfad08&C!K>lspB>ZmtGX(C6
zNjqJA%^47dIIBpcoHgg9N(f3Mh&$yR>q;U9fSi-25d2uhLPa1X$RdcWz`(nbJb*}!
zqzV+tI5gIv0mrBcDlzEb``zLrq6i=&%*4!y7M;VL4SkNvti%eGK@}7vsp_ECxC(LS
zfJe5P06w
zAZ8*UQsiM69*YYRAp!$R$|*}gFvhBC79ndG5D1wpKb9c^h*61D6`83h*cJjxfQ)Jc
zQc5w#)b|;{T5GIzOqc|7(yUIwm_h*%7?ngo1rq>RLr
zC@ZQrpamJpN}`}jS#5?Av0$UbglGT^0ut<;dlVE@L5ZvdOR(#K3TXc&dwB>4lzxeBa{in7+^RvJ6cYeV!Y$HC{>Un$ZtHU)QBs=!A
znIq-pYD@=Z{g4x@tATAeF$zHY*Em|bxEHM0N~_64r}JphjGOYU_uTQ#IHX>A<&LxN
z=dtHfn}fqVCND@ZbLAWgiWpS-M*St!)-j?jkYMb?Ds!a#_JapJ5!2R_CafO}$!0et
zzHPHNJSL(LREI20g=$P7Y}-06=;A6Y*Te}yVd%whKa74}*3KMFswbyaiy`#u7=oxG
zkizEXF`ia*XTj38MO*Ak&~A|vwU+0{Z8x7|VRP7w0w>!=LI9eGIo!~uO}YpD9K8#V
zJQnX=ilj@HbCNKt4zZaX#q+)Ta4BZ#iMqx5;%peVB@oFe6(?G?)~3{5ZJT2?cD7gy
z&Q)^M9v#sB-fFNFVULbER|S}PhhNWAkoPszuuh)8T>
zjG{T_SXK#{xOAY9MJ|^s$xu}dl2d?`RRo9?(bRRVsxihSap!bC483!XjkB&2Nj`*~
z(TOUjG#Fz-Kq3N^aaa~5u}hM%u50hv_k^UXDa6q^$%-)~gGNMi&PBo2TFpwfY^)72
z#ux!PMM)_!o7FJX6l!v8n&b*j&1
z8wEl#Y#0<#OCQpDxg3TerDTk8&TYFP=RBQGhuDpA_v1FBg9>J~VNv}#RJMYQL=-wq=(h-%JZmUL22*4_5e#W|QFDE1*{PC>#u%rM*%g9ZLMzUQVs5m^AOxZ2$!Nj|F+1Q8A@JzjW`8y}AMVRZ6t%-eiv!vsa7|Qf!8pqm8mFi^C5Zwe
ztT}zBJMn)#cFthHYEJV;O&N{w$u!%u1=fg?W7BDyc&1=`U{_M)egV>R}Azs`$C
zl*B&uG}di&SgEg<<5ldymSxQ_cF=dFEhpvt!RpaI@5?68rzeH7@fmM_lX
zqx0!Y$MrI_8$WD@Zty$E1VbE;B0SucR?
zI#UhaKt`Y>c8pZn)aMwmvek4(TnA4O%wDku@SF>{a?7_F&>g=tw%3TV#w8*tibzT+
z?7Xp%aCd9mxn1pI=RIqT`RB6E_-_36j_;@|(fdRnZe|WGvt?tG#O>8WlRutL{;dz|
z&04?m_1E9}@|PYhR*LOsKl9l~lUEPh6YF@rk&|n;>!t*ZhKuFGcl|P@yk4)D%jIl7
z2cW&Zz3FtiH$C>=t0l!F@0&m4IDP)b!zWrjDDus{Ij93HR5^^N)Vh!DH=nsZfBwe3|LE_$vWaO}
zYEjyd$003|44@%&Rn<1q<($!2W0{C@l3gQh%a4V`{Hxox_AFS{Q0-uc<;U1)@#7u|H)h5f7~VqFs@P}
zw6>^gLO5g|+jVXM1fs`uu0U^!lIu|J-0
z;`%C}SgD3FsjN3c@5fTd-*bKXZ+_~f4>B_hnO=
z8Q*>V?Em)4-M@1_piehu`P0{`K9$RRZ~vt)e&b?!XWBMZUDoh+}?(f*T@>Gb^aB8Kh1_QT)**?#%n{rRta<<&oX|LT0U`Ca|}
z&kk?wee`1=`{<|Yx^CNPQC3)BHee<@mtg?_qr~lm>Pe1qx_|J(^UweA)x#e=IQhww
z&ELJhI9>HkIxsr+v1j=0Ss83L1I3&O00fA=$=(1}RHQoPkWEd7Q}RhHfhMXT3nLOB
zp#etXYy_bwz^E~Xh$JUaRn4d%s!cX=ysC7_Rq-gc$L%w-^1+?ifl8XR_M~xjL1Em6
z)UdIZ5CK0xaD1S%p`Zx=bTOQN72}^O=JT8KN&eBG-(Zyf#yfM)CzBZ<^g(Bn>1w_0
zbAIRS^z}F2_)E*>n{U1L;TN70Qw_GBJpGa9_6|Ps#8X{-v|haZ)mOgs{U3h%lg~d{
z5}ci_+xf|Ab4J^Xr{{+cB|%B8#B%Zai)Or7f9-5*zutWK;^<}B4z}XHZpvzM;`_7M
z?YsVX^S09L(XuVs?KSZMn1fr**kAZZtL@X0eD(RgSdtDn|r-ECPx7x32}scjKQDkJF+o5|er%6hPJtdDDR?lbR2f4~nWH
zlL`itX}7LZ!YnqW)JJrNtWoO#AR~exh^Bs2&8T@N(~lsAkgfHqM2rBcNt39GF>DO`
z5FgwB5@l2XKqbjwP#LX~N$($6${Dg0mWe1y4hbMbWJ2Vm=pd&U6$AiO)sN$2LtG6O
zO9dbzYptSDfUM~1$!;iARdU?v0`IW5DTyK)N(wk|WnC@FJd<_03D
zY649FyZSyVGZ#gr2xCe)ilQXs9AeHnmuwAK5RF?&2~tdiy$?y0wLsFGjl^vlHo5`K
zU@h;;2r5Y+gR04Zf@>EYDa8zZCbqV1u&RB^<**XfT~4KZt!yl=hc_RDb+U?)nhl-%d5{v2d_DdhW
zv0Usq@>$PM&;Ol&>xDFQm#gL9_?1^)*TdDp^Qqrd)kK3jHE@1Er0ChYGN6`uyB5~6
zuxh5>7AljJsXcifs}s-*^-IM8Q>1R)CRn(eST&}uTT;qo18O@gM!N|8d+DAs4d=tIox&!*
zp<962JFez_{hC=`Wz#g*u4^}H3Atd%-8L)=6LnO#xwLjd*A6aVKFw2S$6ORw;b?<%
zD_a9X*+l?0&VqXHqf&wenu&9zN=(@cvXF@oS<#}lg~J#`WcA>lEeLX`lG?(B3~3ug
zv?=P$RZ1!4Zb%d)r<7HNF%u_ZBz+Sa}KU3i_(tc7!#z73vUoqzXXH_{5K
zX!l2M1DDCK$E!zaaXvAd{W2#mN6&jIaPO^#cp0vK_0HzyXP%jW=EZ9{gO4iYwby1c
zCuERt`snQ>5y7&S*-PJdNIB`)^&KM_<8mSiMIXkC({G|-GVIDSj~lLy>PA;9TvN!#
z9hkNR%WK>F5zqGJMBT~8U#%bAPopwATRM^tnZn4lB0Ebl#z71fMLCJkL&ON|H9=0L
z$BC;#X}Zd~_C@PzJi3|Ji!g^23H+H2z2V3v`DTjM%c5hOWP;cmgV6`cv9+z(tn@m19npKJ^F2?XyKU`4o5PIVd
zi*V|gK}Ok(!8B4HmSx#;esj~O?YdIkw`PXx5KK~z0oKUCMqr<-p{NsTZIrur
zMjs-(Y#kPbI7dwpVc$owWmOY1C`bg&WEq`VUi&`3KAXgchgP(#y7tLHT=b9j7H=cD
z5wK&Hsu3#42M%ymaABN~QDvlpAtPE6>EqB_qiPX28WEScl(NR{$kV2p=2#@4lt;-#OUdtX~j;Ge;J;5SSK{t3z2)MH-bDQ&6F5}_&U~KP<<0UxCHi|-F2CI^{{a8F{-4lma(>X34
zI2rx8H23kv>o=O|wa%7HjVbli58yFpymR-pKH~m-uTF~HSuVh&d?G6)FX+K!}APNj75mu|c4L-OW=$Gy@qx
z1On^aUGK&;x^XzHay4G*n8VHI#^Q*_M@hwdPgya88sq|MwmT7^U`n!O&f0bT8ZtQ{
z$+fvWDu?;{TlwtK7MtzCbL0L;XScLRRa-=|nzb+hB8&BQ;u=2$pDL_oaJ}8TcCB*N
zcCjp9|66sa;_2Dqz0?PFMdQkvGSn@nE+sXgs=Ly5)>+$xvNEeBi88q9WG~lEWTQo0
z9PG2K$P11MeO3=Wq%j4Go)@Q5bUN8nC)s9OSj&c%!y`E4A~2HmP9-nLF{!%Zq{6)I
z0!`Ym_k4=JMVqfKLbrC=6C29vyoZ#iu%>3ToZEVLd)}Q^yPs7R@$uZ<#RsL6f7Qz(
z-;Mv@Vz)7U&>Pq#lSJh0q^T$Udc9m+lnxq5EvG}1|HkR?ef{g*>AUYfI_uY0lRfia
z{hhCdvU&E|PyEmi{@DKfIvG!#UVHV8$z%^jmz(b7v-ukts
zX=bxoRW((t*U0pmpYMYOJ=%vP18_~?BH~FjQ_x{FfCUo7cgZ)FCe)?Md
z$3ODXlgjLu?&0~Jci($wl4B*)<)dwPv6(lsrYMX3W7ks53-u9PdGBI)YdIEG`3Ilg
z|KacZ*t2`qU)}FRyx-+7y?1qI;ZLL4cCp`fiX$^y#%Wzn+WOD^`0~p?zYXJNj0IQ3)!SFAt>-`Yv#$(AahS`wDFe9B(^NvXrQgJmxtf%+e(Oc8M
ze&l2GMp8ix-#lOZ=l}34f9sp~AB+{f&!9O?*saG^
z**5E8vk84Px|G3@+dG*5+n@gMzwyxzpV0JST>n4ryzy7Q__Y_O0J}g$zx%fjPJ-U)
z!`9;5m;0Z7^6+2#=#yugD~EL#uC{#?-kZdOr8=bTS
zodiM2F%zgW#(RIh+OTzdhX<1Szxbv1{=$3RJADgCCuZ@PuzJ!wcyRxnSI^f}%|7(f
zr>_;bxcg2@xm21>PKF_knXB0(5=^FbIjO(!i@y{%mqEj(TOS?XfLt`qblEL_;%Cod
zckiaT_9U4X=GPvUH(t-%$?4fszt-2PbzLLz@$t!Yz7K?})n@R1sb;db*NuMFZ}<22
zk0w)>Q?U^$Cm*^EAG+E6@O$pRc;o8tpZZ6kEQ>j>&wjI){(mFUTR}<@1SJ>B5_|%&
zR02axW7LGgEFouW#1lGDW0@sM&e`lx135cmgpDBr5J?C-Pe=d+Id|2`(5VSw`ZMPu=V+-fpleeCVN$x(l|Jj#6_3OX#_qDD6?H~VxezRHmF*)<}^(RWT
ztBZ$U%>G7!ANu(B{PZvU$}+%L-n|=(`q48#^cnm0$(vt-7NXew>N?lc)W#3HkH1vE
z{i)&A?(&W+tGf@ktLh+3nmc=++h6x4c>%{`+ACuKzrE-W%{ZoK$dW#BZ~*gR^?;Tu
zZp#Fe3k$Zene)!@QvqVO(@6p)fvPHVTZ~=btvc>9xdaNLSyU8tr#PR!i_rLQYY-qR
z>_9z~cG^5hf?_}wh!8%=80`$-*g9%&5d<)j4k4I`kpUPh}l?E+ct)<(~DD;7$Y+mML}Qy%SZtQ0S%EsBDOYVNT3L+DQ1b91yuk*Vn8Ot
zHl^VEk*sCoa?Y+`+5v+_AdAY*o!Do_ok&Q|Ip++K9`mFP|Auh_fJlll#uzDN1y!Js
zQJDRo)m&HzD{9>7?|
z9Je3fy}sRK!aGDlK+FVjH`Ndj!Gz2^z7m3DKvf`N&M}-%{F{@jdpCfQZd{y49L~ujQ&kXejl(
zy&^Y@N6%zU=m|ptOG4~@_JqkfD?~B4?!lT|38?E!f>f7t%%+;|$7>dcRWXQh6B$iR
z>jgl8;vsrmNcMT$RS7W%8c9>1ek3Dm{w9nnU^VZek^mMt%
zNjsl?A_eO$a!lyMa&SY6VHnNgG^dM)E!E^*+YVezr!zq(#Ktd^F*=;4jAd=U=c6ww
zLB^V?u8ngxI!;A(yuCt7bg{n2c>jg#$FsJ%zoDz>XD!`0xb|=n&sPic=rot3M4A-1
zKhxu4RfbWj@y`4+8S1<5y%&#$loHi@@64Y1ikT{7!yGHtQo*qgQ`igF-9R`&Niirc`y(_0*+8}^r14C
z{osIeGM39E*@t9{Vltb@oUg7{#-Q#TS9Tm0LNbO6vvhe)W1TDY=s)i6Y7oG~t?1R$%`O85$at758&yKP5AmZ=v}>zjHCT4*dtJQ*W8
zT1%Fz6xYO6Rkx~RO37L4Y*Og0vkF;6Tw!vKy&qK-oipRnx#M$=w}njzJyXs%K`#-7
zSaD~viMZ=o_%W#J&eY|7-QKK##%84qoB=Rn4q@!ObiP;~wS{BM0A#Utm~$XU)-t8m
zrzm4kmC{&nMM_B#YI2~-1H_!1GkawT-MV4A47WDJ#<-CUgi&HPfT$Xq7>6W-wUr^O
zacXhoBCAJ#K5g2dglyPIHJnLDc6bF<
zf&@7^!wtC{#i*UlX3@sVkOLUYIr#DZV&$P)?@d3ndar}D!oHU-ii0Yat}Ad@9nI>|
z>$acWI?UFF?osGgL}rq#n?FByPFD{)iJL&P>hR)nV;o&wJ+zkVb{dA95*W;gJ+1pJ
z0Vx^IZB2w$6M&4I5ROAbXHv#U0%
zIELKp!`@LyVr>`~XKA}J7N{ycYR+u1#0eN|vzY|M#u)T*(5fpA9Om4|*gaZpo#gcd
z&qeJfjTX{I+M?a3`C&@jJBygQ%lDe$)M`d5Jc|0Y3_ThkR<{&K8ZTd^bZYF>Om2iA
zr1l=>cF$5+R6}3%K5*{+(6YHJnWB_PjYG)jg`j8#1k)|=Zq_lT!Vu_C&*xn(HyxVB
z9XFE!+tKZ7a~<|3WiuCJCgJC+emyrU<@1Nzd&2Cd4RYb7Ny8#{OI0mhC|1kl<`>f^
zj$(*)EoPoI2|TJz0n)C)&je=(2es*g_v2OYE9Bi8OC7?CZ7`-v@R)b#x07FjqAytu^NKa`DdC50s9O=7#4I
z-MFoAjQvvbV7Py&nqW1((xMxph3qI0&57zqThNXIO@a#JxYfosP%38;z4CL3H*b&`QaDTDBm+7LJ
z13?5GvN41qFkqBFn_XXr;4V^c+76m^;cuC
zbv+nus{t+_()s1gN`(@V6vk!cjaIYgKPAOpKSs{myna-nm)SG9;11L%I+5H35XhOd
zI&xGgIL<|x?Z!*a84?@rBuPdb(C3_!M9WN=0CGEX-EdVVnUaR{Z9*Uq7A
zvk{dPQ8wf}6-Qe2613!;W4~Tkz^!vRCTZ)Ovmb)h%20^90W4xH9E?bmRmhs0vtuJM
zkwV@H^Qpe?bu7F2iV5T~V-<)90Wc*{W%F31MGluDgzJ>);|(%$d3+tXaf%Ztl<7n90WJS7D)
zn+?Qw@4WTi?XNrAs90;tgXy&phRLj@iUh)BGF`7%q%fV%ttqUv7i%4|kInXWzE)zf
z_r&!L)HiDT;wPWJ+^*kUZQi`Q{)ZRoXCFSQx!kw-qx12zpZNIGhjaCd_wKzmT&6ER
zTKY|N`^VRgpGaJ+2F$=n5rQPyf$WAG^QUJW9(i-C6yOuPq;JyFJrwi|Wt6^4^cW{86YVi$(zUTErwqMymv7
zyB(4yHfipvNj=GavFiDM{|oQ_d!O8M6Gsu68Ex}`o>ReHUYQ6*H3=cgxIIStAN|h%
z&c|l^u-#%~lETw9|EWLtxqtopum0+n-}>^s)gbt}lQez##U~qa5{J;8t(_^06lAaaDMey
zet9@T`t0qhY4B`tf9jQcmy37)jn6#!>HQjrFCbef-n_eg55{Fo$7HTUI-cUeY%(#`
zK{l_C`6pg`=jZQU+>dx%pUm4P^;;4#%QG%#Z|D8&N(Q@BHRM
z0t^r{Y3Y-sWU^O*$Pj%BMZ>O1(gO+hzKd(7+9p7k`oTNmG>$oESJ)&u=Uk`7>f#FO
zDYzpsID~so;nnQ?{@dN;_U+QZbk$wmf9>`AUp2q{(|>Vx?TH`zy?^+R|K8shD3Op+
z7B5tSml3J3QIJe#5P%(eVVgtA1_1y_RCZ^UihMiN{e$?e*%CBiG1OmYaKcarfcH
z^5OLUok#c}<%yo%oV({{a5-KM7e}XVQB*i^vFfB8Zq
z9;^cjk%nb69+V!^
za7kQKw>IolcgEBdhHV%-ivXtOtl61dFiFmck)RPOw!>I(C`+_l3PVa;4L#&p7r?n>
zNf?Ppl5*HNKFb!INj{beDO9H`L&;&HuUnxzP|+X<@It+x4X8G={^~5&bKs-tsCPu
zc3Cu<>>a35_T!{gGj-*D-Rybb&A81;oSCG#&0?S_Y{})K&Wn7YUCK3WC%HjvXTunC
zpIQXA)}z>V(*Gi=-15Ewg%+j40d#>phcZipU{%X+^k
zrmJp**f+M+Nh_5ZH>*@zne7c5jIvQ-N!t=>aMwyReD7iPT^uw>srIV
z=lehV{1eyK-}kYHmz!69PZM!Xi-?&)prF1mW6U>Xn_L~p;Oyw9vWi0%{
zZvs!++2J)2CzcCSy>#(~Us$x}#%E#gb~QYJ^S6ArJ*?(8CuJ6cW;C1$DnTr2@ZILJ
zoJ0R0r-ZH!YRWMt-DVU}9ewm6^`H$EhLKcT!y+NZSQuxSg5sDWGiS@1j0G@=kg%~W
zGO^nyqRy|8xO7dSI*#LNwIHIZAViN~qIlJ$*(=20d#32H1jr$HAS{Y92}=fJY(FGK
zF_d?{;8d_?5mijO4Oe6tkD>QCIubIlH|mOqel_eEULCBs+@C5RY*eCIy4+|
z%95Qk%t--6vMo3Rs1RG@y}!7~aiG)7b@VBQR2I0uzyFb0tuSWjl1ftPVpd|v%8(&1
zh)4w$l*uA85FkLGAjVXe<*q1ses)pU^;R^4UThIt
z5ROIU5R&(PvsrN27-NoSb{xn3gQ_rS?Zi2kbM7<>BlC`|VMNFhGus$ris>g7J=)=ZhN+~1IW;ta??fdC0P@5qUF7mmx0>r2Q3uZ7x8rL5w*0v$KLSN
zCoHOF97ncWS)0gO6(vVV{t6vJ2vhVdB64rt%nSFzW=M}NHmz&~F@!vwOnZ*Y^YFXsOp$i6pFfe
z;GVdbdxC9KB#y^MEotbt-+=O{DhDW|jBOWn)KyMnjn%7%FPmXij~$7p@)|3080Hmv
zT3@-@)$*OvVTrj)tEy_c&Z{k^u`3pLuda&CX@aqZh(o8M^Qw_dSwxscAxqR8YR6SoM<2ZRS;ZK`hGWjE
zN=z0IAdQ#|u%OzQ5VB>=Ll4HHbxNF7FbU^OAOTIoSW75b3gh}DUOZsNkb>3ws+Zm?@dILceh1)j@xGjKJ|ARo8>tm!kca)D=+~
z(;!%uQ!bjS4{otp=Rn5zxJEXrnnH2}CU;2+rpOGz`oRc5aNu_79sn2y01(*i*#w9P
zh=nV)>0nHSHdYE$DAZL~n?vc>uDZe;s*;PkEL$r$kn|}HmuKRj$!pVJU`zq(V4Gy?
zbL|b+(H+Ijz}r)68e!TXP0b}lft2!yN!2DrYwN;XZtixQjl^gHD2x?kbp#BX9^oM3
zWEx#PB8;wN*B0%hD`qhX6wjCts)EUolI?8Spu*6voQa#B6>6l|yUjXSh(UeXGK){!
z4TT(IRzj#Jl3h^k{Wx?RpQAxEO1RC$HYvm;Y;99dvZ5b+@S`bQ6IIfFY@c2E@%MWCH6nV^7X1R&lK=0!mPA9uhQPyyth?A-m`_!l1k
zSbd2PG`_y0UXvjqA_8g_QY;PAICf`eLpKgux(TO#%)rNb*S)j&;$pixds87SHdowC
z)FO!5ohq&IU;O!>Oqo9N@lWj^oUHqyoz8~r8&iphAIJUu{n3YVT9;KL2)8D9x?Dqw
zLfdQCo=!zSm$d4KKmE7g`<3&>av9sn-bbG}{NH`%x$k{y(sm(oHip;ht8w&QXveW{
z=FcBppMiFkIp-n8cyMswT)8-3grN=Gm(D$W@MyK^KKs=16Hh(U)|1kfn#XY*cO&Dy
z_ui+JthGUB`}O>!_Md#tyu0=N-}!~t?maqr_M<;Ilc$ak!};l7{%b$+;iqqZ^rh#g
zm7U!_{J8A@?kD}{Z$2D*%@Y62SKj@#d#8W;Z~ye4{C(d$IQBziv_onAb_9*vf(%zA
zP#Bj-@L9^jnIyVAx8qG;fI|~kJ~rIuQMb99PMf^h5$&NU?r+2Y`EUQ-zjyQClP^B~
z!v{AHjw@3a=i@M)@BJ@-?+-|~$vJCL`5gOgXVGeVJgu{p++xFbfB3`S&usqcufA@rd9}a#U;oVV554g8
z@A%O6`)*$QKYaGt~a_|Q~|GDcm0f4o(X&Pfp2w|t{TUAxpb-SO-
zvb5Ij;BtTLg+s>n)rX5ON*7~S&JHi#i@*01AF+GSeE9Ztw$-{D-nh7U`l%NV=d9xx`H9zG`{q!UjlB-VdjI8LluZ(fNf`t6GOC+F_rmz@_nTkjq=(b_i%-31Np3y)
z>~wyViM-IAp?~H4{QlBko~`-dkSEP)w>sZ!m#3YW8WF{kZ}w26VxV{+=o4|mz=et0
zGe?o=aw#ID%)&+t2pTjXnr!%X<(%(W^}lt36n2NeV^{hu3ki@b5v`IH
zA5u_+#A>XymaWDygy4Fw$v_AgPo{O5W(Xo{`ADyqGpC>-|@k6uets8}T
z^UMGKPygi)z4Wn9lp{6Ku|DPvSR=R1yII+{D%46_!knp
zLEUx!`2Fq+u@Ok10;-IBay+%RIKQ}PjZXUb_W9-S{Mh%}=TiDzzoSdrzx5M;v#P?6
zeCnBlgUO(DcK_Wse(jfk^iv;s`IA5J5B`fkzr6F)uYc@EzpyPo_V!omaXs0b4xxKu
zIwu(lJDyNn^?ZLgI_2vR+F92;GqJHJ$yuACRO9-7$lklQ&~72D8B-)Ot}LhXEJKKl
zQwP~Fg3UG$rptkBsn*t)i418`nOULDegFs)mu8LYf3rvYyYX8oa(=8)n{zhIlgT8a
zN*0$qqOzf!^Dt~Tup(EmH3$V72B!SJnKuhy7PX}SprQ~VlK{n-Q;H3vfcVsps$?7@
z0;4fTz2D(=5RnXjKy3d2+!{gvLPbO-V$P){X2*t+hzNkPN)l-grn;*?Wl_k8wDT!8
zi~s1t&V3TM_u
z4T+&_(79}@G06~eMXD(zBLa{Wktr#IZKx`OCGioxOBE!Zg<;k!?B+Q2@%P5Or4&J-|ql>d&Z61B&&R_j={MTH$
zcT(16TU6C-K3-iS!2P>-)~jzgXI^{tmCfp^TlqKd{k5~=ox~kEjRbyOPkZNa&Yh=8V&gk
zWe~8bE}_gpGJzC((@d()Nf+1K0bC8vM26`{ZzqqmxX0@=z1jp<*yAV7{0NE$&K`($
zYHG4byZJSav{}~hkW3>bWK#kuhM0OU!Ux-TA_mtUu1#}dEwhaQC(t+K%
zUgwBpxh!gCW^u`Q^pw
z@}2S7gWFGCd&Xv%7x456KR7>)nNnu1pG2;%wWqVDVbx*1&g<2oDaPoJnszw;)T$4#
zZyqIncY&3knW`9qSH0cm#pJ3hu=BidZdY8U45)%J3ji8p$uI(g5+bPL&NyVZqO+)~f~cy>
z`w&|;yLKQ7)O``;^kA?;KSC
zxvGk~tPC2!ltOgQgpngCq6HDij(}oJXwc|<_c@UiR8opW;GE0jzy<&_v6RMmpAixh
z0Wh;c)E)kyD2!ppY@NxVAxKop2n+xP+On*5>_yXd8;E*DHpFFFAw$-MA}J7{5@+iY
znV541DF_{?lf1(|>rRXUi4-O5psFAu5i)?1a#0k@XsU9;rqE%F-~h~|?o&#t2xxNF
zkT8kn4Au`s49Lvc+Cp*H0b;8~Mb&<@Z;bKY$K-A403hXHj6t9jM+8@u4w1)k^dUIs
zlsKtaYs<2XF?;VB)u1ufg%F6COSIOc6#NL2b}z={{iw3&{3ym8-FyQ3VF`uQr7hc)
zJ?wDiig`5~w~x*&9L!Gk&sJB|9z|=iqcv2;zVLdKurlq;n42JB%>9PA9hH(`FK)Z8
zC7EJ|v9>XV*#V4wOPhI7cr@APsSK441#bKGYUoF)W;g}lDr3sTg}WsN*EtQFi@dxl
zmUk&!?w`zW)#b{Mr>o_~X1g|&$7U$YmMEn~Rn-Qu_o*$a!;}50EXBtXPjXgdNyA
z3FQH$k(06eFG7w)R?q^fLCWL+tUr7*`fU+BDq7AO^MF~1jX@`2Hm<9Lf)r3<>v(n(
z=u>Ja0cHdw1wX3ys(F&n4Os*tSD2zf1x->Y6jd7~3L}w%in8U#WW^z>h$?186a_`-
zLku~!Zc@0%;JlcVruRAPQ2~t@mIwe4EPLP~sI`QsnB)V;%WP1I3!6RIO>#Et)O(B4
zlvcRji;LEk7P!~krK=Rj8RSD}Cb3hOLr#4#OPah(kx33}Xh;g=ZFkvS^kqrY`DB^#
z^^3DWdq-8XofiIT=^wm1!Bw@t|Maz+SKbY39xONamaDPiJ!XIL0Pg(q;?bSv_}S^L
zPe^<1B7_LcZj!_{#x>5CTvCdd6U01sZgPD1XdK6W4BLSvxWYEfVnkJ>Dj=n-1Q`Jg
zAOS$aohkAA?Eyp*_`rUEkd1{lbCqY;2WQ0+qHa^RswN7LV;F)5D+MwD1#vMo4$KhC
z&7-P+P!}ZiVG1Fw9`rX~WJ8ExP3gGdqRC*qO1~a3W@8GfOCp9SL6xe4uYZ2h^(ZY|9$aFqoML`Nh;%-f48kI_gr00345a|(X2GG?=<OS@?llH+{D`
z@GmPE@B@{zo%6)w>fr}Uw%<|qLM1{X&6+K{rm2Z&=!Z0fP_gY}b#>=^s;e8@*A{-+
zuSf{ptd7&eL~V?b*%u@<`XS0}e>UGcFl9Qud`Fb`4riO~^1XN7idkj{2i0WGWlfK&
zlP7Moaborkr}f^nEQ(^Ek2d+fQ1PF>a{iNN<6sWw^}|2CjW
zv|TOkUfq9lx!JZSPZW9_F7H`KtJURtvzSh&H?H5_n;(y%h3urkXKp`Hm(^@KjiV<@
zeYnJ8x+A<3nUoSDa$%7f$IG)zzZ_FFKR!FZc&ROa{~!B3&pvsZz-I-OXNTT9V>iQC
z#J+GW!LG9Y(DP4SCjamJ^`HCZhL5XjfAgE0|M4fFJ}gZJuw_zC66=D44LOFCVrg1r
zqeh7Ncv}2-KlKqP8t5Y1$~a7twV=Y(+A9?nh$v4frM{8|S6iMRA(eY|^#mrAV`LrvF{qp4I
zakKE7O&mNZDX=Pr^oY72`Ox(U_NQLIOIokI{<$x|^`-mM|H}`X_T*@Gcw)u?8io${
z8fR+ybT!4idGw`s-ngr!+e6LG;lahMy|B->ZEK8cL_^Qe)%WfJ$=Ss{ZFsg^XBqv
zr+ik_OSH7;G{w8KX@TSC#>ieLO|yS^u+{kFt=ql9vq$$&Zr(m_C-ZhP8Dp_m?7cjh
zHYGp+QUJZb7=Qk?w;u$P*4CB57(6Sd)565025F2SoLLMetiXAWmJJJLAG2mIvFCos
z{Y>}Z+j;{Z?9ac|irT2}|J&_Fh`@-389*fwbI8KPrf4Lyk^wfv#G@nfu*-wC6a!!QwYL%8RNLQu_1E{JdHD3l
z*UNP*;X}ta5STNt0vJgk;1psuqQDjGoCn`mYyjM0xqoYo=fCO^Vlc)KecL!I03z?S
zvlT$VT9Xli+M=!&Tz-6htIxSSYB^V_!{7P4e}5C`asAA|V4g
zF_J(PHI@Y=FlAM-LWDzu{O->Zupu6!-@%VQkUZVJ!Rh0+
z_YMv47?z}9kP?VU){uAiHzI=y@@?yD*BaPqVXG(tSYl%ws%9Tz&H#{DGGrjk1X;iX
z2mmP+Z4Cg#2&l+RA-bqGO0Q#PR%^+X1|12BswV{}IVXS}aRRn+3^9tPVZ9BL)-_XG
z)Xv!)aZu@vYL^rsFepGECR-U(B%je5E=poU#<6iG=bSSd12me5Jcx9|X6Un_A{Vm^
zr;~!4lC04oP!<<~ETJQrArO%uC;}1yCQwl+Vi{-%X=S%uAhK(Mop^PyynmDB#l!2~
z)K>{g!?_<953X)LdF{p%PhGol@a5CH-PV^z4dMCeEf_Conz~Fy#6hYiyC*`-iEQ0WT~2!OUaI%e8URs3
zFEA)!S&a5b+$|1Dk~R9|P$a={rIF&~#$eiwmd9@WLbY)Uo7{4a39~8_iw!`
z=P^bcwzOW4r-iQ%b?@dc-Q0WL-ao;dV3tcVx+&9|{RaJ&H3x%A4;x`ld1{48&cm44
zSp-AC#xM~*t__3PozY3;v4Inc1EcDRf@)Splmx0Nf@G>}lXC_YM10JU)0}fgWCoTU
z9VllIbZpod0tNtM2$^zLqbg`rQ6!;g76m|5BqC(|fF1jf-f~k)pu3iUk^CDQf^qi!
zjaPQBq$nbSq|D4{c4pv+2tX;KamQVXy${4}O<|}sIcqky@I=H&A_As>$NvQ~MneX0
zw>QOIpEX5MF*^l0MaVe=fGw&?YYR(7$&!7}3RGIBgHNhDX(wf2cTb)3&btd@j5(`hiI7y|O1-M0s3oML+@ks$UwlTD7e$Y0{}LNOh!=@>fN?18DQrXZyCT@YjVz6B4$uk5L5+33?b#L
zq?85OXg>r$rWAGDEMpQRhwOwbva`;TBBz1Lsv-dGrW_&>BSw@cY?%ceTT+}
zKGf<;`e7W$(fg1Slx4}xIjO3ey0q3>n~S1ANC?bqa{^;IgdV`V!VHN4AZ$AZ8Y4E%
zeq!m@Yr)%c{X?;66G_obaJ3nSRnQijw)5wpb1e?dq_lRg>(mtBH)kf5KrvA-SSeU*
zYZf!OD_7Kzqf!P;Q5DfS=i1?28Wt^kx6W5Xg!;%{dnT9HuQm?@XwJlg6mVWi*3DMC
zEl-MV=-5a(t`AY3n7G5)F>xH_>Zmf6+Uv!%s48PPY1(SqBLE1XT7l*Ss5c-P10t08qknZ$W)Y%jYrTKr(j1ew3?GEhR%;5AOScbszAVeRfwNr_u0nl>7dakApeMoV8t@Qg`oI!a{r|o(m%oY-=59e!twoJuAEDj;l
zqAZ$iGSr27Zv$<&gnM0n2FsE+S;GA2+SFC$)jRRQR{*-{3!g5b9~g_GT~}H02ocNf6`xVgc)4roP&g83{+HbcF78ikj0=%a)lanb(J(p
z2!&4&f-FvB3PBY>e3Q6WijA>!4Q{B
zY;o|CsGaNyndrb`O?6TG+$8R@pc12@I;&*w{oqGLEULyBlXEtub)84+(xmFT&TPhM
zSxretORY#9hd$@gNOD-JPP0ZX&^V4kdQUL}D7mUSnuPVGTr8U$i)pRap)qyvd3!F+
z%?RSzaNA_E30R|A<6v0sRJ1?NK>B|B;JfiJ
zGybt9!tandQ{pVBfMA%4qL@sWIRITFUap6x>Rz5~pUL-Qi}RC@)d$Zcf_tYA@4WF%
z7avWV$`gr_DWNV8v!vDX;{5a-Ca|V*eg$e4mv48yZ`+Snd+mC8dGpC{VJHPTnPnbXa>`?-C)7fS_=7gQ+
z)5~blmf6T+d$jkV>8#c3x$8Fb`TXeU8lf}B=FKV^M7HZb4E>_-`+Bbp4E1J{b2i58
z^lFNt===Wi^3qjv!>)wUY#)B)`t;?S-=j*Ubxbghqbq1DNQ`9aki%8i8x<)Eh;%^H
zA9?YmUw!wNA8|3n;b$-3`mx&|X>(k4t4Vo?h8KNbAxa!UG=(f+)Zpl2Q#U{U=@%cp
zeHO!fwGO@net{lJJr6vn_rI*w>;ZKlP+B<3`l{x7|8|670IXPz?Vspnt#^1E+6
zTbO_M^FJ_#%h~lC#+hL=f~W&7sU%3$nA6kKsw$2S4)M>u`2EbOFh4-PEnoCiataG3Dg$o}8_@|%C=
z@BPdb?QNQuuu@x4h@)Ux=87AP92Tvb(7Rl6iV#>i}qG1eBwxELcMiU9ZF;VS*dKmEleH9zE5Z>%;E8eCrf+~rrt
z^_t>&Uai0H!$0`x&wOq==a{4{vvb9_UVnSN8V?VyfB$DcZ;Z=X6vm>kBI)77hpX-8
z_U+sE?%m5dKYiGI;|=+%ci8T_%QlR>)RhN-0_Xd)ws2WwH4Ic46V`Q6
zJ$q|*74kQ``LAEBo7kD6UsltWF#BOr)E+^ww58EL*#c5Z2@)Hy$RJV+H#mvkwyr;@
zzx)anA^Y-9}PAdD=7`C8>>qrP-bOopY
zBBS2in*d7R?)76|#IRjozV_BzM|(#fz4i2Bv7k5^&3yUB*G$+>3s;7#AAIRkvrk+%
zm-x>8yN@njzu%?THp6J{+wfK5EjwreN|FHL`
zLAGsMejhZN*{rp8lRLZ9-+ONR?tOh^X}WAI_0bE_;N2;&l}
zu-}M`6L~T+b7!o~HP@WuKmNbJK#bzZ3voM5T|=dm#neoouWm>*7C;fNLbcnBthsDf
zNi@!{#n}Y4KpkZ2U^kHstblAdsYC@v_fPtY|FC{1)s80%I}lv8TB~{X-VfyaqS>lL
zN8Tfo5ofh(i7G%n5(^Zep
zCQK2L5WSfPs9+9B2?araON}XsDFP!qL1O0M8y7qVf?yWTW?&bch9Db+_&$|jnzL#iyG6ah<-)z#IwTFnlw
zk6x3Gl9YN3#9YmCDr%)t=vrtTg)nQHT2jur05*QkKkc{$DxJD?gT%dn@Z=d(9oeD4=tcV~AG7Gq!Oa5`_|^GEx`dO4V!Jbv=dJMV-x
zij>);)iQ#~bUK~x$L*A<=drj+UAeLe7L50r7z43sb^Rr+RsiGRo7(QTJ(^{2o6XsM
z95>V0q);HXZadWJq`GAU;|!2`D%&pZFXtT$#Z1AGIdsLykePA0-gpRVUckDxL8t50
zW)@+PRFODfD4a(g3<2v{VtPk?ll;Wm1^S2p9>tk*CJ?1w0eqcbBNwO;7%%r16KZKV
zJIbrM_3N5Y5d%BtJx8FL#+q{-(uN~VCUL&MZ;GYJe7fho4^0fgot>REZCGBdFPGa*
zwspdSVY+{WDU;2c*a#KZP&dc4Dcdqsiih*C8B;gKqg#vnPfjO$`?2Y~_ftydu|1fQ
znMqNPMKnx&DQOzJx#OD4B=~9Lt1@{Cq%r)zuTQGP7eIPCh<^5(m5-kV%Zy2IGk
zG^WKOAAZI+5w7mNX8y*$I$CF*W`hW;hy=LLjUu870J6idyRWMmJ5v=SAiyHhk82?F
z-Ur8oJ3jzJi1w_ISo!I$N#vQK2h{n1Kw_wB0L0{pybl24L<#_^KrOo^3aTpEu1~(Z
z;0QsykDVDh|W2p9z7D2iaDf>@Xo#rR+6ERdcH5n#Q3Qyb8H!lnk446viK6DbTJ|F1
zeMCeeRjmLHkpWZ%fPl~gC=n7OG9#1Jn$n=E&eM>GT5I6oJcZD7T_@Gjn$=7I0Czq5
zr>Zu%YKjO9Xoz5-y9vaa$K6yS5)qMrXf+XSyD0#aR8tZxqAK3I5FFD@4#2^a<1D#I
zttC|vcxv(hGlAU$3YD2X05dDvYP&*2@&SDS1c%i3W34rWW-{&Ct}!!a9&_HEB4dnA
z(+tDVkI8$ld6kO=KMCIN9~_NoyB*iL7KjKnYubcxXx5!yf1)f}(q&ld~B)k%=vjoZ9kr{&vo-!ya1<1%e}vC^2ipu=X_x*QqBP76#-r9XP2(aZ%nE`zEA`VTUv$f;^L)mA%%V~o6(@0$
zxk*Zb0BwUDm3_pkjcit89FStHgsR#4G6Pi5iWQtP0t;If7?aIr4hhZen)lT?Wi~<|
zIt7IyB-K^3ni5!IR81aB0Dv0Af)y~Sn#a(1RZ!Jp_1S2Ji3l>=vk^UJ+TqMftvXy%
zXxSCc8MR_teeK)QLjhw`$CaG|p#rFKGLT}(7`qswvl2aa00KA=6vu|%m7Xpi!t!hc
zfo=lNzw9=v&FSfBwz_wdXGe)7;XX=%8MLV22e*jCb0AqE?`tJOBhB0PtIm+{%Wn
zw~D}sR1HgoGGf1RBtr-sBY1X*oHP&VD%Mr-A?z=t?!!pl6<0PnuX{~8zg6xVbkcNA
z*p*8IvAA!|A>=jYEf5)1(|}4?vQe=F5LwZ5Q1q0VMzMj=;LsZoM{~ZF5UGv|QlYlj
zUaDSb=E3>m+d4hO^@*%cR$)Gw9}H3}d7M+-uS@_^os|#*)Z#^nORmx{7H-<|P^`L&
zkV+jl%V8J@G@^&zo0*C@Fy~yuIWn<#7Z;nocH(1GRaiBWO+f@yyNNGKdF67?4nQMP
za7N3`Wvk>7{5r%b0ysyG
z#fJerA(zmshe1OAk_UnIhyCy8KF*M+po7PIo;bk{Me8D-X~8^72SHQ#$mnMK6-fn+VPE-p5MQ}(ZBS~H?BW-
z^MC!-^L3%=RKtXMtIWPy`1Oc=F>aNZ&){?uftF>NUUNZAyv4}Aq
zU%%dDXRI*o#@Zu!@;;9x*A!!GpgGq-2q1`D#bP&GpFMVI@5wYBPw-EE=%8D_^7qci
z?>(0P%`bd6)V)9W(s3*(!)7vFK$xrLY}I*hYC}l^;%nL1#;XMxf8_aVf9IXkI9aqd
zX)9N^8QCJXA`%@7iAkYUTOVtMv9!nC;@&p>{q#^yANtz-0=U0$`SUTS7ee}1f8x_W
z_1Y_1>*+X#i79Vkh3~)r?wvb#nh;FHyEy5>Qc*LP;xJ6Mnm4-Tx#LXK=;sb@>>pfv
zaKGPU=fYe^m^AhA$w}PW*w>4Ef<8*%eW`Etr~mygezo{pF-#AHyMEY~9`@T>^IlHI
zb03*IX0+{kleQLp3)RMuIZ1GB-VACUxf{d|o_jGA&N+)nN@+JjxI68~7*%x#7YZTl
z{wpHDo0h}sYWZFo9~{U{2Y0)RyK(v6z4yQH`+w@*lSfyZ@z(Kk-~7&R#MzMRXhVGI
zrI%iP_0`v2dyRd#yu1WZBqijWbKAC``TXZDE-oHFetdX%czk?}icc=S4~z7%%k7_f
z?)L1JqlamHe42-l&e!jL@r!@;YiGl^9xeZ=X)Gthn)+mo{O*4()<`Td{1``(w$Q`b*z`
z>#sjNp&DbVA?&SdcJl=$;|7QN1VNBT1Op;4U#l1MO>-}WeBh;AJi7Su&;N}(FWlUF
z?h_o`%{#Z>edAlNzxL6WUjNj`KKXkmRNlLIe0bxqeNT$cLRB9az)R9LsRb|sgUH|So#S`x@blyROkd!M-Ql?g!
z&Dvn#ux%=l=g_n+Nd*8j2Gt4%VBVDJciQdv!}=#*{{RHC)>=g`Gy?#KpavyZ0wm0;
zDmI{0RsaG=%nU%NXaL^K7|ek|rBn-u2w(*S+ck5P6%o-R63{3K0GOGWiAYRgH_8j9
zNKc#Zpa8rZ3CcO^ZwX(iwSr2Kx?}h3=5L9J$s-dYmLiDu0Yr|Tt|AI1MM}0@O39WH
zg*}Og15{NLWoAYsA`uk@=Uo9Sr9f36LLx#WqAG%j`~g8owO9cF4}^fAuuC=u#GOh3
zjVB^8OEm*PaGpaXvxS8!rm9t;h(Z;`qH5$L2)LrE3Mdt|pjOsdDLnRd=&MK$!KnBU
zy@KSzgd)1>N2?_QviAg-Lu;ivL#SGH5GfHYkV`5yXX|U(4qmVCO(%WdrvR2FNX`Xh
z=h%~K&8aUX%`9RY3MgnTLvm(gZ)})eo?0HbnF2C(6XKcG
zEwrKSX715@PAa!;2Uqs3U)M1Q-^y&7=X*5UuOV)&YTL<>;{Loa$;Gm5J?*zrs`udg
z6<=P2+^Y!qsLd3_e7;IbRhcJyvFq4`rC3d?+b>xpYLT)OS164%6p0jay;@)0Y_E>o
zx#38jL$1UVf$Ry0G(`4+6kXAj3Z%X?O@}}sPP?uf@=(Vb+K%LE<>BP=>aqvlElSn?
zhg100*WcP`InCqgkgsHmo6=^yd9>(aDk=HKH$M0#
zh7hXQrXRQI;_lv_C*EwzCa*RE4pmph`$^!3)qF5zz
zQq{%iIG91sdAEBK
zi~=8odil~U}dJ+7LJ7y+kR!RXtsfJi`&b8KOVt^sm53GtioEjn`IY6u;CbAP6
z4bc#$+spAPn<6`m&U;h_2p+d)0;W5985mTkwbojdFz38`_1)sP!xnA(OCs{#hZwyN
z0I-WKm7=?h1dkY8^v(w+5$U=~2qEVjV-yiHnM@{T(x<_@h#;jDB62>Mp{g#rYca;@
zbXtmt2m?Cjw%cLZKdz;U6acECW*CEyP0U4hS3p$RnAwM5wN@q!o|t6U-YFux#=3F+
z&FlX(eiF=#ftlGkw}aBvqEbawgCkW%;N5S>%#e%;Qdv0;1}d7tDi8zaT!ujy09*x@
zQp#8*6PZUO@sg%6!H_+HA6mltREug;sj!}%S5V?GU~u8eaxjoZz3wb}>?;si;l366qkQow?k=KwC50gu(y
z9Jc66W9-{8Pk;+G&>QqihtQA_z=oAL2*shGm{S)pItyl21d+%qp_!p6Ng~l<1a*XD
zjT(xfFnKDG8>m71#_9>=QUEo!K!~V9WgCO*+OAf)SoT>(DS{CS20obIJl@|wZ0CnE
z#~(a+vMke?g^v8FkP6qaecY5+uHU>C?EK{P-s!o9W-*Hm*u~Ay_M6M&lkZIX2kB~?
zwkNZ2ymxrLz5g5fEgdYl`Jq3$;oHfP!nI;ZmlzKy&Zytivv0->i8-wt`L>&BW5~Bsg7zj|cR(JjYAVR(ZFyRUaV8l<^Lf{Y)j0`|Q4S>)bVdE*s
zfD{zk1aV|hu+UnRoU?R}5jm10wBQ6iE5ov?P`yCV5oye@xl%B0XXmzO%>+V2Wr%||
zDB@gSw_0;#Bto$YS)?k77?rkZs*^PKb(_3*e%d|{k8fW4@W(!T_xV@0rF?cWxqtutCr_RnEcQxCv1^-&|K6kXfAiDd@aqwcE3AXrO`(HpS)Oq=Epv>m`=|TPR5~Loj2U>?Oz+>IIjE0_uetcU;pmk
zUyc2zKKc1ie&o}qmlxwuUwY|PRlT^l+S{9`l-RIT15Mss%Z|x+KBii3&i9Lsh28IQ0jb)`k``s)Z*s2>5!rf_@x
zyZ!J#_*-xON5A~tKl174zVO=ZL-UN2RR^E@%x89S3*N;(jXBqDI%_x%{ieX!PTFb4
zws6zU*5j}t>b7>czc?*9
zKJm6YYNr(I^2wf3+f(1UKJ}>-BLSz#s8EnVAcy%3o7lENbv~URUOQNHWHt2j`4n-d
zGCq|Dc02jqo?b+DBEhcSx`69${CEDj&;NU0efX8{y|ZVucg&yMyDr@fnet>5>O+nYDX^V@H}`Sz6R;pcCSmtT12{^PcFZF^1V-AJO1!T
zUf8Z5FB+#EeokO>J)AtMf9XDqu}PZ@NxF324{xOsyTCH`pS&Ntt
z!5G`=WcK#z!~4%Y_x$r0%d2`(sl2gf&x;tYq)dh_mEKj60?Zo@@)>q9R;cT^s~(|`Zl4W1
zb&*8S`F6I)O;-g}#V7zJ1~eoEP*DRjR)zX3dH9F*!vX-%r@#bMRYZ2qS1EnpXG=}f
zc?hC{rFdjEt0g&*3I>D{;*_!Ut|*
z(VBD3kX2Qo3b2tnLKWMo3_Gb<5k*QVrFyq>4?LYldbTSFApjr)ni+u=P}+&OU9-EY
z@{CY~fCy+w**gO$s))!0$OvL01Ym~9{M3J7W(Zcdy;JbS24?k~c(^QbkOS6aw~UD#gs=FhT`(4R}(=*%2wIT7y~{pz#if*{8AewNe{#x`jiWoSe(0k<=%m}9O>VCX4CgtQ$NP(EZ1yiMPB-gO
zq)eJNr9taHT4yxutsi17X?_cv6`#J1B{v;R4h1>pP1B#QCeI<0Q*msnB?D12XV+vs@cp(~Uiq}N$R^5tww|_ociOhR
z!u9svfx6?GD)~C4_DZidL%&(ConhNfnkJBtm?Agb?BMX=x&mzbUey6sF?w`0eD?f42!84m}W3y;v!W>
z4Z(-l?uO>o!kD1s-Rgl%t#?F3fOR)7m`lz%1Aq&jnbF7qtEm*#YPD)kMI|}!01S+g
zAcO!$s_=Az1Yp<7->oFOcR0A;jM{qoMh0fJs6qt=C^^?%#0w(Y4kEM*3fZ|4z`#su
zL1Z(tD$mwr27toZJh|Yd+PaKn76`i##xjx+BGy`qRFS8x^Qy4xS%(ll&??NV>)NL>
zi3u1XBB=zz>OC{Jp;h5F%)CRU&~=^nEdUUY-ZO}Lu6uhk0KnzgbseCNV{axQ`rWGp
zx2LK^p1B4NJ6n88S39Yu7C_`GT5BDApQVf5o0)UYF+anQGNP4Ix~^kp5gCSINTVS+
z=YY&q4Ha;Ar9oBI)S*+WrB*})P&GgT?-)TC37BZdYf?p20MML`kr)lsKoM!D*b@eY?eVg(tn*kdmJiv>HgFBOPnqW^~X+
ze|-JO8RW5jue9e$nptj5ssD3mm)<<2yEhz
z*=5Ff)RH2u*X!l_Dtor$#k9(>JzuR(&Y#?Suq4)%94i$2e1>>YKE1LxdH#5@H}4>X%aop6EZ3#+XitXWp){+BpLiUv
zo~)m|@ckU^^vG$An3vTFCJAy5<~RWR4N$(|W>%O`C+8FDJBgkX+5v56cw)v=ak0ZgHr
zy2IOO#OB3UBLS${NV>z~8zaNghj!vy8^EK0M8%*#Vl_J0{0tE<_FpkwJ`hY`Y)U|DWr(HVU71
zxIXv*J$ugYGy?`g5>%6fKr(761r5+4&ckY>7ZTXB;qAXOy#<5fGEIN(;a%Tf*B1`&
z%<pmD9((+`gWdpOT;k)1d*80uUCk%y$NAEqkNQo1hHZa&_Q22`Uc0Hp{dOcYGzpGZ_bw*0S*jpl!!Xp-j4F+HLMq_|{qt&v
zq1CJY@^iOu{OcdO6!kY;sMm=_bsOSVQX#bR1s4
zv-m&!*Z$NO-+1qT`U_uql(zfclLc@^q(~OuMD|>@j3qI11O+TU;JS|9H3~3|zSfks
z_Z!C1_?OPcmK%mivVaQr8*aO6ue@3wc1pDaDBE|xQT;!Vf^lx
z-aOy@#V>#9onh}va$96u{Eh43j~qc`>!!}~a_Z|u#*
zKXoqxK+`n4*_rKjTUE_$*C}9TCNh93GMCdbyZ-wAwg2Fse&tIay8VCr^?P63u7=}}
z{I@?zWc>5|HiLwmNVzy9aakh?TD@}2jmUL;vzcW
zDy`a{ccxF-%^!f9pBjdKhbaYyPu~EM0L_35c8w(GoIIVM%|%8sv{b>*gLe#upjOmO
znVC>53yi0{8PeXh68Gx3vRaw&^*kOn4<~juB3{okfs1`xo>Ts%zxKrk-~a0Ui}N$N
zE1&qO;m)Uj_BVe0Fr9q%6Z6xP_xEPgN9X59cJ1+Y`)7ak7xs^C{=S#*+;U7r&b0A<
zw_5JN)c>R};Xju8N4Ww1aZ61q;+)$#zEz=^61s!^qdTA5Ynn;U>1(GC_9Q+3+J`RI
z>+gO4`~U8D-_hV-oJ`y0#q`j%@ui8hue|}
z?eyy1=NE_XFZp8p<>foy+*G%>7p0_2>+U7p+p}Z1*URwXeO_63b3J3nq&5_RsN6)8
zTD7#({Z!9UK@fujZhVE3mzOl=N`zrb92^*-idjmf_+4%Vu|WifphhV8G%fLm^~3t0
z{sBREw-R{u_pi+h93Uo!CHhwh>|N9pyRkZm8fZ{W>zp5$d2L-XoM)DYRXB#p?C+?
zAel1(#A-Tpd*~|@HI7lO>
zJi4I?J&;lC_OE&8Qg!Xc+vGhno1#fURWenrTxW?v4T6{lisl%29n9qs$!!
zF_P9@P}M4hOud=|U{Xe?umMu%8i6+Zn1Yb#`T1~gV>+Hbx>s;<`|uRcb$s{c(T(Yy
zTkF%emseL~^5d97@FKF@ET@afOBa9nUik5M7ay862kr6^*%s%=+}_IaK5w=pMZtzr
zWxJ&`!UjuX-$q)rRiWmhBAM(7*N%@i{@S)b8;6bC+i!iSBHP1PifnbYs(DCt@Z?+C
zr?BT}dZgeJKoo@_k4tYJ54?mX)~zd{t&{7y(zp#c7IEB6<}S1rJ9TX}3?Z=d
z`>RVou5h!=CPUkm*f41j_4eo!^VQ4PyY`?*@5)Jt+LxS*n|Ix`nURW!Y*RIA9n${M
z4MdmC_x-S5kJG(r+qunV_4OZ|KRF$V+GslonHi?=@c@z)`(OG+j>X*x8j+0WrO*dDoUQ6p-NeQ7E
zbFzA!eQ2>-Nk#yXDswJQuV6k4)$Qjp&Ego%&AGY*fA5w24YnG1oL~fKR?GE!=bNk9
zwcFRWreao!T?|c_kbqqSm
z&>J$Nsga77QpO>tl%xs*iir1qH$C!nD3>tC_<>|8&s4;+6AYf=hIWV#(`T?;sAfo5
z5xuCX?UpN*Vznrd&~EOXdFPa>B4%YLKpM!?Bt-8inZ0+SMMTj+)U?z};)qqXiLsr`J%$(VzFbBijN`B+a<{JEsYb)N
zjm-qWO3J|La=8^LDom6BY)o0CjAJ$vRb?uw>KVte2h<&^al73b3|Z8aX*b932vJKJ
zwInlJEEenlSS)45gG^&c;)ww6Gb$E?a&l(vZMYz1GYGPyvjHnLM!@
zQ+CYGdE%g|Nph{#Kp3zTG1+nJz(`aHjIn0PdH3zb01Q(qBBLRIk<_{yoF{}G7OCL?
zVvK%owz#}n70ptVnPb!ReZO9>+h(%AScJek!^)0(RZ3ZJwrzTj`Mu~R*IEj*TlGuloEhwFI$Q-(cFtv(
zTS?Agx0srP0#L?6h`7sji3h_o`_U^>I3xbtr}i40N!!n6}e4c5RI4
zw3I~Px~@ZOwBxqT1?o`JdYfQ0u)0NYf<8Dybgm+*Fe4$?)bSL2Aq0{HqlxnhuqcV_38PFFrg<1zsRM`+`>SIMTst8~Jq5wOClme)c
zHEt}xnY+&U7OLeQVmDbF-Qtwh#1xFlToJd`vL5sMkDl!Lkhq1<<@x8ujY~q{
zW@ar@r=i=h&Mq%&oRtZprVN@Cki^5Ls6(a3b!~ug*bLh>E3oof^MpewBM>x>J*p?X
zN@*19h`pmskZYy_f^J0XW`_)@QcRSp0-2k;Ni%IKNI#~2Pz7!|dT#}+fb$u_BVgI&
zg1L#F8Zsz-d&{ofLGL`rakG(}LgQ;qVp9NC1U1|_T%Z;A%~1un!@A@=pGM$PhP93TGUG<;U`T)}U!Qu1i5mcY0mD+BYVz&{4?YKcE5fL-bY+{zHCpJ$j
zcO9tSu)ZJL!Rk>5o0f8W{j5x$V6$G;J9_b{Tl0_Rx7&*+%W)fggHGT0%CEffwXfW|
zdH3M(`tkAgw0d~|t+!u#<@HXsSC3ASV|#S{$&<4`|FzSH=3kJ!h%>@*oXx>S+&ucT
zmmkCTA8zz!&v<7!{@CJn6N{aFE06b2-~G<3FFoh1-nlXB_KxnKZ+>w9WPSggrgd!_
zwrO*EdD5r;=8fl$j&59xd4DofKp*1>^7!QBa6X^Z&CN-fba=L7bmc4BO>~)ut_)@nxT^tS0Kqb~RW3hhM+*^+#8K?(6TB
z=)yWlE95v}WZ`i;`sh73LTaPc(3K!qg$f0_IhL`|<&^7a5ykKMZZVCC#=rROC%^pQ
zZ~rg<;LESP^wQqm-e%JyV$(DS^X^Mu{*{0IZ-1`|Zqam5@@hyYxpaGnHBBRoz2{|-
z7iaP(Kk@wUIXpf(x_-I3?9fuG8De5?YN}&+XLI)4BK)BryW_j7?+otmyt#Sn!LXQL
zW0MM1?TU$C%wFK2*CjdN~e@Ot)bzKqL#SA{(
ztv;J1n!0@(!>Q+HJO1w1?)=1SFMsv@$A9i`|N4LVSIT9qt17pqpZueL^hf{Dt1lf-
zj;L|taPnvV!+&Q;`KSKC&wTt7pX$2#`Q_F5#mVjuU>MTb*_oNmrhCpg1v_Z(jC68q
z*5I9&FCU*=oSmP)H$L~m@pD^AS?06JC^VeQtc5!hUv<*Hc=VB1KKfI?@290^Vt#Ud
z@#y?q0o(PH=oko}Jbduz=L{BPRMt2X<8RGB^x6I)U%oN(>Ozg1#@)Vlbh$m<4%;7m|3P!(8$a<+$H@z?
zr(eHe{N`!+;;+1S{o3-yy~XG6+O%2PJj(R&Y&l*ouNDXUo*f`7`ll+3*$*8AzrD5l
zi>|H^NC(;r^@@mF5E
zlkm%b@9nSMfAGQ!4`)7Mzno1MxViX=mv21(@`wGTef;JBr!U+6A-LK0?RUO7I3FkO
z)3-k&hX)s@_u#>kYe9~i;XLm@aJSyr3LoB{O@{vTsu;K~IK?^)c}!u*%gq^-ZL@#N
zLR$%xeJWJ8b>^j+E_m;#0_MC4%osv0)=5wRE5HU0&sdw%1b}2|KEz4cik>#
z27#P2IGW975%v_U7%AnkWJO~HMZ{sl%mE!LYf%L?@3D!L3xziNM$G_etXj*MNAdjIP?H&
zYNFMoq6s}^h#DCn2qGYunC%|ieiE43h&?+qF%c0{t!0M*2LpwwVpb6q8BA235k+>j
z1Bn=sR*_mo6cNEQDj9OKYXid5NWk5h8&TQVvZ$IdC_**cNf(KBtu$mZA|Ue!1Zo{{
zahO@vkQ@au1}w(9?YRl)0LxG}8z`AQLBqwjH0?ljtktR@aSYx8*4wvC1c@CmW#g1=
z>~qZ+rkJ$MLW(Yf8_}h1#)W}eGp-PYNgM&8s@9sPCdQT-iMw2T_B4}6uRZ=|0oTv&
zuDa(YB{>coCzI{-xmnjxFnX>u+h$jX0-KRpYfcv@DiOBv&W9Gn@=t;m-FaKwaw+_yy+I*!D{TWYumcYJX_@{Xg`5b>vAJ}3GsNM
zfsE^39;lm?gX5XQ3Bx+nuGS2GQax?AzEXYz>$$lo0Zu!
z&lB*@$lJCPwMvfL;CD2}U3@XXQ{9b;3;}SLQ;4NV+eHI!q`PF4r&2n(G36attV$*#
z1V-fGyuvfC@l(YA(>0-#OhnEF?;RM5>^jNH4t8UfPnRSpnB&QaGiA?||Em0X5mq*BL%z^>!mkU5%#$#&??jLGc?z09uG>H`qDU01zU@g5yo
z%Ei;f%os3Zjy~+o=99_n_~4M5NaWgf;t9N)cn<6JX0_a^my?qdM9V25S`))C49uuC
zi#SsRAZAAZsVXqz|%7qArldSBA}8wBu8vS@N9gGYN6OnRUnPJDq9f&P#0Wm
zB8dSIGKR>A(THc$#bR+ZRlc~oyjm^$aV)0DBnTql=_%BRk(#FM+7P1;(M>QO92|@}
z6}98N`eQ%-h12tkFMa7tK8D5KQLS~e*{oMvqM>P8>xe~n0<&i#aJ$jwl;lGn`s}AZ
z{oyZt>2H4TyI2ABP*SE;***
zY7Q-TxBKJA>+=T--_ZK()_(2Mh}K-y^F8lnI`NBk>^G2I7e?KFBYyAw;GeYn*ScxA
zPMg)~IXpgN*G^=c9zR+Md)GzRMG6jd-zj(*x+s!N2huCb6@?8fk9oPJAmicQo|4`_
zKPBhfI$p-Gz9lshOffG9oCV=mQY-xu_=R91;P7G8Qy7
zGa?1pY&HN8+b+sQC32xL8AH)7L;xt_z2jOd00<6a5{G0Uo3urmHqO_6Fl=pdG?Z#%
zAIxP0^}cCm2vu_Gnu(d^F>Ptf)OZf$r`1nLRscPi@Q_&&NkVlhm3TL9<6Jl>si>B{#-pNCi|wLoxt0MhPrpmetfUzjP`-jblM6NbVKZ&_
zHPBiHzgoAB!Fv;&le>ZAmD%M`d05Xj7yHP$*ZvB2;<`Lv~&PTs3e4E$?4J
z+$_uHUL?1^_{#e9_6xT@y;;2QWVqTWIGlz_Bfc=KNK91&2Lu&SMF-gRixEPhE@%#o5LRw++vZ
z>MHN3FKR~4SsSWzxt3uVJhf#=j7nfu6KqOFBm-@tp$GHehKwHlE_U=kA-%!Z){vbL1cE@r8elm{Q3NDU!ad*Hk`LIp(Ps-R%rM+2|b0KG&f
zW!pdx;>JYOIaD*Trw159GZT~Msc}yX6p#Qt7%LJ|1+BY#RlyKZ0p!`?f_B>l`^}~~
zp};c>%Lj#su3BNJj08YMO%<4`2cNm3D($R?Kx(YS&(L^=Pj@YfunY1D7Aj~3^T1vK
zM3qnin`TceP>WipXoMpOD+(imk{E&lfUyaZBDl&P3Y!$eOhM5(RHr$Gg}3aim{eoN
zX0D-2N-P%h#*M3zlg74suVao#$m6zTA6sV?OV7-iW93P7Ayt8rTPy7Mwr+})THK@p
z2b=RkbS@HP)pjAHGM4tDG--`G%(*q_er^8`eRKVTe}(_XhmY^9hN}k;-YJ{2gShwV
z$3LFB_^o?y|CtB-QeHT|$NQJ}HtS)Tapj+!WD_8Yv<|Q)t>wx{Sgm!7%E{Lo6Lkcl
z*FV0VfrIo=yYJS6FP_uC_ZR;DWsw9to6Kc-d1E&D*e&<^{_&@tyYrDIa!%YhYvQ)n
z7TV2bGw&J%Z9RFjVLMtw3nJZ{SLfE-&B1g!pG+}?y~86IR^vEo&Wpujw%F^_7-9r~
z{oAkRT$$>2+Y`}bZ-!(r_B+?UkNo2PUe#QjQ+C^vLB!GNvS}JDo+lGWgv!=o#vCc|
z{ygWLiunfjkqghqRNR=xx$7lVwOP%RG+j;#)G0#=PLuY~sCj3?P`t1RjA0wmni7gZ
z&&5}sK~@ef`?A&ZsiMP~Hr@M|{r~V6zV(MU?hCi(*QkU^_uiO)_Se4oEAKoEab7@9
z#zKUyiO1g016;p(G-e6olmF6BfAr;pJ750lyZ_dg|LTA5kN?ao*RB;&GH4?;CT5g|
z%`GQE;QR;Q{i#oW@@JI)s~=wf?Awq2%-?%+TVupcpe|D{KG$R&?O`RtqPEI9jcu*D
z>@BX}4A`Guey54g?e8Cv298y7a>-F_u^ZRhHD67pN3vP9R)(4dV!tiyA-l3SX+!I3
z7WI0yERXMhqntnd0=@a#-rBks*2VYR2l}0RU;6$RYq3Y?r%mAN*Ppxg;>*X=y}BKK
z{i|Qu4401{-@ktR{Btk7czpfNZ0}%9vR(JHZo&kP`n`ETYf9#|@bSUkjqAt#c>4a8
zFK6FBS{(bEuiUz4uSA*8j{;{qC>*+UCJ`&z`)$
zePP*Mp?vQ7-*e-j^JSFl$N%y_`^RBevTPnaeDcO+{0nc(ci2kCxmJlLnVJGAmZn}>
zGcPrCL~3co>Y#Cna=7r=7Hka0U}LgL(b(Y@$f@dXiU$CGQ;E^v9+`ola&?7_P}I1R
zc`F7Ksj&=WAXlK7&YOZYRZPKe9%>uHq#ZSzR`azogz0LcxhmZTi*N2z)WxDrXcWu!ecbm=oxBtNVN)Ju`lw%3opFTOkK@_-r(uM7k*pG`~R5gKdBqwKdDRL
zKaH#4AO2eB93p1X3Ru7%ot%WweDb3onK*|GpM2#N6+5|l`zwFDD#32NUhWVduKZEk8SN?|x_^1)nVGM;!X5lA_wYIP+$!jDl;ApbM`I7Dy=Qf1o@tJ(CR0+NFuHMcj4
za?DF#t{}DroS$@Y^jl{Hd;;EW>NL<+*VY2|=bjx{D;1Hnd-RQPm3QoNpT%#n6li$k0q{+0lovX{}T(0#(>~CRfFZDA=V3L_<$ZE>xld
zYV1}{$t0K~#u`COQ6>ODbwD6eWfWCUu_?jzAd_vcs*P^~QlBMbrKYvOvrUF-MBtjJ
z(YC`@t%fG@G=TDAy+-I7Fo(G9)1Yz4iS$od4k7iR;t3
zk|p{4>tW@4%we*ueMnsN#`W2Dy*^uB8el0(5ESl2edqJb;oaE0b@-7n4Sew^sj_3|
z+h~!E`WVj^v!<;0@pnVy{^$-CsKcdK_1)eOB7ySNY4M@<RTHzc-W-C}GCdZ}4)&
zKt|k;sg%@A;uPze!+bVfmo#oyPLxYEfK+q>D!F=KY`n8$+
z0*6}n@{+AY55?sP)Q@te<(rBbHCiBR-FErt+vg9DuiZX6zB!a~ba-uCpS}5mw?IqU
zu9!K*v9B2~A5P=!>T<>28PetD<+$Cv{qEZ$g6yVyaXy_ko^IWF{@`GqYRaXuHXyYL
z-q!WrqjzVs;g#nc{NY
zeHyiFmhSwdyFHN~{qP0DY?tvXvZH&Usmjo&@hP|;5e%|gB_z?*c;63WKNQ!rL;xni
zVb0_3lNmh&5fG}GfK*wPo?SG^c^ISjp1g1%(k5gy1YlI3lBgEMI#Stbqq}I9orTmn
zM+_C%kuy<|!8sySgv0~@9hd@su#YYMIH=X-whzv?Av(jHvjQqYs@XjF#&xrZin-)*
zJM0WqS!#^2l+w)_Rn3-Fl!2G^3K3%y$D9PJ_rAzTp7NMui~ta0jIm8A4W%+ooJt7I
zty_0uXmYO19*l+&E>F%&9nKzaX0zFL`@{qlw)amSf(2$~_Ko9OH1{P^Yhn%%g-Q`1
zmdc~31(~?HbDo%MRW)@@Y?_9MO5SD}jj@Yir_caE
zk?J}&LuSXUO`I@NPW|%YR%ogeR955AN!HAO7JYQ2)CY1-H}h6QBq{PAM1))L~r
zR?U~oGS*6NoktmxCscJrlw?e4BmyKu16A!}!^~ZKBj>89eeTH|R+hZd7&oOhj(*~m
zV>Dnu0N6tI=o#4?KoLX)hik}Ev=-S}Uc>-^wW#+#B36-H#1IWI=j@P(otiNK0UDSi
zat?_oI`Cnlszs_PCBq?C06>&stVED&a*hFDYsM5sXO8G<%t@+kR;A<+0wDnsn_%LH
z?|mN;nHr_u#rdWm7ZCveQ3{<8
zJ()JanM#8YCzSJQ-H#zm7K>}Sj2D+rrnR4Xzz9If04Sn$Eon6=Yxc1^QN=JeFpwDd
z5Zmq5G|Y0HK{~)N8PYh88v>b}r!f_Z0m;>xoo7dw#y-?Krs|uSxW-NA&E9l1tc&WU
z&=k523{={fSvn)_8btv#&jl)&1p_iBAPS9A0dA8?MXfT*SW8XOhmNfRZ#)yK%c&*w
zp(#cZw^}3doTgst0R6g_vDb>!43`gPmv8Tnd2{&TM>p;YiJo6z9w$+J=OuRAwUlN*
zP8VIjtS!USCnxqAUaCb7}~)g
z7soI)KY=jzsYveUkHUu<-?I5b&z?oeI1tl`nlFOL1CME??)t14x?Aee+3=2DS
zfwnnH8dxu*W)~adZtbTdMVtbuAX+vomD4KA<~+!Fd-yi@S8ab8bW<%cGKX1=v#ZGw
zyJ*}nMu}75wz`%$T-&xe=bf1Ea?WX_aU>#`X`hGMCn-{LmE_R}ADk^$L{wA^(8b0(
zr+@&x!hz+LE|-EWpp_=zWCF7mh82Jz0jUxqm;xxH6(l5PiwI<9)v96vP(&|)q=Wz{
z$jnTR0Sq=oWU5k4%m^KmBfw%i5C_1sv$WaMa5hEQxvYr@i4=hVokKKKtDpn`pu595
z79+B!fL8+ah-i$6yWNDL6%-&s5vXNIk)mKB1&Ct#3?p{M0pKaP?TdkYBh8yZdBs1
z?}O6<&YGNhX4iI2P&BH=#;SS9sdrw{M`$?Ck`o@5&7+gFyx?f+J2GyM+NBM+y`<*(
zzcPOKUpu(0Yr
z8$ve_Bowwv3TkMO2^}Y=a$4hGy}$+Pt+;UcbZ;m(E>Aw)y0SXIX%mO5$ta&W`jd-U
z$XEC8Yl<XQHyNrd@{Q`zqToxqQi>Y*^L%M>X&(MI%8U9zpX_I1%8CO|qGwr7HdCnPeM6{+esZA-Rlsb%?l#-csv+4Dlw;9XD
z#YNlt*l?Bc@q@>U{lk0r?lmFay?b}JVRFum*t@panKO}ux}vOFs7{EQuH0AARAe$GTzxMpWO$vKwkKX{yKr_F&`@##~
zedEos=w_R~@VkHV-z@*~U;ax62M5FU$;r@u^PAtCFZPa)
zumABs{zu>Z!P}>&r`NAv|H7v~cK^|a50C$6|HhyG#V`Hc?`<|O9N+xazw_rm`O1$y
zxqS58&F7of4$sd@x%YVGbhX(WvdMDW#9N()bzN3m6C1CG%dMXu7~`NjDj%U?Ww4I#
zRvBEX408)D^Oc8fYX<|A4<2gpEKhzXwf^I6lYyYa(0C%Jpenhj5L5?Xn@pp1j(s@dmN|V_}aJEkIybnw~t=@$?y5wa&vq*r?)P@Gflrf8TzxU?wqdj
z+h2e522U40$Hra#;1Yc|rJotTaeTG+%fEE*jbHuh`ms)qUc{#Ma^-5xJQECMMZ{~@C@E?_v{SV~ae(>*=@+^AU
zj?TG(fo7BE4sMr6j~}Pvk~c7Lw!u91xjy(gAfFTj7EgcaNGN1vOf+q)H1~rdbMHLVTr2--$7#J9++K#L37{H9l
z5%DgjwAK+tOUYnHhD79*G6)2=X~>(j%S)~KDY$%0J({u9i3;}{le%Vd9KhKyZh;V)
zobv#fvLp895sAgA$d0U~w2ttr=B2+$$JB#B}{s$Gi|UE%C$m>u37<_G6rIU@gUdF5AD&CRRR
z!>GlFqxHa@KU=3j;rjmd;M}^D%W-><+cRxs`Oc#Mzkj%1ZjZijHoukndu&b0n^eX{
zy*e5muif$H@O8NH*~y!~vbWt_&h}vMIpK+nl$w&s^DY8
z%#*I!n;&k6^#1$zpt{53E=E-y$;aGFDcCe^#te;T!CIx(x=w?Rwbn{${^ar=ddj6;
zuU*r+kA3V#Znt%%2C`%8*IEf30F2}4IE>>6sBPPN=jB;#leoaKDOoRxy(4Ghn$Q_k
z5i!wX0)Wmr-$XC}y2oo{wPV=yP{8dpGo1N{6
zkdc{4JUJq|97bdu`d-9b3}&J#O&b;m`&tD79TGV*LUP>UWJmwlN#sbZ4nefm>SJ5A
zBJ*n92j`u0F*G?Rc5G%jS1D==iiU<#Wzw{5+Y(VqCFeu{d(+u$x_9&7U>Fhrq?A-a
zL`spAQdM-0bIzBSmt$HJkXk`PL~=+w0ss+7sYMDAdFsr}1z^UUvkw+x-E21fx;AkV
zoiAmKO;d6X&J()|WXK2vOi!Qh21Hmz094gdlAVP@q?*~ZX^Uy8W2rSubw(Zu)QSip
z0$5e0X9b4CFziC1s|oFwN-a_gXxnw(`*+@Zd+7TzCb9JXy|-8ED=;cGx1H-#
z9&-*Mq&|O;38A&1N_2d+ef<9Y@4WqkZ(dw(Uw-i;j~~4ug)c5HN+u#}Lw@)EuP^qd
zFM7Eun+K1-Jx-4K`~;IgqL(a*e4zi`q6bL&D&BEXkQGTie}|ijZ?Qvk)6I7)y@M6A_rz
zX9>4Eun&Olk|tGEq$=Rf8p7;xHzliRim8^WrK*x+?>#%uh-gs8VVB$Cz26!7cR}BQ
z12gZQ$cX5iueI(PAkOU|x2CE^41@0YT~8@b2G6`T)kHV_x~j5wstUxhoy=wjO;;Cf
zJC;oBH~q+JYtff(ex0U$DvqE;=t?Y+%c6Jp*JN50$TrRuA1K+gM>o0zz
zkN@W{zy03YpALIvwin#k@V$*r49HB;Op95Ss$yo171@Q?s}18&Yh~}U6o3(u*tUv5
z9aB}|#VpLa=q3P=MMbKKRH>%IXcG}oLp3C0BvX(iMP%JhkkETCB}>X?2nK=#v!;+K
zDn#n2Wh6roqV{M30H%SipcC8
zBEt3@1jW1xRV4_M>`1I?Z;_#KgaAHmi+bNmMzvy=DrU)kTv13j@ClMD6^n9pt+^K4X*-=gnSYd&W{4B-31k?y
zS+WmKMlWqtg&tC;z}^!&+4kGufq2(MC$e3@K`8~(5TH1R8W<43Mp^gFju1oy)c{EW
zs*2)n=06fsWq8cC=nji^U0#GnTm=!}rLO@eea7GH`#4>U8
z9tf3P!wxBks1f(l8-l7C?t;se71$KG&vjXbO;Kefu9a(YsL@8=JODtcm?$Em*8xiJ
zokL;rU80x}h`^h+V?R4M9)Q=TVA3kM?Xayix7F88j*F36Dup*!0078+7V{o!UvO}x
z#?3~TS82QQwOD&s=f_pGjxd>qQPU{?G`#rV{?_W&{japh>!khSb7@N++-x?@c|9Sr
zajC;4G|>|`W-*4;kH!vE4HAGD2~ssQ0Et4iO62VyxE24E*YAG#pgA2b-+zpc-+%kD
zoeZgrQ+M;`?Uz6Fqo8uJJf%)f&rip(zkBztBR8GImtK7F{?%ph?WF7E2n(YjH>q2I
z*HXK^=`ccfkRsJ_*p8d+&;6}m`1q$j`rLERX)TJXkhf_or5J{io%d}&Sn4;3Ht9M<
zBVy6S&a!4Cm~|6oCZf&uVzXNQ;xGQ<-Me@9_V;hxxOsTEAEO`o)t%d~b(2P_5z!8k
zxWB)zTD`d7Lagx0or6F8yFU8O`&ai@E6)uGMdetW3%)qWT}8@g!}-)75xR_6Ny*S5
zH5kf}tl6NMRo74;vNY;QE;I*6ma97SjiE25UDP>)YVwYOX%{XgDrS&N+VK5L
zE@6~?cidy~xVOpMrq=AY?9d0U)id)BRdjT8{l@Db`No^Sz8r^--TBabG5zTAzN08~
z5OGNeqX-Yil>(!X5t|kS7kt-c8y0iFPnXxmw=tKx_~;+|#A_2BE;adfGUv&g53l~#
zyYIg99a)I|9Dn-{eD0-*(~rLLk^R_pM12pViB8*X9v73nkG}Z&W?bI5dvovpTa&}*
z-@bQBcKr|j#83a?&;PB{%lBeeJGdbtR>Xi)5k$SZ96-Xbeft}~{^)A89+Mm%A6>t8
zlh7_NFV|~99}jOo=i7tNedeEX#w5eAzFL-5F_pB%GQ9ErTYvwnf9oc7XQyfZ`rc&H
z5^LMGAx{4G&)x@Y4-ans?%(?dUwQR8RlT}6A++ai-s$_E0dtkl|LBj+X0z+pk5%0h*wmkWn10`_yZ`oct5@zx@-TTDZvw^2OTC^(KN*+fyqj3C!d0{y*5Xsc
zh>W9UH1gv<5N~3nbb&ibfC!$U$+XHd`2IhxLI*&mmJy6NuqROkg50&vkjNQA1;`2#
z95a?&*g0h=1P;7+&VZ!WKE`=o2~px1_r3ENTG87v>bq|&xBAZi;(z<{@u(g|1*y3l9&~j(pbLp2bu-%Br~ZU-#%Xl*N-Fs7P6@diSHZOd#Kpl_dm4!OoY64Umx7sa7>G(JkdHH5xlmSBop^$Z<**9WfRpKn!6@fR-wXHjvPH
zMJ$50V@a815wXErB@wUu|6=byV{O~A{61`i8P-}moOtuQ@7?=isAckRc0_Xi8Q~q|{`yn_Zz<)s^eL3NPPx
zb2xFQwbl${41d`76FWj&NqFqhywlx=~=`^XWQfDMRs>$f0OMButwyJ6jc
zLKmweO|*o)MZEfOdTEwQ;^0-l1XmCeybFWNx8y8?qu8Botu~<#p>6N&%X7k|!
z$u5k;^zLuAPrkC+{m}8{Prq4r&@Nc1r)9bFhmeUXn%Cot&o;_J%wpiR%=bguA(3Tl
z*4%(^6(nPomNstAlgkYz6%o^#Ms8>1xPoO~PlkjKPYo-1@`hy?iYJ=II)A=yHd3o&
zG*dQV64T`7?NAB_Kj7faXsid4uU~LC`-cY@Yn_~d0Slzq(p7(y-oTHS3-RVwE0Qk6U0)Zi|
z`_zx+!Q<1zz1iMuf~bU`j6>>AmnY3+_tvf3hlhtd^Mkv0Z{NFr=ibBeiDFe#$te{ux_0@p2_w~-b55#7nK4FFOXD~?
z7eGPOfS8!0Yo?x0A3d2&rzsVLb=S1DxVPUudgcY!CRa*kM(@2TkEIZrcdc4(+VEqV
zIx``HnX+$E8JL|1U&g}0H8C(y7Bc_?BciPV)lkidnQN(l`jqZl3ZmAU=>tj$7%751
zjp8;XRs;JW;>J0*Cx3N^z*$05nq7tQ8RvK%|tK
zeDp-@1H5$g=4LgXo}A6*o#?oY>uh}kM7-4on6}=FEvr(I3{6y2ln4;9N-d?R6!s1r
zJ5LmXma0{m$#Fy5ylXKcV$T3z3cmGbT5DAmL^P{es;W*}uC*FeF)|d}#@QG!1|mwi
z7*#MZQFBmH)@Nr&
zMN~x4J0Oc~!)ngE5Q3SdG~$+tT+5`HoiC35jeqlBIXzk)9PS++Tszpm8ff?J_rJE;
zob}`A*qf|(Aho=n&yC{d{t-W3H~EYvt!0YVOpB~`ti3v~mqJ29bIqi2SnDwGdK^lv
zYC;6lX-5pm6oMzj5FNUv>pC-=w6PXQV>Xkjf`$&ADwI+XF@)d%0jSoxl{dO}I^rlI
zO%t{{K{Z9)dTWfC$pBS}frz${=caY-zB1)7P
zv(#c`433oceZV$GraYe9J6mmP&FtV(8EQ>xm3fOBWCTFeJUdhbbRL;y%TCFXQc}se
z_U1-48~WZfPh*VHo%9KaZCrYq#b#2003gPAyq=p3bzA`t!TF9GF4@i(T4h~}Dnw3F
z(P6hUpO_2n1mm<&9F-(j@JPf>v|3g=RyIWg1NB7nx%YAZTxMg_UdXp!IQ`wrn>Uo}
z_s-JAol7)57#3#}OCAIa900)6@Lp#K2m~Jl5u2HUUxmH@i+k0f?{`K~rZ_2wQ;pra
zR$2g#3qP)dPjNbB-}ch2)mkVj0-`Ev1q7sus48g6IeQ0ajHJ8F~tyy$eLT
z8dl4dI&Amm8#H4D4;Er@E*OyyLQJ
z3SEtSU1fY5PF
z2|%ljVrJkf`HT$_L(GQ2E+C?afK&oSQBx|Qpo)ZqhE~w(wp-|L^Wy^mgDCCu
z6}E94`|$|WNXrE>Z5)qD5uwQUn>Zu^WH<6R(^V
z`d|IU|M+)qJ!sV1{_e4^Q=cX&Or4vxO`%ySis)KmKm?6dYtPc{?Jm=Lre%@F)9Ux!
ztc#LqBWj_UXeo?>Cb<>>0&)UMRZPGXfsoiEk#bPgKmFpf*A$@Z#(on55mj<&B%DoV
z+a*X7!kBs=P^zWW&y@GVbmIJ-Tko-he%-%xsP`8~@2T$iFc0395+ZMM59sP1-C-ART)y<_t2+R-R&mb%@<(p`Gr#o7k`^b+{OIV(bI%^iSc!ps
z?aNx4iK`ZX<7DoegSKN(+1r^PeDo*povZ^tjP2y7KlRZUZhqv}?H_&nwXdH)db9Ld
zg?(r}345?A(Ua)qoXd01Jv*69-oJC}?YG~ZPP^4=MInCdm5;so&YNR?>(d|pSTVkT
z@A0R9;^!~0f2;qt(7lgezVPCUpW54@Z+`Pje`I>?!O6qL;*1>eJ{D;GcZiO=rt
zT}Zn8%2&TQ^y%i!7Z3I?{bT>we|NJPR?AJ1Je_r_`taey)6?VcefK+``OIf>--mX7
z^ZMar$n*Jiy9B*T4K~@m{=EmMXXndDi~bjX?pLl|c?JSqtNGcT$tS(O@WPF^?>_qW
z>qoGA;d7Ju$69t;{)5+!k&h21VYGa@nQ*tKWpJ*drnp^Fg69CPl`++lOJoNC+jL3*
zFjM?51YV$3whscUMTTT$<6P5Dr(;SS0??@se_5Cm5Y=0nAquahu+d)zPM@$>QE*M9yB
ze|6+Em&bX@b+kusG#XL%-3~7b1>LbRLTSIGJbrasD%_^Z&mL0RupE
z>Gftj(qOXD_~W^-t~7^4G!}H;=yh+S&BA=l?05UXb^`U)N9A?!~4%fzU+sQ=c|#
zvf0Jc|6qUb5?Zv)g>~{9ORlM#&K=8Mfa6v)<{32($&)z|0CN
zY6SwqFz7J$BdIk2R7Z6&yJ;?*mkWRPd(S-j&f(6Lqsh%|o~$`)4mS-puqgd}b#SGu
zj(1n<6x*>>To3E9y3v`f@r6b<1D=m$wM;vTGt$|B8_(=qV9z$@q(Ve~?_k+_9xGJw
zp~(h9Nf>c56^DouNCQcBwQwFF4PH&w)!KGI&eaG>$0whB&JNP+o5lK^qjEU7et5Px
zU!ISVJMZ=d0USB@B1I%Kqaz2V6`*SyM=gRs9|sxNMbdg$Tb~F+-!C_tlNe(e(^g%`
z`MhhVJCp8IR{c7-cKf7sr#Z`&SOqP2+pzPlA;elh|q?1QsQ#8
zx~o=tkw@$0|1~Oposwh5yvra83bT%n?&QBBZI1n3_qgg@{yj8-z9t$%7wLCO}}f
z%@uSGMKt9_j8R0*ECi24wQ3BL&E_N*n@)UiWJTYEhA~RMl3Dnm|M6PO>ssIWeBL9L`w!omcC*vte#z~yqL*Gg
z2!3`tuAJK)Qt^(aEki9=r(wo?eVWfDrS&K{eY+fPaN0h8a%J9MnB*9wWbBY;k#nT#
zA>e>iSs})@nM}#3l!E3&d*^~e1ydsQ-g%~`p-n#!BN!1qO?CwURn4`0fZ)~-{4}MM
z(i9>C0j)KwYE@xoCfa&Lp0540R+B9^Xt`W&zkIW8I|$dU{io)wKY#%Vn3;h+2P1;5
zq_n+GpSni=zz*nx`TPMo4xnUFtB6R%%wVb_CDrP^XJ+r2nYBu-U}oO?Y84$elSvF+
zwQ;>(tFaG0gwQq(fYp)^Ed)28&v&NH-X~rfhU8tG?(6}8i1foCeHN(>C`N~dX_T64
z1XYYyuIiAdC#9mp$+-tme|KYvG
z`V86HhS8vo>pVzkFIKgbwMv`Q1SkUfT4HhB%({8Aw_L6Il*v!B5t6@f{JvDoO*P$U
zacsMJbGF(%ZrzmFnVBezR@I<-0&-f-F?%0KHJ6m@0Gcv^b6#=+ROcKaDWD-z^tK+U
z^ai=afNr)Y4T^i1S
zLo^!_f`$3U2v+9~gtcH~3XBXzlT{gqthquJG$10e3WVa_GVaIOzOjOJ>*4H!A+4}sdZ3OLW}``
zAXTmlH6dgMAW{*ts)T0a@btb$U@E2xQklc7kpz#Q8K5x37dBFee)*zd1ScYQSPMhF8(@YB%F%nU&hO#xBN0MU608!IKG
z0ZUdbf@6kN0^FTXE1(iq;6mn@Jx2mdu1VvlIeP$ah*}+(XIH6immsKOYESd|&{Wkk
zli_x8vTZ1Ez`YbE2aZwE6aW<+I7TQQ0Jfzx`T$^QW+Mhh1~Oq&1|c9-DOv8`m9a2+
za6YF|4%M}x9^G$ge2VWBFs%eiq{NuwhFm~)B7(p~WDbjq=JqHLo*A7Vn;mp6T8a70
zZBl{Yo0+UuvVOo;+OX@}{nlWP8-$GlZ2G#SIE4Axdd%?(=T(O}ddjqG21*V9udEhh
zv8c|6Zio}aJ`SUmMEkMboJqm~ZBwg`HAX)ob@5;j22VTQM@0}RhzZ#bo9djkQy>ot
z3@{C3Y&sM*P*fo>CS)Q+Q>?IRan`N+3HS|N`nMjXf9v+!GyK{=zBgW;Pp{p0>EiD0
znC<>sw^zHH`}gl(zi`mTW@wriW9TO9u^yeBtV=FErqlb!V|{PAS`gnqK02K3eE8;z
zQ%w7{e*E%~f)}SZC%!RUF
zu1^4iw`Std=wJSaf3d9j`{~i6M~^lY>U{SrZ{PmXYww(o#D2Qd4kqA5ZLAQYi_J2R
zi)1&#^k4kdPhYH?fBj3}|H_lRA}k6G?ckswP+jG~-uarU4%%=-zKV!3A)=VoQU%P+
z{`vplhrj;u=l=LhJC_>fJbnB4-p$#Lbi3oAR?Td?0x*$sa(cX2oXxwv-78m8Ugfj%
zN5@avrv6$xNc+rN6aT}<{p}%E3R5%B*tX%k&)7_>aLv-HfAZiwzkKEHx*lzc++Y5c
z@BgJg@yS>IM?d$e7j_+>^tm(?8*Q%c?Ci}i09U2KDJ}a|Uq-^(`T)6}oGcaKbg}6B
z@tzCqUWdR$Y)Z)e|ecXoC@o$a)pFD30Wcm9PHr
zi|;-8uEmQtUj6(RzVP2@uU;Mg(*O1N$&ttI(&2^O-F+9sH^2Vn>2xaH{--|m>5p8!
zzBoS*U7G}^JF_vBKzMq3TuXZGwb$CV+27wEhGDTdJ9+X>TRu@{k;8d@vOegh@4xoi
z5=QA=bL08{&7c4CS9YS6{@(KJOYgjQ?{+nK>jR
z*lu0^z81qgkW;WyWz3|8wQk!_XxdeRs)mh2ntB!RP#MD@O2&vTL?nl5X4RoK-C>Yi
z^CD|q*)(U0o3}47fB5PAqs?Y1dGE@NkMDJtpZnNn{l$y@+3}@=pL+A|gTMaz_wJpB
z_eS^Car24ipQYEn`_9?DiN1hSPOH!)(_nB2~CWOR@
z$Or%g24-psNPm~#|IgqDAmX&(IS1@ozg$hFOxX42+W9jAn;%;T;O{^e{NeZqHH^><
zO%%xtK*f|?h^>*48Px0uI1oCtGNwU72pgSp$w3x_YbsErQ8|R{0s?3uta%(gV>{M%
zIIA>vSYlnclQ*BiN3?q2CI^p_KOlP2KI=C}FZbVkPH%sQKc&W3VtQCz$@BT;eYpGj
zGj;yX;m>|;`te%q%IUX1b@t8A?3cG~=bg(xwcdYz_rce%-TsZ|_I8`u#XLEPo6Qq(
zck-pA`5)bXm4pm7N3BfCZr~sgTP}LHX4{qZ^
z;90$Y+I^I@=E4TWisXU-LX3#OWB?Vg=~WT6){-)Ap&)@_5*N*(cLbsU;6SR?j0FgH
zpsNVtz>t(2695A`1@;9oL+>!89bX#F%Z(+Cu2Eo7)#{89$eVKroH-$pcf2j@D3G8Q
zQ$LF}fs~v~)ew;tqz*mxJ*jC5Az4F+^WC{jhjg_xFfqHuULal|A6p3C(JmrgsnM(X{RK
z+DG#2xnW(kKdqW(PbQ_NJfl(C(~bA3>LwF{>q-obGoOyrW>ZQTQufS~
z$;1)oJ`HJ%)yxqB*S)cU6I^UFlJ~&m)MaVSgBt!2!C*JpJZ5`{$lPYD$scYjK
z?>{+fyNv7BHXjHvu&G2}!JZhTe
z!r`-oaj|@|O!w)$^FEs4mSQa;X&iM(%R$!bI_)OpqLsC|;p#Ix5AL5GJ)sL154`uo
zFc7)fY!-YP$59}ZQc}(UKn6n^0l<47f{!tdkV0V3P9?Dmh_)HVS}QUklWfVg+e)zk
zDg%~r%ulPH0zNQpC;v8I2#}BjP(*y=xBGPagF`v`2(^fasFdw1GXYS-Dr!Y4gVkCA
zjRUJzDy*tC7p>AXO%r0uL(UmJtEs3I5&dAXi-^-Uq;aUl+O`c2q7O9}w@o(%5Uo(8
zsJO_{H;hg-Kkc>#B}B&@yl-M0`dms`uh-kxE;VnV^c(^^N-6dIz)aCk5JkIGY|qQ!dbuGu1{2z3`DUEo8{P3DIo;TH4iz2
zFpe2nnOTcRj(`S8>^zu(h-o<9EVr%q%v3IICbMm*0Y&B*tu|_1WwvC#0^m2_bVL*X*Qn>
z!%#$08fvY9o%a#Z+8BL^HWUDLjHb1WmB<{EfBIFJBl4aB2oV(o(Y$x16e7xb1OPKh
zdDyPhswn`lL+21lQGp1FP*p(802B&4bYX;(b}zj
zGQde-l)74D&g3|$&2}cK$R_uu5)hqt0#L!Y-H%g8Vkk;rs4_~aV9jdLj_X{9V;_bK
zKRsTFWzsTxsA)7?z5PVk0}5>I(DZcvI066@d%6c90|k?Wu8}mZR}CmSACV$aT@9?T
zUc4FiFV;965=QnYlEzhyO=nf4k}4X2>b8<=ghuRH#VnN~=p2Els%q0eiEUGBwo;uc
zBAVyXWCoppRneym#!|IbM6)2kg!Gh9Y^Kf_ft_%Skw8IB!73m-=hzjIR0^ni1iw3-
z9_}JT1q+*07b&gQ&^9y)3f6FBQ6;TqB;WJ8b
zPM|E&7cPJp06aj9WbDv+9G0FqHo<#uAf;$7rKACk4UpLrLRIdSOO|yh-lk?&N{Z_f
z=3}+Gx3{yqyL)|lv|g`EDYW?@9L)$3XZ7-I`PTg5>rzK0p0rbKb}e?}=|yx5QJ?*HGAcQ$&#>G3@;auGw
zS}uBfv-k_Y_xhiD<+;y({N=aadgsn@KXD!ESO|RZ)woJ!CiW3Xfg~4ERU%;a0EP$%
zsAi8d-#b1^7gtTUFMh)FMrGZGh-GF3>vjMAAAI-x{P@YyLojLE`17xR{_xuMx8C^X
z*S_}EF?6S=?;ZyFZ~o)I@XuVj@_JwY!q8#3-H)g}M
z(>{x|Cbah(KfLw&yYKzlM_&5;%g@i<=7k$OV|&n4;9+o`!v;x}t83%sQq$w($ED^n
z4*Rpc8&{uMt0krxvcupGn(^?(ySHDD@zH#D%I?Z|@c7A-N8rZUUi;piJDW1y+1cN$
zhsO_(_xJbDPL7}4ey45QZZcauxpU$0;tRjt8aYc8!x~3;)TnXADupU{f9re
z_0Bh&fym8(~-AM9VgbnV%%ef!&O&Ck8?
z;*Jk7bklBr@xnm}{`PzCjN^Fw?pvRJ^|Sr@?Ed}x^Xc?we(q<9c#5{J*%e3w2np!2jkqe{l2K_2&ZqumAeD{^8Gj
z=qGPnT#f55e($?`)7`ze_mQ1l_p{fA^~qZ&cOO~)pZv4`?Azb`#t**z<%e(n<-0fj
z*td2r_Jo(qk|B?xI5P<3yC*rxEUtmr1CqHb{`fPgRyF0&nJAM4a=kH#Z
zCzvWv*8TDa-~Z~rvv~WR3m^K(Ctv*||J*PC)V$k${>I0Cy!3VpZ}SOdiL}OtFHfGYKQ+d8l_0^9Am7t=2{FO)pE8tUt^gvzWeC?
zqdRZ^AKv8!Jo|+QfAQtd{9OB}`|4l)_s=c7@!(%Tww(Chol{qoM$bYF+#NsD(z+a3DqsmPoMC~Is|*-i@xhoKx`0l#tg*D#zz3=Q@b}a=upJEe
zv~PO*`iKbdpX*}W&QdWymr}`Kl^&b%2D2(dwoaqvxGhn5tJR|q->tbJK^vso&^HXH8GF%LN1T{UxUXY=)9
zfuN-O&CbTp9yd2y2h+tP4sccOT|fEJa+W^gJZf=n!()iIeP2G)swSf{vLFX?MiB{<*w678Ee6qE<;w0h}Xb05Wv6ZP!r+VF(!jRKSRgs48y(SDLk8QIm?9wua+v
z!9dMqdpT?^#3E`|R3_*kQt_yPBD7?N04@gfRZE3boS}0;5ek42_GBSM0!k^>G&n{^
z&{`%#A_J5@P&-{px3+F11(I$`qLj1MQ8AnM8iEp!Zd>LWRbXc(8R2V})i41Gdv)zOz4`T^Mg~N;z@Yo^uNjSo2{&T$J;puv%-t
z6~m6*PwwK-bhG8dxH^*AwFt9e>=Pg8GSj2G{`?%`4)w9|qX|H@wre`%QcB7zArB(R
zibn#@*9ni&No+Z`4y^!8N@AjHzCx|c)&}Ot|H;qn
zxo_RR<+AMd0n+ZO)^W^TbSd{~7@<=lcFq}sh`AUQZL!&a+BCKuQuK}GBBCi;DTT6(H~I?A*Be(!s&uY`V8IKSbo!vghFYzF(dG@cjHC0q^YWP$m&kWi;xC`@way
z?n3~uJYSCKShPs)a?L3fG9q#SP)gbc@+|7uG#xlP+iXNg>Q1S{-n`s@_@JFMlW7Zt
z{WvHXli%Ino6qOR$H$K!Kg>BdjgMhcO36uv)bApOh{QuR3lsJ(uKQ$+3bdv789a@l
zF*B^{Ttj9bzzd3qu{zHYw7|BeuuUWuskO#7)LKRUmVscK&rZN1vh^0^S`|!HYpvTr
zEi*JVMqjkpC?Plya4w2zNjcTgc?#fych&KaGuD~hwp6XEGWLxRZ5yRjN!dAfVQ)XB
z1kezXn>s=)S(+Hzwym`;mx)Nd_e5Sq(wNPx)SOEdky6X{Ve7mnA`mGhZ(HWgCA=~=BD0uG}M1%niq?jp?L+3nw5VFs~`>kpP_S7G?l_Amx3z(-O0Ya^%
z);bGK$~AZd;+#Z*Jq0x|B_z(Ztj1hLnAy2t%0ubRL<@NE96bP(DzywALn+l!5Y5bB
zO1=qI%7aI5ot-bIJ5%57<}z+lM!>eEj?QY?83tD<)VSHTWepUrA(h8uGp{}nBBN@=
zQX`bjI*X|HAqI~s21#VB)wDL8azCy`Bm_6@nnXeF?ZA5VzKX5b$Yc4iHfw0%9h=6$VOuIvZ7uh>)>r0W|_3bVg>l
zWn>5dc<+gDOQ+Z#9(=G|L?%*D*}l80roiOM*IIMV24W`8x%qU$%mAQhYGP1{ZTmV+
z)693Ks;bo^BAe!6VL0>~&1DmsP7M0$OBQ4b;V={zoZqZh>qQ|bPoCV_^b6N^Lmp2z
z#~WSeoWV3S-1;Us5CPc|mf>m2j4`$58CX?INtTOdFf%Kqr&>iRFnOQ35TJmJsfQzq
zP1DUL&d-b}36yc|C^$DVJ5b%M7wdIGZhSjeV7n4mWqG_fLB+1A{rb$YmORWFur$L|
z4THr)vddth&3+t?94Eyr=qw2hiA_C|wvk08ORW`5O)FaU!Hug?L{OjFmjzX@2vS3o
zC%f7l_-fr?Yyh>&(V&wjWe6n6k(%d*8=W1Vyg2Q5e?mH6$DWe8CO}lIXk!H!VRdHx
zV+nCo0Hl@>##P@;v58($Rcjth#?XdIbmvx;SU_tX9W!}$>X{H*Ey#>51JS0$adhK~
zL}Wrlp(!ht9%V#UU`RH`Eqj1aK|#>mRyF`*CF2pQ%ViQve0bLtOfw~K>W^~MWx+sqy*P|owV9M6a1b`R=h{sZ>nv5{4Jcuj3%jeOS
z9Tt>QiXaeBV5+TEwh_ivggSBFsuZWrvnmK^F1kq>I3hck?|Z0RM-Z*ri=u%5(?Bf0
ziPYWDWmmHqsF4_JH5Py>Uo;F@YB6u(Z-Nu(!KruoKn1wiVo*GBEs0Dq4)S&a23s}}tMXR};Fhy?yT2%$Lij=H%qGCi2Rgkx2YA^w?
zlEx}%r54lb9fyEoRw|B5^pRo*A#Rfay$P!1SX4w)(llBctz@bd%_<;^uoS{VGUBju
ztF;@FpHgn9)=k=sE_jepi?yXLbYUTwMIcrcDl!NeI#q*lUfH
z391W?g)X$syp6tkGGNk@7bUMMmgd`gZLshIlnb#3CdU$hZtOhmyTi&gY1B23^@Rp`LSsaUsTxsJn~
zO)lMGA3Zjc^?L1s&Q3LUvF$<^aPH~3+2k->op~Spsx`VYovXmesJf}X|)IEn~U22L!bVMCy(C&k*2{n-gqtbCwVwOIeHKy@9jT7j$>$>-M#%w
zSFT^VdVMxO^ubj4?DTxArBAvDgyT5QX6@D_RZ2-if4(@I&*xI}*S`9VvBIZ5^{M%M
zzU2`O!+^k($%JY%ZM`QNa>6)Si=6hWxH+3lFSW=Wf{nd*_=Q)m{Nk%u$1&eIN#8m;
zdGO@u(YpVwcON`@uzc~-_1x|J)1N)~cfa-aUwQkR&(Av(-G`kRV=9lpPox?Z^yU$<
zm&buq
zbDz9&_29kt-u>Qd-2)#jy_KKv6O{@8cE`OVTuEn~mhtXJdh+jrNS
z&4tU&Ggn{u6Tk9L{F8s?&wcYdU;Ar+<=^{o6Sx8MHe
zt^1#*)1CeK-MeqT`_}iCi^YY*%OCpaPh7irwaI;qJa6NdfBU!YJ$Mtetk(k~giw#~
z-$Qcq`OajvGn-5S9g$OmS3mi=Uzki>g>T-yf7DFVke;Oa+>4+4>5pC>pWxS5Z@%&M
zFPFN${;^N|;>Tb4-QWNE=XdN+ec~hN_4i)8`$t~=*~`s^4#&v>E)_S6;ctBLi*McE
zteZREe_Y`3Zmyo3#OcYwE55ntYi}tP({0u#;ZxoKimJ-rXEHJL
zwHkAXfs0f%HC4ylHmU_28L~7~Q8m{<3aHHN8N7k!V#(IxmU~UFJ`zrp8S=+`fu-EfBBO?`|B@X
zeI>8<^Yo{W?mvG0d*8bH?B~Yy`C*)#-+wrN{zWTK_fgx!{sT#}e<&6Fe>2!^(YtxE
zTCO)67efU+K3~kZ3r+dh^EdS7%NJjKaI{WWn}6}f3wyLV{?Y9_cORdA@79A>*B4{G
zm%H09{K*x?PVloA_Wx(s_ny4=H{ZMW#`TNza7>S8FP*X89ZpjT>yV#B{~*5by1S94
z92+m|lL=!mH@Nwa_$SopuibmRzie&P(l0y3cGB1|F4YC+12IS~woUk
zTZQ27`Ai@{ldXLc0D#oC_WZxw<@g8FjkX7~{eK{qc5d;oNg~pZr8o+l*&6%X0+If3
zCjXaYAR++LHeCwQ%*0@XB(O?_fEs6XgRdphJ+k7GlN
zZrbh~PFDBlcfS#-k7p+?T+oXzozt%M_b!i*(y$qMrjyqC^Rd89y?u3xCvkVP?w>4%
zq^n)?J=>+6F(n{F>`G^PS153UA=Nl!xcX1L(Z9
zna451@4xFW-hETRukxc?_lsPBn=ieB&E-vN?Ip~V=V>2VQd7gC;~B(*PMzyZ8&pM
zy&0)kt=SSAgJysR%!zMP>mho`J*k3-mTKg{MFTc+N`|%QCe>0fj|R1(7#Wt9i9G@t
zfhph?h=zjXi4+7FoO4!9#Mm%ks1`H$3Ia99IzZWI*(_pA*g^yCW|0;vBaLOV)4Avt
zHfn&Gx-N-Z30RGh+QzAsKHJ93i8V)XWr!Ik?c;J0+EaBQZbpCpIJ*M4)19x+LwhN|
z_u+8<9G^Q_9Bp2^$FIECyv%?#M45EOEkYfgNRlC=Ybg>Vm>Q7;55NFG-cR=DLrTy%
z?=+0PKR+)y=Scg-@}LSrqM*uv(Ni7P7f$cM*&wc)-+W~=ovjY9_2tr==k>YcC&O_6
z)p@qfYMqZhRE~#kFv0o33ZwPA|b2n|gccp47
zrBrxwa^8ffVtpQs-B@Z`ZzRLs^uYP%;tMxtZA8$1y>VgEHk07uYS}Y$(=0E}
zG);5m+Oxwj^nG7y4sF-A?e5;;Y|=>?Pfw1VbHx5^xxz3l<-C}n37Lt?7GMhi)9IYq
zv(xEx>b;LKnjtd;bPma$;#D0m1@93t=j_Qx_q4~n7TtObV&F<3BB@j|i{4XY5sA26
zq&?-Cmy!U~IR~C%j74Qky{ZynbUuV|K5P^b8JO82Bch@j=!W&#XP^C}KlgKg^*{Md
zL~KGNfPfLmd++!6_nmX&II3#SHRl{c0D!HGvF)ceGpTB(A~nYCPj#zA)+*#kM4a>9
zwL}!dj2-rUpK4-d*d`E^ObpIDu_~b3(p=x?Qe5z;0>QWIe$_V3v(MfRfPJ0CGxA
zh+WtEmd&h6Mr&elj?*}9h+7eFMyHDhmoDABe*Na#@4bmdP`S!bob+X%E3|Sp+kwPn
zF-A>Ra8px2A5e;7Uu%H?%C+_Y90L=9)=_IwtBmNqZ`*d$kGYIBkHa7-4c;|O;D`ah
z5IjIt5RojE$VxUd8&lb~6(WkNIp>4A}~nBr={Y`nGMO_lVduAr~9^rGe`nKeEuEXly6m
zgPYE}t^>l6EtLlPrIK*)2Bl~pIX5+xepw^~29FLIjl~7%^O}sKVaSrk!N`2$&8)1B
z91;T%D6$XERc1i;7zMj^B^jg@n9b+f&}vNWu<6^K0eLmKPFDsqF{m1@V%*_WHB~p9
zy3MirS#nch8akUyTCS>qk)vadV}eyW8D!G5UDqK}PD9xYc~Ko>ZFjZnkO)js87q>g
zLrT8ejH3nzSfMC+pMyj394D+)DGJ7F9RLX^VY8i04nW~42LluU$^16Lr9N)GX&z_M
z{Hr)x(K&-0z`6P?1He(2(YZ?NAB2uJt
zZd_~})zE}uwPHeI9nzCYaA1?z>cVbU@gaih`J_laynv`2;^k5?sI5ODvFR~
zO9}EwrVfFITf1I&X+6p3kd~=!>8aE;2GK2`H)ohvEhJ|N
zB;Dy=eYzPl1(T;KLuP1V#jI3B2u<)!L^TCyvlK|FTG6orXQ|frGH(2^Hm#-=Y9=jg
zf@qA^qDZL#V9}{V5n(6PVoh}nwVCcl>c9-cj2hn$HM$Me;7W_gsw$dPGg##q&GQ!3
z<#cR4AxO=s({tkxx`_`nr@gO_-PsbJ3^nd`lO5BXp1eo-q-!S|cV*-EAg_%r=d+2=
zJKoXIpRLJ9-nSFYDVS*$MF7v0P-`KB8fx)ev500#gO)@>
zP>WGRelDJ!x1F|iBc)ZMkn3a@N4+}n&&-+^FYR5Mx$DzQ7pJ(#ZaSSh9{?!E7<>@0
ze(3AwWIEpsp#>mKBjl_(X)UV^-UsjF=0=WJ>$
zzw_MH{d@QBz4hKZF~;lHuZ^j`_uhLSfAv#G58gXFI}6dzX0s>9n|t@}Etks}&H;2ob$-^JdO@xjMP|;wPrlX|1(s8bn;JR&5jK^ZD}pIQz+JHKZ=~xp%5D`uPUZ
zxVHJuns6MlBT!l8JWi&~i+g=Kjf?%>y@yimxeJH?@ehCSzx%x(b;SDz^K;N3
z$>B-Ch}2{#&f+ZNepRS+?>jb|KGzV-0tCtHtuk;}geG{Pfxdy7)(bez{&jkGqFg-}Ckxf92nK;KIkQKY!!O
zjS{E3kwZO4RrT)jWD21hMyO-X>$8cQNqLOGO%EEOf^VAO{b*it4i5+VXaDl!Io>IV
zr-~oHIRB@A>iM!+F6+te<>$Wf;Pknj=7obfsoR@O!0ge32fy{+```H5U!HsYBR}!^
zkH7H3dcE{MzV!S}*T&C$_QG>FUTC}Ut#AGQcl+Oe`1t)>_s$-#&z8$`3WhFF+qxKK
zEL|!mBOrT^_0
z-}(d@cTbl
ztRB4yZz|xg|N1|2`@LJg%fI^zzxb=$hR2KTL>1NypT2qRQ<8fZ+`cS
zKY0E9c{{!K@)h9n;LRWWAK!TG%fJ7-|LuS5zcW^O{NU`yg=<%??k`gL;qAw_zkT~_
zPnOSKy0~-kna{j<^Z3?#AGva=JXw7Ijki9z{@PFPp3FTwdZgzMe)rBZpSySIr&{V9
z4FDijq!uz@N6tF}vN8XCt%XtBavRCBLNc)`TUMl-78}_%&X^U8ID-&E>KX-e2v}?G
zQ)cl^LsV63WT5#4mx}^J#OKd{@0HbecIJCO@vDDS&c-kPwcj41z5K~v=@7#F)
z=YMVg=l;@ro1eM;{>_us8?XOG9fx1}lmCe9I*;c!ueTrH|C!g{d;baPkM6v7``+Du
z>Yw-@&gZl()%{@=_3z1GgmLW6?Bd14Y2GOY)MGF199`Vv)cb{pXl%L-1uNy^%ORMuqCy^-{md-
z;9LK}ux5-xn-j-jY<;N^D+j022o3&k`|}@TC;vfDjsL$MhJb)(U?1o<)pSgyq2f6(
zcoi}bl2p#mHs@!TPre$tZv6gc_vU^*xxRR9(_Q)c{uiDJ&4=&&`@8z&_u8L6`_Qi^
ziu>y)m-F%NPTk?*;*R^`y&p)ehdk;1-ch^%E&j-d@Dh+b>-B$V(soV~^|Mt;xsFx@Y7{lO{W@pYGN%%O=r2
zA`&2?sfb7+12WR(X`rpI*&&f33!xxTXjH(|U`vau3ThyhK@7Jlw=o*1p;WAzSsbx5
zWP`1HSUmwb?;FExqFD_9%pq}fV5m$Ak%DJpDco>DTb42Oh^ArIaoY`Plwt6yPOA$%
zj+Uo%Y9~DA%-FogX6y&75JF_{
zt5y>1+Ge?gwRcWfqc|f)^39VumCd7_yM&s$=kRch~J)+MEy4&h}1D
z9QS5AjLT9hAkTI$Y&L`PS^+8ej@@dxUY~DvX1n|I1K&9ZW+n{aeFURgiYoX(W;&(~
zY|*h|(C_UXUbt{s
zG;R7*^d6u9Fc5>Nb3O!zh;^aK0)k{qE}{`Ucm0{5PRMG_08i1uW(Gj#=cB@AHk%zB
z99+J9HRtj1n0
z$pkE@>hsUPxU)0+?svaitElGmo`K!=nn5us5VTQpD+
zFbm!jQ7ReKqVrgu>Pe`e3TC?PUf)z9z{UA$KASlXwMr>hnB{8L~4((j%>Ibw&c+x31o
zpzMTZ)dI@snZ{9@SAgL)w7rcgFb(`c#VkkAO#k#mzcpH8P$
z4Bd1(ot~ec)>=J-h>(#;F+%6aJ5npGN=zZPTMlrHvDVDq*ILQp7CFF#-g^YqA`aDg
zzm>dI6$KEt!U2kCjDCB{M2h0m>8WNDBtvF|3MC`oVvWU0XfTx5o+H%Ep+obYA
z%$IJF*S?9hRx?yl=Uj~5u{CXE1vA;sc69jT*R&7P#Unc@TI%35wv8W#0RRF!$DknI
zlGV(XX^gd`8&{qgQhN8+E%G2}f-otW4RS7G=$v!J1n9^O!$26FivVUSh>YmLyx6Fs
z&Ux=yM8`3SRAydgaLyGqv)YFE^yc84h19ie-0Mo#e%PSH&}m9z*xdu>)!2(_Y#Y}Q
zD#m8IwhQ7Buq-82$7JO_sRk^{o#!#aR9gM^)o(`_{U???tm4M#N21x@}G@ojP
zZW4UskR1|}=H)unp1?HcS}HpiCT-hw6r%K;%it$-7uhWrc;|g&35~bh##t|#hXH$@
zW?eO9-MPI4>uI?UC0E+d%}gRxNtA|G4bi6-h>#?aujuhxon*2l6RpA$Ry0wL8=>{paHP{v~gs$N-7|V
zNJzmsug)PLDwZmi2iJArXiPa4EseLXHFK0|R74c@mJ53H2I2`-a!$!1@LCT
zB(CqX(`J#&;-Qrf^{Z2Ua_XbAuA#*(twse~a!!SHaFo)pd;#uH!_-WYc$!~q0L+Ym
zvlaGH2vwC^0#vCREG2~o0Ncg^05iI#dwQ2S*fp~T4S)dS_8I2QDmfJC0LVFNor(hh
zM9dMGeJN!eMpU5#~k>*+r|hbunmZTu>1$)iaW&gGs|~GY&_)`7PhL
z)uW!f-Ju4-CeCKn>TY>p0?babh)CB)RRMsHFTb|dN#9*R4OfroFwZZgFkR(3$wemy
zGP-_&=TB_GwzEI96HRSsItPX|JL{!CfmPpW$gTjnszBfyRvXr3d1ji5>l_KxF&01P
zootlY;!d=UgV9)1m{%pH`4yY)?mqLbu1>tmTF+MNh2v&E*;V!ACg@hCrAUR=HQfO)
z0ZH(kIKw)I@i;8kJgjPeUWT>m_)|r!QznGPWx3%ducBW`;+y_Vh%R1tN8ebpJ~hI4qpiqncO<2c)irq
z$@1;QljU;LtF^>vxP(Ti1w!-DSq!-jwZ@%&a0YegycZKtY-bmBEWk9UIZu|lq1+Z2
z5O9>)#M)WIe$49xr#$mysO!ZgdHApXqrdjbmA#6Bh1r3nCGu<-&fk6O^?P^UIXOLw
zp*4ul-gsd=JDbiA?w#EK=%;?_;P9mw;CT7o$@%8&;c1M#9!^is9Tq`2P{LGE#hoN_lhH=9ur|;gr)wR>l{^aM^tKsp3ql1IP)1xQ1-+AM$*S`GC
zum8;gQXCHBf=fSXr>l*2lY{54UY<7XyLTQx^Ww*k&ofBx%lh=u8|$*_+;p@0hj!Y-xcSbb@n8S_w`&gj)B3`N`DC)kr|-U*4t{v{!v1XUEB6lV
z{z=oXueN*jt?%wlC6AV4JW_&3x5_nLa3-PL8l7TQZblZx$>ziS9cHhu3rAZ-roLm*RL;^
z%dh_47ni3ef9_BJxzBy>m%j3q-|D*d)lYu0)@chS{PKDYa8pZM6+`>(z6
z!&}8Y){L(FPe1qSpZN5Zv(&!!_20TUoIgA{edXlKcaOjGr5FEfo$N6W2IvtD$
zobb%CF0~41b%sb_CW;CGWc2@G!R!Cszzj-RLMnxbfuV8&8p}dPX+Pj%h0q`%vv*d3
znYyX;{d6*87lxcSm3N18X*U_os1rx0v;8OKg|+zfH@ErDj2J5*UAWJ0RO47?BDuS|0Poq{);)DAM*wQVB1w>e*o$HbyJI-TUkp{)t!XpE)`@A|H0n&Q8ys?1kx1y&Rt3PcQ${FZ|p~
zAAa%r6~X*xfA*&|4eb9(Py=Qpbx0q5d75($R7a2|;?2V;`S{`eOMf;(!YU_^?%scU
zb)N4$SucDp5bi7%k7aUegPnVStW{zc-BsyF%PH+%B~MfY4};s^1=S*IL|_H_g-$vF
z6oc*3mj2e6|Ib%mNBUd+{$wO(V9I1hf5-Jj|F2--f48|;D}j#y(3IK$023I|@Spk<
z`~$ei{{kk-f4#@k2G|d73u=U15w$7;IEQ3V1O&I{8Qj~0w-;t3H9HVF7juwoI@9A!
zwYcetr5)&E^aE07eZjJFUewV*-TB+kY<~ZFc!T`2XZ??;f9qj;@NoXQ``y+3;hr0t
zuU!6<*YE$%TiciwpX<=?(I84jo6hwp!Ly;$y@ZgwZ_qX!R{U;oReCnt;Lk^>v(
z0szb7>h7@DdLRN-caKbd3|
zN=pKCK!}9EhG1rD;?V;@QAl8*0;r%RkIA2J2+ROn@Z_ClGsGr_!c%X;MGYVYr$h`q
z;h5Z}6;_b-un=@joQhfPS}D*>BjzP5^&`dUr5ZLVFL$(Al;X%aXu++aXGVf6G3okXbG#-?(sv6s9}<9TEUPiDxyY)vXl)44(J_p
z5@UhtRAk8P4T}MVK1PzIFV!2H?z4=cq&oL-qG_atn*isV#?M1AkTsFty@1))o3qe*
zg2@o4#*So^bQaRG8ee5JiS%
z@0^!HnJj75TG}plu5H&l!@=&-H^@y8*hygdQ6BGHisOqHI~q2Xx}KJRGV$FGmRfLA
zvw8?3-b?|sAQ>v6f(KKoE}aj{CvP`RcQ^@of7YKQFJMB5E~l~2581gON+=a*XCDB7VmvB;f_kNQpG`3GSGr7>7q48t8RJfWv=%;F_v6?BczgcEPh#c8*?Ahz
z$bWEL$epwec$)JSglP})wXFoIUt~<4dimnbM~~BJJyZ>jpE*37PUf}h$;nx%
z1ENE~w%ZS}+uc1pJzHLU=Ekje-+S)4=bnG*BV+2%R?Fq;L@nv6&w%5WQmIH=!kmC8
zWKaMwFhC_{@6kDjsHIP6PyrA*M!z*~c}H8pwVHvzHVvN{(Lm9QfJkLPj}Z8}Xj*Cdz{E*Rp_G!OIPVCmXNpY_k(9^Ahv~F)&aKvBmTMkFwdJE
zYW3DlcDivKN=-z_Apn6Hsd1Ks(}Ep
zM~j&NfF1UIs#U#@xe_D#;6ez2cIT71B}4%$lE*r0XB~G=gdMM;S=5xoXN}$L;>LX4
zJSGjuq_t|%N_ogK;%XphS0}i$qclwhoehi^-J?@!l@zDWSFx;s92=w%+M;!AXNFkP
ziH!X?rkK~0viSJL&4u~JgElS!Z}su?bd?sh4>N6WUk{t{42QG45?9VY(%#zl1C}`6
z*NT25zf#KNA$Cf>8YQ=FyDh_eP3kr#rXU=(>?4fTb0o3}C2dv(Qec`yV^_D$Qs0MHV
z;FLXq87LwV02+ZYR_u(cpD1HSro#gy1Jm61bxe^cFna=#TylbxShfxoGbu(8nutDD
zzzU>{0APwDS^r)b6crOi0|L=fYc=9+Ru?ieYG$meXi7w$D41zh
z9E*^vqK3c}szT~VRW;;Fb?b>EMpIQ#06>6tOtm-+JJxaqgyFoAHIpuir6IK*osawD
z$+1*}-7&K=PGe)Whs_ep0?NZC_Jib~;d}>eID(%bVvhAo!uBnQYts2h`kRX@3
zRfZYRYORRQxez9$6hIKPZG!*+02!Eh_CH3GZ`;VWTh_LVh!`R@tur$OHdA}bl|iB(
zqwSv3gP0jWiGYYzd3#9Sy9SzCE1CkCY^gy2j8AhvbIwvnsc3+Ps0dBdoSiK^8G|ul
z(>AKgNUEErZJFcP5ACc$(o~XI4Zc{d`_s;xPF(LBOL+~*DhDdn)Vz09da#K~W#**B
zWj#Cp;oSY;D#r1AZ}1nOJ$US{Z{lnjx}K)M7lNwe;Kgq)><&e3TxiWvo2GfO!seuZ
zawTDKAbC^LnoLcUH6nC0dnWop@{`h^45cR*h%A+rJKPN#BGN0j*n31HGpS~RRbhi`RWM`5s&^sG1l$hgm+-`va?)R~`^m
zLc5E@v59`^cmJc?w}0@L{_S_zkJ)yR<}H``E+6HpqOpskJbVBImeYF5=l7nw
zc)7cD{r=He814^~xL&yYaHj%)>#griYWk&@Z~W!I_;3E;?(2KkU-;DXKWWanS$AnN
zZ@oQU9DVi9-B&;Q@^ja&w3Epb?B0C1Ue~C@-Jd@U`{D5WcOG1Mrk#apUgIL)w>s%#
zn1I%N_Wxw>Ph&0J()&K_8P;0wu%|Ph8gJdIx^v“sK7ReTA4zfu_F=fbB;y5;v
zAT|WVh=TyOfdKht`9pvtur1hvlvo}lON>QQ6lXS@-R$n$-M8;N*IZ{ldwhqr*7H31
zu#6F%w^^LdRe{lEYP6Nv(3KvAE(tqRB-}!I9`CIo7zkKCpQTwMh%gdYPh*lco
ze9^yt@YaL3-pJMF67c5!)wj0Z=%Sn*KKR4$e&fy8{_wH=%WvPl*F9GJZd<*1=b)V}
z{=zd){LCjGt9Qo#;ctKY>nGymyzzc|bm+!IA05RBfGP__)i_l$<=ND<)@$dZ-4jKSEt9f8$H;49_tq#9zO8%
zcojB23%|W{XLh=O^Wxr(3m1oRSu8ync
zck-upZoc>S>A7+-`ov%P!`TYkJ-NJhaQf`iS9Tt|vUG#y>{u5M4)5JxT2&P0hGRo2
zJUHxr^&kALKm5V>pZ?TyPhEOuJl?o*<4Gn~Rlmk0nBKVY>|@uSWdIQ!j%Gjh{lE3O&wVCJTUUFRFF*0|7oLB3GCMt8y?$`~{@HD5yI=h5r>|^mwRsNE
z&2sbBuYLcwUcZ+Ri_J?LBcap9@a)Z#@Mj$P%uCGON?654-5K@J7d4uJp#t!l{c4mg
z6aJHt2dGclMbTt!O5Qmq6o8pMR;&tY>sj-Abjph3<$~$lNPdB$`gL2YdnEcjn9sKxLe`a%Y+j}qhkx4vD
zX8!LC-T%iX3H(!9|GgpdNBsKrmS8RCtiPhF&bj}=Zz3We?KkX0>}*+Dz
zJ}*uV9<--t@7;RXKk>rR{!@#=Wx+CLDXAEeCy8YO%4#kpGjN`}tmkbkOYgle0x+qW
zh^)Kd3PAW{^NIh-`u}i!bZXdIIs_140!2Uo@X|XKbuNdBdr=V0v(C6Gh*Bph=h~As
zh}t@URf&?U@_u#?_3wnIDE$-q-HQ<$FK2
zv;6JZ&eQe0f(JW;d4KwcLsxwIv2ECMFTJryT|z`Hd@=U1&(g#~{FplsA=M~E7P@fjjwiz|##P@A#v@*7vRsqHa^xLh
z)`(KOAn~BcOs>_u95j7(c7HP5z1F_)@!$F1htWpI!{|4=YP$=YT5So1rj}8x
zsDB_w;S{%Gc?sP3Z1hALZfm(+n<~@X7)h8U;|N3UnQ}^joG^f?F^1R+b7qeOh)Td<
z9)MFIG6-VM6cH0zhnh_(^y>>(He;QK=oI
zgLjlJmzxC~-^DKZ=8)8M|5@;&OL_>y`B+#rAyMTv-A*QGvoombT?(RpC@kW~cKR78rhEXrUeDH#I-
zkuhZh1}JPixj^<}B3TW>=BQGOxto(&U2oLIcysH*
zh0Bkz4}9%oPd>AIczAqx_%I3{9-nw{DRypiZ@96SxW4wW>+Nc0#oot0`S_&XNZEV*pz+IH3VZA{&2waPhrR}_VxOfJq>O&tOlUA(yOi()YJMIje2
z?Et3t-hb=-{Pf|$y)2#g6herL4iOwOtm9}31{7SdHME@9Tp_VuQp$+vIjpaH@Mr||
zD6ANxd^o0u_1;bZObq~-xeE1q;KRfr1haLxAxBJ1>jRh&ys4o>L|{a<_5Kb$b8rqY
z8`xT&RZ{?1W2iKttP3j1AtD$ikuG)W6cIf$6Qe@5+zBSoUXrS=fepd&I%-gOpHkm-
zUF>>dTo0o}tm_&z5K#b3N#^Hs5oz0gI2``=*Z<}hzx1`we&+M@#q{ax*A|PzJ9l13
zijv4xo%6z0n-M!j)T}-eh)FhGH(N7v8LHrX?2)`HX~+%(yPWlWF+X1{=S`OdleS&z
z*0k+)Oy?2D-!TUdh6G^fL^6;%;z3=j235#O07w-af`NEX>@Y=1YfZ{PvWWrF8q-1u
zUPU#jD_tlCqP-bbWr#`Jb_JMY)Vdm0qft>*SuCf7Dnw*`??OOuVrg9}5&PwQu|QW9
zrO!F9x`iVuN*9V+Q;tC1dxLdAN4#fCx$8O-axNoc-}il=Qc59&vMfsuB^M;sub_ue_qIi}n*BQu4fTDO=Gu_yj$H*SV8_Bm%FGD9Ov
zDXrTSR;$%ouK3Z16aWB*B5++5-WLF1F#{-QnypqzVp*0zq?!pV$6iG8{D{ioW-%NN
zi(cexdSBYRtFDb0m>HRweF!1AU}`C)id*h=euRzNM0Pgb$}yQXFjEoB$O_2B$QqL@
zVHW&ob7SZ@wkHpl$G1!9E)MZRZQ7RyCr4AAvUVGOV~^{$h!1-CKtG*Nk5SIG$~|p5
zAjPt+lHg347zQ=c(2QBV>kV?>zHGpNWftgDM#yF1oP+tS2V@0ADk)S3EK0!6FS@Ru
z^=T$fsTgb6O|+A2a5R3Z*9a+C7_r}=qDt5-AlA^4V<8YVh%HGMk~-4dtdgPj$04^t
z2pAGVFadM`;3A+01W*EFKm(^hn89XKazwp}NJMj?U?eO3-u$#5O>$Yq)try+hjt~S
zk!e^f}jZh-B~|?A*rc;Sj!BE7y<);
zh=?dVldSk*@BELt&zz@^f=D6)z}_rvzV>V-Y!M4OwJX>0Kmm8ZQrJx
zPfuq6+NbP1NlLL-03gJo3T-!6&BRcb8>?04T$oI@HYYdwP8}6FtrS|cOnuX}x#yX8
zh-4XB3aN*P9-F4GokA!w7QlsLcJ1k?yEjb!LhhDs*i>7IM=BnnOr2i|?gX6WrVryB
z_S=YzC&N+aNBMT16cl7t_h$w7jmOq|b(GrE*K#3LC=Qr>5nM4qMbQ00F1T>4WVy$9
z$u9?JyEGTz)1%|nsaAu*-mnDbE?W{<<|((U&@U{O5Amm>m%8+gSVIM;bOm81}rNc
zRKxyovw33+PHJyW4ofXc=sy|XIG^RW?UGVa4vrVqrOj|UKDo2Ccy}<~<#O!pxTu2P
z*xngvT*T}9kFoO6;<()%e*N&?YcqKtoeU>qYu#*a2e(!{8dd7BVX(G4^E`Frd_AJk
zu=9*6b(BIk{@{btf8{^@^-o?OQ2s42R>#uUv^)%CbH^o2qJ)EQF#-=BkOuP>(l@q9BH9
zbMIvQ;2Yogt<=r#Vm?29;JE&a|I)vai@~zX&0=_QTL`LO$yH;?MU=37nkH@&##!P}?z|Ce9=WSAfR
z+G}r~M;8h|jw%f5E7vtWN0wyC29-ArF1i7xFF$kPU;FIMryd_mY75Oj{o+%_3pbA!
z@tMcBe)adhR}aS3&ZQ5U)t+7d=Iw|3aNn2sgZqA!76tA7*rmE(PJjK4!+-v{KlRz?
zo-FG@vV3j#u{TfNyYB0L?TpSNSc;}0g
zoxDNKy*Cb*r}w{cEIe$gC-M_F=Wc)2{{pIcd*Bg9#o)4O4SQS|C=O5qr>8JJ|-@f!%J$mo-aM&E)Iz4)ONoN{%wu(G)
zi^I`tN7Mb|S1xUA?@qQ~KershWMZCnr_-p(X``!?m9%p9;K6U*dG))aOJ!M?Mg4^@
zd{I^J-oJZvboAI`SHAL-zqq-%jl}Eg@M^Vq>3e_F_uW%ZJ@psW{|MJV&S3sgR$z@ZU4P}Hs)m2msJ{M92w|-g
z5@RijtC$n{1b8DPu7pD|NcR~J~#|W3_MyGWHn1&257V}YclT+
z9Hkr!CM%sfEMsc{S{<~gjxZOy4*OhY*Q%WSuoH1zyVKpTvTe#Nckrs(@QW7e`y1B+IcMCB+HmGuI$T&6C{lDb^Evu!xQF1O8+0DR!l
z?&DEX&wS_r>Od90FJw1K$?E1j$lcZI{`AtHPGNIsDVX|xrK^^F5IHLfZy^Mc(or^(
z*sCKLj+_DuND`5(*gH~Y+vqDL7RTxfB{rZS=2Vj}0Dwf%fDJec8H5m&I~H+hXrc&e
z#s+NEVrfV~NCcpYvEkTy_AL!k6Df%+DIzI)3bp5A(cF`mO`!+ZNuQfJEann>$+;xw
zeUXXOtCgzM^)Uv&7YbIkjJfT4T}2Wt0ao3BG~%#tOXoxWHxj;H7-_dcXNxzAz3=3aglWM6Z>=Ipa6<
zQ&(;CHFtgG?oRZS{Q17-5!b$bTMd(;wcc?E9M}NVI+!WB9({ze!>YBSHP;Iq!{sKK
zVA-cw6cWbPnLs^U+Lre|pjx}IX3Ai?wi=kda&mww+of}{uEW@q6NqjI&_fNu5LRpp
zq74`J!>H4y>-)mcmxFa@wW%f%EGP>w0x$q!5(UrfJrNRtcT|=kgp!d#tZ!pg*Hu}l
zsz?L?Q(`KJ*t;YtI_HR8-*<>smn_o7$W>MKeV=n&SC0~6O5ORi2gDG90p$Fs4~YR&
z*G57ENvW62M5wB0CLl@GyTW;t)S9w$FrUx5t^>rnuF>)KWPdo?FvYHm=k3wM!{cSM
zJUu@-JUJW`fy8z;HcqE!A%xBCt)qj(_OKfa1}^5RDErmI6t}i^hQo2})A{slHk*ag
zZ*5#&#j|S2m#*HpdHwmx_JzfAdGF!vg3kZefBCm(({4BzU)a9>>Ra#Jc;Xo<>UeMv
z7v28u#pBsY>ShNI?^b#FNAKNz=8IqS-gB8YHV1Vv_K?}MSx#U4#C0<}IX=64_pWEy
z-r6+Fp>UJ&1w>rVrW@m-b}_K#W?EL7aAdu&!X%}3xon-ob&zOS?2U&Pu3j0w``&9x
z(sZl38lq2>YE>oT9J5FU6U`kc1E`ri^UPC4;qTnP=e=KxH-+=w`w&7K+Zbca>7zDR
zHCPXik(t)?eMWK+`(9O=W{HSp=MUU{K$^cl+&k
zUe7s>hXWDu#c*vEZQC~Xt!T{2hQoSX57znwvNMF7v#f{jQB?&L7;tT3UGt{Ec@bIv
zoB;n2lUR?&W+o};90347+|)_VWI*H)A*T$?-jj%|MFNp^!TU#}A#{d@$Vdo;>&80+
z0z^h205AXpdbEL5Rp(12V>3epLNn8~(-?r69T6EN5Hssc6jPtFXXnZyI4XPqwH#y9
zG(G^6bIe49Knm;T7N;?TQ3wINQ&l2qmfdo`JUYAk&iikT#v4U3_*ehZzxl@NuM)uY
zbQSr4k#drl7p9!F1dLr=a=|HQFeM^00|i7(DG!I6&bcnis%cMSpOYeTljP2jQ8$CF`!h(5RCA7<
zn3Yr%E|^(Pps@$cQ6d66w{B}g<}lb8X0aF*$;z?{q3YW<^?g|nR4ir73YIZu$q4}q
z&T~@$GhY(1OKMq}LamD~p*S=6Qb?aBL;
z5;wD%Lh_nz9NaCD1_;YBz?QTr!7qE+Tn<
z{rtRF>zU`^S3K&9Erbo>;Ym#;*PIA6c_uUJIsVX49_5S%^WrU5Q&xMkayH1{x{Wpw1jAo%qqShD35
ziE_b?3^XQDv&3o`Xy6?$U5Q|pR8hc`3A_(xoMT$w^3BxHRMkXtjum@Ez-)$=RT3CE
z=Syb^)`fJUs%l8Ga)h~f5?c*{dk;-hOT02ID9
z(KR)UiM%IeO(`+12mWRz2nYg(U|?YW!)IMJGcdt52Y??@P}OA3Ms(hpnF_;?AZynP
z0CMhC*U@P-697=fUXl_cgL4G@NJ39JBLI5>A~6M3L_j3(y%CXcC|tK%stPkZ#+(%q
zB~$QqOiSl^)i!OXRbBU~JZ{fGcu?7NcDh`g1r|ae$5pwxv9bC7yC3ZBUN(^F^c11r
z+}v-Q>_Y`2q6o;w9zZ*Eq3&FkQQxx83(BFOiEEA1O%58CxziizrEgH?ySgJ-~n
z)+7bVKnMX0k$t%}1nfhZ%7C@g-P&&tprAOxQB1sSR;#{~m@L97r?Xi%?OHU;v2PZ6
zZzpVSZwv{dCT!
zuq>RZ-z*;--M-aSW0^L)dly#oDNBxhe?C89rkgi!V&xCJv-z|IfWyPX$1Yvn9u9xu
zXMVBoyR-9yuYcn=4-W4>oS%K}i@(6j!+Y<)I&~+u7dAI%tHXyb!wyo@)wRL%!y+3j
z??3zly7T^c|AqRA>${`hf9?0~%@?mO8p^pgDaO@LZEXBUzy0gZV_|S~`-h~mNV(sD
zqBd;L{)JC|^?u|2=C?l>?@lgM^zVP=da=3ny;~=L<#%5_pGT(IQ!@M5wVl8FCZQG~
zp#{qwI^1^gmv~%%ayz!W%c6M^_8{c^CETxm%
zA3Sk&|FfTYp*FvDZT~y>&b67HrQR~~ba2?UC)|{xBXX@~ADH?!J2fyg=*@^sP#A>F
zrjot)Ucnb56$~k-YB&?yA6#Aaxhl(>&pnqgi%8PJ?!{*Ylj`0F4{!hQw!-o(5&u`D}Xmd$-#yvIL8^84I^UrPI2dnwn
zt-CVZI-kySbB_1##MF%{q;`IGbd)CI6mR^CsB;~jv(*4}0B{o8N9{q1l5&e72k
zIgYVAeDK}}cW#fz8yg!NvF*>#&&K0n(=I>%`OiLn4Ge4cZadAof^uUyz(oSW_1?ew0`zm)uhS&w&e@)&ZIv-xb>RuZdx7{
zZV>O?{lUYNzxkWLa_|1FNaKU@ClCC{yVA|o-2CP37vAAOi?=7;X}9(G_SWT}|8xJd
zn>TL)zcPffK+Yc=92_4$beJo&5*Ba2^48sl
z-@koy{-E4HD4x_CdjX3yhNkNYX(MOKk-IkL9D|oZImdv7hu~BdvItPBi*DdN6&XNP
zASaN_3c-;hwjcSp*JPm&xk>-5KaxLL|18!&uHm`#4pS3j%FIAz1?VAF){ZJrn+Mo4
zo-AmpVCXN;Int_KR9Im^S1Rb>n=m;FMVKNvD2Fqq;2%&IYYGX{NX?yYv
zTRx6;)#UD^D`sxg@=naYp594)yWp){Z3ls(J1<42;RexhDQPG51
zKm`;57hOB6yfe4mWapwEr%3a=I8dq!7zFAmxY8>T8ep<|*p9~2YS1NfA|vOOtkt&a
zj)8~bIT~gvSPMv<#o1Lm80&*wxwomSY;cP8alW#Is|Ow-@AzUj<%D)#qd*T{>MT7C
z1G|!q{Hk4^+!2jxadO{NS_-z9#OfhYQDs*)%P^g4F@%lH+;?z(Qq88NF1zM1`a=FwRXWeAFuq&N3qaTE-FpNUelbO)F1~po)rilpN2^Kj8X|
zcbZj~7OfL4H?F{F0Hd*0qf7(sSJI!xeEN2~`|L*8$`-^RT@JHL8xWvtI=F|$@2
zB9(=&I)Z6z;%YheV@rx;by=$Au4z$iScDeH04;+=F=ivK>Y)q1;9i=hD7;8ziXhr_
ztNCmuu)VRd*>@>vDyzcGV(L_rWMEgBIWhJ{rP9aPBOsAeT|clBlBJx2^JN(f*CR6#
zQI$lF*Z0{zu5xMt01Zqfi-{@`<J`hu8M}Wk>Tds1}1+BVyRO97x
zHR667-0s#+Cn>8CL2CQhuZq&O?aI(L_jW|2TXimQA2kXM>)oA=7k0O<4XS$C9Dne^
z_nY=~)%IEK==}8P{P?{G9~4z}bbfq3JCjsTCJfPCbmed~+S;Dnx%=LHIU5W{{e%1Z
z#;cALES4V5zJGT8v6~xv*P5njWx03lsl|MH_1O!}Vs`%CTX*jt@9yqB^W3M^hLeqP
z&fUB3y>O8+UHMKRrA8`+xuMo}E1qrNL-)37cNa6JiL8$S^yMaBUkWba}#DWND^abkehZ1phZzgmS(w%y&az%il+Hu
z8oNfsI~Tn7ZQm9};R@%}<(yMY0I+7}tWk4jw$6GWvt#EUxh<3P;Jqu$5YuwDJVtc}
z0?bSVjD1dh?g1c8S47}>U7C;`Vbd(zwpE<2=Rb((oWn;)hI;mWPLkCTv2$h=V-k_N
zsveOW+@mrrcCM_#eAzf6F*1uH3Ts4?850-ms9HBFXOp~M1-tb&PqV55Gk^dHbY1UU
zP=K6Z-M#FbV{%~03>=aenpu`d-$;#2*ZV^<6Gaq*EQ*={0TI+hvL-crWI0s_j*wK6
zX3eIZMO|Mkm<%MwY@&>`{tujIazM}gUydm-*(Q`m6ODl6&ZmE46}+@ld}gPUj#%>eL~<6N+N2yRzyU#
zOR?>(^(1&_yL&9*L1@PWPe
zF-Alak$KZE+J3(3i_()}%#jdN)K2=fy%+#;79t|EloEL_2^B<)RV_#8Q#3PFLPSDT
zfe-0YjL0$OlrmykX9ZG9W)=bshGTwoFvxmb3IOb^3}seH(bk@3CL$FTEy`iLY7870
z5js-W$Bd|egSJ}@DyS;*o{5V=7$Kr*1OPQu&8V4#3xZG~tSoqUaWE7eMod-h_9y<@
zR;=_s0uxc&cYP8x5>rIf&Im&wU)Br?mJ_HrF6trQ*TH?dSh+f7+5JW^2I&>;7MJ?)DEov0Q>;St7&Jt`1^QCM%9om+=9AJue&Fas-Uizz+5IA)Ox$AZmsN#jy`7s!3@=b4
z231_U2Ap%MN=TmF`T=|WK;Eq8Wmy(Qp((3n=bV{2BFa(K1k4QAqm4%$BThbiwm|@0
z=imUqK-W%$loBF4=ZMz8hc!~*5wG^6UpY7dGgDN{ngKwvtyN{lb;WrHs!Sf%t@WxR
zl2V^c0Eo~5ArX;4>U!@zAm^MBLRA%2RUMt2?u>Sb22I}q(B;b)=gU>oMp)$#+{hJ|
z_paP(|?QHz@nc{4qj&$=ELt7X*2z_kjZ`E1oI#lWtp%QCpOZ%*DhZrdIZ
znVr~5q3U>VRRGYi)UYA=BSfc)ZA;{U6j;eK26FzxS)OKZW+u@w7EBJ2
z$Q)D6UU4h-dL|-Z-fwuL+#Owp!+k84LauOwJ!Y9C!)1n17&sq)I{l72`3D1QsH~O|
zo%R!Ft6Vh_g4w_$xLOfJl7l_ZcyRyWbUJNhi{Ldzpy<9fVOSVtfJhUA0Brh>;}H#WoL`7s=ampTW`Jb=D~x*ix)3D=lYm^
z5k`{@r*!G!m807~h%vGcFTeaUq4VB%pZZfzJ^P#_N=YwYy79?Re5D?Q>Gb^I-5=DB
zAe8%8ug!12qJ7`THmaZ7d=P3MCc5Z`8#@nf{=_$m7w6SZG5q1rZeRRYzVy$3;~QgYOw4thzhj{qiS&;^Nik;`1w^$|zt_7d-#q?%nqe-`bvR{KQq=p4enE
zb{;pD4`fh$^)k5&gKwSVzx>~P`_i~xSW(je$qw4NSwG>~66F*J#5U%K7t8%Srw^YC
z{I7iVlZ!Zea_dSz?;JWV+=Xk~(&c8+_<@WjLkVg7;-ze>A3m7=wO3|;^_7GS9>R`<&(Shd$(`@^{@YaJ74|G$DU_geXzX!
z%~ubvZX7*-<3d^SgS`CMPkil-mtQ_>mf!iEU;FG=Qw8Ij+HaP_WA2-xn4d3%PX4W*
z`RbEf!|#4@_iw-S_Q}fc;;5!5)OhfmQ3N7oa7-2?Q(Z~s5;t#13x%Ui|AFYkWs>BqLuUs`HE1yqh`HGOx*4Z0ZkA81^
z=Ws9_SDSQyF`bnSNxy2Brqc(n9*;&N=V-n-2?YR%IZ%yxb^5`BTa(G;=IrcVAle
z8NqdZd(^EiESG%mwWNpp8+*?__SDY1-<-w$KfL^@`RM7)PRemiXsdRe9eyAHc>?&05b$6CM06O%t(-wlQOaQ1tTRi6-k;^61r?o
z1(cOarhe<=v+*U|eToR}!j*UX{+(s>SN@aVoc1N{ePQzxKR!HPwP&k(TtV(&+ZW9-
zU%GlNxV`2F!_j^tqfF<&@_WBRojvu`Go#TY_z+_hVRmRHj{HaCZ}?*%68z&{;737$
zoU`}-qfFsP|LUW6Y3*QGD+e(~=iK^-`-ktpcK^YxwmH9Zcy_YPym95uZ1_glx%=eK*iFLONvTtD*-e4H*G96rTn~IT
zcD<0LsDNFfPQA}w!BPaZ>%1-2dS5U-ZMBF1FSEgPk}3-ed7K
zsJ6Pvwm4iZMElbn=!3bVrseUVUf!P#FLfuUxu|Yly80Wx{r7L(J^G1HeLPvy939Bn
z`BP85uw7IiJh*c-J<`s{*xfyQP_7of2$r~uUFR022&!wJCK#xQLoNW>5Q6Jc^udvH
z9u0vKh$^gmH5F{WSS3|MYGX<%xnfA&DpEoz&U?IN7P1b8rK6SCD5-6;ODqM$$U_Aj
zs@SEIHBd!?Voc;X6v4%&$=wVjXQ{nTY&uSN`gY}tz46hvRuS*#4K0R4&LPdVN_)Tj
zLS&Lq8HGMGhoM!2Ms;3Y1LxR<0&)o}28dW^KX%%fDT+8u2wuU=#1KOMuzHahFoDDO3x&Pqypjkz9Fe-$x
zEk}q$+t`4FkQDRMot^lHA(nno1=-&!TMpfl<|palyP+J{q3Ryqo|?=1H;SQOstsHq
zFRp(;XZNrxEY2mn>U^Mj?lDgc%UsTfo6YF5!yT#abpwO>YahIK>vYW5uk|}cW8xMe
z0ss+t@}g={SjjOnxtw#C(t3Cl0y`IwG3lxIgV4z}J-n9RT_%~kx(Vf~b*s!>nxtw=
z+=kT?8xL%M@955ZbKOTS`^_EYs%Va=adV+Ymz$vtt~+a&G1^|3bkfiJdDp5hf=XVj
zy5O9`S}fL-(KCT!tHOXG6ze3Mpfzo0W`nZm+Za`t8Pq~SA$V1lxK8;eW+PS#*rj&W
z#4aXg_6&|KIMj?PCSphm0&6g@nYe-wQ7x+)5qfscv8rMS0H6TOY@jj5HR^=f=bSaE
z_nti>aX?oT1t^M$S`Hp!T%4VqFf%(eDIE$}g50U(P?q(u3hcV9W$+Y2pCqLuX3U&>
z2_bax?EO1mfBW6voD3%$lS%1$XY=y*&SSZcv)Sz6LJ2^kE+8U1Q!N3p8NF2KK-W`i`j4f=0CXh^e2kCc2=G2Yp%CqMo(|Ki!{(VbiG
zE*I1J>~MB|w7s|c=}&&@_~h)*eC=o7c;k)t-+%wDH(o945oLJ%`mWH
z$;Pl=GfV&@0OX7@W(JPlVvMi9zKk(?A7YG2=HC0kV6e6=6HzD&78hge`(7n_Uw9vo
zX`NqDRWV6jG}JEk&I37U`zA?K_~3%e*mX&Jd$&IW@)mR$H?MuX7?
z@Om<1W@-pXn3J4MXQS~(D@jy>Ap;7UG8$5oy0tCCnSZYsNlRZ=eV1}p1R!+GUKIM64PezY*%Zl<3&Fi`$4Ms#nWXQahdCU+140@nk)ntY-cB_76
zMvj9oJ!UquRnujO2wd0oU@#O2VwM%UK8ZvEEUQpggV-(#?~xn;5CRbQZFAlzE1Y+T0@(Md>-s(==UfQBZQGo4jNRU2&vlyTT?c)m
z!YR+a8{{m(J2MMRM8p^#JvfLGbK&q&97okmR3S^2zFP|(ssKc43Z~9GLs&nex)2bJ
zy<_GOLX25ebIv)ZAF(TeCC*v9E~k_bxGcQ$-aBH)ZIfd!s>!oK!Di^akBF^^Gv_Uo
zp4dvX5(J%h>$4+UmMkhC5?0J5sj8B?234gl9kpp<2yQdytYQQ_D2Rj4Ig3a!Vvr1q
z$W8&o`Usc-CNWq2VIP+nx7s{3BWJ6WyXNMm;pX=0B;>Om+V14IP3e`L?N8H#f&z1q
zvx*vmnh83{*s3z60fJY{#0j_q?2O0&L&{
z9F+UaMS|Gm^JP4{I(>C>xaH0IzU`x|reQ@}Q0~4r`fMz@cg|?ThbnrmkWZO*yRK!|Ie
zkLIGTKs3c(MGP64$(iS%T|-$g3tAR@)TXUjqxHDHaF`f?DJ2tS?#+W1~#E2q!?Jmhlf^+1^a?UA9Oo--)ODa4@3yh;$$@?Tu6}iNEvzs?y(ZHSb
zGH(`01rA1&cvQ-Ci1+t~%k3J;6`6*sOjQxr5^+pj>U!tgs2)1!2+J6wnN?*~RTbhR
z^_b1pkK9od)uE9}l9U~?A+oKzW^kP(P{b^WhyW0P>`^Wn%}m93O;1$>G*C1H$+~V*
zTIbr}Lx$HHL-Y}D*f|G}fOSL$!I5*EGa4W>BCr7Dm_XG#=gk2~I$?(<=I9SNNsSd55zWJ7Hhsvc%z_-mM2i
z^aW72Sit;r+E@}wqDG*WFr<=0Sxdk=#u)oJZ@Z@N#MD=%f@N#47*W`Cg5&m--(
z%0)Rb165D~B0w}mA+w+rngW0WGUTjlf35}T);&K2MuLW5LUZCH3zm(qYFCV6Grxa)csx6Qcz84%
zjLX4jzHCOJ_{Mkt@Rc`Txp(j0wX4@2yZqR&-f(VmakO<#D>ZrJhu=84^_nAi?1^W$
zMpqW|)9pob?U_#&J}fo7;hwx5H#g0~^5M7g#=rGPcU<7cuB}EFUV5W@KAb7LNL*UzSpvx&B|)<;$`~3UH#m@^Iw1OKm7Vj7dE#DX%lKI{5;Ql
zwPNqWY(8y|F?IGhqmRpPzx4Z`c1K+EJFoxnJMZ#n_rg;*UgXtcr`mT_@%fKGK0i6C
zp0uu=-5g*0gV$d9{=<`caP?pQ+Rr|>Ke+DGjVCYs+n@WwQ`h#{^cQYi`Nij6{LjAg
z>NoE`IGRmIR0`UltSgUwTtUi?E95fgMMM(su#{pw>-+TAuOIPj3uWlSY{GIRKQ#{jU
z)%x1S!P!wG=_(G=^1*mg%%P1o!f`~W{8yg#|MWfkHkIo%A5owma_k4VCHraiAG8!+I(_0_B{>EG1xp)6?
zWAn<-{_LN-_`(+)7e%K13BU6G>m}+>efl#iu~88YR*N@h%fI*i4-T*2U_a=(Wq&ju
zZT$Qez`Wd$+*K6FZL+x3uLPqdFjZ7@!8_*!RC0nW|D*W)y&mO3+!-4uq@`K0ScCi`&~i-9M+nJ=l*?i!q7sE#t$ye@*9uqeVYz@BWRy@i+d`U;3Ya
z;uD|BIj^lKNX$(Cy-L_0^~nEmSU>vmyRM5dwr#t$wFLlcqsrKz`advX5Adf$GZD%|NLNzrX^J?+_TW@{$d*6?v
z-3M;t`>Qm~BffkEE$&(G77udL)nsdhHbg(A0)1pMG!*o%<|I)tx?!M{UCs`{JoW&H
zP!PD76b#nV7?H86nus`a|LB{5_$dGWC+m;fB0&2{68P{pW+sxQsEx_xm^Jrq)VM+C
z$|T_moes1giOd~eef>Y)pB<#Fy$3gcDFwc8`2H$4waj=hI2`_|lg%6O@Xoluzfr6z
zR!&WmVpVwVR<78s>Am-H@PTMa*CeLx4KhyO(zwr&kTLxi@*~x=fcdv^sFz6LX|CJQMj@y
z*Ql}c*sFoEOJW%XFc(Dug`cL_suNdJPMh&wU6tTN6^du_OHI=WNa5JJepnXfVAVH0
zP5?vacTeh@N5iL^@f9Dc$^$h)K>$TDBPuA8Fe5vQVk)Ypl0Xe1shHU)Ma+O|fQY7|
ziLZ(hrg_8Q@d|DQ8Zd&wgprJf^lWr$>Wgm~1ULaxAm^
zW~-&+sjTYNYGZM!YfcG;)9`u6mOmoH*X8v9y->_ZO|Uvyc|!)`m;mT=0d4$50Gf*y4IzK;NF7
z5DA9u!}BI?;gzDeLQBZY+|QTEM6A@RmqebkBo#rXG6YG95R1YP00;);05Hk2?=;2;
zL?$Z}914J3ICccN>+3=E
zoRlB#4N>Ypf8!qg$%j+V=`;BcK|n5gIYEfwA+%ELqtR5j9OS8Vmst
z1RVR|Le3>J0>E)Q6Olq3GZut?J<>J|M19}4eYa>kW-prO^J!I-%jK*rY9BVduTlr+
zaf+B!mbyIXI~@);p1ia2zab+M$n
zXu9m*zw5d&h9TPE}iwKXUE=!%B*&oOI2^f0X_tuxwFsik*l!qTmYfs(o_AYoZPVrkJwC!iTm=
z0H|PQM(DsjBD@=es!EPoq5=~U5F(Q*t&0r+0MX3A*2u1aDQ6Qig5ZK0t_R?Rh=7LH
zY0L)Rd1t_EP9P97dv!`g-aXox=cKBdQU(C;y`il+GmczQ)+wc=GO9N>w+BS7>$;!&
z`CmLbIQ-6czrI@Bzj^&BNt^7px}}Tp!P*dF(8r`PWkqHNgy0-8aBx0^lATl4KFe}B
z&uB#6h)l%{5TPiFXh~95%VJ98oO5W%NL-hU&~;6p;+m_Ivjzgt#L7icc;~>ZX_^>2
z2AeO=%Cdaq7)C~9W^&GpO74>xASe;|z*)e^R{g3fM$Rp>yi=qht%CanK_TI;o&8&}7
zl>-L|s8Q8KG8zUK{GjRDCZ>#Q4GhhZGNSn`8OhAnjC5XGmx-WjSIlhAt%v*T^D*Zf
zoM-l`7ITk?YpRv^9!wskTU7Cpb_F7BN@ljk7Ljw0>I$55P1Bje`qC;QU1uQ9d2$@;
zVgSXX{hXR*$$i`Ty780(mkbo31gEAc=SUjN%#hKD&A*LSY0)Kt%mah|oJ?qm)pysI3QzYd9DHun`#u0)leOzN5&B
zKvh5j=nH3MpsCF}VM@OBRDv;cHQcUvZ@*d<)$ZQq1eB6o
zzI5&MY&INiuDaEyKJ$fx!+Vz|n?+H8#hiN|v@FZm8R*2o2cR(k@hD!TY-gllQtbUVSvQUFftu6T<{GAE3Vnw*wR)3$9k(`n(
z2nu$A9TYsKP)J(O>C__>Oax|{mp%~y0RsSLGeQIg!%i2>jKFA^6zs!7wxo%gnI;5L
zW=VNr>e*MZO411=?vrrWL+%UhhQp$X#gPTqrYkTn2GB4I@`f%FZVm!m9996;bh&Kj
zCw-`?7zW(fs)M99X2f#j8Q=N#?{oia<)z22KmX6|ZCvwxhspWvy|a_Ea+mwltq-~`
z_07npwcc`;uUDh(;YJvbTMEbF;?C6<_Xqsao%1{8@WOVSl%D$Ng-voa~%66lt)#dy}C`ZS&@9->!nKu&qPc&Q7~5-T73q
z?OwQW^Tv~d(Ktof-ro7OU->WYzW?UuKKqs3t&4+dJRB9Bp4@+UeCyT^Yauy3AMQVW
zxc~GU_2bBs{W{!REWh8c=GEpQj+V=5rSl24H!g&mf?LD#&b#m0=9Pyx9{-`yLuekJ
z9RJxRZ9n}JhkkPF&h+Tu;guU(1IuuH_u{6*y)oCro7XN^s^_QY?>t=m!3Xz#iRy8j=3_l-9Xjvn@l(P&gShOSkR#J#gs<
z_5b|(_y4G|d)3tutTtACS1rq~8McePel9zsYCNv<{TH+TtB-xD9)*|hy!@a1;I-d=
z?|ia%bt4S#w`Vw5J%|ubyDQ~jdSZ9|%^L8>;q|@QAJ%nw=e@(!&%1K}Wc1`;O_vAe
zAZ@hb&#egkLzP8z0AbXoocdH&VbL~&!Kf(ert8u9z3tuD=dvvC+__VhWl@9>!u0Gm
z>;C3sr*G5Q*|cx_ohP@3qw3nm@L|`k7!U5g^WE?L{->UKdTVQV@7Ar;rOx{9dmp?q
zxIfz(UOc&UxpKbPxiIx1K}WH}zFRs^ZRQlsk##wnkpVF?vk@u~djC&K9^h!0%$NW$
z1H&3Ogg9V)BsT*9J3~@i2i>ZyWF_~OJ=$t<76@5nRqY@S(0a4GUlSwRhi<8@@fAC*qnsz4-{EGK>Zv4_;
z`nTst4?gy>k5yG=iq1LbC`-;cIr8fl^3hH3k4cQcek6Jbu#P!D>Ks~6{6Fd_{?UK(
z(Fh}@N9p^Va|mG_PmD2w^I}Or9J*D%K*X{v*>Ty0>B&N8t58=%E@ua))79d%U(Fsq
zc4%c_P9K!Eg-3SfV{=~92PK3ePc?2nd$0Rj|N&C~%i&r?|-S(+@Zg$@O&CR=iu)O+}gQxzduDVF)KdjF0lu(NuId(@WBVOv(fIQ))%sOA_s_&N-;_EA$X2(BoKz~n_F=9D0N@d_p?DMbNeU6=B&)9d
zaNLQATww)7RF+BmCg;BGigE~s6DLF0MuHWSrViRBckIfl9@V?s+n5rHoX^iiX|pbq
zTFCcCXYW3C_FGMPVLEtv9sLW<*CsuqIyE`jSpW*vguGMusS8
z*?@^kK<3OCoGXGz5{-Q?F(KPze%2$mMVSYe$|A_@EHul3#N{wzFQOe{sXi+~3>8j7
zpO%B%VRO=FY{px#xlImPwa>1@Kx~#)WzMVw7y`LPUbrM=uC^hPQ^`xv=xEw}Y~{9R
zbYZnwZls8354xgkp8AAu&&u<;Tb#P~{g_65RmrvMreWA-u#9>$zf~3BRj6%`RyZtD8a49(ksN#1
z__`fcvRPLy!<@V0e&LqQO7}g@`jLZ?D_09#oq0dtVQ7RI*c5X#Tt6ckrHo?z`Xoc9
zM?7JXJ|#(KtyL5z1EXO|WSJufVCsEPtr)ypm`!_qFxPh%+0B+^Uwy36eF|3i7PJ-b
zhCA0*@!(Zk-ACHNv{$sDphepW;AUAW7%HOo9$B-Z7y^jLoZ};+3t~zMa$?hRw6zg)
zzgjYL5u8Kn``(Do&gRW3s$d&?b}*&_fSLWJixYGhEgM-~*A5+I3FHrt7pJGkWfdl)
z;UfzLKwa1CD~zg&;{91na!ju831xlWl^WxBKQ=u
zGFRmwRL;zrrr`jTHJB967cL}_SZ1ShL}^4{I=Klb?5U;S_YYCE5nUTR)~J2SoIfKlx|Q
z&u6En=NC3d%jr3!{^7yhhxc#2|K6K@KMzbrRhMNMV@|nO>A&^O-{0Qe-QM1w%}?3M
z#+KjOxip(CcXlp+qTX1{Pai&f8!Yv42^QIr0kPvOsj7+)Ld>aYdH^s()vT(EcBN|T
z{v=g(F06}iGe!Z^lmyHSP;G6M@6)PHu}!gHN8YK~vRfk5ZFA;aa9k3x5vXETO(_+=
zjxiE>M8x3wKBL33tjWkA0b)#TjImGIyCQ@_G$H~SfIPBQ(<3B0@mlZ@AtO{&a)u@>
z%LP#Mz;nr#TcWhf~KOVMMT$0&^5@~;u=(;
zstPDz>jDA=5Hlcyb!g2Dz>E;ckRTak0|jI9NJNGrVxo>2fRr$yDME1URRM4i0uuv3
zMnJR==`p5&>rE*D7@?T}YABqDOc!UOR{E;#ySB@hFJJkIpZuv%3gke2QeH1W2DuV;Ez8CPspny3>F++BN
z8P`?P0&EfxB_+wK#T0WvJHO+(ZCC>yH2t0kSWP$`?eOb560Bl(VJW&0$SOWm=y;<57)>
zDbJZ4J6Uz}EG?QEWHgD@1^}sM1Is2K+NN>0gowUrJB8MH4_s@-;0Bf!2;dztaY|WL
zU4eByXrlAhsb)j;;7S!xGVv{~$XstZYU?=eG3-Pcy70a3qF=saOhtb@u{P9&^X*Wk
zMHjqea*>_-fJng-F?*~%sAX(K*X%AM=Mab7XGC;HM1^k0fIs6KIoUY0i%a7YEhOg`4?gc6M@h+IESV9hYV4NPDV_k@>M|1ZsQ<=0Vjs
z;aVOL$jm;k@j?J(Pyp23^7+{xaUUCy&eyHqKN(&-uxpEcdyqw(2_rB$bj*b*3Ib#M#biOV@bLu-&32O{l-^tpEZw8?3*V*|LwX=4Zp@>9*+AW&@)vhQn8s`ns>x)sTFht7%rmc6tJw7+
z1j4ZH>_+ollXHI5xN>@Wy4KCAsy5M>dNpI_9>Czk@^~ZwSpVcK>Kq}kDyRa2Tj!OS
zRYXjIm=G|hygo_R(S$WC0Dh!|K-HM(qb3lrbq6~Z>{3da69CFusRCLzgsevcO0X95
zEQ^^e;%qP&JoVhi9v&Xw??ce+Quea@aUHYz#IYZYF+PvUF
zo^`<d%!*;o>$fW;eEWrmOze?GIvFhTxoYPe1kIPyh7Kyz;|0&u0MFKq$X=cXqd_
zqQz|LOSHb}<1}XJa%)nK$5*)Cn2a|A+3NgU+J4@u^RT1&Z4C*nb
zVO3Oa)OFpmSv?bkooa88$|3+epDidVwnwOlM&zA&ml(xz&M`n>r1eB2Ny<4Z7!XQ!
zn2`}3u_p!)lbn;Wx3qL*fFXBa2K#2Zn4c}%YT9HbmkSE41LT5+h{L_1-<#yN%Rcki
zmndxyB~&{@nPCVkUqzL^2;@DPN^{ymuQHfy?#g^IU;xO(k{
zOZ{;?_zurUU)cEMugJ~s(+h{i_P_pzCr3rxol4|An(Ot!Rp{pBVG}sar)MeOErtb`
zVZLYptN+!D&;GgRcPAzPpT6~j-+6`VYO}0QPDn2-cD{Y@_*4hKbL*X7{@!WWxVSTn
z-@cpv!mZzzVmqBquh8nb$KSrVy^Y=Ci_cx(Hu6ikIvO{NbAk-i)jMM<%EPRiHmHCB
z^kQzlxV_%m2Htef|qCe&S=dzx%8I<(qf^^XaZkw#%Bj=5R2WJirjk&C-wSc?a5Dm=dwd)$klEt!)#uLz34@S-xv&G7LpS5jL`^3$sMjMlZ
z=@B^BSpq^yc|00Aw$*BWbbN2`!j;D!du%+~UY(!Ze|R{XbyqK5{@ka&eDL7m?SnVY
zy?^EIou+B$r}I_Rdo87|8Y1(&-R@_rYTyuxs%Yv;+o@Zgu?r5@RZN74NhPPKj7aoP
zs++h%=M^)UDw<;|3SSJYAn6`8Qz0VY`jQ5!N>U;tlmZwn_uc87at=8QM+%#as@+jP
z9NVCvEO2&YnA?7V)o^2Xqsx}99CnLz`=zJnzt@K0ZtZ=f*?4QQzu%?_PhPA$Tiw6A
z>bcx|deq%IOZJmr{={dW`$X~SPXJKc^<_~ZLQ1kO9
zf%Rlzofcm%m+O@DXf#^CKkFUB+Gw^W9ysStPfyo>oXuu~!C*e06H$zDJZ9${6;4$S
z5AMaT-`Uw24C=GhN!J}uXXg(eG{5`W%U!Z*PWRf?JN)EyT2^5PfH*f2`?)M-P^udz
zC|NK>&DvOAY2OU168Z^1TRAbzkXPK6CJ`YMR?3(Q6wA)ct?`p&9RM*QBHFrSaP2Tc
z1^v;t<^R7w^gmhu-!3y-^QKRm1YQ|>M$KUhOa}*lw0HN{WO+8eb{(Jog$rf2_C81u
zKf2hxR{!wj@$$a%M#`?f|ErhY``y-7TUcPZg(|8{)fQv;vClsD`pa)Tv9s$owk~g8
z+PU=1uKR<7@YekH?b}C3VYIvU?Ucg_+khb~SJRxk)XyidS%%Tdtm#(hjTk}EV~JQ<
z%5BWiq7RNsfPsd>@zNu(36NyXIp^d#RP`XP2Oc?62S1QT#u}v*F3#HPi
zl_mAdPqM*+mua3&g5}zE?&;
z2PhndML;6t!A{XbRraPQ^Z9boHeH@JjX8g{6jWs(=SGZF>EgoTn+&Nv9-oDsqsddV
zt!JCT1ye3WJft>MVQsNqlR!jF4GBqFRYM~{22|7DEE(W3GR8(J_DsyorVPReGN|m}
z&Sq*{sAcQ&iLaXNQK#zXb3_zqEo59S^zgio*m+-wi~S7p5<|f~xIH
zZri-}xoLVgC?>pn@ZNDbl%Z4}9;NlXPzV#!jf~@{Jv+&ZZNf
z+S{<`aO2{&jloShxr4Q>$X@WYsJpXll-R&x}~Jw6KeoM};S_j?t)W
zV-$fQht6q>8nZbGBk%T#yo)P~jqo4dUoKAbs7qJ8A2*D1ros&7)%nyPHRmT~d~%Cw
zGW6vtx7idBjcAox03ZO(QrHmDn`tjrcwZKNSPx?CX7lB6I35(i`rb1EKuQU~hLcne
zDTGR_s_L>R%gr6#+J%d|lcX_0pCF1X*u3WeDwD~^bUKZFK8>eE>9@Bx93xRuKr_oZ
ziU@!)(Yg^gWdR^njVU2=Q50Y%l1(A@QV)XnKIf>aNSIX}fofJujFe>F_K{c>^Cq$*
zcBU-Ggb8)kr%t-UV7gpn0x*xJ-XnqK6boOmGqBFg0Fvin@PnLCt1>uUY`BK
zVrZld&bA)TA5xNe)jy-7SB}!8-l`C2KJWTG+_^XzjScN&IzMj8%TGTy7;f+E>|D8W
zHD@iH-`Kt(eb;ua#Kuw=Q|l>xz9`F@{i*W5$9*GQ%!*sz7VMCMYO4Bf`QLUR4#$SH6$k`SL8l
zP)&%G*w`0l*2b94g3q8PW)7WabRSMp`@VC`4%o~<1yxa1L|h<4l`JC0&Jj5THq*Ks
ztjop-jFA{ATLRI+#(1?_<(xgSnK@?i9spDTKHLtiUn(+r=fp(U22f-|R)n<;+$-tB*Vn9>{%~{nDk<7AXB-46FQ;?
zQ`M}Qgoq$pEQ?xHo4#8vy89206x=61`?I-+!C(+PE*3LSz6@iex^1Ijlk+l0L2yM4
zNu^ow#og`T{d(R-#6TQlgkDS|B1?wYX9Wu(U;-lO`iSVw&=ghw;cfvLQi>rs=Q+m6
zOy0W~bJwNOXu|BfrU#~t!3MMC6jSO|MMNE!byYcErj#UG&O97!jz{%kc{-cTR2h(v
zIp=xXcL)I;8>tUPSqzcgs#$s$TqxF=T@f+K0Dx+mrB8j9jtIO%R7tS~8X+<(sv=~g
zoK&;qBMW6n8)8mPhgPRwkkUDxHTV!Cc-1*Wd+yy3cIz_J1AM~ZUS_Uqd_
zGp->;>tnU3Ip^5*y%8fKF$9N<4#b>uQ8EB@F~%$`YGz`dJV4->mx_siQ~`{N#eA)Q
z5TXM>WDdbON7kC!s#zJ$oYl~+W^#7cO~=0Ox+bRe-7OhpMN2>xU$iLM61rfZ>Hp2%
zp9O1{W%qs98us32_=Y>?oHdVKRXqXSjm8X+00&Vd15`kkMOw722m8T(kgXSo_2dXY
zI2_@y9bwrKwrR_h>5kuUhCQsc{BUkogOVVH
z^L3?0j%8q%6Y_MIqJ=f?^wWuisM-{0m5Q7wnk;Z|>h%w^)D3e#;xbbM7
z?tXH#S+@1g?4aKaYr!DWbHnjul(fRbT^@)UR&8n$z%CXb4$RJS;C5U^ExedtW*5+L
zXnBYu$(XfxtfYjtZMo&mW}T82$cXINr6f5^+wL!yOEWxM-MQ#!T|^0@M+ATfy<{jS(5XTQbN4<0Vq4(Z0r)Os|7XZiwLIC8>q&+$w+6C7^(0VWd
zk>&obXzI&+Z3bNSYYe`aSTUb6GIL!wh@4_BV@5;=wfCr^>^%UKRLH3E9stZ6000vJ
zh!qhL6)iDXz^X7gf{IE`r6cDd%x#=ibz<9iNtkQ%4H0vUy`^bBE36xZs;XLMfMU!;
zietYeqney&ml36Ql{UdwlJdN5002avd;AHMZf(|$
zgE1uWS;;x?5!0CaN;k*n58qg{v-e;B#&7+jf0zCID?k2APd@WyK)>0n
z_I-P4$Nj>;bmNs@|BctF{e&)u`uew}{Pn;0>KDW5Q{zXwSD(rb2gw#?r1sPuE+4y|
zs@d_q&D-0wLY}9Gm*VPl_Huvq+255z{667$=IfnxH9G;2e50MeI6CqoPuQ)WUw>ou
z(Hnr(9L8uFEWx;?CZy0be(n=}Ez-T!_Fld++ra1O&Qpg+&+6W6`I!cP?&seAJDrFA
ziJ!i+^IRJ{
z7cd{6|G`JA0akNv;C_Z}#h
zCU1uZ73bmX)T0b|1=6^jmu>ghZuob8;kn~s<;>j~&KmM3ckUhA-N&DNdbwQQIX*qQ
z`|;JQS5r!-r>DESyUZ9u`21s^`TdU`-q?Hkg+H@%z1BN-@4WVK_3!=ut-JY>FDpo;
z>Z*3JlV%}Qp<$*nRCpM5eP+&xe{3)RxZmE}4!@PC&x^VCk2xvchVewZX4CHP(R`iu
z{@1TQ{GWa2sA_hrIJee?a#E`GutrlXT5K00{(aQ#+$j7OJe;pk%-
z9wNCk`aV!W*f)i=8mjR6J8$i4n7e|DML(q7;C63Zd-sDM7|@^l^S|`h|K`8d^|0Qq
zUwi%gcRv1TfA8?=XP>)r<#M~2UB7no==j0;X8qia&)+`o-~Q-E-L$iL{lVRjKIzg?
zvcvXTt_N)7Az#~vTID|5o*CwHOumWPxSdN*KJ{&25J2F_q@`qn+BH?7sKuEFPNftg
zqN(eQgovexdLk-djOG|dF*bC7WfQP4ID_J>7VQ`)87CN~i~#^snM5IE;1G-NvXKKH
z0AU=L&4R5#npM=5F_w1W+Ibt3NfsC8o}yIX^6nD-x+!Fa5&5^w)m*f8c5t1*8;qPEAx4kyVt)i762w
zJ+jdKmqp9`N$~nVsqXy$>-YbO?|T9H|L0y4?P8<$>8pMEMMOlf07!tQfB;FgkK^r=
z&GG7^!{+jD-r0QTbwiV
z2Uq%yuY&5xVwDs#2yTX%77mOu5SX*tJOJ#tB`6SY06>Z%5_nS>fWZJU3n>ObfD4GT
zfti?hgoa=Wpp&jbkl+!B_frF4O|r@r{nql6NUbj8~0z&`&FF1
z@Lqjzb@=28xUJs1x^uAHGVfgeB(I(texvFR!rryb=0RRkK8c4CB16h=_x%@@XpB%#>VF)H3Fr
zz4vW9-;6`X;=SLN9=6~-<+E-P8fMl~OeW>F8&$fltEvi^nx-)`)=4Ln908?VabOpW
ztrSrm*;ho3qJfuwEKSQSwUg8~-3VH>m588AS@RGNRc;{1;y&0P8g$s@#NH!lFoI-N
zL^jWiry2>$QjKd(gsw(B~
zaoaq3^mcdpwNIL-KAFGL?SKBfc`Oh{8q@fo&0{kdWwTk6YtlGGW`RYOwCC1?@4$2V
zU{+$0y}^Uj%jNo$ETl}4_iB&0)2|=A^+f0o^T*G{jW!3zx|mBZWla`&P}VOzp8m)`
zxUa?cq3V4z)Hftt#u6&q0sxlk)LWY#wnEU(RMb+8GJ7vRuH^<_-i4v$nDt;;#~e5hCkyHUGE7dNN(onfG6hLtWak13&d
zCb~|WP47!7TBHFcC19S{bLSj*w2LevrO-4yCk8}3@NFrgwr8edgHcTsr7n_CqYANI
z!bND#QvT@vgY6ifK3q1-`7^thhwBF*;(1uPsvc7UgF;Aw92b$8$7z>3A7@czy)CCbyv|?vur*T8*O3YI`e=KU~5rb0gAl07MbEZ2v&AV=ho=nz_
z^w;OJaWEHazVn0eE$_Gf+1^K8q?
zG+4=$L_2^m7t1B5vCo#Fs$wZ84?9yfckkM6OXc)2a!-%PFU@khte<)I$K1STq@#Ox
zhj{+c-CIrI$8SFAVt(t^AMNg6-E4>J*RGi7D~F5C;1_Oxe0Y4mS`CsqN#FebYxj?3
zeRh8J#%0RgM?ZMuh38+c9s4RoaW^ktfBNQ=Kk=hqwvs=*^+Bi(_x9>Lx8G`7-L7wc
za__#cq;0D1==6z$r`~`4+rzL$CdJ*$R}RijFXa-+vOyfvmMavgq-3Xw>sC`4vyz$f
zGaw|^)Acz!oX^`yfm$_?O|rkD~E2L|Lqnw-rHET#x9C~zN(_r4Th*XB|Hz>xzK
zL_;)2s471U1Aq}Dxsn0!^c!cy3XysLZ&F~)8j74+!jn9xm`v2`_#7Eo2wC?b=7
z-w0eqW<}778WJgpf*PobIv2ce5DAzO9g3*RqmU|NF91}sKt?nu#Z0nul-QMG<93Tm
zP4HN#K`%;v5E2UYE-KnO7_r5G|HnVRLmDq4!81sLrts?*c+&H3uv
z-~8R(dHvl#`rQxT|6bFep`NYA**ud)74%`QGQ=V_Bp0eDZd^z8DaR~jh;fLc5&Eji
zIjJ!d7cE9&s->tSMkGg=Wke(AL?nxX;MR;a8986f%23PRJEPYi&j+nMm`XWg+nHIB!NOaD5H$~8?s+AJK
z<^Uj?T?lRCoNrYkky|>h>)QK8&Ur|qAWgzmppc2#6yhD6M^yvBS?k7ej6<$mqpHN*)|GRP
ziYuk`eXo}5x(22`4;s~1zN9rE5qkz^K5sK8>CaLZeC;_9c#m_Ka(6syS_x_67v9u+
z-}MLtqS-?QB8H(tUn?3_s(uy=aR--BB$m`-%jDeo`M9jfJOcm%tLiXpn5goBnT9bs
z=LE26+PZE_EGo83l(hh=nU$oH6CzfQ1)*e|@?sGu8Jvu9GsN@Y>zIbwtkDvy(6nym
zTwvgB>`Ig*z#c>8wwr#ryAwiCp-sO@#v?!*4OjpS3SVRoe7eshBmn#rL@SHMPzsA@
zYMqq^veSnucw_t8gYEv&_*^%CQec_<+yy4@w1@;$*eJJi(qXXYRZ*2
z1QBK6wr+B=A!UXt_1&zUSIk+(Oi(Oaienf1v!$h4)^H+uy(bqdzh~
zIlgyt{IF@7d-op_B9<`(HN`aa-8%Q1bt$@9t#+0*B5tnX4W$E`|rO$jkl)u-g_h(hB%I+3;tq#>D~EuJ-L&_G^IpDbsg%a
z1`|LiijoXfr|(zUIOnjg9YF0a92FiYDWFkZRSIG`PXM((j+b`!fneLE`$wm%?WV*O
zs46)dx~gp^gN2DM+Xeti5`auH?$q<(!i-`?gyf33p5}HzJj^9eX;&;FX8c3dEull6
znm6qD(W4(GB=lq@o3bCXmYgmq&sy@(ZL6mDzzkNP5vfMOXuYR=Wp*W{R7wHoD#sIA
zT0{_PCM8T-N)dGG&=a8o=@da3%|N9N&ih4G*BBgch6+84jGLoU_0SfH<+M8kCRG&`
z=NuTNoJGVuCoKd-L}QM5yQ!*bcX#*v_{;~|T|SX>_uX&)<3IepUrQz4eDdZ`{q!$&
zcOHKGjo*^De)S8pn{?%8Kd`GF7iGEoZJoUut~=*4`OToQNFW&0`+FOYeYA$_1H~(~
zWA>QmSFIlykVDf~#%$EDE(e#Z-2~;@I)Ax-@0EDIUBABAn?Lr&pL+Z5c~0rcCmwIa
zu3x$Fa7_Q`cmHFdzFXTO9PjzlOV`RQs(3x9v)Mai{|iMQ2iw^B3G@e6kteJakGwe#
zH95l4I)?%{le)RD0a2&d~-+trh_}*qaJb3uY(>EVG+OHb_oh0j0rYu~yn%unyp?S6P`E6>i#;F^zQ
z+#QcrYCn4AYCohg&*$u(dG^JllRLNXzH|HbM<0IlgX`BGABX(fcmHYAG}~@1B30!d
zJb1v&Pd@qhvD;tk{0|<)UvarRef93~G45TRHT!cphQpoFQygN`$>n2RI
zyEp6b*`@Qacw=+red2pDUGpo>y6@or`M9%_E4LW{=X6F`F3ajB<=x@v+On+!tV%kw
z^1|-z7#Om$$^~obZx$<-+BMdZC74+;n|&K`{4d<_5lDq
z!rc33Z(je!FJ5_Yd@>9}QT^n`?Dh9=MPS^Vz4gwWe|+^O*xd)!&Y`=OW)EQ;qu*_C
zs4K&g%`Xz5i4tI9X)_7AS;5Y9<>HPe@G3Y5n=E>u3W|mZg_PmHpv-LqNXU+m92qp!
zn5swyU?MgtV5%Z5#xY7ZOh~K%t{oOTWp}T+^`^u`2IgI^SQ-k*4hac%R)+O(I-c0Q
z5BBmWyQpnB4$X{1+q660v@zW)^)~+adC4QrC4l}h
ztA|aX(0~34Oq!5Sf27kllPLf&vJrwB8JgMzmiZSBfv;Cs&dHrk
z)$Ue{kLIs@NH-d|H81x=*;(aHU$7Jdb!^O?VZ={0D!)FKFzjb{358wL1
z@=7a`F751~^XF&l+?QEflW|wNx~@YA6Q(?s3lxd*h0D2I;G8em(SAfV!>}ft)F%X~Qv9BE0t?
z9bN3LjNL}@wt+Y*KwC8-hX6WH|OqTZ$4W%r=x7U
zvynP>p+Zi%3n8eue(V|9`+(raL3XO*ToY(UOsV`
z+XXZbG0S*5&c-zNbY9Q6JjBvw2s^Ny^Ukn-*kc~dvUQr3q42n^#$$16X6x$4$L;lZ
z*XMh7bfesVyt>_lQ+0*JOV&%ad8!$9_lBId1v|th$?b<{cogTIU8S&FRiVPRC1aM<
zon;D4jGxefL2(RxA~PE0q>5
zd7ajvp8W_gn>9cocH@vDfy7eQW7%xu==;yB004jhNkl6|mS(hu7vG%;;GyZL;co#(1rJ2#Fc8V@m!+l}MKyQT>CLv>~&^HRp;QBxmzTHU|4e{^zs^x|hf7nOHiHnWuR=_fw_```S{^V7Hg
z;xGP{^CMi&xUFxz`Ob$Q+b2KiS3EyFxKYZG$#KwJ#%>HDu=gQ^QnG978GhO&A6jqjhF
zof+lq3F|6=4>cmiQBztCaRdNzj)?j=0vLPuNL6r=(1{n4cyTTu
zkO_j(#gmGR2pShAg*zD(Nm@js^l=cLA?KVjnv>FhcCy<9G?T{*aU{nEesjj#W`
zlpk*5$}`NDJ4pWd=fBL%$H&JhkD;!hvmy5Ld8?j7H490yWCHWSb;EG9y3bYXYL7(0
zF?uXAEp{;uotcCXntDDAs8A6@i~~$^D8z{%;+#{JVHiY;BbRgRW8XB*a=u8#wwo>)
z`i4xcn$MlKIcMkGrt79=z$8;9bgB^y&B(E5?}nI;k55D_giyH((W=S=sF(sx0mY0U
zrs%8s;;u<124cWAId}w_FV-ij4CoxyAxxMxU%4VV<^-VZ*m)0rD*u_86cx3CYRs&7
zF#@Ncf>Lx!O6$9!l;WI2CIrZ-lvDsf^F*N|Mn8ohixy@cx}J!GuPy=-M5hbroq6w=
z7|~sXC6p3lE=3TLfx%D(nL}OI&bfYb41|m(A}K`$bIv&z3W_NvE%mG|r3l(=cizWN
zI5_8;P@|a+B9cwP0nKNNDPmR?rp1_O7QtB3qonH}+QiigQwm6W2>b9zJ|XM8UT^`#Zb4y8w_<5)opW2=*}T%TxhzT5M0x
zL}2D;pZV(P@%_!}f#$6l)Ic-#sz5V$82i1=_i6h^l4?VjSM$d=;gILgZPI{UU!nRI
z9ryrljT|U>AJmYCRoM>eKzwcf5>JvDQv)&wNQ8i@CeEW8paBANQ2+#Vjs%QMr}&o1
zY=TTim~&=FeDM&(rm0d&qQ!G#Dk7}lG80uZa;&Owa5{_!HeAyBGiSqU>mEEqA94R`
z<7VtiDRrpY)~7sK_FxL&nnAJ)F
z%NAX2Y>1`?ITtZSRZ#^{A);}-$m^IMMpG6CApnhH#k)EuU<@Tg(Pq)?_rt08UQ2A6
zSxQN%R)M~=>2X}Ll?a{@k;ZWhLAPBQM{M?&RZ|5Q=kqx;pPru1_HI1>_+xF`9-lni
z-Cf?fbNlwkAB&d5YtI1CyYIfc-JCl|Y9k`AHyv1R+ZF)&t^?3n8@!_w2`C~Fpb-^j
zAvW*5^PGlUq*yUTWN>zYkFu(+0YGCMQzRl}1jAg44JCvCgx#<$rBqr?v;L6Fghle?
z6+u99$ui2>YO51>31FJnr_M(OS}ciUX0Du{U}N5q3Kvrq$XTnpjxiGOtn$_;1ql%L
z7E2XDMJr|*3YD4n{dOY&q`;FpvxuvaRfE$yI7nG?MiedCn8z_rLS5(G#Uh|&0Cvtz
zReLjY&Uto3RJ53xz(q38r+{pSoJ%Pn1=K{KSVc5%0yPa=(IQ%6DOp4a3Nb!BJ|&{y
z9Dr`OeNW?*%s!h#vZ9!P#E}o46KYN)S`7dd@$?zSR5*?tJSEx2=u>eXpV)D2)y!Sq
z8gi)2EK3rxN611@1+K`)Gv@BF=QKA1m!
zyIbxr{>mcEAIMYEE=kYBc~^KwF8Fby!w|q#gyJN3eI9Dre4CstH0R`M`nI^9MAzvu!#$<7WTn
zp1$$MAO7uM|J#4`V_$gYGhgnux~S*#x|)6b-v8n!_N-m(A3PV@`MtCC>kn3c_mAEZ
zCQHeOkh+sn*R;T^E??_DnZNtdx4(bd%$iHfO9#8x5{9q*)L(q*b6*{k<}%cjr4%6B
zbXzoi^695X&IXZf9bawKSKk^fAz5ew#f8*cp9-OzI`L}zEzxE&hqZ;j_yAOWk
z<)2Kc+#4RuiBq%u$9Hbuc_1%6_2kMg53h|7S6f!9?0@(3U-?&m^o4up`Ct3@e*MAL
zp7ivuzw&&@xKnh{VTkdqH{af@wog3v_zzxt^{qEv9fp3~jPrRLV^kAnbdGXNZ@%%y
zi;v%Y=0|_|zHRTFma}a$d-BUt>FM!(SJ#pY7OW^Nn%S&rgA1XMRCD#3;)BmU@zi!4
zUcL9xV)xo-!^8WV!+udeSZ`KseZ7gPJCkPS?ZH>+;mv0+J$dEw=KhCw-hbo%{rmUd
ze)IZM*8}}JY|rmK_CML|{Mh2z&7!_M-;YmTfBe<%<~LVo%g_9)(yeE5dOh9RKYhL2
zdCk*|F$kuN!`%mW>q;J;9{t^4|BV*LDOB;+NAEFJAH4tll(%)m0qtwQdFT4gXU0s=
zz4-aZo_xNVhqHeD(ZkgTpWN-Xt8Umn@$^$yFYW9vKif33)zO0@-Jgs2(_eb*+uwTU
z*I-_8uIt6_`Xsei;s}X`#&2gd>zymZrczG0BDU;&osu8x-Yr7ulw{i6Oz37nR7B%H
zu?+VJ)_;*wW7D*(Sb{SVBScV#0Om>@a~8`?t`5oyGFCZJVO>|py_O;sNpJ*cL$p*z
zCUsObi>+&wJj1Z8=k3l_k-XhBS;l!4FmN0;>mgy-)%^p%Jg9xxj@{|mHr@aF8{hby
z=F&4?`srWznV!ZX7+hfAjjaZQt!(y1Yr{{?WsEwYQG>rK>yR
z<1hdFcQ=3g-rYOvdQ&g@eig^DjKwL}4l7=&OXQt^lTx5`s2Pbk)aX1?NLYYD2_Y~V
zdq4u?N3QKh#6$aICqnSgc`q(X`a%N#e2@SlAR6;j+W3hdxjNj(s5h$r}6ywss8t`
zoV>Oz)vCRzRn3*hc4y^wZ#?$O@YK(~{o+#y_fO*aIc;yQZk^6|`YrADZyvJMcKXy4
zi=X|ef8#g*-mk$`y1IY!XMXH2ZqLr%|L~h{y!HAMmoESEzxK7fzar0{{|nRnfHg7tPGH-u6|^)2_)mbVQEW%w$9aGINe-Ruj#n3hk`D
zIrMS8IR`S4H1CZ8uzT$v{}8Ew9dWTn~X6vp1+dIvVB+zq3@In`M)?ySI+kA9vf&+#C#Y
z@GS*5R+Or$_`0a(Qd7f;BbB3lCBV={N6hc7sB_HTne^e3$3;`n-
zq8O};zEO61bm{Eg9-Oy)$2li?{z;*|Fap~k23VQ*{aB>H!NH|QMk!Ws95Ic)CJ+Q>?;9jTG!ZCzQE+k2g%HpnPxuToP!bU_6)EOnl7&tBr@#&_>JH`V
z;T2|oOG+YX<<`J`pn+0rf7L2F&!`isxc6&DiBVT
zDR`8oQ&f>46AC&bGBjrMV9rxE$T>125sl-x*=zvF`^tOQZHHWd8B^**eNk9Zp_~g?
z6cM|CR*RV}7AIX^q8o5p)LrXlBy&Z}e11vMyGns|WTF_RTkgfaEr`@_9G0b6f3spP6@
zn#KI&LBG=mCK7Nt=Mwv4S)Xb-PB(MvXO-<#fr@b1HX+p<_H-~5WduO@(-ka}i&sWdfc=~F4aQX4)(DdQS
zy@wBO&zAhs3omU>&)@w1d*A!stIs_C{Aa)9?w{Ok+9jJ`%st#ZxO*JW`|T2;p3e^4
zb%(Ury|i7Q&t|iO8T_Mv{I@sTqf)k^@<79fAG}dlJGIxq1<|_AnW~yVPN2fhxl%w?
z3_!)QWq@hx?V6@~{HZ6IkwC|B7>2Yi+v_}(-
z36q=nBuFeGrIaKoP)$rmrl3|pP0+w01JJB)*jFNw!74us-UW7`7Gs)P!i?mI4arrG
z9gwMl0g$2t03uQVAC@8#lk~$#lmOK-61uwalOn=Anpqsj7zZg9*AzienOCrLlP%}LT5OK0@=9~dYO%ZAGcLJHR`h-T)f8iro22wQ^VwVP{ocKMpKQDJn39+h13Qef83dJM-P^H=lfXbbmA4->gnV$1!$HkaMn^dUtoJ;7-ntS7*nj
z#R{YdV|U)Rb&&~Y6im@qEg`pUTZ)+plAF!Q&CBLRlY2M4Z))_ij=Et4fh1
zU@4KRB5++fBbOz`m}?d1oEb`%z8@XBwrURd4v9#G`|)J69#TwPW6DVpysw(ZA!6Te
zr!I0<6%_yzQ8g2T5)(7qv>qV@05Qu`vAY<8!ITnCFhy=S2Pq9Al5?CEMWONl7SlMJ
zwT9p;*S7U^Q-B#G32KZn<><@>UlD<-nF*2e{$kjxDrla{sy;X$+{Ck*CM;s~RhTYs
z6=tHNW}Es|W|pi-0kTZwm^}apWB^4bRf$NLXq^?Yb
zej>^_i{!cvRqZR4y__=xo}ZsrRa1pJgOsdhfXpJYSS-dA&8*w@v*m1BUe%0n<+;33^bbf^FxK0K+c1Q}9kGUhML}b`*rpTih
z2LfoC8eK>!O}YjJ6%}G4Bq>GFCXN_QVE|z4Le5iqhlxmv<&l{SER&fRMDHDus^wTV
zaRUhKd}C0${#;duas5bSUzbugo7FH3Md}cIDY@^vtRA^KU-w;iq^is1003Rr)th+l
z!G~?zmMpuw`wt&J6tSwR=Zot7`yUjGb<5k$c`lwkm6B%dau_-@Yi9uf`eB2J?aZ6e
zIF3y0*!fw@%&Yaf6!DHf4I(#_lmq~nc}f%sb(O_pjE{Coph!6;Kpk@e05;7z=bQ=M
ztZ9j87`v1bJLlM?n8*bvpi|+1nVci%fC!mNDPvBo7o)<=SX~ur8^=7R5W>?>J$G_)
zve|5y8C>edwTdF5niW+6GvfKY$#FbCKVQxlyUQH{P&LQA?cx|mCQWs4TnLTOj3A8#
z44b-j&ZQh<9GJOnYb}O|0*Y!=6HYc8e0YFj_Qzal~CA6G~sIBUwv)&VW
z=Nt#`eJW~Z=v?2Q)OF3wCX&ZdRYM4EUEg2#i_jkUHdz_0jKv&KTWa;h%)V-zC(R+o
zVT{?Jn^!YnHzVd{$Jj@l7+sokjyZoCeC?boDnx_;BI3Q@-Q5*an@mp)o^9_J
z3xTWHT)j8!eRRCEOE=@&X)c?zRZ8Z)TwHoj@KcOZ5
zhnT?tHl=U$yo&vOI=VEh4$p7Re0ngu^q$+_&)u`{{DTv)dVZzr?);{EYX9oh)8YQt
zj&}b~|K30R()yb}fBm_B^WK~gEuVHr&pdsQS7~f_Vh&c1L
zG%W}5U~xz*!7=PvP7$z@fphnhpLx1IsGfgjncDMbf8;5ZKl|a`<3IS;tI}^;P0PLA
zrt!y{+h@F(r9b>LU;eX?SF`W-AIGp=;`pO`{Q0M@5jOWuPtLY08a6GbCqMgTZTI&d
zfA)#J%ZBB6bK1_AM@J7)bljZo@6^0=P_+#rYPQX`ABF+Zs%Do@wr{-qQM>odFaNjy
zonaX2X5rmOU;g|To0*t>}vbWXMg;7bN+vSBhKHryW{)Az3X?m+KlOn
z>R-Nj$!|70IsM{GFaFm1@BO*CdEDHKq0b|vZf|dIZ+Eu|A?59pk3IR|?#FCp**2h7
zJ6AXDFm#@&34y47c<=VJXWx41#^=BNE@xOTRv#@6u3tI4jBN#dY?Rtrof%hYXohju
zh0L3r?S#X>`_1>yV%%t*@2vaLXxFN3siC^MGpoSm8moN#mHqq|zxcCzJ6A(f^D95~
zrC)%}YW?^B?%&aJa_RE!>tFk|9Tc0DpZ&vMxpV*3^;q7Wz3@D^<+WY%?kHTl|HRK6
zRj)jE{YiZH>*}l0TvB7wwC%Q|Ed;98mC6S*d*v_
zr59f8lR0yu@cdKHRZK-Kj+vR4i$zn<&d*oF5c?~$zwirx;jbzJrn5zQ@$}B!NiDIy{{^#WjyKE_lIsZpuf9&Rh!*85BScv
z&bJS)J%05I&plcBAnJ1#foziNx|OW(pCz>aQ$V8sRQ>dy{D&zAkx9`E1kDJ5%nGH9
zi0FJZTV8Jd{N-!y_wW24fAGO?Z-;G_FJ
zFOVe;VLYkbK}CWUuyIN~xu9N>(`7wl3s6`}(vmGjONo(l6@U;Kk(mwI(21a`e7Z;oc-X;F=0Q!pqZQoMybdjOt?tt+WctNPxn?cG<)-jh0i^229-db9V8
zF0TQQXgp@bF~_U3Fnn_J&fh(ZZ&l~FHjo7E%I;_Go!$*K9A1Cnm;dtLc>d-yU;oal
zhlhu!$G2a8>7`HJd;?Mb<)8oWec>}F_l`eEl6G3R4G{b2s&Kem3@K(46+K&Rm?xbb
zx!@gA5sSHC0z;S*zX&*ZAHdAeEHkr+WhrJTYO03hh#*TTr6_{;9+5<(NZ|=iN`OdH
z`)v}3tBr6@Xdp#~N;%VVEXiZLR?R7hneL?9(HB0yjguvBspF*A_?5v!^K!dyx!nE+Et
zwQHY#>c!*ZlLz-tgX{M9t~gKIK07FxR5>IfGeZ?LQ~|$TK@l=x^5`6M@EnM%ao7Nm
znIVGnm2)oVTvA&1=jOeH8k}c>psCVgQO$c#Oh_Ub&1zpy6Hq4Vhg>p|VF10$q@3<$u?-B!)QY(76*^~UAOjeW&FTdz=M$*5)8hWg4o=S0LAgW4q3R25ZY
zX6M*@YTBxu)iI@_f{tP;DW#NUEL-PXRaK6;XrY32vyf2&GdUKKF{P4?*%6WCqN*jP
ztdffqwIRm1%^oZCC=^th5X>)1m!aFz!PxEWRBdGy=f+iR4zxH|{NWcpgx%RfbvR7<
zM|S67&W8ujuC~MO!PWWBe%Gz6bgPrwZME~^+4#G^^S3Z8o_gk${k=<1JpNc7`g?cY
zU^k>Z1B$aM-23>x4vw%|ZO%XcBQI}HhS$FT&(6C6q26ugSFRRBy1aj*+nmlk6Iv;0
zZ-2jGMzhVj`{d632(#VGd#6Xor|aUI+0b>GbM0Ulx0SC-&g7UJ0NL1gb<-dUFcG1F
zYA%TCoI4-8qB%Hr!o(N5Z0ADE8JQZ_pk4$oscJ4oRbz}I0)`HWLtyrZIC&GzP*f#n
z6%$i56D=^ULv)Jx)v1M$QbI&3W#VfE=Z8G5bG~wL*tRV*Pwx_(Cvr{Gtj{kpDP8bX
zd*{3%tHQNwPmSZaTAiJrourhAfrva|9!F+h*G<*T0ALup(XoM!T1qJ~Cg+?F$j&1{
zDH&C#R9G`LDU-+LQO#jmMX4bef|wQ+y-1TaLnRAGP)GaIXf4;4~;Sr4Y5Ox4yQ@d$FzC-od>`2|M`FX
z^}q2yian>U+#cV4cybRJxk}%9^-h0!)b*R9>VgX#fNU7LVTi?vtGtNxadeI+Y=(#g
z?}@0GDp*kwH6~*3V#!$w8z}-25juxJ=(poJB03km_ksOX^*{kcoi~>?S;^-BsSY&e
z)DQPpXAfTa%un2W>WSOO-+gfOdgRR~58m0``P>H|e|Ycy$D8$0v7wZ{syX;N#@wBs
zoSv@$oN7TTo>37IGNa_Q&3SMhQ$Z
zrI;Y1S}_$wtg8T^>&=-d?e6T0Br|L3T5`YHW+E~Lk5Ku*NTymuOpt(=^IclFn71h<
z0w|@J3W4UBLgmZ^0es~VQN=)v2&9-GAdxYlk^y8X)1V6IRn<(LBkzM_L{rh?DsLG8
zfF|z_^OSX*N)pp4n6?gqkytM_q?HfB`RTb@ayBz2a?VfvfSmJW_nBm}a1lLHkW5XH
z%sKCzt3s7hj{P7ys$vLEim3pgfr*-05z;IoA{hx(wf2Esa0X%#0j9`p=E}@cNmE(u
z(dNb~stv7}lp$eJPc&{jB3R5v!vuh;G-!6_LYQfwrD5B)`}_N=?dJUayp$YcWai1<
z@o9Ttatb1XbLPFTYCk=}Qc5|;QZ!|VF^*%bC$^HS9Q#mLR&tC9ke$G0(=8SY0Odo54SIKeN#WO{N~4-YBjbD2V*;T^&H8W8T$~b8DU#WEb7sa2!U+s)`A(E0e}o^
zRhbL~C7mk)IOZ;d+-(xobOErz9&t)n7fnu`HCA8Iz%+^@HNYU_rB
zCs1Z=XN@Ozj)0oYW(9;ql(RbLi>j(J0uedS-g_{dfG}AE3;+o5hZ|!gny@oW)c1Yx
zb!_=ysw{uUNRw;Ovf)*t^NTx-LbKVP@nQF{A
zBf;cV6v<3snx6o`dFR+of1FOL*bi~Z(*-Fx8dgRdZ${!;-J;Xt(YM$@Nm|7SrIyv)1^*YLy*&)u!tgoQHVqaol72
za=trHr%gE{^OxgJllRZtXWp29adG^6m&-S4{%3Btbv4_+%k}5ty?XrS)jjXHd+zhk
z%@*PAd+!XfoSm*V#)}W$tD74?_0_*{<%wtBc>B8_zW?fQe0R4BsR?nh08~2u5aVs{
zS_i3?_wO4lng>s@yUBs^u?Mts-|dgh{*@Y2Qq?dDLI~DE?pIz>ecfQ@m
z>VS7{RP&{AxM#b+_3k~5o6oFIfBse(;+)Q89S18jC<=QpcM*ZJlPKY6pg2dc}1{oPAf&sUog(;~P6RgC%ir7JmS
z=e!hDU4C+Uwot0;db{1$b)8j)m>08I2-RaxJm0icQ7R&(yKACf`qEdLd2{3Pv#ZpV
z?K&))k)XeIToqc5@$iso)s)JwHvIv@CJyV}_Ivl%Kl$t|cAKvbZ$5dg`?;r{?8n{(
zUqQL^;JDkaUi<#HPEYQw*XtYCZa(|OE4Odo8PoA3_s)sEud2%D9KEaV`zx=$`t81a
z`^$%4y?y0Jjt8Obm}m3lC<-*Nsb)4J0}0qSZX8_!yuT@#cdfBHj$|#A
zzdjefis={Ut6zTpl@hS_?%^jVF3^?hhX*^$mp}9D-~H9!d+pn|gDi?JZ+GE?!_Pmq
z8jt$#y3NB~Zuq^oLw9dK?C#EI>#INA@niF^{twd||72TMq!_`t!hVR}s(GzhwXU1f
zlk*fy$#K2v&1`4B{H1^6f86)|om(Fsou1tO@SWGc{oBDCs9e8(^`#&E>FZB^0k9g+
zS7m$N)XrQ@_X-c`0i1sD$;nHfeetLFcKqrnR7fp
zeDW?{k-)WH0n6>!ncopiv;J(X=3eE~%Ce!F*o9O2PlBmW)P5B+RxO~anz1Mi%3yq9
zxCJsGQ?Hc33`HswLIu$Cf!5nH_O#tVOlCMb5@v8r=4cI2mO@qe1p|EJ@?Kc0d)*h6qC#YD+cGm;XjBNIbr!)f1j;TV{3Pt#|f(qa+H5BZjlPRh*482o3n?bUFe!f?m^zDHo!>d*6L_JfBzBmczGB_~zC2
zu}k$_>DaZa#$T#Oe&+G#f8+Ol?f?Aon|*ONb`E4*|JpzJTVML(&((GPfBk>{FWD`3
zcdz{5?Qa~Puh75|xlzo#sgde#)2?`>s#(NJX3(0D90#iVzDK!WurTukKR^VXki)8~
zK!g`cOfxYAgQ8$&NQQV}M__iJLK~*I)M+aeb1tP+7g2Lc&WGTfbEb^mQnpeu0y`oA
zbVP!}uJuF+g~*o_#tblw$O%b?F^bq?w)8btHB@!|;oY;7lam);c)@#rX@Bpr#~yq0
zt=Hdw|NX1ipE+zRmi+L1d$b;A^B`Ov!qHBBwW_MB@>Nx_!`UqLeV20%+z*+Oxpr2~
zDT`(!7k%9}D|3sL5{|hJ<%I^M@8a=;z4O=4aq)irQ&3;Zv)$sH5goU?_4=cdvSpVb
zREvXpd+zg@g_E9kD_>xHZ&y_|C*bF~d;jVU34$&~%-)62hz~%7qmp*c6%pdbH#?G{
zAI4&<#_`~HhKaghg=LMZ2#%m?@=3rTm3ZuyY31c!ji;A5jkc3XJT(|Xp&b!}*k{_-
zrExWaSy8bZnOrtKJ3HNk3emtoa`s3&p{-rZilGfTXDd<#`XML-pqP>Yqo0TX7n=wX
zkx@jZQk{Z<(p02VApVWD~TZ%S)gNPAj&FOkXgV=PC0lAQ9ggeoRA_5u71=s+yD%8dTmjRaNx~|duL={e8bzO(5n#x-l);VS;hK`9G
zG7(YV_a>zZH8Q5rR6!JSDqYuCJX)-4h*)4Pyz1K3IP1GgwF4IC{oZ1>s4S0Li5i?w
z@wjNUzibXJT|#op{lnSrV%8p_>$>sic=P`G*~&wJBAe~;2e)4T!b@Me_Vmkxwb?ai
z3eCIsxw&%E@xu+Aa<{7=iv(qCrxP0xhuU3244sSmF)X~w=GtbZNeEj~yhwtoPzqVPA
z+sy-&T-TCw)}#s+W)-@evqM56R@I!7su7`zIOjz2$?CkSeOos~xM*hpAf^r(iSQz>
zNRgO{KDF<|1gt=O1S^&c>!M7Z0GrC;m2W1c@Z>p|++dT3CZ&X?%uGzGWqNL(o}Up>
z(=-NFRaI3rUDs_-jw>dnK#T~aszrevBjLS=pAb=z=qqNYB3Vlwl9)+VL5wuSs0xTy
zB&U?jGa`i$h{#lnm8z<|_rq#+F)2)?f5oY>G2NSB1R!J+N647jPz?+an3#wZM2aZL
zFbqJ(%)l^d@N>>7rG?*Z+KJF2M3_?QhYF`^
zMBHqoZb@wb%iC?gT<%EW%ZE?oG~T~|ce`C1dBYyuHOCK-~Y9r`!m1v;=HX5mLh%8
zq>*D5LJwqS=9!o)6I3gq_H{GIx*dit<(}B`7#&lfChM5v5bByha*3<;!z5)GM`o^&
zr}hsqOvQ0T@ZMK#C6Z%|rQ0%d?E|4B(0+RgK&?jtDmqTfZ08&!1?QV8AfjmyErgBt
zRX6gKum0fpK}uQMWmzzX0xV2#Hq98FAz`Y91|i6mr^ESaR{Lf^>lw_)oHzie+)g!%+qy7_#w7b
zi^O46;utew2$c)I^1hlzMJ_V=l+~s*?Ic==03nD`$cNGKxfmW+IYHoFp2d
zLLH>8>$dW$S_MB}w8PM+JZ7G&^MNTi@HrzQfrv`i=W!gTIcT!u{3jkS)7wpxEC3*q
zXf6dq2z6aoRVC5_!0ztlsir!$0LNj7F&>|tJ6~PB_SlUZHxTiSH-6A{-IyW(Y`5D=
ztLfrQ1R4ZXg&(>H{?mu5p2z4U_0j=KDaEsoo!{nMbEQmKB^nx6j?gq_$qxHbY*Q>-
zKgeb_bXS+@yKPc1S7F@%yIEP==1j4!`~t0Ly~78W
zQ`|2Hz2vy+%eFrFcKA}U1B(wYmD4M;ZsEzvU`5e4Q7Cm$3lbQBR+Vq6%6rZ!ibz&_
z^0B8LJiG^BrKCQNbI+cipFTXg
zyILO~9pB421@D%NI%gioaXcToDBk;fM@Om}$JjItfcmP11Hx|G-MV#4
zWKu%e(aA$&aaCJYel}|y+jOn+Ft)pU`&V}E-MeR^IhRsN&UxnBIy6>f$SHvqDNuj`
zQW~44@!rQYmMqRSh?uh|7iM-eEg&pOK}26yo_JyieJZG&ngl>oE5A4-BCiTY>}GyR
zea@C*$(k9FgCoBORn1uxvZ=Cjo*w}&b8Z}*ZPPlgJO>XnDeRJznYE9M>XWxaCk!70
znwBJGJI14#!5<*4-Sl{Ox&x8Li-HA(B$Ee87m;tT#Dpd1n|Ip2Bvz4zaG
z-O^aO-Ak7aI)rt9|7-vB-*4y5(e}*DTGwtPa9R31qHDVBb2_V~E;
zqf~GEo1&p^Jd9!Y(6yJGpWAu9v}ogadi3DVcDvnK>{*VnKizTdV^4qX%HiI-x4tvx
z@`dH$WBb?MefZ|;!O=Ii>wku>6j$o{>ip9D^3|9&3zP=a!d0gy$LVx@Z}Z-huY7e;
z!d_Q&?C*4UzyEI1`K5zP*NeK-VO($=h1>$%6l5uzuy0x?Dmjv9Yzo`)|JftzZ5tf3w|tI#ty_`0Zc0fAsL_8&A~B
z<&!VHJl}6?Nq_mrK6CZm&ENUKgV$C_$XX?)dfiw5)^B|0@oz2v+70*Mo8S1Uzw)o!
z-oY;ITVs^9q__UVzyF)enBstl4&7pR`SHg_gd!zW&Us2{T&?<2O4CTKvW?yT&Xp(q
z1A^cEkKVoi!TiN={fTW@WCj7Dh>dri9RS*xtTQY?eX1)q@H`ewc`OuHZ`9tGU-=LI-M{~<|L<|ww6i8O_`%VKKYMn6{@7K$`lCmW
z|2XyAQ>9goNPsDMkp2A~>>ivnJMsBHf7qX<_y559wVUmpbbY1Lc
zAwrR~nD3CAef=B1QC0rd`|kvjetWjN2uul@a>dg|#H+xfQBusQasQ{|R?C
zHan*{KZ@rEH9I3y0T6UNI){gqblBYv&fOd6<4^9t^qDWb^vYMf
zCjp%>NKC9Kf67_{|M`bpn9wF19Xb~jNv)i$PJZ{+@Qt(by$27DifsGM9y%LODC)Rg
zIYxofc!x~Dgv7JDDZ!tMjU^P&s1)2{J)Ae0QHcv%%s#LJfP+}_Zsm%hh?qGbLPZl*
z6EP)XkHo-a1O{jXIx&PExgY-656BsRD)OKHmJG~H9slq0OSyW0G$A;8knd6
zXn_f-In~Sn%z*!kFW$d6{%dn65&$WoAQ(_VR4@Y*Bs3!=o(RjLsD@@aTw_5bvb1u;
zaXEZqz5He*+xhbPyWgd>*jf7T{ow1H&4W2Ki^U?PwApMF
zzzDLHq&P{ciKwWmm7KHMBl#+-nkhmSvlwAA)Djw?Ap_a8`!~vl24>6`qDVAQ6+(30
z)2D1wQPrYqVup(3X;w8hVS*K6jDxBpaO@Cas*;tQq8Sq-VlK{L67nbjmFz_<#iBZh
zX&iG==@;c||Lp5Szg^De>rI+1<}bbc%9YEPKDqtTty{NZ&Mt&+%F~y&M~iX?IQC+gfO{sHJ`!0JI9udq<9IPlH{ct^}>m7J7;VOwI+u5zv>V_yL&
z@5t%%!|&8FmBA|?+@Al;uzbF;J;V_lVTle))t}F{J;bEDMaC|52}Pic#XfQFIL!99
zW24v_S}9u0jaHqeb{k9IJV8PlrIZ4$_I_5el68%`HiQCE##2pawae(dAtB3-ZmZ3j
zN37heM_WrFun|(%7fgwji?m{_Gji;h4P7Z^?Bl1+Dw_bWIW1ZWRlU1&
zxVv-k@$I|w*-p->Zgw`^`4~rmtXe#hNJ?3$tpF4a2xJ0*5FjA1=MVw_mQqYq3ow$a
zcpOInTzG#Wh69FD`kb1^1CXjo&LRTJl4G~+>$;W#xeN%}v`wfOw1`NKBd7twRMy9d
z_X3126*1>we}AutWwrI`1_%YVh*&va1vhqM@V@qeh>D4ziin?Rhg0j?jEQ2&MFrS-
z?_-RnP}i+v7=|Hun9t`#wCy`ZST2{l`}@N%bi-K2BqA<^s;ZE=X&SC75F=8cpe2fk
z3av_S;N}>7(9=Tqw+_XwG~1^a$#ZFZ!8{BDg?b)#E*F7T@^xzV1zQEKfZBnUk*Xwr3{~(q~`%V!Hl#zg?k~k3P6XL_htr
zKYx6_zV_6U=O@RrOP7YhRbhGKu>c;bnhLBEK#rXf)HR95m`6h=$a#!aRRP)LaRFvB
zEF~%!BGz?H3}cFB8grU%5G6)u2%ytAmttm9KgJQOU8oc;-2A05vvZC-JHdi70SkuM
zBjTjYm~syFeENOK#0tuy54R}DRtd0h$;90!05a~X3UaPN@AJVdtaTN
z9)-$@h=Cy6^im)S5Q?cvHifBtL_|J>B!UD4NPtBJ43q@`=JR>Zc?uGE#HLq7%q0?$
zDH;hOh#5I1#}^$wG9pAm&B3E@aRiK&4P5lWlK@+q(co9Lc+IeIus=35ciUVR|FlHo1
z=g^7fBAQUl9@R({nw#_U+_My0;rZEl0&GO{m@5|5xv$fG_$s?D}$1A98>Owgku`x=!gg%`^oGrTxsw@
zO9E8KAY}*+t3@c195hqS$0M_MlYG;A-*s!}sH!U8OqH;zWKAjWEEYLuiKF+G15-)P
zxjc3yr5t0wU5jL}fRut4G+CdYWhux|)qXZ_=1o08eqza4MO6Txswx09o01a3LfomqT1lB}SJm@SfhqgI&IM);)ts4CL2@xO
z=aHy#j#&3ZWEciBQw8&G+RHSv8B8Zn_Gy5qiqta}5h6F6%}OcG(X8D|X&j`=IylG;
zv#$UVi-8pDTNaUAl7TwMAq2xmGcsJHLLed;i8=sq3;<+`b-hT4Qy(zIE1lJOI@9SpKjomavCfYwEJn|}lvkL~-g;48<5TdsB
z92gxGHBF#mEY4%GIbdYVtqLMi0aI0DvrEkgjg%1dav+7;1Bt
zjYVtCHE;4fl#~(85jy}anM_PAgfJrX}?iMu`19VjF74oVY+Jz$x<^1-W;kBXdUXQR~b{D&*#hKQWe@}p36AqG3Tft0F=_0vZ$&b
za|r1oK7l|)>QGJNOhmI~fINvEUVHOf-}(MG#&O)+JNVq^J_i)c;L4RN0Cx52)vtZ+
zYm43b%9Sgd%}TL~F+%Q;x$8FlxKfkFV&)amhgmZR?52lxDMb_wYCucnbUs^dw_S{J
z90QRv1@9|FC}k8W>)2I}LkPvnxap@zG3HQ8$)e=E0c`t7UZsc>V^4}Ol<1uEb=dX;
z5ycp%MwatjRn-)uWMEBQ0YHpVOra<{x7^){F)|}NH_h9{j7m(goNw2NSXdD;N}h5C
zCIY?|6Va(6Fx?WO>d-MdG`0!Emr;wI$@%nQz|aNQKisS9y4wy&P0>-Iw1H(-&%F1<
z`?Hw?Sf8E^+l~O8b1~)f)ryIbXl_z75i2n*nmHkQpbYKg-tgu_2p1l1);PBdS%H6O_qrnSr1CsUOqL_vzK$FUI3cB)I8Kxn2UAWC(#`Ffal+x7;%X=aXi
zdiUw^3IM%p_M%&)Af8T~M35Z}`>^QKW?DI2(1E6#zeq3-lp|31|
z{ll9Fp=s;p(v_Vn8JuSWpp?>jb?$=ihIO{Ft#{^&8KS-W?zq2-&Q|9~x9;73`@O3NS8hJ{>@VJwU;4@~{@=d;
z-mm=DAD-Hx2~9{FF!Y`R@I%zkdVWG1%$Z%1f8Wu37Fb_I8;0
zDQS$+M(=%F)nk2_hCVPr9J?}L(HYVL1Nz_$chGOUt?qD{&yLdirM@%p=&j4chi`r7
zSAXqqo!)!zi=Y3Azxvny&DY-et+!u)m#^;ZhUP3D-y0HwuR~ZWG4$O*k
ziMkmcKK;{v$no{x=FRE(YH$`1&UOyx`=L2KT`|po5U_;cvW^c=AAWNGqn(`{01)OE
zUijiLZtmT?d*{Jg&rT)aORwOIPd>g`b!4;oVsY*8S}%sVa~*n~f4n~WvCsd+SHJYK
zW;X`0s=jh`)Ew;p*}wc(-ud3^DZO_sKlrn^fAyr^yS@C$5BFa<(JeE?W`20K8W-x3
z0E7YAaIz3E!k-k|dSNiUm^0OZ5h4SmU;w5-3ZO=c7yE}2yYA$!AA2}mU^7$S4)cYT
zT-voO){MnDqIN+jkyV(
z>6jIB-;N4_TCcT?#_G>qi=YnFF_0va0wMs=*A^UlM}ZgtRA5r~ApX#U<
z%J>(Mp-=hpG)clQy3v1JkxJ+U9T*CzX2W7AIx)u$EQ1nJK=#CFX!tJ(Lw_0$CDsZ5
zXrN$%sK7>8lZnVAG(!VL6A(0Wr+0SZNts<87LU1|;HpMFx$$8QvoR>L?er0Jj59-Z
zq2C;A-;w#1z1S^;A@DIVzjmd)=-N@ZbNN|K00{H-GTP
zx9{D#*KHHhClBtvJM`uJyzIEsQ93Q6rPOs@1;BLh0?=k=L=LnXdh=8ju@qUf9-{O9Qa5Tnx01%M|Fhqz9oKb8IgWWuk}k`e
zbsAbEXO~-VYF;!gLxpTP50jaA7^eQB1?NTCRo*vC#Gsat076g+S1fN#MnbKmD&Q>@
zOwiW4X$n@g6fP)2jNN92Ti2RvgR?3|IQnQd*@fq?dEW;zJCO#3+7*}?rBQ-K&}jP)YZd^97ff5P>`Ve;8`ue47+pgb^ITwov86{L0
z+FIGrAqpFSAfRJF1wyB8Dp=&4Q!0o!K{6KYZYf1HmRMxU4MP^ODtMdxTmTN3h)T&G
zf{0`>1r#s2qTF~5n<-K
zSpc}9FSAx|eRv13GH{^^^S#}O6gMlf%p{1GOCIA$Ak6I95qV-l1C>E-)4n9!bxVjC9Km@_L7^lVt
zn31YF4#eO+=hW48%N(48-Gkk;)tbFCX-VOG#N}y|q~zmixOi)fi(x4&8RxbXx@*HpP#b
zhyW?)(yfQKZEsw?dGGurpjUBY1}tzu!?GE!EJg5u5G`;(EGZXoR?NU@?DpDOMYKLU
zKCM6g;O+04j9AM5m%TrU)h)~JJF&Ia?oRV{x4-Y**T##87ccr?X0p#DDT)$Bv8Y}`
zl|;dY?Lm29%S{#Fkv;IN02`>9xXOlI1skq{457r5R1(F^q)0I{naRi|BBOtObKmXn
z*Pdo~Yk6>AQi5G0LjnxLXY`F0_IK}h&faIO|Nr;To|Zg)q!z{&wOYGz_3fgrg!uC>
z{soviB|9FikE-Fxv_a6X-1yS^)*e&z-SE+)PtH$|Ihh+b-+J}>Ym>Ts`t*ZD-NBP5
z^=LTMi^FG6Ff`Y1zhem=-TUn6lZSw`wK?trriYJJ?l!k~x7O?Bs^8ii>Eh~qw%k}B
z&WY!A
zE6gD$5=4^Hx{F>avQU;JF?fzK)kA*1u2nKfz=OsNkT
zQes5Besg0u+B-Tr{^>9OQrq@N$Ip<$<7W>Yxz}HP`{ARzecNk;&Kj-B871`=fUIOG
zB0x%O(Z*_R`@Ut)86
zfAsx+b`C;=@x&@@y4JI26y=DBGQeutHcf++lBCGUpcJ$EWA9do&!?06B`!
zl2O1FMMxC-kW$J3k(pz<7}t33g9|^=*T1;KFHZbDb*t5CV`J;!_)+)%?DX_}b7On|
z;PJu1!S3$v=H}+<>HP5I>Dk$twH6TvgAD)(QGC}bt+lC|cBQrM``N`hSF2U;+Y62r
z5#_Gz0PAwoT6=JCu(2^5464J!Lv8b5WQs}_^G;Ct*az-XVx@KBc!51lDI4v|vaIUi
z$@CZiNvYYQxe%Dg6f@&RJ}(32904G+q|7lUXAL4Q7K^^`N8_TgaY)7&!_WvRdh`
zZBV;EjZy#r0d&r7Y;L6(`o8zxV^I{&DW$ru>$=X6Qc96hFYLf6y_mThZAl4_iGuYoAxX>=MXIiux$pan!QsV#mzhOdKuRo-lL7<)0*r}an^bVr
z>$C`idKg0c>HQA`qIRRfaNM_E15||ukR}yJ+33_o9!;vtwei#b(fP`2RXTKq@|nR_
zdZ>!ruaEiQ`
z+PjbMeK^{FdA>N|=G>CecGBdPo2-|`a1hnFtXIpU#!ppWM5rOU)c1An%F!he;_h@I
zwNIJQ5D<_g%%Y{Q@?vp3u8RBL{bRf8cenO7wy%yS>vcUmc>Zwf(q3f$tv~$T@a;c-
z<;Lq*Ubz~^X7lAYyMyEDYT6BzMtJGURij3_*PlQ95u4&`zxo%~#=DO{`#_YP_PMHe
zKRNtt3-$VyJ8$d`e)^U5E1Nst{r;b{-O=NRPY=!K&UjYt%8g$RGThwVnNeJQ`tjqZ
z_f1K!y!x7eJAJm?+S;;iP`CnEvZT5y%d(8o8{>+?<-7!^hvT)-bZ@`&)0`#G+S#7};Qeo&&z|jGo&5OYABARL{^XB-<@TF*-md;HhpQ9V{A({y+CTn3`)QA`
z5l{N#*?xo7`;Wg6L>k%48!zQDb=~=F(O9Qr7S03-HDOUsoGq5ir7>lUNd&E}SxoBN
zkuukBy*=8!yivk@(r@tPEmaNbL5^wOv=XD!hGLdxH5&Q`f*aN`83`>>&mFfj?u&9@
zCWo`uh%aZw@nG^2lh)jW#O29cu%1WuKbpPFLxEBN%Ah|eaF+xTm6+Plb4z3Y^
zB$!hckuulK?oOd%)r7^oZC%Eykha;d!$3X>C%J$CP<9kGqA7IYbed1X`P!&1wyrod
z!?lf}GNnLsauoU%7)U9oER-0mh1@mm%%|*E9Y?lg3R6Ytw4uRZfC_qM;bfJoYm+4Z
zzkH+rpEna2x#<20h%9{k?D^prkDokT4yV{(_I=UE6YqmFLO}_V#+0awt+Gf0N|X}L
zF-o6BgVCBoE|3F8111AgQ6r0hDz!#Zq9I~Z)_{-#AP@oRvUpu`Jt8+K{INmOf){SS
zOvIK%sUrSsxw1d`ryyc$U;GYVgaKYKh(t68X8Flz0snLq?R_hlfC?cca^Zs)1cc55
zVG_;M1y|EfzVVa|YAAOSEE6Cx@XgT$0_PIQrW0fNj1dAzlv
zTP4JTQg0>_yzNfd4Lapha1r!^eONVa-cM#v)Y*@=!}Hy(uZVp+#MZ+zX=L)sFZ|uF
z{q?W?>eqhf_kQp9Kl$X@{l&H2+oe(?RsYJ*{Dvyl3%AvE)74`7*(X1I{`}Kg<7BY?
zC?0p2jM2uFb53E|fr6qa4e~|!BLEO;QUCzVOcydRB1Cl|mjeK$6ukH>7n%qpIYeVjlFXcyeqmfziV@L5
zjo1mHA}0{ETPcFrJEB%8tOrF=Q_KNXWI|(%6%&1C=~j*J1I3(O>9oy*v!|eqE~*q_
zL_q*0PeBsJE-fC#8i5px|~It1=@@x!p{a+o{kokjArN6J*IMrHpwoBoMwxGSqDo1+r3Fkx_Kf5TGC;DWw6>M<1ilITES58iXAF6kzqjxFaGWv-7F77DS9ORaGfO
zM6Bx?!7rDMb$U8GTwmWTD_CD2?eFhbO;D*BXeFY3?6YKTOff8_X97ad0vf>}qC$sC
z63hiM02yOYV~jCp0p!BENDRb@E^q&CGOUfp
z)R7gzlpq7D1uj}2PwstpXZ7I5-qp50UaU^)dXLNAsnYuD+SRvque>TspPn7CbW`uR
zjg^M8gQt(D(;(*6cYkgneBPXXa_@)pvoC6^hHh;zTJNS$-v8crRdI(@WeX+TA3l9%
z3`6V>jz0-umO?l^+`qbasi;QN{Rg53M`!b@P_dm2$D2=|9lBDJl}kIO%2^_Nt#yhSNolPThb)wtffEq{
zsU$2QMNtr8h&`t)B6ML9CNY9qX%guB{^Cx2LEis~LNR!cs0g(XBfMw@2qGzUaZm#Q
zKn8(}fn!dYIjI*OHPXrwTg9;toku|uBFM}aPK%2jo0*j{gv9y9DNaPHs!AzYWf2u3
zS?!2Ok?A^rbo`{O)c4;1=BJ;2PDH=*%fEi-&Kt3L{`}yXLK=+5!cink%1#wtB!rL?
zX+r=SiG|f_*=x;sK|4q>grJOZh0BoH;|qfw!NpA48EYyN8?R9jX+o4Nz<@xApoREC
zrE(DppcE1TXwD;2tyT%i#URt=AsHcu8=IRkb)SFno$vk0(`#4W_*;MTf4P0>#&^E|
z`#<{5-#?qqI4B@x%>Xb(=}C+4k~LthElQgtE>?3NBN`3t);88JZCxX@0I-;!Jb89E
zb1aS6i-_F}2t=qkbEQqv@)JmJpVCDF$_2puf}5W``xtZSD$*(mw;r0NpG+nU8B|dY
zOOdQyQ4~c{+M=*5V6;^xgg$nC2L|i9P{zg>+qNMhtBv=*&)zv_ECB%kr<9mMDaD*F
z=%GrZu?jMZNZ^DDE|}y*7irHBV+uJyW~B@ugxD)GN~zM8#+b~JBWDDyOi`7|L`1xZ
zL}upTQ%)k%Ux-%Avb;D@T`=1(atD+mW3&&Q0#xM47-!KclTzyXzG*sRoK`8sjzyKF
zq?DFzW01ABhLm%KF^VO<2v5#a07yC6!WjvI*ax3_m3w)?|3U2IBCb@9?7g3CUe6&w
z-zPM$zWMg(OONeYcun=qh1Jy)-M`FT1`aA_Yb(n=|K>lrbqKz!xli
z5c&zk3;-?^A=vjJrDTj-TiYnh5)lEFu>~{CW^*F7wzdYqheyv>#-E>`r<6kIja93~
zvaU-a+TGnP>f-o#1pp}}1pL>9TpCajGD`wxQleCc0+?zSTS$Wlmeq6_7upPC-`ITW
zakH-1m#g#9z|=+UhHEkQA)iUOz!i{IF2HB^
zF?YRdj{LGYAiq(m($jin!cgI?YG~f3(6q@a)&&Ocm%bYowisRQa^w4zCa1LpA!f?a
z0;AEKb7zQ9siG+AsysY=9(_@i1Q?sH$q9?1&<0pSmZ7b3(=D{o%hjT)O&8jxjY6>v
zvjE#damW3v
zS?RTE$=&Sq#5&H5Ip-YH$c+Yr@v3Q91=LYf<--aAt#&9N*mmqGI*nkh4PLTGMU_%S
z+FO$|QRqBIsLh}#3g#4&S7?`gCn*mG1EtMHV94dY?Y{5Z5SUVoULz@`5DCeqj1V~`
zP6F0i#R&jvJ4h)(@;P@K>szyFUzVjdIp>^{YNF2>I73ksUBA>?1A-#QLEA!uO(Cv)
zigk8{-jJS>DMF%Ycb-^4+%o#ofg27BBBX>ARB78*g*LMZf^C(JQkscalBCqerbkUg
zk^7K+&f<)7+C<2yUn!02!wqG4)i2vLi%@7NkQ5=N6ahe^AhOk@$N^(Yxfa!j5LqcG
z6^5O<-K?Aw2HP9|(
z?m9@zxyAx@7R#AfKp9jPIESuoO%Gvx6tuxXcN)P&&OOEH?6lHOht#i{AV!zN!j*=+
z78_!xbXHb#1{N)hec$)3ud2#9LskQFRTi4e#qf_sp_UBYDvO9?2I4;FY>Ie(`oRHF
zI-6>@TUyYitc_ls&QAQQ)ix(oSPYaPcP(#@Zf-Q+2NQhBo=xJw(ivS;bW1Ij|d>jpgE0L=@_3ynFg=CaF=hS?hAL
zUOu1S-CrD7nyd|P0DeG$zjn|-ULu7ULf@zL;pTXKheh4K{?3gXH-3=be|&iQ`kQZN
zvV+Nb)3w%&#>Hh3ULRfN(el-IU;gx?yLG64^KbtLr^k1nKDs}ui@Tq{Psw3UZY^GN
zuYBdbKmW@0JKoP*;nbzm&py^8yMs4IqqSFFe)%;`SFc<+mv29N_GGzSEa%T}zxp-+oSq%z;ixPIB5G&%D)WWVgmXe`
zigiX+7dp7s{__Yc%A+
zXl0?yJ_XzM9ZPJcz0wm~+1AfZP|6XEMsL3JZP3QNIb~eBNV;Ca$(dV4Sl5?fg
zCdC+hkgUgQsFRCEV>Miaw&M=7P>5ZgF()kQUP5f+AI;K-#|yTPA*5#Iec*EMZZ(wG0s|NMXDByw)dNZD4h@U|N?9t#yMGDk|8tV3lb=7{7_<7#pC-~NODdq#~$
z57#z+{a63W?N@#(PGp$I-Q2qkSCqyw3>ZKqJEDPeF^<0WD}Uu@zWQtb@qhLouR>o8
zMtarXP|bF2iv-zSaHtE8Wd4V*^Z$^H?eHWsX)k(WQDmJvY
ziqU8Yc@=X20$`U|+0^dDXZ2e@FSGgO>z7O~j*7Zcv|il(;>ldmP
zrhj$zqMU&h2nUB0B?@WF0%!#jvIa@0at}(NFJS=42q2IVlmLam1Y|U5W?=M44gq58
zdw>5-{in~1u3J`uK4-7N5E@K5rWhdTQtN6`XvnFJ9h1sRhm>0DRSFQWpV&5%5@{qd
z-l&w%+=67pQb~&BVi3G&i
z*2xS5=a^Q|i?OThwk+Z>rReoM&lToAVdcts3c^$i#@r;O%iwA=Ja-z6rO=3qgg>X6>w5$-!F}rG#o^v&X#Qv
zt*03K-exUO1R}(kvR0)cKa!>A`yYCNRW^-F%vUrVuc`#UZ5wL
z2rz+MNEcaz0THOvNipb*1PCNTN*F}2;Ph{`4&WMqAd>;KK#?zqm#U+GGwV?)XG577
zI6Fo~6hZZzl`;fE0>b%1Xo&_zlS-YWtWaxh0!tu_N(B%PN^7G~0c4Pb5LIHx)FFdI
zU_&gBScpY4Xdx&!qekjHlnQ!V_Aw7v+*Goq^kN*J&7S^vS6X*|Fv>?J9Bpo0U1gg=
zD!n*8t*%~OE5ga|{Qm!8+Ry*|pZ$wJ_vUMV^2cl2o7YdyPCxwg$6x>5Z&quIy14wU
zZ+#2&>1uU+zHFSW%Zc2CwX^wZI`5NcV~i$CDfG)G6DX{lEuHo`=iUP#YR8ZqniOKA
zlm;*WR8&<;QJ0DYMd0FfJF*~KM5V|BMgS18F=q`40AeJg98x5V2x620?NSOMwpkn~
z3yMO@39>et2?I0QNQoS2h9;3PDs2r&--lzX*GLtjRzg0fDkW!8LCH-B9DpP+kxv^1
zo5C-wj?S3#1%nLALR+&E;lx!xVeBBrh%QTJ6@bY+r+%DF${lKEg&>NMkh1TW(O5MC
z;JjKcl~Sg}mcv<$m9`O5jLbrUIz$2}wJw^tC~!HJI1>Vb0gWy#Kikd6KZJMH*@{4L
zywepb=-w?41}lW{$j3ghYCFqVfAN$iW1bA`h+08vQ|j{q;?c0X{~EXc`1pK%5}L+R
zZOx#KA@NdEFaAV4`PI4Z4)%7-ku!^~%U1QNE&AB7Rc0_OhuBXC1_c07xuT9CJLfXT
z&ZsViK6FB$)F813m9)W3F~yjJTBcTKPw2F^h~Qiv4lAuDT^DAnxl&mO3tbT)idRa<
zthEqPMAHiDzEKFppell2_9z*$&q)&*r8y>u;!F`DXYSDiZHG}}8|P$+$yj#2^EqcA
zDNCnG`EKQm>3!>CPedVmrIf2`i6Cqx&FgAJsthS-@Iajpp0m~}DpmT(fO!$lIKr(P
zUwY^DuibzA-LvIKIr^XZnP2yf`QpsbUGLjaSzVD66
zWL=OXgq0enltN6TJODE%0YQ)`Rv9CLDJ4V$S+`ACtya<#DPio&S`8)xt*kAp7?>GN
zT{YdxXcQ*SrVkuBhM@YsH%dF_N{7CSN`dckO8M!t$CFXz2uKa|sLGUP&Dl8|59@Ka
z=ylZN(P(>ZD3Z<>rzzP*+bo)C_AG=-G)8Svxn!zZIR_GY;L@2AI4~^z+!&*D=_2RI
z5K_5nO-RUi9ubs?7AioQ(uUY&v5^Dl?DOjB^G7*R&bcU+a-A}v^LAkKHXa_WaHXxFJ`tlpD&ID
zeOzkwXTV06DuxxYX3{00_XxJ^+CI_?b%4lzU=j!yVFNz~EHcH)v{MwsuZ*J`fnuF)(
zhx1R`(<3)n``KUo)wXX^vpRfoZ+^Nz9IRcr{_g(%-TCZTli>&iytTatFsSf!*)1Z3
zEE$9>8cALe0D>6SnyqFLsmk%iuGJYQTC}Q^js58p~U=8(>Ib=X#6Q#{$6l__{S)u@-=&~3Z4T6pEd4j=3vNr&T
zaG$-!9x|K4#28XeBxH;$jb&y+6U7KDfqQ@nN)bcLv_MK(xeKnMCNSDus0IZybCEDC
z;~YWBMgE9VoDIN5-_Kh`IiR7!u*%I60y8Hd0R>saD1~67PZ|x<#E4eNT9H7JDOs0V
zYT8%;REQW*xQZejp?d<*PUtKk4-}=t){wM$0-}{Zf@V9^YYk%v?`ov}AyPCjE;Bq?LxMd>{$?|bJe6A4pB%H868smC|2d}U*OFJEileeYKX
z!;R+;K79K0NtPyavs#@o`@Vy^ouM}
z4+a9kdk^R&V@SO*rXH#*8T1v`xEku3#LEXlpj>2ZP$j(4V%X@~S-<
zk0vRU){Mpjwdx;|4C;Dl9i`mZ0sHJR+oOZ0eO%BB`!LvEf&TY1Pq1h63G9FE&
zb|R8`m-Hpx++L3I-U%U2H~p*_-M+urJ6x`cvwM#sg$x#s}La>QL0awC>GWgG^$Mn+*vKIKm}Sk8mJ*7
zCl1RduvAu~A_l~mTucMa8yg1)`)6lobzKX(!Dv)fwN``q
zeC~bd`)KueWa}gE
zITXxD)E9$HGRPsOC;*Cx_(c~BnK2j)8f-{5#vt~jynZ;^dn12jIkVQ1ho>;=$zDoN
zLm8>vgvcVC6bF|hc}`4$hTMbMS*Kvw`lKPA8B9{>koq8u0gIgL$WCc?73=DBW4suw
z&34}1(VO2Iyk7Waxi}leL(F!mCxC()ZJ|o)3t)6wq0!DNayiQ?ESZzmWULz18)Z@D
zjLou5%p@u)5eY=-{W8Wt7bFnMh@g~Giv6_JQkS-0YFS@P^FDWT-!wmVrYu~=*+3?z
z7!f2lZ6}ViWH>8
z7A#)raCGt{1O$ND{63-xWMkp*_Xz_cP_vF
z+RrY#lTSbU;Cp}aT}8#g{`Bzh;OUd&)3fLQ{ono%zWn7cM~)%(%HY=a>sdG_F3dQ_
zc8(QNv$J{IB+g*ey1sTvHH-Iu|DWF5
z-0J&&V?5!|n#o#wPJO0cRG|urw3v6clyY->(&NMP15nun=mWI2ZDX)J5S`5YWEp1v
zr*Hl@4#wc7Zvq)`v2!(Sqne91tz2GU>u8jdnVgqR7kIw;K!UX++jF;h0oj;zm6jjBv4<
zo~hCLRyy@x|Gym_AGpoEBBv|Y-y*%Xe>mS-d(uoFl*8A5S6#cstF7gC3p?&|9Wg-7
zIc{{c@^P7CZE|frOKKs?qHr2P8D|6l!s+u*{_Ed4CK|u{?%VIY@pBuyS3)wpAcV@)
zZo%!4MqscS=T#fO^9TPzWB6bHFaF-G>u-Jf;M4mbz5j>5`}dDi$fL`j?w|H;w{3K@
zy?7+s+l%RM)hCB%fAEd@WHovFbYpjIRYYY%fuOy_WWj0SrJpfcKoOD188Wbvi=4|s
z2Am^^04l^?Dx)Nmx-qq%ivuB4ZHZn0!v)eLy9p_Ksv~ou-LH
zMHxAG&fXbS78Lz5#4IaY>WZp~5=O`>FMI2Vfs#lNx*(qrW`we@z?8{iA|JF7Q&}-J
z){B8xfisI1>f6uWjE>zjHSzca?6EFO@=b+QBr+teN>~N
zKTA3&yECuGMF=6xDJHZ{pG_hXsk4ubNYF*ern7ZPTZ-piN104)RGRyYBMmtl42qI9
zxd_TfpKRdhtRxnnB8regr4T`YLC~{PqO6E;%EC!$F~&e7VFgJcGMh{wN=nRiz(@iN
z!WZxXLIBLjMG8tgG!ByF6d?trkqmNH|90y@NI;A!Tp&dh3xG)eH?$8u>=~!of$`7VvsD-5ujFzkg{YE5@Dq<
zhj`KMAs|FyOj(vGCBgxS6_E?rF0#EqLknNh
zo2IgBg>kV@J_JCnpbF3eGc1Y(9IND1s)9Pl7Lu4m1`h)Y~aRyjaYY;=l1=ZT>6LFy6p3q3!>vy;*~tCR8GcF7Me
zDU}IgmJ~U5sb8KpPoMXiI31W6p>VqD&5ETTKwlJfA9GG^X;CRsJSUpR6vv~v
z8@8ibsZ={ySIQOI>pB)@GN1Xg>AV^^z_47kq>BocoOC8Y=@WRRtSNw$_q|d~s7qjq
z9T2Lr1Yw_-7Ckc5{xWoQY#<(-MKLa-%_2l+<|+OsOYENX!~TmsMmU
zBPmG=$U?es##Y*b)jH)6I^}aF>6tk(0%noLpeR)-gBl%y^Z>Su;@h^>}Oh7f2TnA6AO2F-{m`@;M6lwAa?!SP$t>Ee1|w5e${6iCogvqQDksPxr>IzbRTBa;M`IP`
zm~%^HN5f5|kfBHF2ctnv^{Vfa45!m$%|%_0owg_tx@??V8|(tc?aiI@`7Fp%0AjC*
zh;y@2ez6Jwpq0|PE0r~%MWE6?g2}DUqEHc1DC?G@H@b)c6tZ=F^Q?PQ<|TjH{}a$f5&$m_#g
zZI$(?QYy#!YCZH(E2T~9!lAZ|T%H|wRb8AO@0Vp+y2+bwe2FEVobTVe_hgBSjon{e
zL065|vvZN*mD_)A+0D*RKfSu~m9m&L%XD-6RiEO#oju;aH;GZHRnD%`)#;Oi)vTNB
zY>^%Zlb%mccg)&lSAO)tXX}&Ua8L~Obkzr2#l`6?gs?WCaaGoO>-g;XbpD{Q<;Lc^
zAw3yyZ|_{Uc64xb+%Ni=sUY+*dhfMTN=+?o-?a%+
z%q*Z4aAa<#@MrU!=W@8dkSE{_RUUiKwi#q1ID%ZT9gudkrh-j!J
zWK>C^H5Rpz7*dSO7=eggQ5vNor{r_a82}WDXoW)2cLcR1ld+Gn11Uv-r2_)tAmBlD
zh>A0DwvdKM`2rh|Qf!(og+yqrt(5K&wIpU_WJY6+GN4&2PEq-ee4m*^;UbeSD=UPO
zkrPP<1|Y39U3fQ>hDZP;1OSQxVnE0>Oai1N0Z{;)A_T}8&$Nsrdxi`4*G0e~OU`|_
zbj~^FQV5cRRh1B|R&(ZD)z$g*uqchOBZ9P8wBP&}|CCf?&3ajGDk_wwtfmsC5G!l#
zpmrh>`)rjX@;-IK$cU0qL=;7k7(xP3NFZ5|2$+b_7$wX(g`6{K2ob=tLDhOmUQkl#
zJ!&p&snu9maRyDOG$Iyk=9~<1DOoaPgxqya;ne*ne|Z1lN3Xv0?s9boSp2zP{mYA)
z|HE(n4s*D|xUXMJDL31Dy>$oD!O5$KZeU`peC&$xK{rO~r
z2GwO$6%Bq$iQjOx3rI&rwnWgApm06^EPvL2HQ#6dR6;1e8)K
zs1VreR^_RP5Lrb9h{*{^F)7S>kWv-1=G+1HnIo{{oP1bN(kws^
ztZ1B50L-LGIK@s#S*5CFYK#`rKjFv&M2>B038FwJV2Nr@HpN^N64JtIXSKFeumTCm
zB1!MrOI0Obs2#%vF>Y<3B
zbV`FUMqb!gxpI?wU;+D8dw;fK0&R3@jGgwMf*4`#Y+aRW#)383*^uHWB_AUsRxv{#
znivDU_>M{|)An6a8kPjYMyrrloa^!6`fxBYR?SzZeYfZ%X&QzQNP*xX8D#DV^JD
zyyeyh=;rMzgS&KT^YY~@N)4^0!La_(kACpUr$1gSPd7KWf9kz|XJ_rD_kZ~HPd@wZ
zmA#jo9ZY9uzUzx(U4tw_KejlqF7j}Wd)bV;Vr07RY7r9kz1Ru06LliglWl=c3*OhW
zTxLc}M^xT>{u-RVxg4*n(2As@o``7O2bBWnxmxwvcU)8rs7zQEDrOb*q+F^{VP{!E
z&}pex#zKv}=Bp0UGR4!V3)Gtcha#IQTcEBdmzjFc9;0!tBSeekcC0rf_hlRa
z#_IY%NbOiIwI-dbB0l~Ot8%%li4AeTxR63DBGy|gOZs@fDn?=^t)jc=P}
zua7wAu5$(lnkrKyOChpA9);A$>{&dk&YFrO=G1EsT31dF$WfL>mN`nr9CmiM*C)f{4Z(OI82pfoIQ0;)us8eDu?1Cqx;_}sNTO|93`}6q`Lsq&}n5;5|
z9YczMB&-mXHW@LcauPx1PEY{Gt<=W)5
z!Y@}URh#!Vf3fW6H^TQU9S@UfTq4dRsSUUcZJ&MB7SUl!fw{kkB_IVpf)9Nk`jwgs
zf9btnoS*OS+`J^i
zG+XU#Ze5mauU_8z{crw*{^aETqkFN_o9lbcyeGid-+C(xfAQ&ee)bptT-V0;zxzkK
zyO;NNZ+`i`caKj#z5C!#+Pmt5em*G1g;QZQyM67=ci;YXQq}q4{?_(hA4h91U%Gkg
zt*+0vUVhUS<BLhW%8q6p{VKS&nH%
zqz8j)qo`-NA}(<b?B6+Nh~rHOoFF*!CP0Zo$USekJHs7
ztjDb;zfeTW)H&@G$I#3KK?qnwR;l9HE*FeW*($3+Cn4m8f|G*AxfTq4;Tr%XtI_B3
zldWoXnRr>mX5iGQ+I&@&w_&>Y_&cYw
z<5(9{#>Up?)Fx+_U@zpo>zut$t+pUKA*0<>T(ZjTO*1g
zw#(Zuy;^B#7wzBwFaPuRzyJHI)5Cf=UfaF;#q(!-x32xdyI*^|M{~|
z-yc2u=A=`)1038y=VF{WjDFQ?YihGmk2ciKPS>6ShQZLqW#G`&rjD(A@Plv6X2*Z|
zFa7rGZ@trZT~R5cRM*d}E$Cmv35g(rG72u*_kfrpA%fBX06B|M3IIR=K`BtkrDSI4
zNSOpwCc_Zqa}GzQ3JcIa)d;fR9Ql&h=49I7a#--335Ik
zqJoTMvS9W>EzUT_AkBK(U`uh(#U!{quPBu`Y0u8XWLRV%p_FY8mNP+0$|SYFGP;G_
z0*{guogC{lcW2ea_)wMJ4J=cUTZM{?O`@WMvr)j-4p)OrX;yKC;vh|x8FtmwG;CUh
z)gq~}g|Q*sQtx#dqt;SLRFG^}jkt(8Hj0A^LYWK#C?5b*hCwPq^eiiIN?8Nq$g`}1
z*G4q5jR1_9DPQD$D3KO40zrlh016Z^OA_KT_gQEqMqz1zDa9a752pUNT8B9qB7=;=
z6=gVbvH!s|_lXAjRZS#DN>qctveSYFOHk`;@Jv
z$_i0JHOL|7z=5J@77|O&>0#frOvtrWiZnAui5-T3qjAYN%G|1DQ)y7v!Mw$jAKskb
zpPzl4n!~M~o4@$8zjgo7r@#IiejotU&`{4WUPfv~?Jbb#bv+F9_nC#4FEi=tdI%_SNg0W?Y9I70<04q@9
zTxC_2JM33MRm7AC5K)0bB2LVyK}#f=gptv(V3r`z2n3RKfym+k06`I1tqU}eLb|Xb
zfjGQi&nrZ(GK3L`=MiF9XO
z)7Ld3u|h<~vMl=msN*17TjcDPDGM4gN;n$!MI6kGlpJfFOiY1!WjGk16?Pd7Th+xr
z#)dgD8jjM(C5kG!6v-(i#GE8%2r3qdvoivsIVYt{L`>{$p<{@gm8KGy`@YY4-&mD=
z(3-Ti*3=@*(l7gEN^G2M`y3)%po#)DIdn0EL9wO`Ta^(ZgAX1;7dR!bN+1In$ch=2
z&ekfTA|+|s5CUse`k?2_)~60FYBox_T1BVdHvUJXQZcnFE1qQ+NT}#>nfYtTR
z_D)?~e)ZL#+q-;czFa)p|LExC^(+g2oVFPSgMNf}c{
z05LW%nIvQA3x#guyAA}XT`W2zWqv+99bO%VXPW!tV=VOv<`y=S7u
zvMG}7x=83oLuaaPYe$DrA!BLloWf$-5^06TszQ>|E(4|%g%sF=gbnauFk%3`Tu_e1
zD#Reosyk2656X355neGXL#_`pn=J)M>9%meMu5I>&Y^`xJ5M=p4>$VG0~#dhx>egQ
zt-C-*)J3Tyc7On^a>@ov(puX>vbUrNbQZRbRG}ebjH+*n9Kjj1P7LUak%l7F8c3rS
zAOL}!vY;}(%!n#m6sBkko0M$UWeL!}I^1MtNoruMN?K*pFB|XA%Qf<`KRdfCqAh)&
z`$xw&FD(xaKPc>Ad+%*iU#EI=`TT>^lLH5}bGaOk*4D3i39HCgMwg<7&>i2u`vV_{
zbYxs_lf87~rHErYsHXG7gR>*nxf~gl*7#&P?^lCjQjgXZ6+WyM&1u^=xz7sJcsLfq
z^Yin*Z&^B!?94VmW>}SjVO_myjGNDn7OPWJmCoqG>c}LVg|pR4my^Xz4MryOQt4ck
zXp9z#%hi!F&Z-II0gyjCT3J12vW2;n0~V@4Q~1;-4w*$zh=jA`EG(iGn&Kh}QoQe?
zM6GpY4N(oEki}R+h;5IkL?or$<(w5|L?aA95yT-l#7u+$N`zRVr9wDnXo;|{tP~X_
z%`9L9DG4j)l8BVfN+$p$oJbODP-F$jsHN*--|HAM8MGEbVNO6oMrlRVuX?h{DnbE7
zL5jc}BWbpRks!r{DUxU@ifCNuyCrF`S_@+c%pyvWGmN~ZRZg+5T;&Gi>2zAV@lXHM
zS06pSe|&N%&?9wLnbfJxjqTBBG@Gy5cByp;K+KS3l~PbpmIIx9@?NYS0Vt7H>r_^C
z*ENzIiqcvk<(x%fMgs)eXaY!_1riAhLUu;7>oFRuON)h-xIWx8uAHyV24+h{LTqAO
zDavFthzdb+P>KqzobMYjxhzfa;(Z{sRum?r_*;MBf404Kb>R;_yZ6x>cYc8?^Xf})
zI^l2s(Q5z6=N+Asa-2jDFiS|3*)XC;vR#OaMPF-OR$z5JJ3E`6e=jC)T(z}vWoP$F
zRk`W>U~7B#N_BHOJ9>V2Z+*+09Dcf7ECkxbjiN$<@*P_3GIoSEc@02>D&;my#H6e?
znLTh9`GuWYYh?ucH0#2w^DENHr~(19#F!VP20r+{%OWKrB1NSY5pAzu_A#fF`h3n3
zg!9GwPwG)=5%!W_P{7nj3Q-wO+F{my0Q*p>U+OXRQceQG1^EBomCZPU7E4v?3_a;
z06K44lHO=^L@_Yujv)h;z9kUS7L-niZO)q6AXh0ba}vZCNO?jRdeyh+yP2_~2n`iR
zDS^P8ISYvE+9(`y7DWbtLTr6p)a5!$2%$&J!P|Cqfr{ilE`>a2g*+jxP<5Jw0aA90
z=|b2WV}eaSEQSMXof;KaFW5oMktH~1+Sc}s0z4JTh(?jkF$2U^v&dNx3TsU8gXN+J
zOylvW@0*mE<3g34&=fEN=8Lzsi`l@1dmvvVu#*A-hynXxnpHJ>{I#p^Zr*sM+}*nL
z3AjT_5CUpss>_RE`sm%j1~JLAp8$;sh2|Iy>~qrss5YjHgF?!&Sw
zXGgD4{POwgOjYl9<+Jl%rDr2-UUyZsw*J@*Pg1TI^Bb%4%YN~rd{g~2adYQdXqQ4NWq>w
z**flCbIxJ;nYf3_f;?ej@e(y-JUQG#^*(KOlIXz-|kyN&8m8=>&J!Gvbx<*Z~gEayRq1hEGUyF
z57y&yo%DV+Z0)v^Scch+_WU(qDc3(Q)}C0C7N;+#egj0=;Zsw5iWH@LolkEE1$y`+
zyLR7!?#^FX&bG+S%gIqWJ^$PdU@st#|=kN3<
zFSo(3UwPPfUp$=l{_J*Gy=#kQy?%`9aqP>yx^aG34+aCvtB^heb<(w8>Q-fnY{y4-
z_z)3%b34x8G(cwZqk8+HfpuBD-A&h)>2PRZ>q;}Ox6clC+WGYeW_i2|n};cOJbkz8
zYR*ut9gjAi66id89Zv4Frparc;OH^&IGw*1mK*F&VZ2|DPI6ugvupk027}nqLpyou
zioSn-9n-ErWrv4$bT>zglIFD^z~GYebcswu@6(`*gLLmb5;+M>!0I71~2pJ+{{7=H=DtRcmmt
zeRsI|+?MNM`o_uAI^*+V{0xgjLmR1iWi?v^fNFBCt7nnV%=wqs%4=ump5@8&`}f{`
z^{tfOdi-!x#jmR3eR1E-KKDnjE|xpSE~?3KRX@uaZF)VOUJ5C#?Vc9nr>g4Phc~;$
zcu6MhoiJP{^ZnIroe3^z!Y&D|@#W4|k6r7oC5G_Ve|tlgW7NvyYb!-;6Og
zQ}?(2?k|nU^^d;o?(F@&)be^+O|DIszWc^MKX`thxnJD=scLeqdvyQF@BV{>4fp2e
zg??MsP1Ld>81fqw8sdvN3?yC1Rr`y{k~x5%qk$;Nu)
zvp9M*a@XIu{_me1O}_Q}b2I#L6Tfcg+LL>uwZYF^y?*8D?ePA|KWY1;=Hzvgex=f*
zU;62TWPY$%%qhI_(RT(P{c(QvZMybyD$MSuKcM{woMdt5t;yc2!MEju&-m#BF{Xa?
zjbeRgI$b<`_VG&}{P5l%;*U1=P7bEu{OVVK>GGktR)yIDr+NRyuYxLmARrj8~zkax1&lfjz8Am%=AfBIH3$wR99E|st
zlfe%w<7P+K!}65_g=XW?-ek5h(*Ep~=INHhrd)fn&?gVz%I&o39}TUy29%JZ`RuGPFaUAvF#QicJXj++Ap!eSS3XBlW2_ixJcrcP4b
z(CxL{V5OgLyJthjeRV0(COENpH>wjA%Mo9x)y=82N{&oCACPnUvdWuHKUl_lW&Z@1
zTV=YIdDvH}<_EQ$YinLjueRG;XYZs(&;B3ZfB&W
z?zMMe>lJx+^7QLp{PoMLT%wt6zjty?ZEw(`xzs-fIBRV6xv77^x%H>7ug-5JI9J0j
z)~|%sEQQ&n)$?}>hg&!HVQt@&Jp6}8e%7z8E#2NfqOC`lF2DTp+Fx_+d)jt`OZ#@~
z+0OO7=KfVVd*hq`)rZfXezDR=TbsAO^yPp5llQ~R@c3{1<}U_w^7Kddf8nctYuvr|
z_U37~8ol)8-x#lL9sg1D>!Hw3k3Xox^F4IBD4*)`)D}1t?bOeR?FRh3N{d1#luIkv
z=&5e>!!j=`jz&%VV0$VQTeH|F98?EV;T66_o_Xk_$#&#?}5_<${v!CBG%?;FN(H^O@FGe(29dA83
zeW@_ET)&@&j~Ux={_dwo)!iTd^S|>yw7>cn?!*Ki{Rp1jn{bx5e;Tg5%pB>nAHahz
zxTxD(ubCUKQV#aXJ^1{iL_~MqrYkQ2sqonk)bpopPVn+;qnmd?lRWx_?tNTmfVY1d
zF29sG>d$^a_dieNVE)o=d-FA&z4?Ey^YQob=(*1cZrvWnpJP
zQi7yE|I<3J*}d1CPjLSOc=9cflP1fhj5RUJT&h86yJ#;&P{P*Hd6!-V5WCO)2-n%?8${
zi3;~oX{VJo1zt_so)loybMs(-x0s9GnCF{bveFMn#dFM61(&9)t4YsGb5PP@DJjC*
z3@uV`b0aCnG*vtGM--yRg$?w&kJ2xs4XjhoRl;?YvOOgZj)2m8MC9MN3HnkJPyEcL6Hz7qnlX<8{
zADQ8O$>aX{omIPO-I*IbL-Rz!B(AQ7_PSE2htF00K+H)v`U=NU7Nlz53?883ym~7w
zU*nYZ=*N0+7xE}B-pbu2QK#r0+VL|K#oaY-?~nqh?{f7Nu$S%~-&G>Y*2iY>m^t~`
z?X-L?^-%jzCSHE_i-iS`w@sVuZ6`WuH=nVZX
zJI!lfDDy+ztit|lhy5GmhB*8P$DfHfS-mCxvZxi>{c7V>v4LhUOs_LUS^vzBo+K6A
z{#SDvXAv`cYS-?O%DH{5o!$xQ)U5BT$&U&(na=<0!TDFh;Bs&7Q}HLN-LkVcdG10!
z@a`jB>?_rk&)yYT%Vbk|CiT9OWR`ns@s-YWG(NV&`+X-*Ki_)zqc^)a9Br&x`;ndx
zwSObeZ->~}wNLHZVTM_H@Wv`^uu!>qqK2Qx?E9ma)8d^xNx8l|**Rt2UL5bWi@F;2
zi5}UZ=TP|Rm9*UBlMHVPSa9V2jV_W(lo2L!|aaZh}8#XbT>xnr?2*l
zUE@MMnVCA|gv;qA=!}JYazR_YfqaRcCMG8e;
zy$aoBa%ZZ1ygq4jqS)+p(;G2HJGu+wBh<;CZ9o3;wFeLM!Rkr1{sgd#?G-qAC!up|
zpXkX$lu38`^89pE)GJjzacfOcj20)u{_OU!s&`*L{n=l<`O=NI_CMd=f9&u6p#9PJ
zzj5yW*{VHR9^B&jmlQ5&c(+)aCU&`bXW87!a5maF$b-kp+qk+a-5myi>Qkv6q0TbD
zlU8en#?|<&8b3jS_W4_j`>#mR4Y%(XlgFViXZzPBTyle1G2XZ35$1Y+veBIEN`@$3
zBzjQRMMb|5=L1TkOB=^_`~?7q`BxTq-vixN8+VJr7ruwQe4|-x9o1JVt;;&6P^8tZ
zZhn)QtM$i&jRVxplh60q@3u=d+?wmr-ORZ^e@^#D@v(q`ySdaQl9haJJ{XSqV(D)Xg=w@Y0lp0=hrP%srYWa
zc^|0?%hyA91#n)}$1-kFML)gPAHUUWGurtvyJt$7=MS&O)m390UTfJUuXA_)T3o)G
zpAn8e2=zxrxuLqB@rx_j(Dvn>a_jSF`{_I1n3nX4EzH)ns$82+)-Ihs99PZX>CexN
z`J~pTL7)@s?0|1%$m8O;ofNA>o-ZiUVH^=@`GLoOzdtF@=fuBK0ZR^qN8
zn&CaFzQ{~z`FdX6S@8K_>t40~RQzal`iftzD|c!~kBYT<462`B3iI0l;MVSzYtM}3
z<@AkyKIG)g@IkruL^z*4yLs@*ZwCLsZG582`#8@|NLH_e{GyMHaB-pmjCkV%d2M5tQB8{{`PYE$*BHO>%ZTuJjL6&
zyOm@<-aZ&sJ@_)ux0|DDA^CFSNxgFv`Mf>39os9MoE_h_|=fFty+k){JEN_IEA0D!eE#td+gp48V?X&W>u=q^_wnlJCU>v&
zaW+^#r^yc~jd^)BcGs0=ai3thFDjfpeNEyTg0X`mGk7Er(&|QTZp-Y-dbyEFK5{R<(3b^#ChuNL5++3n^lP{|E
z2gK9W@oRbYHtF6?J|ArC`=HPE_j0pq%U)N{hwGrINXtu$*^QhN)gKq*eQjj%{GE2G
z0bsJd7;GHm5ZjYi{Ory-HPz@$jzN@zM+d*F+QN-Q{c6Ju{%G#ObP}Y`+E2w
z!#tk+*rSO`o&(m
z+zGLtY|pB-12=AD_SM;$;WlsWPR;O(vMdfCUc%K+W&Qd3By3MUuxg5kqyT`kEESF~
z#sl)FVmnksN+GDVwT+!CGs^QHy#H)(bF;3mkb6Aqn%%0Jo}Y{cFy7i+x+4L9|Je_h
zpPh|{o8?$-TH4-JMD~0!cI%>w=cfA9=fx!}I#s2K*RDO?U#*_>?NF~`+da+G%_47I
zO_>t9wzQ4};-!TfpPWxmpn}(KzR4jU9DJdQM}Kah4^wlT*Pg?z<6(yYeL#Z0FOQuQ
za4YS0*Sa?0^PtO?92Qs8{bl;Z+BuDEf@g^9K|-f1lsYr@*5IbJqa!mJcJ8%jOp{BCwd#p#
zPL@Dxc2u~AWV%WpHz5H%M`<0eBd$!bGIc_=HTmKv^r5Bg3RmMk$x@5aw_*|3XR$CZ$3Ugn?5^bb(XBS%@Kh&F14Mi
z>ondz>7DdV+rh)w^hQl|{UqWe008TmSrV$^V6fXL$9*`6F)EdHb?DRyP?PjLv^~(G
zOADkGRbqzcg@L-7EavB`TIGauSuE%0NDSP<_yj}S=9OPABq^|W_2__XHl?`QO#STo
z%h!ja`26|D^R97K3z%f|R9i(1iWam~m(qV?4FhKd^Qr9UqUfJbrXM1PNdP>@!FMT%K8vK>4#ZWgHkI%DWa<#PRgIXni*T7@hFp0rhs@_OgNKgVRpy?H2ef-S#IY`CN~e@gsZNC(1F&G%wv(uP-h7CY
zxnps<)#xId@%T{Z=@N(8oeGf3r5b#YM;{H6^Cm{H$^^Cc<1~IWFLz^o<#ckbfN=*e
zt@E0+sYcF1Of{rW-K3I`MMrW`D5yV6z&QL-vc)&(2Pi
zx>;PkX8Xf3pDjYVbNS9M|I%MlI7ZT*=bwH0;pyzr+pqqO@uhqFPrmrh2M24XgPXVB
zy>xBk^|v<9r}>}#{y+HAqHd3S5?_P_l4A0D6XKUl22diBj~FW<2=dQ+Xm=+FJ<
zrjnjP@%v`%P5=CK@$BZUjbS}To<4i@!#3p5F2DNTFW-LUz5Dm~ukE2+jY(pZ{&MX*YITa{XxH6qPBZool%s
zxAz}>hIX6Gq#UJdFVpVjQPV6(8?y-KNy5&x#h+Faz(
z_dhUKSFiq+GyRuGgTCH5^#}RXt;^urUpoK$+?|$2!S*o#4l3FhC>!s7zj*1%W^pTg
zev_W{SAPDu}PZ!=Ow1#R=4UK*e#o)Fn{GdOeS^&nvgq4hKM5<$~IRvvsn5
zCgZv_RVezzQzcQ2g1?enF;T_L7L6{cyNz)Va%^?E(5ENLE6%Q;6fhjw(a@@#5?3QX
zyVK!K1LPKMW$RoG@&w15&B@EfhtqHTS9gyOPtK&;-7>D~8WsHU$-%R#yiu4-Mu(Hf
z$8k!fnQXx~R(ro_95e&|VDvL?Fy1~ssn(}`{vodx_8bSWaOy{H^Lc43xOUh?;ROz%
z*;vh+l}fGZ(C5*4eNyV@rii9F_nt?We##V!Fa0!p{QR@g_V$gNUul=;*Z1G++Aidv{)Y<={zv+QKLMKYZ&_*wp=#OX?55
zeeYX!NKREPTZd_T>!eDqZC;(2l$dRgn~Bz2Ou%C=iaDij$a*kWI>jpWMNSNEvn}h~
za8?5#g~|E2+o$agQGuyye}o2jhh$Pf?$Nf|7#$#7J(>00!0wLwKfcue?v*c%N*?-a
z-B;eaeYnaG4?{KHaCoUbUuy0Md^_Jr>2za^c2YBQ!%u_6$@rzxwm!7B91Yhx0bt?X
z>tVE!RjS9Ekdp_*YcCg*4FhN<>j220!rCssH*!j0Imn;{$y~ZZlZ}BfG@5`gv~4%p
zs^0y1n-YvSfpZc}*Iq6r8y3(`)&bzZyUx|?w6SH7WU>wbkOFM%rol*KhVcdfCxLYB
z7H8dqfAY+IJ&&T9*Vj88S7AT_g?fKpG`hCrxY4UQ%|T1C
zJ*?I^#B*QuFtJ)&n03@*rqo-3@qmQv-ojpaipwrmG0RLslI<=b7>77qK>&v|w&3o>aPR7AgLHV+dTfoRrGD2f#ZNDzLC^!=h6P9%sEz=cb$pDa?>DIHv|b1AE>UIx>!
zI0&P9Ahz;MwvCswk>Ca?wv~2+{&KBHi-A2BqlQmDyY=iFR?dpK+OJ+d@IMv%msW%I
zbosR0`O)BUQ+D$(SAaEc44^WgS6DQrVKnS7cf6i4XGN_>uNr7Zoh!wl$iVl*kbrCk
zZP=PsSEr>3Mq_<77;id(rA8YwIS-qR0{{Ri09;Hl`?cN;y+KTM*9<%(766qKtJ{C8
zbxh7icl9JFr)oSC=Y#HjO8=(TgByX92bE&g_l4(-dP^0wnA0y^rkO=e?B>P
zX0w~5#2^Nd8it`HSjiemw&8JrFYGZ4I9&N=V8Ff@&%iJQ2{R*GmPVvT5=n`nNp_PR
z*xl!xKIinw=g;x`zx<{hDy`*C7@X
zE;D!M;l|J}?5O2x(QqR9NJevRpmYhlEd6^9&+F=w#q8B~@4~wJG#YzjI@p0V67c6s
z`tfP|fw6b!{Clwka);_qAUn+}=0VQ^Z3`hnUOPN3rmY1E_*nWxc5^X)hiPQEOuX{3
z%RVCAG1J!Y5ft{E_LufE?fCg9v?Dt`OkW#0`u@Vw)m6Jn_j|S~S
z8LW$F*truodUMhHl+RX+m5;fc^ck6%%OcG~GQ2d^J!e|fOzs?vPmwn^kEu8tJfj`T
z$|(ra-LhK86$qY$DAo@3)`NOuu9}boQo&^ld5dcMz)$V_;eO^or}4x*^UkqtlJecy6N6>V>ms!e;#uxoE>ir
z(S3rb=#B=z$i~BH7};rE*I5-^)-2U{ePQSP7k~a&u0DPF#@(-f@SM)gKUhcRx!xn7arcvaIbF#`cCOM#+bu2_a`ytvRX8aC1alcROfr2;+ZQ0
zF~^w7*|C&wlEHGku1Ax}*viz!Az!+jw{~%OkJ*HBa#A)f#;E2XrjDW9*vUB_V!gCY
z%H&Jub}n>rC<9a>B#+YEMx!r${0KuFrm%*{CNB65HGFhBBEjeIn`D5OA
zWCla!DkG-sLw{Bj<>&llqd`PTl%
z=Cofd7AyZjzdXEh;kmxQT};+U9bhMkG_$Q9ZC+c1Y_hf>O$-qEA=PJwhhj<=ra*l(
zAKK}%thj8J)^h00X!k}67L*105Mou1cCK2!>H#!^j3!Or_s_ogG(5YZR6aU-OvQyP
zgRc*|sf0OEvJuNHRKfP>0k>5ZL#$;TNy6?0%u2&TEt)m@8Iawnoa*idbPZR(VVtm_yE$rqzjYwjT`
zl}8Fg@WlY%=h>(!MqtiFS^=rI?_7B0^+yj6xj7@3K~{G1F>pmH&c>p+Y8atBO65{S
zOwp4ySueuYeXg_mB-ASgWVGWOmA#8pPU+M)@Wb0$Vkmpzoj)aZj;_;A|
zj~@HJ|3{%ObN%$PJO1hCf8~`IKJ)r_-o1PGH{HgHs|VfkY_dhAI}B~^yU69dYhBE1
zuKlc%BsubfKAf-Hp34I=J%Z)p#2QEBLb{jrF^d7)BPycj`mW^z##lC~8t+pghi6Z{
zv<}|C`^m?TkJ>Ju+0pl?GzivMVX8s6oSf8?5W7@PlfqC|#m?on94~;{qjl>Zf9|E{
zU#{)(`LTKG+7&ID4{x3Si{JW(P54^Zx>&yS%I-7Uo7cYhgs-5Vdh`S9rU
z@%do~C-ufhZu}4^lkr*C))9_RGr!q_>DrS5JkZ*QlXc%rXBUr;pPsM#>1^`MGrP58
zc1IyL2ZviswW*FHts5g~*Et~YnyZI^D>liE&uqn@v-099g&`#c>c{{RnCSx;FDCmf
z70HJsn_O(%GR+~O6!34%J?AhA4NqmZQl=?
z$&K&1(LyF5gTXf(k-Pv?t+2lvjeTp7e#fhoHe
z&W`5I7{;5v4|C_*?X4HnxRt((>yJfnv|)(?#8EMNsCWkI$_i$q=a~wFkTyOX*H?zD
zs%l)=+@Cux86hNQi^ldjKURS(3CqVt;$3D
z#V1lYf`E32Q4tr}^;n+*1J!G`J_nZi?aeC_LL1ZBWM-X(s$4$vBYL)6JUae3x5un*
zW0woFUU$drI%6BaLF_#@TUn5LBPU(^m9NY@JgP2Vy!7JB&)j=-x@u1@Jhe3RaqPCg
z@S_)wmN!H4hMg#ACN|?AL=O-d0FVqKUcd!~td`QhSp
zT6Z;EM$jOST$2Pc+49GJ>_^V$a(M7PTbX%(iX_{6yVtI~WcmL62OqlTYz3pLEX&4#
z=EmK;($6R5Gv#~C7r1!Lt)g`AuC
zkmEZ+&o)#pdRyE1$QC@|&8N~rHKY&44_NRR1qsIS>LAOMz?N1r$MGn*)?
zl$V<3&{oIT(|i$w?_h-xSeSAD*NCSplh)%Unsrw~HQL7V%7Ec;FcTU)G*JB|bNQp5y<
zVx&{SQ?3O-2peTiasXvdoX9m~CYl{vEUK;#orp*(qfOSFjcSKb_&76V@8i5TCxX4J
zt*x_2>bfo`L)Yrobw{9U`h!{O{*@T
z6(rK^urk(Un-3&w@Rew`MYPR2Mesgl@ydgmLA4EuH3|?$JB(CXTd4L_wKZxs&7r-b
zssou;N1fXos*PBWy&DCZW}KAiYyf+4^>&lrK^>+i!+LQ61pCPqEF0OF4sHaprqGe^
zT>m%o4cZ?CTGK7XZt=i1MhLe}*q+qYx!YtEtGM5qTXhS}xO{C>Ues6Z+QG9jgy
zg5+2jlgYx6jw}`^6F(r}TxgESJ~_x9DWN^}a%|X?bpdL+uDdB$;%FDBUoQ^Z)Be4)
z;}OJOak4`q(5^*=%1Ow%XW#(!$?2kN{l>=D)o1Fgvbk~9hkm&{f9><1mvnaf=FMT%
zFVE
zAD^Bb^}T-K^S|)H^>6xaMd(U9Y0BwzTAZDJw6{C^+kf@Xef06o_rCFs&wlRHfB6@`
z_HHZ#rm^K*Y?fA3lra0N=XWm$>_G;wQB#~XO<+0QSR-RbdiV`F191G|~~LF=aIx^dSI
zfArqp`_d2n!sg`J(P%Q>q}z8tymEDKy_BLl>3o;K5SeN`9&eOQQ;LMjTxo|c3r;um
z{=Hk@ee=7qU2a#?SUsL_GG-A%0Kg0i*qd_rRMij1XGb)79Ixh!&vZqg;4-+^H$Oif
z;>B**9X)8qa68R>e(J*bsh?VX<2&QeelL7V#-F~h-Rb&)85W!h>C+Zg~
zrfdqUI5m|(j78KD0cl7XJQh&&CMq4N1uzA%MPYSI93LwcZOxgn8X(8OwS}yhGODRC
za@7w7rD~15gSw%x;by0!qT0DwoFA@d2$x*B8{RL^zc<{!^Wf}GzgQF-mtEa?y|cN!
z|I|~T8u!z^r(XG?AOG2Ab5=KHMJ0&s+Hmpr{^|er{^=S|U%LHu2y#=QJwI+O*&t3&
z8|OERjS_5@!NY3Md{@24X))c#1{MctExTDZLRBt)=7+a}jMKS%^t&gwzm-1tm(yK$
z`)K&?pM2+U{s(^t#Dn4A{OW)4t#5zz@`cNP=`a5Ezx}2E@%?)redQ1SK&N+io_THy
z_jlIyJExDocJq&(oj&v3@4fzwTlZbf*RJgxclES98%vj_FD9VHGQ9fgQ?t$8mtXtbwWps_0Ox2rb6eY_8VCuX0Dxu&%?w0<5LAeb
znQmp+N>7Rm1CR<$cVP+y2!Ju?p=fkt+kpa50ssI90L@IA8U6pjkxbKVm?8oqKmd>Wcr=Q_ujd6;zGHmRz`{_cBUefKC>ymfHYnT=NK14)Loh>JX;kaN)^MO~C>d|`#vG=LpC
z@%j?$2;QZu9w*S2+Az4~^U#c)9dt^9#?iV_`F2pbZ!z{(UY0K}xO5!Il{XPD~`
zQl615h8bA#0V)X^bhx8WGs6P1UoYDuf_N0I(Kw3sg}Rvjs)tJTfpR
zL{&jVc4ao)mdzZ&Nv;JHc#W<_iwR)WA&WO~22Rk`DMu35gALF*cg89?vT`j&276Gd
zvJR{GvJAU3x<45u!NcBm0h2F{GuFn(v*Ezr`(1msX*NHl+n)=gU2TxFW*=L-y}0<3
z`T}#r>g2J-j`O*VOBcKe5u<0wMZC=<#;lEbn-xT3OfCs*l@Q}fvVsoC$#ID&8QKMI
zp1_5(wsK2y@Lo(Szs@%GC!EH(+%|sO*}`I0pE979Tg{osT1*gQxx
zcvUD0oNQ{d(b{$mXfY7-IBJCIu&hY42^|)UJ)hZ8?MxN5N?CJ`SzDy#XaWKddq%Xb
zjHz6OA!2gIDFLZ2usq5l0u^JSm=|rYn3L;}L?b~)g1)NGV|7~EhG`2x0s9bEF&OX#
zm7TGIcCF@m9{QZUh6+JPl`}OrWh$r|2XX$v~z;%B;mf
zRLz_vk(WFkcay#K#&FympCl31Q6#_SM^k5W)*NOrit2)jm8TNapsrFrfByN;{)M0V
z%THZ>`nP`XAAIY3zqx47jItY(#S|qb1qpyI*ONH(5v@(C#EC|SRc%IfjR>LZQVx=i
z7?+kULqbJVaU;q36siR=MiI$aj{J0s&iiu!RGHW!kEV(^AQ*tUYAz!_I2DMu6N-Gg~P(L{CqELG<8^jO?uW^DZcf%DuY)9LR_Mth%o_AiW9yJ7wGde|I7d)~bf
zbSP^fY+OO>{w=CgmfnZxV1&eAyTm6cHAqP?Ack_%R;~;oB;_28sOQp~`mij~vSGrM
zx}k>{{diK96^kT4EJ8k;G)`=f8T+AY{XutpyL4MGKL7dSB`w-xN!i+BYY&s;lDLW0wgtm@_*fYa1X35)3)x4AEMKHYZ^!*#=KXnt(Eh
zDx?a4$o9xHKz8jLKLu
zZ|rVeyL$PhqtkoGXSY=h07dEgc(^%>lcqkJpNHVfVwz&&^1N^W;sKnh12eZxa!>+8
z34f>zHg@Gx?()WT
zR*wuDY3fULHMxHK-Rqxxt7B|Jon_K4`vv
z{l&W3I$xjdZa@3{g%>V#d#7iQhqydGKj?>b&XECCfH@SCwW@|3QjjnoO!{avnw<}?
z|G_txDFwsL8hmrw3`XTXWu1c5$BR}?k49=t;6{JbbPOJ}{zqaUnpI35va?IV?N}TO1BfaVym@tZuXwsX
zI`6n^Jwe4(pcPxVC|w**Hl`C1NGrh73q)pHSCbT2WxZUtAq0cYaG{dZsuT6BTZWlg
zhq$Dk%4#HN)WjjTacJ4(FaQ9k5TR3~oZH>K&8BJkJ{%vN8=C=xkE;}%2q7Xu5>WlvA(jy{qE=m$9oUxPH$gyqLeY@___K0e9%s3fUMcWS{_F*kWQGwFVmW?s?
zeHh|^K!%w?APW={lZX_M4TG@-84g&Ug(Vb4weHddZk~Gj<@41snX0Ut+^>|PqDdka
z#{xv0oSSQCm=Oq4j5$drC4v;bWkkWwG4l$)5DN`;AOQp#n_kB<^
z6h&byszS=D5EPSWpj;`K3>hFACTmAoC?~K*^m#qRbsPYs0%hks+a8lxYpa4mP0lDe
zvei*3Z4VWWM#iDdvD5&NBx)LpayNx;h>LY!mk^!l&o*DVzxmqk&2LQl+ndX`n5(|I
zoVaZK$=E+w7n6{7w){b1j@Y;|)t!70;6Zh}@{-rfaUn0Ma%YP-JA;MEtX_2T&U`T5<+bf>xW!qcOdAKv=l
z(I?-WHj`#{?eXbRppP%}rSW)gz3A@T{%TPa-6C#pZx^FzxVU=tscwCGc6L%0UUe})
zec;>Wy#sX4mG!i$HzgJGrT@$qe`@RMwFi%GK(fz1^Fr5k4E)sPmxttS6dhe4D
zzVYDxs2Wvn<8ZhiGyMF|{iQ$iqdz^FUdSqJW7HJmc{@ZAnT)2ZJ`TfB)$MdPF5|8r
za^JUg6P>X~hx1_=%3}OuKl01YRX#7)$8!L%cJk=p=peRTEBonJYtFUD}RoPPOAJh-=EPxwbaBlli7{dR9RJ{Ubu+n?K6-rc{nd9w5T
z$#8<9O-W0?3gu9^LVz&MGB;^_iY27M84K#_#6>VR=N#KLrq=e|jLHEiCq{-Mg*>cN
zh-A2R7}}h;2qkeE>gBmo3=LJT7vl_UIa3SSPAzKB*mi|2yn=^Z*ekSn{_HLI;CHXT
z{X6C6i}keP&7zN@Vy;|#;Zv`C%3)PLyZPd)KYHcbiwMa8K(aZAp>(%>F#Y%|FC>}D
zJ?q=c0^`D#b@D4gVy#W;7UE*amXsMOTM8H@8iUqSpF5(;m@y^mm&d+4
z;hxKCoC9)&Id_ETpddQ&Q2Wwtv{uh$b341MFCN~W=;`g}o_p%Y|NP&1;e{`aN{As3
z1FYL_8RO|{VVm-qXRrK+ul`2{;Oyiyg<lX7FA=pcThOtVHv_3_l6rw_-vJ)9v;k(=kek*yH`KI
z_VV5LJ}gcAp_e~(=kBB5`jfZ5e%c?o)Cr7=P4MhP*oY^9F^VE>RHJc>$90HkDpnL(
zStdtL!qodT7!Q>sFF2?SA(LQfxpIarp&58c!ntzirCqZT(iGQT&6;+M4kiVT&d?a0
ziDgRQkv3gujbA_-q^y%0MY4(PJSl2I>5T>pKvXbKpqf!5CV2L}M{2>@<^0m=`&Ulh
zzBG!j4?Ev4p55Ph>7?tcx&4cO=H!WZtskKkCtvJARrbLcKkG*OpGQ`YogTdycIrgl!#t|@du{#OJHfI)YWF{H}
zLZZ0^S3nsbp335uTio4D53_V@fRP=ZF6)g;J0~BXx)d>ee{
zi9ri%ou#C`0b{WgB(EDzYKaJPZUOg|uR9i^8%sk~O7d
ziK_yv4ca>&qiV2@%hDDW&<>J^)<>fykuk=wVWX8ZMj`pURLsC8iWg0)teKQYGAvb6
zY0J{p)-*BY43cD#q)@Q{g-A>>GTWlCRgGPD2AqhUwKExVHzZq1GqK_WICX|ngkcCn
z?R
zV=7Qb3WHBR`aWFxY|>
zx_Z1n^dUj70E|W3bqFdkt-JMjys=&l!pp%+(=@2@;NZQ@a=f>_*A43{&GRK(dVKiy
z*|BU)(%y@|xHYxVe1oCUhDy?g)u^|O=H!oo&f@0Kr&FYVvDdCaTauJ3*L=2v<@u~thcv!)@n%hl<0
zG&Yv}uxcjF&?X55N&;XcB6dhFyF(R|f&-+Y4;zb0>
z7;ZvvmInZ2g9r&JNkkD%F|Z4O44DuLW>QR<)=38#YEWh)0`sh$2xF>J5;Yw$Gnc5P
zKvqb|MC3#QU;@mdDwzNxpa+(ej6r9NAp%4MFgfQek&E84%h3>4DM!FWR+4t?A^@l=
zQblAG1;h)q!s8S%22a>M!BJo#t!FmoZb&(407lEknNqVw+Zk(#NTmP(!2q-dw|04_
z>pH+dCZrgF*qK>MX*8~USb@5-n66hoOT2jTs;YMVVzrXXmtOwdr+?y$Km1c)|JHAw
zE>70{5>cvhs=|sGQYNg3Y=~Mo0riFoL?apmI>OAZQe6p>Vdr>^U;tEQ==0pBCZ;eX
z-}x*m{U8#_QaK)N#+X8q93o(58_*_1%$bX#K`}mIN}dpgaF%4XHQf<#ie0zrx^?#l
zzyE9B_~zGs{^$QSd+uBUa!|^*vA1-vYd-p6aoN1CFG*AhOsJ|s&uZ*J`bu!
zBxX|J5EF8)QWm5^L{ymBD&`LFQAC!4&>2%2lX4D{NfVV#A(GZ@WMpGX0&hwIQ?nY$
zlv9c+C6y#mIf|ZJ%3zflhWrjvi7L@VV^=
zFh>*zW^%U9u`KOo@>CTiB5jP|?BL+a-ZPiCo&%u$%|c*3nlO3|(Po9^+U5+i*~Lk-
ze`)J-618S5Sz{WkX#}(nt5tUzJOW6PtQizyPQA5UIHu^l*J0?&GBwZuKuYX`R~}R&
zVWFtRmW?yksvy&J@avEU=bTXzMIcd3)|!+JDgdb}KsE?WBoLt)m(Dq4$?BWg_|?yR
zM&cMzRh1aJe(4HRH-$*@J^@nnDd*%Q3P?^YN?A1~40@*R`gE;PA~N;EP!t7V|M20W
zTMK{%bcm25SX=%edH_H~=UHL;1Z+|G|Z_+Z5dtaB%ba^|xNm^OK>`3PedD
zY;0}4u>I^${Fz^FCgV5X`uZP#b$#K&dTRrl*vmVEc&dpnaCoI8zKdKe)~
zU6fWG^lPnGw(pY#sZiny6B$duQl@>E0%(-njJKXDs^;zQmEl
z31U~2D@svRi^ZaJQ^F}58{#^~F1o&tE1*e|oK>vFqM)1+LzS~5RHYUe
z7&zvzXqSps)K#@hc`(MLj881EF$ZI$ET$@MG4J}m7fIIEDOpLTD1otIHlT?>ovRu1
z5Kf(QOl}xpwN{@XX&zGNilLK5g0+HAzB8o(!7Rb&o{}p{*xIU@jfiwZ*M&Z$hVyn+
zmzDDbZBf{=ESU=D%I)pT>-8BzGDwU=+qOcHjjR^QSksKlqPEQJqae8TIv%cC)s!U)
z1Cv8r2#Z2Uz5(bJ6QK~6KBPpl=_bytx^Agb&KF15Z@mA`JKqD$tJR{Z%AB%_W@XE@
zfTS3P)B+GI=I9Z*Dks+31bv9BvYwnSkIJG}F)Gz~G|O@5+aVVjohi$jY?GDz5DkW6
z%#t>VCFYn?0JX$cG>0e^4+s_$=d5ImD@&m4rNtO3FwSgdm8?lKII2cG8#sip8l+`r
zrb+}QCDANJpTf8u)UY6f<(jo3g=O}evUaoW3vS$KxeDpC5C8D_2j8lS^Qb{F1Nv!P
zPzDlYCh*Hiy0<8%p4~uOhm#-HYO@RonqYBm}lEZXY+SY^VZg-S@6!3A}L4C(3DjL7>2l9
ztR6TwF@?Rnb?wEMUw-iL{`Wun7IB{PxZb$@`1r^cg{XS*e$k#D-M@GJ!+Lv1o!#7j
z_UWt7Kl8%N_ilZ1{k=DiPae#sd(EgGx=~Tu+UmGLA9|tg-lLE6u{oRYqyt}(?Y%yPj`}g;j=Ldb?e(>((^z_u&V!W|cRO2gGpFTf3mE>N3
z<9n;_^soQ*ziy~s&AY{No^#&Y-I+amalSaZbNl^wQ!65yJG=cV%%&TI%T3hRQs(``Qc7edbGF{_!Mf)vlDRvrZ85;H<5R!Y-C)%k}x`>G^cBdw!O-ccwYR
zVtIP+;iI~&PLI2)8eh1uQ_~`CzHvT8S_J_NBPnYH9?Yp{p_vVM}PR$_bzVxVZAC{s;bn_0SFPy#5Ej|A+tl
z!Qu7Ay8p8!Z|`iL>n@#TTLDO15jD{Skw7`|GCMVjMpb=#aT0EH>qjf`<*4971Sxh?
zInJwC};8wPtW1Q8xMc!r$7I5|K3l0`@K7dM@LRh{_d~+v4_oGwx&Zl34=6K5bqITQd2qe
z>mG6{VN%cqMKuzgM1;XAcH$*h3Y;)UNu+(j##w8XBEUK)3k^kYZPLbQbt$h_@c52-
zbXWIxsmcZ|%cw#G$u^$Wz8ET(-AbfP!aYCIWDxpbA9)FF1sR#0Zoj0^0{||IvecckZsPui6XS#V^b%*uC)aTmS0Y
zfAH&Xe)9Tc_my|3y{9A9n^=6vgJ733Y?2tG7PIzKeF
zX&N6FU|a>(C6cu6bDK1RNA5YerYs6#AP`sAVyxE;4sftC1YT$DqAMCSoCG?dstbl0
z00kiHl5((OO&+N}LPTW%H;L8`N>Q?97=y;-H3pR=pqV0Y&&=l0ijs*<-HyuCbsG;K
z>@IKbxU&&o`IVRU-hOl)?|t_a%K!4C^P3<2-V4`0y*at`H$FXWm#1HQaPoC|aphi4
zKt*lVb;sTP=jJ!7vv=Ry|MT;`pWIUTx$t!p_m^Yd$nBr)zIpR}<1I6N1X
zlnYH+mjIAptYieSgGAy?G6o}%&j_l9r4$^5Scnx6R8UjYs0>C+BhjHZkO3emi;|*U
zX=EWQ6{CX5ilV|Ar^4O8QykWkH8Xhwxx_^j?=>Qte_P}6lz@x^QqD#_85_wBP!^0BQ>6+Svr-15d9V^K5+GvAn9&3EmUjeNrJy26j*$}1fsZWjh$hiN0TgP(
zr7K3v<6-Cly^7InvjAyNDgcyPTa-{%Wlj)dp~7X^7v;d%N$S}YOl8iZkje_IQy+WE
z1!s+U@DlqjSY?Rw=~yZ|mK+SNI=I*N#^=>~gyf)Zt0JBb*mb;FGD4UAqCC|+y)-Ue
zm%0G0E3?xgt>JWQeV6*&vH^BYrszXVAYJY)8vzz9OJ{Pz6f{Sdv#3U9Hm(LBWdzbN

CMRM*vr zDO+bk7>3XeK?ZLDfSIeRsB_d_(^vT@b~K*>omh>|E_RHIq}at4nYN*t%$CXGIK zDYJ+{gT_?-dWopcHYGQL1yD!Q>7bkTN=cJG4Xv)Rau`8#f|w(446S00F)`ND*~R_+ z-4|YbZnu8vXMgHv4<6n6#@GMjP6I9##fshpfav~E^Q3MpSLJoro zVX36Z%0;>MeIL}CGUV=jIc#l?naBrYumo^b!5I|MAd0a$rWUYdYq}0{PAMrHv_;P9 ziHMBxzQ6Iwq8c?r3>joJvGcP9B+~5c#-`-e(a`~tG8M!wN(QXPwl-52FtDv$QCNf7 z=WZFP$YRtIQYur1m@{B+%iOh#sw&F541)~==LpHBx?vwWpcqmwN>x=8=a^Q;6)p;B zpEXKiApt4_MRubnu}gljkcs zJuj^T!L#H+Fp~iyN=ZP=$#}|uXdmZTkD8_*&b(hR;o)$9cdt+VQ&%Q(+IA0@=hbBA z)oXw4&A0#P=J{cJ=Nmhp{ne+Q`|R!eZ=9TeJLAFOormh(_SWXZ?oK(q^yu`_Xu7*K zdFJf+q@b(e?9TH1n6{uU!`+7u`+S;g7*gO>@$mdxu3>bP#CC1h<8?EVN!~bL9D*rE zldZG!M4Y!bcSq&M+3H>&eC4K1Jq@v2wkJbcxzdtO1U#F8(WYP*3y5JTD-6;2)J4ez zRZ5}~x?weRal3p5bRNR0EX&gE4Ly*K)~m;gxV^P;mRde>Do*96;s36fWAaO)e1e0X| z%~|nDAOHXpi{cuj0t_M{^|F6`wtKA zZEf!T@Rxt&^z`tze)sPl9i3db@bvp1eUr2Vu*fc}cI(zhs+&ztQBxZ{V04zo#2IL* zG7y%G6V;wjvtstDLI7pT#Ol!kB9XNyt|-T3StS^{f^G?XmabVRwA@pZxlxAN|rVz4Y>@PKKf09Dn}G&n?b} zyZ3HhxcXdL|JX;LytV8fE36)#9pf3L=z< zEZO_K$jJxag`v-g#!Qq#2zf}$alHkcpQt$i229KXQk1S%7*H5w#8?r=f}$`circ$Q z*0@-$nMzDH$Bb&%>%O$Mu#I&ag~b5tC|yjpVjq)<$z|b$q7YFNU@~R~7($FO{%kS7TbV6u&Hmn$Eh9;cX>WhC?H3g7q#O~@cw?MW+8AAA zrbmw+4ei6C+L%uFY*qJNAjBMe!444pkcdoGRaw)62X|NNMKf|KwZxEflAM7BGURMD z*KDh(iv+6#BO!;Vph(;(*$|h?tFmY`k5p{RNg=G4=PCQ5Z1fl8m+$#{=Qh%+F9y76TC(aksR-Mh0|JWe?wvU6olN=!t?nqtU38HY?EwJ}D` zWl9St51J!RX1OI^9`D8&c<#N(`0nvk5}X8O6j-CoLFel830dZaW*jrOs>jYGil$AAR%ZnCljO7uTZs0x_T`rdWfIUJ?S=V+%?a*sr zVl<{8n{tMA8&Szx>$o;XLI?qpbUgvq&N8v2tURpd^WJyex^>oS$}#$)C^X5s4{aP$ zpPI>KOr<8k7%eOL)`3H|N~N*@078a{key9lkU-EmhlqoCA`1~n*-|Nz+x5MZ<5_!t zv@_lyqHOW(?1T)I1_?XOLsc}^7EQA?osCjnADf&Du0gN?|qhHrt6Q+_)*4 zrjH8iZdw=&>vX>A07jG95V8>X)IB;li&?9BvRhmX!(teYt8x={@})2S#Iw)5bo0jT zaYg6z#SeY?)dvqAwC&0=02;O>3^<&f4zY8UdFS-zhwptobb`uN*$nH|I-Mhd_ud#Y zq#Tmd4C9eA<)zO0(YWZl)3S(0cE{zWjEmsY=H_H$w)6JeZ)b^%(?`3rvK>~pZyk&$ zJ5N9L%GE1RzxBQEy!ZMyDTj@{EC1cU_1~$QnPYc$d?@1prmiUV#w>F`?~Wfnesr~T zZ@v4*m%s3nH;x{R=f|(U_>yx!as88b+V$zsr!+J>FFkwb{=L2Zd#czC?PxkOMID74 
zw~vobF6>>bi%CVBPwf{E53Yax8~?Oq6%7v`-d}esW_#)4)1UqPmqJJL`NL*ZpPnRF z6i+?<+{-V0s_mg&4-Rw2?8-XEobu{;PMa?c-#%Xar{|x#mX2Oo-o6^{Q~R(k56G00 zqVb$_QbZ1z0}Sj;$(iC=H-zuu?7eG$ZJtK|s~>ZJ=lPvtlQ#bSzv7RodA|Mj*WUcO z_4$8tS40#1Imd6*nWOYc9vZCpRn?4cA<%h)18gxm}&*Mrs_b=B-zPy}ej$fY{BM&C=yq zAVXxVGj4=z3K~x)Ds)yLr-~;=47n8rm8z+~3s{ZAtpPAo3_~vg9M|X9|`5c%2 z_`(a9juv+xeCPhBp8ZtXx%PMe-haIOv6NSZ#)%eP@-hs1cxvT*^+oLRkMlS zlVqhQ4y2Nams;+-y0W7iOVBu<=EeE=^7_R;hx?bkRAKPru%0Z>v@W~4Tn*#1F#gf` zyRS{wU-+f}*`0^|>hb$;y!D5lsqAJgG4jXPKR!IX`6q9De{ufsvrlc@c=He6dhZU* zp0V}Sd+P4hGZ|4d3^CHkk_OvmDTzm@zD$rNxivUbaLEv*oUFA8gQ=E|YEL-?h(nSD zCZRKch7=W3VooY3>M&zd`rS**dIsW~M8b+IL=pLjYW7**Ar2$c|26_K9w zLIlF%U*j+|PrR%C4KgA_BN>!|p};tRaRDJ@9WiPKB3MEy(P+Z6!9TwH=u^+WU~KsK z!J}~Y@zlESFXP2h{->{8j3=u<{Q7VIi|@VhLXCHh!#7PlnCOB zoUcrtxSYhrXgM=*#lxS@JOd!elcne&IW3H@Eiy>TcF2^o#Ndr%vSe5QltD0K@VV;^ z*mmO7F|SrHK6ta?gD@R`(C=S63(t<$Kl|#JZalcVvuMBg^fRaPn~!dOaNEE07#E}2 z#Ru|i9_?vR>z9d!b#vup^o6s$z9RGT>~~xLZd1DhyE`q5{jC>RXe#UQ_!I8$pCjTK z?oD|kdCBI!dQ@(JXE-uu6kQ%-)tI3un=uw~(sz-%NFB{;n_fQt2Ss}MVR{iK{leN@ zgX={K5Q79Yhp5B3X3xwJ85n>_h*XgTj0!={NGQsZ{_Ks z?p?X`kN@%iwQIdrGN=UK1rex#LNRn9r8yB9!-Y$b9ghc}O%Q6M= zopfDSmaZ&I0Ap2#V%GR!bmMC$CnpEb{i?XpNYuSso~O#t0MOXFqxL6gb(&cwRmFs0u4rdbH=G-D+*fus5Yi?83UMZLKN* zfpVZ^pstqcW9ZvRjV)?tqfdo(22Dun>Y_H|!{xGQ>PcB!!wH0gSnGgEEeZ+%TCI&_ zn*&Qtj7pgaRJAOd>10DhQfd1>Bp(RvWW1SDlmgbPHYPBZT%lP8Yp5_rG8oh3*sqsu zRo5B8HFh+s7X7+gtraZ*xUv>d9|93&(QdKCoSbt_NmV7m2V=?@MAd=%oDu{y%&Nky zT)G%z$r`y>cdZmu)MZRb5VNQ&rmC4Nu}jf+M3lkhq$0-=+myWTye8QgO`6)QmWw0B z!cO*&?OC6#APB~?01BX@R->_4 zmWqtbF6RwbndCzr{KPV-P1#UoQXWFz*JYJ+&YA>>$v9_FRf8l21uczBl2tqt14s&K zG#VL0Q8J()WmQ11oH>QSj7rpyshh%c6oC$zgiao|o1=+y7*c2JqSMHd*XsbPm9x$j z02U(JQ6T}9D1d}6a|#+GW~q&}n{GLDW>kC=r{5 zt^@=W96J*9VF0USFbG=XAVkC*!%$X@U=2v@7! 
zNF_TCh3$|GLzyWVVnTwfm958hQ`!;$u3UI|u{b|po;l}YjD6qEX0!A2lRmDhje29} z!n!>lk2Vm>-JMGh4!*y*ejN1=UitKoP&0GQOHF+_t{01=qmLilYUZcg+bwd<3ESmC z3~fCck0(xOy6Dg5?ZbksRhkGIq||7gD<8U)2WEB^NyKo+kxT{Ylp^ghq?SLIj*Es&YIUO~^>hoewKeyIA@@Jj^-gTp(}=L+Naa(f3(3 zXH;dIg%!9YA5%`66g`0wC?~CvluK8T_rnk(n~I2{sB*gL z1#5;OMh($gHWnBP=bA3|NfVYJ7}jlXLOCiX>Q|;@p(rnF@Wo=F$;RVN-2h5jET8*)nwzs3%gH$>9x=Nu;FrZHu~`W z@7?_5{pIr3xBlco&Nign`E`o@otrnGfA)FH+|CaslTk=bAQjtva{C>&u1iB@-KcIR z)p#=7?7FV&doY5+<7NYOB(_Feh1kMiolI>xa7~A2Cp0Ym^_%x@Cp6=|i?3>E_hvJY zo@%dSQ>n$F7(54Kt=%YG1jI?Q6QQuNEBjRsb(S) zNyZvgT`W%tP*l(4D_&yJ)&3FvFppU8JU)F|1@rQue|_`9i<9mBKKR?+=0~zw!G*T+ld9&T zY7N!v-mqj{F9~RS+wEOY>!MFI=X&;hgVk2Lb7eMp<;BmBHZJ#og{(fgalIaI&o=f? zkB{5MqtoSldA1ynyQ(zf301DG>flqSb+v>z#EyszTUAX-e9k}&Y$$q)u_{Xu9EPas zFqW>Y3K0t$LI@!PDv}k=>%K$5jynRZ%L)-CXYXSUxgQ2Htb`(hhDBu6u2t0%fe7Rb zL~M*PjEF>77PbDV^JD1>MtWkc=!RYi>t@_ArIY}`iHkn8Ll?X-CmRbSErgm;<3TjjEi#J4dc3DBF6?1 z`_TIkH0IG(b76Nchxq8h!xUrAIpjn{O;w24cC~cQG4uKPxpU4sH+3T*?L)t8TUD)# zn%QKDR5)ffj$@2{-BhsAU>kz&L9-W3Kg14s;=Fc3jVrGbnHgphMY%{h9>fRIwms!Ajx z36YD!4w`*G7-K3|VSxw|V`k>bY+z%n;!4cGD zL0M6OtR*5MLIx2ThM~4d;&Y;|div4UPaaoW9-B#Qt8fzPQFh~2t&#b7{vjPMcJf;C z=w#y=hiT{Jjs4T_iY_rXKYr{!`v`21uFNyHWf=hhErIcLpeY`MQ# z^*#u6N3@?sFYoQ_Uw!KF@%@i)eXx7^>Xlc%z#-OKZCRFA%(z{ge)9g?j&UA~7SNq)dRunDf&!j%7-DJxEL-(w1C!ABjv#>E68uhlj__ zc$BkKfVOqR;I3SH>eAI`uU&azHk7r*?$2j6?|?QgyE*)Lza`qJa0ljZtkbGy0V zc1}*#U;pYKI_IuFyZ_#;_cun>Q=3m+dFHblyO+wNd%yFW|Md95r>|VTxVwJ^;7Uxw z&c5){XCTw*a=yF0F)qx|FF*O@le*sOy7T$`^y3dddG6_FwzhV@@vYYn4sTw+@qGuQ z!FK=&NnKGre*EYUzVh3IuJOcF#lYe}_`ODAjh47VPc&s~D z{y6S!UU_bU!rXq zU+v|`1C_QJQ(@26ycxBK0z=T-Vf^EV&g{rcpsx5|y(Pyf(Qef0MC@$B(G`nBKq zkNy|Wt`0u@wSVxR-P7rB9Ps9Q8~aE19^2h+2|NriDre4d%%vYf>{3}8N$f$QX6OY9 zbksY{Ig1D&s$k3tDJ4yrD5VIh1dIlNiO?t!s2V^)$T^D&5IEw|c-l0LXv_lZA!MY& z6-em4Z&R2py3NrB*PDb!1Z?5P`!e98yf%v2MKN^Paop1em z=-Oo*%F>*ltYV@^4<3K@n}71Nzw|%&^5*`3`RtScMQej>69MfrEeo8Y+ko3>g|EiK?h-79d1a5>$dS zLr-EG5Rm|xp3oS`PYk=LLI_H<4IeG)_>8v=pDhFal2gNqi{7;Tg|LE|$es~~WFD|#5 
zYJD8&O1(Lu>9RXK#d1Ep+V~G&O5ZYf-n}(>?NXI|yIQh!r5%=s;&pSY!Z78cT%DZX z`{ep-UwBdPe^45(23mV<#yiVyXa{97O;OG}pL$=6vSsIph7_}CpVv{isi#j(w#((g zVt&{kKDglDg8Sx##`8;`U5uZ#*%~5XAfm)pGH4K00R&YQP=Y6CG(;p+`F~$A)&Eu` z^S|9jM5IqLaDuAlEGg%Vg2?zJKw9-lwxZ{Sd3#cFaR<$A|omhsvseX2q+puh?wCA=O1?N$xQ$C-%Ui! z%&Gu}5=2=h4Zto@nM~Y}Y;)&>=|nR4vNmb-@*&Uahwo%uW*w#RZk|OqKPncRStr*E zt~AG^ha>HG#!wd_1cPQIxDJ6+5``p^0Gt3NO;0pVjL{hLWF-+f=iGIjk`gfyi3o}! zl5?)AIfTAzhY-SOG%A{k3$oU3_1`GFdZjzA+%toto{&OSyBsqSIA%CaQJlo9}2 zUFMwbUi>qUpZSGq+7!cSfUPnWSoC3c6e^RM?0jv{=j2ZSkEAn2QO~fTdelPfF%m_} z>3F;`ZZ=}{L+r@dvTynHUTN;mnyD*Mu_VY6DW)t*lMGc=4a4BQcg{KILW=A4I;4om zLx@yJQ#fnw6E#PMeb?29#u#SKIeYI#v~ootRm~}85H(1S4GB<&Btub`eea3L7}JMV zT0tZbRB@tJ*LAa^@~M-I){Mv|Nw8XV8?%kcWYn$CQ|?8V)o8}f`kdIB@pzn4TCdm6 z7#{@@bIxqAt_o%bfEZ(4jGc3ULPRQ96s0wgl2~I@weNc$BQwV+A*7-x@{>Fr&N6cl z>HGjd#7tljF-6HqnHv-{3~6^~-)G+iw|(JSaqIhi8>z6Zk7d2}+~&m>p8wRJ`-#6W zX*LJn{?Rvny_8bfSsNGT;@G;9$vsv$3dh#es^0Z<4bgn-5Y zKuVENkj%IlS!*SQC%1zqo;t=Dqkv2)XJjyBVvL#ccswp`8DsR`t7hvMkwp?BQdOod zCCv<^3MmQ$H+4NNiOUL7r>^n*Yrf$IL1ea>^lDHbqeo zgZDA!gos&E@@Sa_Q6v#TQP@EYBF5xJq;$oTEoC$s^`Vd6YZhbJ7$Zqhg_zMXB4$*{ zYM3;V0Tsp}$`g~1F~(X(RR9=`#^WLx>l6YR1j4d%hD*^Y9uRF`e zm@Ys ztCQ&n6R6{jo$VR!FBXfw?FCm|d!UfI_I$OrQ+u%~T;p~Yi*q0QYBqLNvtF;gPs~o4 z6kL$>gX%4X#V9F?WKluM#t%98)_Xsh&Oo*6*4=8&Y?;UwHu_*_vv9Sm#+$plF{kx< zJsFQw^W(=KRaNzbavn9KI417(`M86ai2~wALsB0Ax@UdLott07M{~Q4tZ8 z0G`kcNQQIHB8rH>HsvfK5RnQvTUcY+7C9!6qzQy54}k!kb!ZR-GY3LYN}vo%j7mt5 zA%SGciU5)fGFwAHA%~ol*#d*c%#4UAnvAhqlg-jtM*8BH|2zmjc<}J)r=Mn&_uhXi zghf%9lm{gkH}w;%YX(&H#;S!vEyf``cB5+SlMkA5&_qc@5RpKi93dFdT5E_x_I>X| z82YyBy3QCEd=Nm55t)mkP}Q8WDi}5ji472_Y7&VlCGS;LDVgtm=TDw{;j^kS z@Y3{B*Uus498F4a>+VOJqg`VMU>{>?XmonEG-U!a44q%CaR1_!3zzmk`uN?+wCXwy zKBtsYN+ARQMFPdh)*%^dN=aE$PN1lWikf1~sV+kX1T+T3Sc||hdGCWl<3pd5KGCpQ zqDW%Kx~ZP<0a2o4KtyAVbB+*IB!nPOG%|p5MOl^%xLU15z!-xdWl<23_r6P~NGXJF z7>3=Q%b<+x_I5517PfGTIg9VQRlA-K>vio`gOB6UW?A@0j~)lFDJ8(vjHbqFOzY9O zV#0NMW=&WVVPjNf7`i7yrK*Ss8B-F4 zTsT+Q8IuPPq?A$`2A`4;nen*#!I>5j5hbZA0HL$a*DMFT004jhNkl 
zM%LPt0vUs%{m@FUNyQpA&WXcr-2#A!l-5~mhkh`HGYo)23;_H@r82W~?lMU0qms>4 zqljwIkrp{ZFA>)!o))x#*ifjyJ&tdH@}=FcJw6!0x&Y)QZ5y+Y+PF9M@02)FBiPl1Rg~7+5sv`T<`h!Y6qzXc5Pbk3S5|dh=cFob1mJ6}b_8#lgBM7#L>YcGGQ z@B2rG2ZWNv|H=Q8y#EZgwoB84usf{qgdI+NbNF)VJl#FhJwYQ4N&`q90Wu(@G6*Qj zU?@}NQ021y&&E|UWn9=4L6kA3Hhlb?k~qTr;~Sh!b+m=$Aran4;d~on^KARKKa=CPsYpdEAv+BDPGg&=5I}_<$<)Mf*+^_p#80x0l zlmTs9`-)X{3_TG!?^QLW1O($ak{}tbnyMHmAp>d5$-1iK?2KpToO4cDpm=M?KWB`a_s6BAQ*)7~+xv&#aCd%p(%+jvE=y=)xGU6 zx%lwfaIaG6RRAT9L-zZo0%LL>hd}vU+PA6ynNWdAc9c#C9Xl!zhY^|Dwted9XG4e=E?vBH@4=%-kJRa8K07)+cy#!rA4g;9jUTkXK&rTHH`7opZ)1qzw=*x<9A=5@4WKNr=NOw=grCT zE=-#ju3bBR`Puj1{(gzwXFmI-v-P;>k8fPsPpbDH+?&?T55D@nl)5ke)aReRdF9f@ z>*JUe;_GjG@8snE4}bWL_uqZ1Zg^+jHvZ!2>4`#W+Zqwq{jglF0nl<3DYdhc+KsEl zcfR)9zyGzbTk9w7cC%fRbt+Ul{1boVpZ>jXeEas@lh3@c^XJU%@85a%U3aa!_Sx6> zf9f5+dLb<@hBq#rKB$_i)Kk}o+tsxx7_VBO{y91Tpk-hKPg8{c0Ms_G+K zZ*A>d`{XY>+tl^`Klx|>d2;2u-}w6<-2UOcqbGmzXI?rwIq7_VrJ?Tl#U; zbCwgW*B*62GPRY6IOl*6(QPV6X146m<+*W<7YKc2y|1x8djH_-Z>>K2j}4PAfML2g zMFChG%y6v2)@nAtbK}c@=k}ZPx4-=tpMU1kbI-o<#_vD(Z2j;4@BUYRT}?ssgBhCKZ(| zngLl=8I=q(!e+Aq5Jb`}NNA7>DgYD}AVRhP2%-cefatxxB~?{GB_y_F7}yp~pWM%T62r^MqGFZ+;S`-u*QI$b`c2zg}_gu5C z&7^zy{_@@voIfrza#}7r8bhrMEAHX?*sVUYyxWeqovAOEewuD?58p0yv^BZKTg@wY zn^I+F+e3odx~eR#$at0w*02BITh}jM`-PwTxz~R1oiVMRd;Ucm>IdTiXlbgpt{9Y- zC#N~YY11~gUY@Pgp>r<8qRfVE&T_muy?gp$Mir!{Nj9#!vN&39=h<}%u|yF7YARFJ z##<~B$CU^vsHzZ=pjt$fq=o2L zNQ|nAfXu8UC6yn4;LQOB0F;p7+~r_>uZkj>vQ%mfaxQro<>1Vw$+L^u^A1K7?<`7t zJt$XCeKD0>tsYPA-7f1s?_Ta-`r@)_t{fbI(pVab@o4qYj-DErNFpW5;y{3K06Si_|xRw1I(ZgP6%XVcA}nYenfI<2rwCe=#X+3_7U6Y1+xajw)< zN++qT%d9eBOm7(~4oRIt1;%h5U|py#WgsYEIN9EvTd+igy!4V@P8f zvI-*Bb-jVeDRU7~Rc5O(hZy@Iq%fiaB3iJvaV3|WGh_w;Yn^e{P_<6Is!CDRVwjlO z5DFk#K3lGHi2%?xQ)~UAkJfrEsFIPb^KDA3(!?CC<>_<>Bm*)qth&|d*@_vxpNvCv zm5-tzpn|y+B12><_win#8_os*z!El1lXK2Vn6VTch5>_VN7eDdIFRWML z;lukczWDMp7hbt`WB*5QeDiPp-T&<9_+F8*@0N94yV{FbcGehTYluJ~h-5Gp3?g8G zBA|p|m;p*yuTxG6ssyMCsE9x)IgXO!My5uF36w24Z)MC`L_paZB9c){$|1!(9mlPbZH`am)X+SX|x~b?vts>aU 
z`G$}gu#7pC0!Rv0B^PfTWm$I1kYej=X6q_H<}{nlu3fowa&$NjW7|$taJgO~GJzUn zthJlEETmYnN-4(qoHHV`b!0XSVq}D-@r9>!5XopZ*y0tG=pi!r8c+nufLVdzdz zPfM2sAd4`ewbmKqj0q`~RDcACBxjUNgrp+5kOH#-QAFLOB5toow>Ra;d}$*Llyl*%X)0vqRycw0(A#K!u_z_fh$@QrcEH*P*dP>iWA z?ccDd_spHKTdj|dP98p-&6_J%_eJ7P`%Kq$sl;LI`|h-)Q6Q&fcw+B)ZfoBbl`yWm zbq1K{5mc6?P*zY?1Q1Cnr<4`SB&9;6$cSi&4G~#|mkTs} zDFC1VDnf_^D5^PS002TmoK!aFUPS>FX56p^!lEJ~Dk2Jk3aa!6Z=fQYbKcAcC|U$k zESvR+M4*xdSP@O>Q*X$&RWqB+31IApVd(0r5h!2?EHW@=%s~N4E>J|QW6MU16p@0I zxp;(_6F?#|0s;gDqClzJN<>5;3}GncOu!k3Ye!FxzVL;g{?@nNYU=vIgS*pN z&0JEHKCTr7QGM;0?Klo1KwvAXLA7!-e9B2ow$_43ObH2^$}{tZjR2IGf=W>k z5ix8^Mno=(7MYnPYnF&WL?jrQB&Ad&000s=Yn^kPR-*Fun_qiy|3`<%M>!3Z0mCXZ zefQlr_x7*7`0^)i?%f2P|M2^-9XvU?eDlJ?C+|Ice0X^L~x6Txtak*H*=*Vpsbh))#4_W4olITaw%$zan=qv5j~J07@yFraK^N{=kYL1GL;Tq5%6*;EvF_I9`CyD{dR;Npd+{MBmg#`_Q7B?6+g#%x05yw+K0i#;)r&^Q4`&+NUg9w^cWGs(HiwA@N2L zyIC@ZED>GUyRbF8a&-LY_~cbf@Ujh7hXI1H-FNUCa#vBnHT?|n@~*4oVh zID}Ao@s-^SOUjuL2_W@jzwVHXh%|Lg3Y(#0Bu_>(<{V2&h^(pztOgCc;j9Bh*AQ7o z!_(>1Ik#GN(fFKBlAHiQEF8fgfW!0SwgCeO0J&xP1M& zc58dNev*d%*&Cny3xE1&-hKCbZ@m3l>|*05^9vVGx<~7NRaN}p)Vq8%aXYi=wS*HZ zhaLm2y0rVU?G6Ob#0-0rdB`P4k&sIS;989b>R~t=;<_j1W{X#2HB;W2rS=QUyRU!c zjcuDR=YDX`p|h#bSg^ArWB_DCWe8zh7w^T?h8PjavT?@57(oQYFx#vpj-#zS z6U7ip(ULW>s20^?3^5}!{&><9)s4s3ghRvH`ugO%OlE_sIt*Rct(dKG6*7|DB&Z@7E?N3c&bgY@Y%KsOP}g-LvdSZP7%{dzugXZ#^zco=>Ya`wbyR@&rTPMee=D` zS1)~G_{PcDt+QVnPYDB>1N+jGOJ7KjesdcSN{UOv7pIeyQr?`*@Vp)-A}+d_s2gIy zIVYtcVvI5L0S(u6?Oa_~PDDzMzHW%96cx#(oZ|we6l=Cg_437wZQE|ow;nxuc>C6e zgs4y6eD0;6zWV8>?mW1Cc(i`@nU@yBnDRi&hmEEE3l~c%A@4kR@SrevWAC+BvKRKR zUcGYT@bK{Qqx(5WTd~1vQ_OGb`IOT6kARG{S!yDg*?D6z<79h#XMe901*9hj$J_gt zo`2z`B9yvAfgln?NN>LJY8DU;^`x!pY134v!}{50pWofyJ$QWZ_FM0E-BWo?=B&9HpJqcLm zBqIXOCi8f7aC{m^6xYs&xcb89|HzNt{2r*kdmk*?Kl%LDFUukhf2*1Ayame#>!el( zq$-4%>CBXiZ~}{4-?!V1FE*T>!T8kdLZ-<8S{I19pR@m?f9Mzgl6$p3zW>Q*pZ(0! 
zH-C20{BM8byQio7WK98ke~dES(XH+7?1?YJ{o%Bp1$ARVJDJ(o3jt1TJvw7dDp_*@ z03uY?ATft6=Ub?CL}}UAfYtUMbcr;C^@+5TGTYHSmbi@Td$_pQq+WB3ca`0AyV}gOU)sL-EP<@Yv*VMq3-0ni z^2dJVmF3TUhp`0lV$KmuzS1z%wEeK>lw^QfSdewJ+_q)^2&sMtsx4){# zk1{o8vATWs&UaqB_)q@HKmCiJeEx5ro;^N(h&(vjww9SR7m0B+uHt&0$yBLhHbj&) zmJFmqL0~leXY{5hgq}el+92jp+11N0xoiITov(cR{qLLZ+vVili#J}maOwFfpZ&G} zCX10*mpUkq{p1&lSg-l<#Mt;nc8L*H1}i8#sC0YN_w13Dih*aRskD4hR803?7S=WF@LlH1Mi zGqapuS>z`}TBXbu0Gygi9oalUN&}mV{rf(`dSPN&Z%^B-Iadt-Viw z>hm(1f9}uxi^I5l?VG=|?4KAw5i!j3`P`aj*`2NyXJZ;n59 znnh%zz65|x8LYHPJUc(nsA@ti3QA&}_tG+)j;ZHi=cxUpUVeJqd&S9GPah1Yk8O9l zgG(Ap+PW;>l-erPG@BTJ`u1CT_#tg=Sw_=L#$do0fD}Un6v9kI0EDVVq9I07WxgVl??du=E8d~}OA%_@tliS9Y@#twsfvS<$*X*?ab26}QGy!>(!FP$DU45haeHkO5~bFoh6e z$;jlqx0PS7`?hT@8{?g-I?E_pN{Kl&WSnzFF^nmtlyfdz7-<88l5PMX&q$KXbha&U z3aBxcA*N6mp+c?^Z8ggh`tZ^oc!XX{~o2MPnC zakd`^$;DcmOYw#Q;5KX zi%1DHj$?>pDaBZeM%uRBDCSd&!??D_JI9OV@n=5$sm9g&+gJYZ&;N2M_UPcFOP4PF z-Zy^d+CRGe{=2t+$MT0m7n5dhd((qzaPXG=}wLno&W62Q}6%nbbNb@B6-#g2n*h2JQ?Y zXeqU?oUtjTE)AprIT=JxhKz#hxxcLxL!_!9B+Cc@!!YbLjtrNAf-(%<$;nCUr#oAd zs;*bdj?mdEP`=E9tEPGRVi=%k&_wzf^>SF2Uhq^h=VQVM_=(%ARwoRaqr4X`so zf}{e1%oYWiNfns1l$0bn=a^9>Z|)R;WZ7;6H=Cj$is>qUR?zL;?mSjheKSQdC3$kN`n8_g7KHjfiJseM7+wVk@9PSQJ2u z0G>;Q%7Dl!WzofMy_)!DcYE*Ir=Grl_t6mg5Jxpw5J0p@%5em)Fbk7IVnq;;LXcD@ zRh4rV0bpRZo4=HGN<=9}5iw0gN>Wl^iX}@!H=obndF$=zbcRZoE?upC_2kLp#``2G z#y*ua^a&sVleM-nO^Io}UiT^1bzM7K1co3W%1FTAt#QUOv*Z#(Oqi{yk=Ph&nu>^q zepE?dgvbDtls2XfX3MJDHj}1mH-Ed0%?m`WwOU|jduoR;hGBa?tNb=o9*0#k-5E!j zPAp3IAN~jbCjc?V{NgYC5}Dnv|L$*p`O9Da?595am9PBPod+LmZ_m?EZr%Bw0n^lT zHk49XoDFpi^T_}vLD{}GVSe$sPv5-p!qFU>ytxu2QMFaG*({mna%UT>Sq+H&jmJ6Eq> ze(||afA9O>nY1RAV)^ltIcHLklw&T)wywR1^y}p&4TTZs(#a1iju?<9Xlix63b2m zm^h~bSuMJ898yXu#8f$Jj4`UHn%DghV@xSAGq4enk_4G@Du|Jpebbigy%`sa>-M+@s?4l$Ii>@U0A`9iQ}6o5Ow=?(=>2*X z$6o4+XVWSMw4723t2KvF3piifJ=^VE3VllXbXrZS_Hx*HzL_jr)nU~xW~r?_ryQ%V`N0J~Vgm&hQHLX0uTq*@pSRIABu-A>0K>lC!4E_dD; zLs-rZ3{S0Pr@{nSd0>}gz>@2_ZtKPw6GKMeO?hLDjU{hNDT79f#2AH{nKmm{Ra40* 
zxS^V}fH6i(QPnIZmE4a*KMpa*e6GM!6|lxR--t-Yf(k`SPC29mC{^W}y5))O$1y`D zYe*%8q#8+qnL`N7yt}ufXh12KD3bcVmjYT86dBo79tFZMCctr6i-@hPbIj-gU<{!( z45CUprm+k<-`=Vx?djQ>wRSR@#u&S0XDe^57ZLBht1Y4?C@F=Z1WW8~WvvZyFve_P zptG~H98+aoQ5=V(5AUp;F}}8_WxhQ*K6@~xv+>crTX&jgpL%|K=kkO5PyU5J|EHJB zWsLFhlZOb^CqD7=F!t}f_qKDD@9p~^y!F}-zqz|}$?@c0`HTOv4?np3_|fT=OV`?; zuU>g&@#?FurZ`B(#qr6+x#yp|v08O^AAfLsa_7!Hy?puF&R%nLcy}1aljF08k8Y`4 z%PBo|#kdOUAH~ktJ`KaU7%rRK0P~oaPOga)zdehL59bVAEV*6 ztrv^M$=L%YyY9Qb>&Qd|E}+)hx^Aq+L@7(@$1$biHk~M9ETv4F)>^B%48vfI*_zLv zDP!-fldd{^E1tX~l}Ed^gN9um-u*BB`d@$Hh379^y$K+LZom1_ zgIVjgrsl?#r^&Qgxj5M0xi$<#w~PXM@#5vHSDtzO_1CSP4yS8T$wiUO&hCEKFT=Pb zDF!*lxBDjCnw9Z zDb`W@`ukt^)nqoCeDJ}0Po6xPG;Piq z>b5IuYniOX99fww0gx`8V(WlZVo(DHyfLmK!YGTgvv=Nmy`9(7Nqcs5+}Ous8uuJH zt;brV*d%dF&*rO><1Wzli|B8pxD>KCE?Sd8cx4itmN?{6^Yo8@_UXUzYrp=Rzm6|| z;t#*@g)jZ;r#=JK6l4fg{_iey!hNG(we)PiPwcY9v*s;u&r#US) zckz@nGvu5%Wz3ky9D`Efh5@|=YY`0a(-gwJ@6};2S6@C&e%!hM^C=Z|+|EDQeCbcur+PTW2U)W`t;e*%^~)~!5cq(y!zfZZOUiEsja3vvkR}h z_~Kh{ymtBW)1Q0!)8GB>w|?rUzA)dKf&wClYyiYeMDYKMtHK6h0LI^au>95U-dmy0 zp?hwp_Or=r$A?{q_ZD4G0fteB-t;}Th-5QX1ZY#%l=4Qm-_^yt)DV!4eQ+fT=>ciW zkPPIQOc!j9>NwXfvmKBSfQniLABBekxnPz|Vbe8P5s^@M(=-dYoYTpC4tfBgmW|h% z2}pqf3W$nm5h(l*_`c|;32vUmVGE(O>iRLj$g5%;+ zmYE-mi+3-Lqd)k*U%x(0ht_W0k=c8+mEzfjI&x75l&NFyjLt<1k79+SM9f0YO90K@ zp?h?)xN_;)B5*RE**rL|tSzA=?K-wDCs}uejWvG8u5#YyG}yWs!%~@@abpR9N|;AH zT6RY}TNg~Ve9|wD(*)*wdvNb@AF{Ee#pGgDjTRyaEFxA3k*zW*0E#L|Rw)8R9|sNn zm_7#-`0tmKV6$1^hEDsKL&1+ZJc12;_7mD*^Iv~}O4#J#;09_hrIZ39vJpQb;yBsi z@yX=$?Wzp@>{8Np^5mV%r{AY==GvL4UaIzH+wJaj;+FdGXi<4rRn6c1jlcT$e&erw z^2=XB&59k6CDKi8wpy(eLdtzi1`$IDxPj0uehjsjQZk}4v!Ir|NiCpruwcWt$@!Pk zkJTHos|u~zkRh8OpNZ+*jdbxD-v7Ky$MW#q;#{<3Dw}gJLzkybf?k>O&T}eKnZd0e z!rA>g+^TXDms(Qg^TJGdq*ZUZ1yt+>kiiK6nFLgjs+7`Zpb)W$Y{u3YlTs2bB1B}3 zF$ka#W621dVo4<+8D@(>)-ocWR@a)es`6^yps#c+DXP_KudLU6PT}b021kgZI^gV? 
z)$Mlm8wdQZqbN3~yxdy7ou=FFm}un&p68u?q=io(x}!%8x++~ei#uulJYIgTm|fM< zpqaR>)6>(HEKSSaTD^F2Z9mNijsNg>99eEE)Rc2fg^Y0sjty?2T8uG{Da(1vhZIHc z0U)L{#sJ_EB)^a}%vJnOg6h&lXjbr9j-<49N zfFe|tH|Aq`Sr}rB*7`Jxck`Q9pZjzF>|glp-~Q_N-+g_ue|_)D?3>^E#>v6q$-%?W zJ)U^{uYTh%k1?DrPqw$WLI^7CY@=Eb)j4CFEm=h*q)DI`XK;wjPZ2ccjR~riO757ty3+4wrLt>ULLQ2$vQ{g zBAl;iL<*9!*(5_sNKuP4P1Q6_DP_4__x(r;Ie|zdYe1n$IcGCB?PgU}73CZYBcgHa z5s}$i$Hti0kE$vvq(($bQDh~ewywr;9J;WE6>Z|Y8FNVtOw5WX0x=iHWQb7_RHYOZ zvCa{qVU8(@NX}AHHl_wBBI2snc@twysi5QxxdPm%xEP06s_yh8YjV!zoL%iE(^)BH zdwcu%ti(=3ABMhH)vBtzubJ7Rul?4Oqemg1;)3gXp0$t}j4_tZJ3367b`^4$#xai; z-7p|CR8_4aU5FJU6YWg4f*;2mqmBw@ELg^1Y^s77>zN!RNu|Wp7YNq!bk-0N8dTLW zh8Ux&C_qS&fQe{>oJ1vStu@ZNsf0jiXOpU`+7LoNjtG^jxvsshU6CYE5X4x*%8jV& z;dBV=f-;-Urn_w^CC1oH<|&Lh=adK0k|kP&QVLm0IRKEeR!dO-6Zjba}Wb|)V#4v#I?Q5_MX%DsngJvw;w$N%U*@#)Wg{_)X+Ww*A( zwXeSO?QeQNtGrK|$T&dRNR3HA)hcFWU$SV4=TbH%c6NhWCdq7A6)YO63<^pqS~4Q8 zLoXX0O^zEb#25yUjYi>Pg@*tD5<2HlRL^IlLOC~R0)S#cL``vn%y{pm#0U59)NKVS zaU267B3a}6E;#2#cwot%F3$}4Y1mpWd`_~nv*+Dz40?7tFmoKu-pRq4{iB_7cOmDzv4|m) zifk5d+qN6mLII6AA);d!LU2TzkHO3$pinjjFE%}L9kK$lBRi~mTl#5HfX6*foMi@Z z8d&vXRTBV!0VoQfAgD-S5D|=D*4uN}V^|N_RQ<6Hs}#sVT2=G1@2Vm$1XP!7od|Ut zH4JFdal)3ePYW+AHU0VV@(@$WLBWz`+Yp)(hOP{) z4w%MBfwrbxZnoBr&$VXAQK#E#<-lIAP`Xvq>5#WEGBf!7L_AdAE_qV)xEaptpN?e+4m zX}uTVq?&W|#(OrNO-?z+n53|>D4g|6225kn0+Xh7YzU~Wo2bJkeImvfQ$)a2vWR%! 
z=2B8FL}Y*zH0GR26v@b@h=i0%DTc_haco15r4(eUs>*vGODv_V*S$9$L@Zkd1OZiG zBqsVvhU{3Is^Z-q^OTMI@Gl1yrFF6-fOs0zgyMWbm|KZO!Ik3~3CL z$t>r5bab@tI!6dow$|{pN6g%(5~y8tAG%Y z1&OWM*?nbudwb~mix)4Zl)SN*FBW4=Kb|fYi@W#VfB5JQp#9yi|3=LC!TWbk*FW^W ze(gtJVfI_|{fGA-UBC8>vEI1Uw)MO3zI*rHM^8Qd%+<>;u)){gc+EHPh0lNPy?5TP zY?b*p?B?}XE?(S!@4ff#-nu&s-QM0FfbL)1k9l8IyS~5q)YEm{ikiBa zcYQfN`>2%CjqdpD@%{Vv=JR=o!!QgNFI-u7tK;L-y}iAcUwnm`7mI}i^?RCgZmV{j z#(o^Su0w>%x*%DA$g@FPdCp_EF3~C?soD71i!ifyKC7G_pIog+T6f(zoQyh_>=DJDL!8wKC}AUu5|0%MxAAxbj3dijlp=)j?3b9>v|E**7D%$!2w|$ zN32b(*=+9d#uxxdDQy}I7-H8>XcJXPm`af#o9)S2YmM2&MuiZ5yoIF#rIa)dr4;nu zIp>@Qpk;sZt?&HK{Rg)zKN)j;=fihz+_+LVm7eumO_TF_b-J$X?CEE=zW4p#M;`B# z+8J7$9Gx9K`KWe%x7vgqv5}^o5z+le_pe{Qy1l)%1>fhp_{fk!nU`#4MJlxy0$T9fslb}jWp~w^Y4HAH=ljxGs*4!xqtCrH4b$=bZ(0khETr#>f7IX z?SbF=sk~Sw)c3Y;9Qo?A%MYGT_tpqvPiasdipEf^Mt3{t8|YG%!N zY#QI`Ha-0XW8}j-f4>`ha?N!I727Mj7Y-gCyrW@%?^4b7-4EU!M!I_K(*FLwDq3SV zq`*Ij8#s@K{BQgEg9d_t!AJ(0R(kTsuG7oUKl`2gr+?+u4^QxT=;FNDS|xU>rXR!N zY!%l=u2l1C)TmsTSrXP*5qg0k!es15EKALStbwR?R6?TM+XNg78k3E|3~kq=HMlW` zi;`|E0SW*W)*B>RRL?yEsGxu)o#U{GAEzTB0Tu?uP4|NHOBjm){{ztM5))ZxD__W{ zLdubHRu+g@9HV1U4hzn{m-a5~tdXC-Zv9JG)87 z)*r;waAQmCAz_kM-L`QUS0`RkdAAcV&FtU!*Z=ifZ~y4m{`3EII;mob|I9!CzxY$X z{AU2+*1b3W-GBSv%8R~G&Q6azK07;guG-nYe(&hGl)~JJ=AEfskE;|q0AY>-Jv$bW z94(c2wmeqSAZ~2t0DvFUR2y2-8uw#3d-MCKN`D}3=)8K|wA(1< zEF9f9_{PP9@5YkZ&nK$bug~Ieqoy$S&Gs(BaA|iM7wZoX9(R@OZ(q6f;BYoE*3oxf z{oeZI=+gE~pf)v%qVqKwmy-&pXigHHuMv@e5ZM@FW)OT*KnD+1{NFram1U#vzv=B8c7?10)CpXiFa9vqOZ;tRM)i zK%zN>fFS6QnO&u-zy+BJS)~Y~WDS>aJ)+CVV@!o~1Ye3)VO>|%=1>=tsH$Z$Oe(_b zaP z5|)fHMI_|p*i^n5%b**a8p%1=wo~Jsma;icyQ*d8sAY($ABF;&SAE;oWF?m&kK@_d z^6>b+3K-*>dN!Y3JUBRBF4y&>W+Nf6bKAAeCCW%c2!m>ACv~@4q>_O!NeMYQ=NckI z45C0j#JubWpHm3Iu~ETs3@H_BZB;cJ%eXP7l+^cq$iX?UphXpl0AvgUB9f&Lf`}ln zbIuUQ7>8kKd^MZRi0Gx4UTWK^HO*JQ_FD>C8SIxw-}?RE`t7g%)z&u_nIRaYWm(14 z)s1Od0~D&(B4Wv0w{o-UDb2i>ou5#=o7i1?Qr2xe9_-tlo!|Sd~s3VHC z))^0^4%rbC;xG(jH>hgutB^uT(R*LYIX6Iti3vodNXdEAcB*I|WANS+0w`%#u*_&c 
zA;y@B8sm&(X6(8SQ0u1J)J=%!y%)(jpHF7voR+d0deY69fU1&`C0nv)(IlF3v9<=$ zO=YHf9`uoOUMo|MLSo2Hs&>-0DV8UPPl^g5H%$Wu`Z(s548ySK7R$aXk`Mum1;Rws z_kGD?1(-vOgr;EweY~H_Bqen;Grq0sOZ~9u)5wgC@j9e1#4M?*op8~T3|sYf4DoO= z7}na*ls$4iW+^1(G)Nk?i*dE#T2hIs#e!|rDIsJ~01-eww?k?|Gz5sEfQB8p4Sj1Y z`PvTSnwbTPh)kxn0*DkJyGyz>ZPDL{&#C<2m| z83<4dXc1y&Gy)n@hN2*7P>BfuL&-4}!>okP*Gj4+)>=^ofIcQfjgm?bU|?oquF0SZ zA{yfWfS{anmPJ&>Fgx~=Gb1rukuZ#7!VYMtrkcTc;DuH{M%puPnoCIRA>j!wPf-}3wBLi3bx0KpYXM%6aYpB)gq!m2ml|)66TEp zK(!#3oFyZBTUlcTqDU6a%*;dpkh7=)BBYc{$MGk95}_Qs+y{1c9KgG32_)hu+}ny0+dox8~`At0zl3=RZS@n znIcJZnbg;3Z5_i(A+pgPVkbpJvR*vq?I&(52pT0yVFptS7ytmHU|AJ01Arh1ButT$ z$!@EaMKVt3bBG8*O6&|~Q(Zxg!6!8?k}m=kN^Ro0Ow6>Q_@p04vGcla51dbRbC3V% zr122e+^#QTzTvwa|A>dXbzH|tj{I`5zVp$$jm6!)z1QEqW1fV`vsVqQCXe6q6U%cW z=woDDuS7X95m%|i!YbB`&2&6I0Z@xbrUrC4DuxIJ7wrTrFqANKsz{R=I?h6T@Hn45 zaPgtJFu_T^JbHNa@RqBrvKjjYqGDOq5C<)qgvf2_SO`bPF=@z|nP-!#ZJU!}eROoz z^>GXs)CZ{ZSO?*rY#(8UkxQPYuo{w~Jaf|24JehE!iFAD4H~i(C}OK>(-YMj8~Tt^ z%45LBsH*pz0RfquwZ@7_$ax&c9Fs97m7nClOD>hMMYX6HV>OpC1+ZFI6(TUR62=(g z7y+QFs>Nb4Z71`#an?$bqOwUy1SLccA@yO6$nC@fNGXDX-nqK20U(6XkDW2bdZJ8F zQYsmVz4N;1M+6Qn!2r;$q>em zZvSxAuaIDO@{+ao#h-e<8~Z0u9-Wm!|=k?aDTNkhFwarfG`j<93moGFpaG%gGVRFdB{v`!8BF7Nrz>$1PG~gu}i3ogaB>Rh-57VRqLt& zfRs{EkfO)^SrSvwLX(5!m#;Ox;?EqHPc_HIV7i_(v zN@6#}VKEX4?zrHp3u9U2JR)MBNAJC1XX)HUdtUGpl3_@M8>w9p0AeI_zG0=3GZSq9 zf}FE46$2`B<(+e`q>@r7SsmxFI6krd3t#-wAN!+!^zp%??cJ@Tqk~CWk zS+0&!mQo4=%x1GvO1JDb!D5YVVvNYlcFHi6k_=Xk-MSw%MaQNeLN1sAN0haU2FG5V z*4s%dPz#|iad56~#?x^&TZrywMSAOSP-#r`e@NVrc55ozYm~jF`#i|o#EhUU1iiEyfWYFB0&{RwCc{0oW zr~ps73l}L!l#YC5RB(l^RWNfcUCv#`9djyM-7Md!Y0tPL?X^LYony<1Bf$CO=T ze|lyvU*w|gBmE_K6d$2yb37+|a% z5L^Bis zB}4>9z#>vqwJ1UXC1g^?EGg$`8J*$Y>_k%2It|*uCR3-381nq$(5;EM-P$d;!Mo{v z{~9aYJA0VsuMFzU^$A>k@pwH-cLKBRy(d3>_50su=KZ}Zmo8ob*cdYY8|Z=mc9Y-# zYYk?9!Vsu8;6dTjH+DXCt%k7ZFV`n zO4e~0hX^=z_I?8bG?PrWsFrA~a$zbl6T!@buc3e&M;~+AWU!o500j}r5aLFsxS;`* z6#zF5e8`3XkrV)c{s3hnBA@`WNpnR~Vq`=DE&q+;>D((o}E2DIC#80pZEQfAHDwl 
z#C%rgE4(xCwIq9XuytL}+k40NCur~9N(W)>wzj6(oE$Hj`81R=^ktor zujaGK9y)WpT6AMrb^UU^N>RuaU(edgjzd(1QZ{`E(Yj3%k1{hNAgZ-Y1R@)g#K&KX z@W;)Q84wYj6|@!=M>Vft;_wD$XKjwQM81vwQ98 zw!gkzvN*5gWeB~FxvlFe7d60G2@sP9qMRue1yJ6!PzLz1YH#D1bdCi85-O%mlQVFZ znH9x4Ys#j=k04^5W06s#l{JaY+ECyvmxIYGb#t;g=a@?t5s?C5j6q~CyS@z4aM!-r zmrHfOY{sJz+6abnCH@M-wCWymSdcBInP9FHIc(UovFrQ3zdMsYLfE%WqP6~1XomtsjN86pO1s~S|CHRIUdx&8if zxlSpAnipPp;oTp7w=wSM@S(L3|QZ7YOP2N`C7)2wUhX|K0cJ zlijwONG{!KHLg?NjgyAVq!N)$Ds(9V1ZYSJ=3BeR>t#tJK*>;0v~b1@LmB#YS6QE9 z2_X%`*tV6mR?Q~Tr6`Jko+G5dc?{>$3d7dd1VCgBG5c!0>dBa9x@9cooQ9zTrpj4W z1tMnNoa6x@B_%Q`rI-?8$vNkkJUeR*p@@oQa@MbhJ^~ang~1zh&RL28KnP(R6;Mu6 zs%+a<&Vqqd2(WG|VOg(MSyMOmL^Q-e16ga&y2UtT)xritIE49D9+3h^o~3NiiWSE9#*5g z(&AtkAL+dRK{g&zp)88PpyCo5paM{&WZWe3!3LDqT!;)R z5fQVq#AKW(ielwF8+YgK0RUCih4s1z@MmY;`*(kn4HYVn4;~#JA4n;V9VqVa>^tx4 zi67!HK7QPHWzy6L#d{w6&NY5_Z+GbyF-AmdjLiWMOex8+gNld@D)z$wWSpbQdgok@ zX(LW3rNnr;v3)b)xdQ_srQmoL`ry5v`Wj?|*!i4upN{T5yhT8Q*%<&X%kGqIb-Etw z+5|1gq=0qp*If|JpLpiE&wTE4f9JpayNAaIlWCPwe|Y+Mxq9NMLa1_LXLhX1!}##m zf8#$?#T!?@u)Di03I3&j;eTT%d*6HW_ZDaMFaE->oE<&*%GZDO!w<(fqgtR4D~fh*U*#F65DskZ6-uB6$;dzS)RK3v6T%NXV8p z-CR^Kr36q6J0K_}tHLl0AGeaC4NsqQ-uU4ZR8h}6-k}O=DY;*aj?Hx1SZjx_F9DRu z7`L~(6JxBb4AP6nlp-;R#<8oLZQoA3HKgV2$-$vA}OVyD!#IJ?tLgC#*+1f3X%(>VWN^Vk#)73FgH!JSTDoru<2m5NgV_z z0Aj5N;`Oj}HK-CX7-Naplq$rMQjn4jF*94khRsIZA3`W1DdhsiIVYfq*i7oWtv2a) zLm2xoj@^<0EwZ;Zm(tFr-O%S0o2HRma>ywqBWB~d-53n&iCS0ObWrE>qY+IZ2Nypx1mk7V8VE{yVSgPX*v3w;d=&Hh{nFv} z$cC|`9G;#vwM``#g<%-_F*@g*ac9fLa=8={$z@YHBI#mvc6fMrWp8h5wkwLq z$EO!AT-x8e49u(5>gezep?T-M?_Ifa%{qAgg=d+pF$VCSVLCcGxiId2|9h`3PUEG^ zd-oo^{ql=1b*r;mr-xNlA@VD)yb@CG`zUf!$owb&>A!dF+VxL==A}D#Z!wzZo_p@8 z8!vuz>x0jH`tw^`vnL0Swzenp`4mwuU3vQK>}<6h4YiOt4(0TC>BuEHLd0pyZB3}u zbzRA+t}3F+kOk5)hR4TGAPht>gkHP2ySrP@tK-G#G7MQI#(WL+Nta(3)v=25|l&{jsP14*+v1x%ws<^Rc(x6L@kmK8|S_E&bjmMG>CcgT6Kt; zArK$|CJ_cQ#&|=nns&!$uYLQqo6kPqEyG72{&0+;n>~f4-)oxPx_b51*Y4ea{o>yK zZ%lL(M8ws!3{xJe9Mfdq@iqeqY4e*5*dt}U`9t5`Ont~qB#5S5VPCg&>T zgh<3Fl0zOHf%iU05Gh@^cEjHa(9AF1xc;m) 
zrm4-Vum1hp51u4-Nqpd#cQ2c3pMQ9;zn_mj)#8QS3)9`JU;g6fA3VCx&L2KH8HcW^ z&ErS63T5xBgQLZlzWfW%KmY7Ux8J^d`_?zU_P4gLUVP!@o6EEH;gg3KuUtGm(KrTB zcAjGtM%tWHC-X^4sgx8}OKUA;Et(urQ4V7t!+@@>ob66et9GxC0RVjMM#s8##__N| zn#?9C1~z5KILJlp1QNbsKlRSDzZ5P#@4JN@okXf(%uR-W{*u|*zYKuQjOv`!`-8>l zM?d`D-}}w~$bsO_=i4hc%jvECtJB<-lf}tfx8Eus{wV+6fBAb~{Tna7@X5+g+RM*8 zyYK$x&sYCwX#V~0+_>Ym%XyLEYIcKPbTYn-ef`knF49e4AI z?#)(*2H~VH*3ZTvHfB2^*KR!f=yy)7Wt_i#a+jcg=|c5~-FmyiKmI4b^y0NoRTr)) zo1Zy4)}iyJy!qzu|Bb))|0qDtIcu%4b~>F;CX=gIudmnZlP8CdkDrXiOl;dWwwl0d zu{b^(XQl6roA`-|XE&dH?$7@#|H}*8m%jDQufOn;?p<&fzVrRxefO)6Z+;#oZUSAf zE!K59TN78lVzBNkS@2UfQV+di85N)qX9jF?>x%W&s6eQMV!fy$ctkK&wJBN%SPhbO ztdSC7IY*xXj3N=Bf+!RfEgSDK0Buk|QOk^mMdBtp06>8OEsAQJQ>ey zE}F890@H34r{33OFoSw8C@G}E@krt#yRG6A%4s#dB4sl)-g89e(Q2O}WqXKLqZqO<~K|lpnp^tmM8Zv^yvhhYL6~!!o|G?3kmeh@_ zKnlpt)yPg`Oev64Du7uC2|Y0rm*P_@!zlp=Z>|KhBzxahfqYxExLB{3VwxVsH+I+u0A#g-_N#yA|NgbV z`5&)yfqFh~n?9^kNPpqa{`pTl^E?27Kr^?qPrPC*fBv(d5)c4W3Rs4mLKw%_Ui->- zzx!&6aM~Yjm+r9a+}{5MxNymr0awR892@mH6;hLpiI$6TT(kjeG?FpEoRCT`B_$}y zNJgrF;SVOvoYUw3J?lVl9&)@ogeR3`q!UZ#^0aDTTm8NPuxtmGj6WJl5aKyo`p4{DW1Mf=`C=VPDoK;`YP24QA*5J2?}#>r{poaCSr}u8IjNpg zcPN@tvK8kPM-?S5rHtdaTrQP!skJ1rKuy&;gJmQAR+!E&_I+Q{019CgXWZV-8~~J3 zw+@Td5V8;x` zvdKB;95=dS0LZDtuy)?26xpaq5w=4NbyZpK;uu=T&bb(d5b{QJY={`qIR}cMIt=T> z!$W4Hg0W#3f_B5Y=BA*C5dG> z#sH#Yj_cKGZ*Na>S*%fJORl}%WSOl&Qhe@qsqn8}eh9-I*zq$psi>v}q! 
zs_HoQ-r&ZfEhxs=4YEc=Ic$WO8w#3{rK%+>)ZF&Hj=Ox?JcPA0o-`+zy|=@4`3 z8PIYSN69j%M$6eyQOE#7$j*9WiecwGrknu`vJp@zpc(I7>uSQ(cN6FcE_)DI-&59C$&*6h{OBQ5Dpz#8g5t6~!2p z(0jizlNCvX5aYNJ&Tldy&NEfcDRV?r1&Jx9l8OkUCn94EfNb1!3aF?P-)`=MjO?u4 zL{~}Hpy}KYK&Y?*`cYM};3n^lQ~^Yl1kVd_0%T@XmhzKuU}vm=qNqVCj3TOKqwxpY zbQKrOQjiM+Dj*OFs7Qfx=jnNR46!)tSurM+tR>}f9Dpshmf0HPLI{$wCF_kRM~Iwb z3L&6Uf+R4eQV_B0#+W3RR7!Tv_y$5aRn<;L7BoabiYvw$ZBLq<)DLS0 z*VL0$|KXL{Gq>)3_<#OC{%cRa^y1F$vnP)azyIK!$WQ&^&-@Q_XWswldqsLnh)iKR7m4FM@J0B$l+0boN90EnUi$OceL6{x6z zaKZ5|Q=Y}M#$LfT}OiKJq84MH?k(THwb05cLZn;6J_4>a8EHpWORKg=P2c4{sgdkB-_`%_Z44O-fKslx#mJlr=0D)w*6b z7b0s#8S4h57)`d`>0%GY*@7H&EGRQm=Tsfs?=I{&RTH@C(Q#c1XwnL%wVCgg41F4* zJ20z73YXTU3*8#ajoZVj-hOtF?RTnQ`lwT1d1<#OEK|wA4l1N+sb#DdM{@Awp50#U z5Hpkx7zX>2DJhE<9qiT7yQb}L8Q zkH^P{2e$GtfvXx~46zgO(xEYx6dISQZD%2N2{f!uV$imkCou`2>y{_ou|lY;%XQl{ zu9`J%Q#k~F`0!!XaM2!6EirSIb7sR$Qw_tgT1O%hAyfq|pu#n#l*I@!5vx)Zk#!il zN6Qexv_TQ*)}wRPtquuZKyLa!wqbIZ3?C#xLsyd&~QG!B9aq+Eh zel4X`Rn^Jyli&W@ukG#cB2eG=IbzGHqsCb7mOg`m>+?G*#~S$+jp-~W z2EU*_W5beV7;Fn+G)~g9GwQHjN4LsSVVICL#S58I1xb=}nat*OUB@x7cVmnp1Y&s3xKX~xr(b3)YxSX^V=W%ux z_jh+Bu9u6xlyYJFGAgaR6E?9*>`IKjI$Gx)7rNyg%2$k>ik+;^R#RP8m0M{#Tz6ZO z+8PQ&AV37AO%&cH2UVq%WZJ2Ukl@5sMH3pX&=14Vv=ssjeaK1G*i^v4u*!aFoz`)1 zs|iX|>JEu=xU_Tg+WuA1)rWa`@Z@x_u2=%??eEw2gZJNEJ$!Ro@lB=G>3Cq9CwJcc z;P!U6HjS8|i=X{d|KxMee*VtuKYH)Mhd=eHPk!d5PppRW#s~M0mQA()%E`g~>$B_o z`xoxq{r0l&_xEq!y?Z~7ov%zoa};IoibQ5!4Si8D#&l^RJvrL~TJKFYZ8Y_{1nm#{ zS#8_NQ7m&;B?oEfNUc^eN%G7*W``s!3QE>Y`p4gzeDR0Z{|9F0#$<5|FhjFduhvP1 z6UYz7CVurld*iJ)e*RDYbJwnaI@9{22ahjaz5IuN{?871_2|Qo^14qM$WG3NyLovw z+u6GL(x;x;yQ#>(`MX~!asBY*L~z{OuU@|T;?v{y@7T$m2@B70T{UBdDlVJ;bWzFI+COojIi8&!?3AO4&pGm(aoRuW!XSQf);#-g z@@ybqa>mJp_b3_V3Qjm03a|B1cQk+YnSvY08pz628@h`i0EP`E>2SYygT)#KA$*Z z2#s6Li~}f$G&`!%)e;tq*v(5uwcpwMjN}km{L>!DR}|Y@BguhEB*igP%(l&T$*3?-u&-F z7ygGw?}0_Si#24ygM-2Q>d=M7=rUJr1X-+T33Sdjx; zmM4zTNjn}qJ9K~Hw}-RmhF_njVc~R_@_Dv4H1qQG{J~eg_*!$=q+5G)(JryCL|qeU zNToeIzH*JW{p#d#9m-hY-~5HY{PGJQ1_e>5ET)1)XaGRNZfs}jZFBPG!@u(@fBReC 
z_{Cv3FU@t!Clqgd>)KBbaBUu&0Y<6HxLOcqw{L-z$x4mAl5a}z8zyp#nv2yE9e4*Q zq~wqzWEfM;sWxqBLj)q#I`xtN#4?-ifc?)DH7HrTCL&ak0;b;i0Di0n%+p%DL{I=H zk!vi8Y23MM(mGt-3pB6fd`b$Em`2w*(*{!M4(oc=z42Qgz4*1*3Uyqowfkv1XrMhJ z*w)c?46Ly*rO}g|^{3a4uPiN=Rb8cg_4wf0-cf(DeemSz&YVLLO}P>~Y&oL2S}SoF z#^jtaGdVW1l=I}c4VF!jh>REjm}z37kqykVYTlSsM+ee{IQEG{*`_cf?jr7D8SN;? z=QanQj9L2Bhvl=OWnMi}>Hx1wKR{Q862rZ_{@yp1<2%n@!T54KuWMHCJ61&^6A@Go zPD`~S()f(E)LeGxHtqg)@ev6(Wb>t}c^(++h^22L0}6mvWs_<}FgF>;G4HAzpo17O z1}5T3>h2sDE$FPsW-%ivfJTB;O7@PL*bWyP39)Gp;&`gYE4C1paoqT{+PgNd#dv*s zu(y5^tL3=I+iY&*!{w>~Cw+mxpm4-5`5?b%?uWgrk`SJ#Ow;XSJlq z>x;#_n;m(xcDLO?+il0;%KlrZjOFT1<_{RJge^dF?MYhHI-UR5*1U=9t%4o z=NzzZ$8oOSw?1TTe1nKh>}Fw>&AXff`N_>AcHLp?x7+RN;zA`H9UTEIH`|p+Nwtid z)HDq#%-XJNrD{Roy}iAZ>ZadpSH1VX>*n4^k08bBpk@Y9xM)dwIVA*DQw3|7&8(V< z)M`4Z4Rg*A0yr#C$WgJ%!K+pv9|6$0sh#g)Q?iXiZ{Vshla9M>2oOS8UFX)>;)p)KN7qNMk9;jLF(&?wM698b(jf$OCW6 z=)H%k2;{vtU|}{Ql`Ww|Q&nX@(Uw(E1`}K$5eU~^Eh18@jyZYcQj#a$mNW~qCivBA zV`eTaN1+D~AA9c>%T~$QbFF#7v*>+{UPPcu5lmz0`;^+<-v0jjbQ5DFB5n{^P&1(~ zT2d)O{NU*cF$Bk(T~7o}*NVt?J5IJRfT8ui;52}v#>tqIh?FdYWEj!2LxhsnCuPOX z4P_8j5kevtJTs5Gp$uanp2eWFsaDI^=1O5NZzg2>tVf81T>h!V5KDOyn-kqsbdMyZBQ(ZB(?iTRA$n}G;KtX9Dj z0OUMQ%sC~#Z;1Wj>vLOB{(8N zMlrlB8;A`-u&62-5CS2A0iY6-z-W$5QB@Ufa^NSzLI$Q;3s?mXXf{acYjz4|WM+Hw z#nt000I)v47>4A%KRG|~F=zw3JZ4TAM{pE}NFbor`e4uF{AU0Xv(U}>Hq+p^B7Gu zi6$A>{qF9`{VPXDLmrp}YB@W3^40C`Ti^P|@%0b=#83a(^Yc?deRO(vJr0|^i>p}~ zo)p0`l~P@is@M#ILpH@+5zfXHBIQ~lw>FGKZnfStA#^^PYDt;Bi!nAXrm@7XYiF}G zjK~?vW*knrK=21H5Q?uVRUH$SlC<g`MMqLmFI&LvM~cF!g&S<((=2+|T@}JI{XX zz4z|F_WE}=+mrSA&K*0#`B=B} zc5YCsV$speJP~sQK?I&rqbik(`Klm>tX0O8L+xCMq4TArzUi?zCG z2Q-u_L==D>v-b@)GgCrkF$=UikaKtM&cEIOf|Mqt5{_v}0~KVrB~@Ex`I;?H@o*UoUY8RNAVd8jVV zmHl9u09)A<4!J!p<1WY(jI-P>Bv+?7##lTUAcMoaQyrXVGoQiwSvTB2X^)TmQ5zeF zQdJdwpu>YYif{oPfg(0d+&uE@2O;2`*S*zTlpRszJ?5F_al9`J3`5a`kYhsFqV}_W zer~xxZdC-O7$!tAYPoyHHP1ZhPq$+@d-9#@`y;o_ne>u(kz!dT@8fPaq*_u+wWuMe zDv~kdl-!Luk2wt~k>fHJir|?Qprlkv5fT4>?!@8Y;Uqqd&KD68na}5e-7pN}m?tXt 
zlxV?(IoB9tJDV~4iF%-_G0sIwDmr9kCUmnZPAb$?5!}W3&<{Dr7RZOD$;Cu-sZiCk z_^xfm@bt-QHlLGXGiywXpHWHxvgsnt=d+=&ZswjmdD6D6b52rHo#d9Mi>3nuQGD|F z5i#s99XsQAIP^)x5IN_{3;>Ywz)Z}b3f>2FTx%6gvGJ+MIF6;H$%u0@1RTfl;l;VC z4t)+Gq?EQY<}%b;>AeTek@x<}l`BKOSobF>rIU*X4BjA{Iv1MQwq4gD(0o4MZTt0l z9YWK#Eh3BQ>1utk-A&lLA?G3nwcFn(#(o$p;EmgdPfp+Zn}7Q&NVy-@Z@%@N{k^L% zz5J2Ydi%|9z5CvK_nv*`_N+Vj>}Njz{qMi}&fDLAa`G^Qrzs6p?db4I*Dcy+cCmi2 z?fZ7qo}N8zn#R;E_x5UW%1{U{&bO1;P)$8yV$025oSZ}pPrSP3o|;Rl!ciAOci&?gFoJcqla*j^Ej7L(o>%Em{yIW z`K(PGiuy66#qee-?R`_OZ^{_&TtJoEhK(Uaw$+W(t>_pkZ4 z-gxl#+kfZ}|B+9<{4>Au#b3E^{cqoW`V%i){oD`V`iH;%>;KZ9{so}jmwx@1zw(vu zjtsX)*!g2^Ud(Wo7g0PQF;(xa4bJT~q0h;C9pYlF?CKbxg>s|Y`F-y0JA8MwC`ZqB zA$BrsVrbKFuzT7J{b_gn^yqesv0>D})hdrWkh;Ne<`18AIO`&X=mT7wztcW`uZ@oZ z^ZkHQl~SIeqy0J8yh_0f7rI>&eMxGt+YI z+O_R=``zz+^*4Y0zyJK_|D`Tm-@kLzl(W0P_4UUO-+eFM8Pj{$U&FK8&txBiE1OJ_ z={oddvV`2>x-T5MD_+c?^ls@iK{}Vcc^rYzJOUDV3xW%_!9f!@u;g!ReKXU8kkKCGfN3|*<{nP82e+I`T2OO1vJ76%I zJbfIn*2+iMKM~;G>Gs~Z{h=$h3;fk@z%RYA@z~pL^jQc%o0vecR1<6_J0>+qxzwzH z6dV!lJS7BB)uCE3V2#g~FIB+Jp5mnU!AWZ0j z2jnV{L_O0ax^_&eBDK_<$39RoRY`ducU0ax=Uavt0hS5&uq-KMa%*UI;v|-MCG71* z&aQVZ36^MOLuPF{PJ)8`kQaQ{_;yd^pQ{e za66yfdj1&%{@&}~$H(vf)nEO^yS>QK_5R>zhvlqkXXlF#zjb)@pllKLjh%9JFro~g zb^+OgLvkF50<-hQac1;m1prjbV**rEHG!%HrkaW!U^*rj z)QMf_y*F@Bi--Uy5dHrB;gTPXVH2Oj`nbI~-`ZT-7Dm~s1RL0zkahx+HHG_MzH<8Y ztM%Q(#cb_d76ZKi%>ZD?eQoltau-B^foI*4)TGqid1|}Gi~Hg6`uzUMSz2FQ^Z4vF z-p0l5EKgsx)`|dVsH#@PIi55P%si!`PUg7;WTv$WVnIX&K{7%@XtWyWz~~*K0#_u6 zu`!~6Bz-^Ndhw&H{U0q5=CZE(w7GI!BW-po3~qrEhHY^zXvU|n(Y>$j+v(Abt~MbR zZQa7SOlol=LS!O05gUi1DH|X~j?rPKV>i4rZXX63T1SCYOa*Z)TC_Un2s*9nIt)Y% zE)Wn|(3;w2Hq~I0dV`4AVJU_PX6hX0GWOeSCR1#wU${kOr)zLw)|OU0==rv8%IwPh z)g}lz4vOS4b&Xf?vqf!}z8hh6o_6E?7?y}w%NqPgkZE;#x59A!xWk(0e0Q*zZ`LP( zYGz=$3Z`5T(a2ghV9=a$&dxbO&LUZ1k80ug!>5K$lsAvhP%V7J~30DF6TZ9B_pP=y$q>21EhzefzUj|6Q*%KxQ(;CxZ8(ORA9p*bqE(r>Vdkbur9K%qEasi_F(_E7 z`oTu3sx{{+cGU>SoGvdH2NN+9z{%JRC%slx5K$s9AO*-(6*O1XvYY7S(=cIDqD*eF 
z)BcldQGux&ZJMT=X(`3Xh{UYA0HHyV+B64&LO+bBXHS|YgwX9a{gWq;nikD$wOV`W99XS2mDw9oCmus%C&LWBj}KR-dNn=uXAhY+OZjG^h8l5~LDbkQnsh#=rB zAX6F>0L~BQCD+TKY>{f!su2+xWRDI^w6cH$WON~TA{tXau?efnPzF^+$L+jrwld)z z2Bwpp5rB}4DPQ#4F=xjd+kj@kkSu-Y)vrEy^hQd9_q@Nqza4lS%X@bpdZ)}0pcteQ4DHrEYyRKPuGg!>pc2*JA+x7YI#6Q0M?43`4@>3TVXRm(m zE8E@K!$q5Zv=yII_9&P%;``|hK(+bp|w)Xn<-*?V`t^Wh)*;m>^b zr-lLllmGkw;m+;mc=@5PeESRI0JGV0*I$4M6Lwv9e*WmW=cX)oh%t&t%EOQb!0bH6 z=n4V@4BAP-SHtQXr zGN5^ywl7Z{XKgMUW)8va@9)oMi|uCUlgt>6yQdiWfQ~9^9t8pBibH~g5VcZ$jDv5X+uI5t9ZdI*uTCw*eAyz^61e6LM5a$))3>~-of$F)nQmYdh}k+ zJpiys$4%K5=1I{(A+%s5xkB=rl_H|YIlA=H9(}jJxUY!Ac)V&-jkE2^uzzJYKNM!O z1}g8p`kWmfPDw-M^nus|-M4A*ROaNF_ zMSa^KC`eTyR8s^br^xe?y8R;=x3b%oTGhqO3#hYp=|-HDl!37}d$Cx7(58Fas~_%$ z$0XL2?8`<~)x1;Wkemw@Ir%R8JsgW^pJ#KooQW9rBZ`>!=zOdML)E+)7fu{N3AJ04 z*@Cw-urh2OAy9DA!Y*vzqSXZt7olI(*=`|Gnzrrwam=ZL5fORsJ^NgGLLBoj(Gax?iX!6CVm1sz z>bK0?#Mm}*dPAoo{dPBP`l^PAjM#Nu?R)IH=mVoe#9BqoVrWUlI}IU}Qa0Q34?N~l z$z9il5NfL9P!&06BMKpwc1BI>7t3qN@$~eRq3kVZ4PX7(jhoBm(mD6_Z+?Bd-F@nZ zewrz~@!ekqkfLM9ao(|M7>ANp{o&!F?{{}@T^Z6ipD%X1_UvrsLI)tZ%B*#@;H*2m zxL8NuS+xp==AUK*$uJB@M@P={#cFkWu?p;7dg-OI)B6t|A@bhd?1dL!e(kmI-@daKV|#M8 z`qZaBHRf6smix=U_uv1Wy}i9>o_XeQ|8rv=E>WwHmti?ChjU@~t207NEQAA4G;B_0-R1v$kzzYv*jke{M?t$zWJv<@##-IcX;FM>A~XamE)uH)Hg@B-g)n>2an(R@z4F# zS6)B)_Sb&5j_3XM{Py$DT}w$G49^YyqYcRLn26G_d-8By>L5~l;316{7Z;n|b}^gB z=mB7gd(M^7G{z8vE8{?btOyF8Lx9YHBSiuZB9FTd|IX|~S2wRWHgZ|z)Q@hy+kbw& z|NJgof#u$w4{p0sP22n)HSHn{pS#iAc;|ce&RhNJt@Eb&#+xtx?)Sd>%$<)kdy6~! 
zH+b;N<#9u^mgL{~bN|-s@7?tyKL5#AE|mV$-?)3XX?`TU7p}fsZhU6^cK_t@-TiRx zI`~6>`WO7co!ig+(4AR&{k3<$@YR2?{oWTJ@9^X!|JGakA5TjqD%-)&@Eozp>S}T< zfX=Gv4(h%B;&6QOWA+$#y9dp!hsPh;^r9)B`kT$M6Xj^gX;oxsFitz4{fB;EEK~&EIy6?UAy$289Jvcmem_GFKhyV3w{@o`J-u(7& zeIc(;C@i^+XPZ?SAI7#DH9YsBkNm`s{cI^sKWxAJtN-<*$9GTeKi)sMk#P6j?|t(} zKL3Xa=Hsw>;du7^U-qWwz$`p;}_oD`?vAc-wu!OzWLVbPyY1FKl`(P`oZH* zJ$Lj0}M=4dXdO?Q_d22Qwj5s%^~>*&q9rn{~L zyhq41$0sM}Z@u^6VDI?gV6ixk46FUN1Y!pi_Kv>%t>6B}z3(3I#d~kBUw{2uU;GDu z{mH$%fAUZMnVGtL4=*z7J_6zhgx-7h@$CE}7n@y~r?jHkUOeLV z?8#=cTdx5fW0zZPr<&Y3SBp(H*^V6%{{CR|X=KLC1gKNOr@i0v3V@MwqT)@NQQ4;| zqf}$;DkJ9$)pU0FqYtnCQP=Ng55FP?bmPXj1&~ejqT;t{^t*d|!$$gxfaix-$NhE> zfX4z%z`oW3A%qxP02s$H=PV*2gtqm0sKY1~!Lbt61z2ZT#X0AoP?5~+6iF12JlM!S zVC%h$6pKRkI)mzgu$Jl^nNdG(DO1~ChDA@D3Gco4+{K0%#&P7(ife_2nlhz9be5Se z8a|X0Q03a$Nqvd|Qq;Pd}9{MC&5&dGZn9ugdF`b^A zr*w`;)A(R|(=idVuQE(?e37#2FFwfV>Vh{jQi;JKnMxvZ%$%i8YYg7+)Zl%uaPR$O z*PtS`=28q1OXErwoMwm=n%HU;RmDJY?;?H$@akfODxS zB^8?@E~N-FPkv-orYADhTDzv1&E|;coL{ZhX&Ae%opp;UJpeVa5iP?2&ec-3W{Yv` ziKt3eRR!AZ2APKM4iU(4Q$*W#78;*r1hlF>08FS2q7fsa7)UL`1VHGSh)BUoF$3A} zx@jRzP1!VcLRG8Mh1fK~M8CDx#jA&v-jU5sKPa)Pwj(|$ds>+PDW~d;w5>G{=0w|gR zD#H7J000z#5fO=|nxfP?p`k{I$Wn?G6jAh~S_KhFI5Y@gfeYHGwdQg1k!EI7QPs4y z0{z}>adhRF0fFg?!*?D(n1!y@&@`b;uOW2-Oa(}BrK?CygKdB&kK>pIAQ(&Ux;V|O z$T?N5QnV@nu(A;YkpUL4j2S8kmO*N z#7WM1OnpiTN!DqYHFME2)?qgcUDv4sa#au_AkUUz%n7U_siC178MUDWhX7D5(-gI7 zR!Y$c0cB?AlyDq&AvotTmo%jO!t);S+<(=@wns+R~9 z#n?DvGLTYJ-Ub)au>I{X{R05JdE@qvf9_|G4zI5+#z&7IkGU5em&@gR7EaHGe!E-q zyYX$m~T-Bsto#b14bA7H0GRRjEHQ8Wh^duRhZZkDGie;znPVqk>GrFK8`&* zbk4=tcpt}clv<_I&{q+urXd7m^gep;iJ*!hI0TQ(QtLPr=7x|~tM!%RYtKLb{5a+} z-g?Jjdwg_l=r1vA+Oi#t`la+ zCAaekQbkJ=5YdSA{6U+ZJ{DcX^QQHO>vop+?-c)*Nmif50c!?}({}QMW*XoB8_mqD zozD=_$b)m{AVJMl$5Qtg4GB2_QZ-WpF#|=M1S3SCsY$VO&{Sx`(` zq&${LGhVh%gp6D@3K6)-*n1KP(5qR$-ON9lXM5DPH7rxFo)XNO4Wj4$ec)Wqlshm? 
zrHl}q<1TW$cSLBVBp`4AHI3MhMnMgu)J#i>gn6rG+oKn=J9N8K=PLJG+nrF^!1kW) z)_^#lw`bc^a%}w|!x-YMUF>OSl|!bbSgl*bK|&+cRpDW*YgbvCqf>nGvANsxw5LNZ zyLQ&eh(n(?yWP~W5KNMb>b>{GIpxK7k;S~5&*$6i1i!Oq z*R}If>T+>(cyxW&Uu<`0H*Z}#KD>4H$}{)w-9?%I=x2W9qc45>$>Yt-&wb?8?|)~# zIW$e16?I7h@#nrV?31?%%&(VLj0qL~Og=isXr&)}R_#a5M`+opbEGLqt3~ zUrl{fE#gAA@U58v1q!iiW<+7^hjF_b$C1cyx7!$S|KK2{G^DZXx`VE(MHOJtET4Jy zHX`nJyS@Fx$uXlC^ka>!2X$Zj+S~j4FTC=~PyN~#f7u7P{p>4u?>@YH_x{zZS3mZ# zkA2|_fB)r|UuNc$$0tWeS3dEHPk;7@fAZqu;%i_3%9GPa%Y)gAw?Bz!yMBG;>WzNf z-M#xHHg31u1sCVbMK+hlq^iW8$vd8|!25@3t)jc_n3EZX&`g(0q=XQruH($Dp!N+K zxFMNE0RZEMAUoowiCt`)5P|?8f(jnPn?j|SAcce21W#$)U5KuU0qOy9!>$Pnk_L>t z*)R-_crlyL+E!|1#Dl&4BDJJU$T7yL=Lya^=cHQS_lOvp=z=FeMG#do@Se?hdvWpL z-rZ}rZ(q53MYi4Q@e|*mD&$;iUAIl+oHJro>t_3ypX{;j5pVSA%v^T79s6tRH=aX( zPPGN*Dme|#h3L2%F?phiTOml~o%i0iZQC@B6?os})3eP;$s)yh&Uq|FoI@gt zhx;c_-X4qJ_pMqP`-hE=w!9LO2JZ(O!G=Z1pL%v4e7m_w-12sL>(z1_XU&IP`FB2h zc<9gj)HTwR8^c^b|GA$y+`In$H@|lJ_?`Q&-u>F|{nEn+cbmu|`b_0?7(!Tf*KW_t zcXq1>3S?$zp;pN`EojlsWB# zU%OGa$<_=3MMs}C?6GYtfd-a_B)U{hxF(R<=5b%KvLlhvM`~sW^ZpFfFVSkWp=n)T z#y8)7{ct}2>KFf}xZ^DSfBs+o>cP>CpZ_C&?vtPT{GB_mY&L6&Pc&^``_iw6$ON6> zkqs>vJwAN{cB${TsXWGc_j}*|+Km@Ke9UnBg_rIc5G7fy3VBEl=)$!LzJO( zw|Y6?GHNXrh}=Z+*ot{a<`~&3o3Vj{ktuUQ1D7xmT;^@msF@mId}o4*FM|1*E)U%7I0?O%QVf78~# zeAmiimkf;@&X;olFtA#{`+qJ>?BDN!o+St4DX^)E26CYA=;Gq-^>})I@8h@juO2R} zgrmjm+N`$t5JkN>PauJHg3+9`Y4wpAy`Kw>~-hcMSpw~Y3*k0}z zj_MAE)lsuYxL@pE5BGKzi)B=vAz1K`&T|U^E=(WzI^zdo4fB;IL~dIbqmR9YwddU8+6S<(|L3$ zZ6=)uA)2UEPh&UpyVJ9W_upDBH^Ofdz;=D$)W6W9Vgr(#z?4QI>7*?>bz5j>pXf5B9~tYj8tH|=>huKDj{GO z!H2BX02wQqMnM(Hc~Appj*ch8YoJBIRG}A5xd38lD0t_lq(N0RMFgG()ZRN)?Pd)D zPR7A%;@Larw%c*b4dZ;_&JM_D9@c}=cJ=OX6s}!42wP|epil?aLTz{WAn!;@qZ>ct zs}3UfF#vQ07R}S`X4W?QT{n~=0aWGlRIV=CR7y%kVRG??G0DYtu%Smp7n-(h#ik6Y zYJ{tFnQbj%yUn(wN<>3n#ymLZLTE%JrIK@w>HyT!V#)_bkx3vz3^iA&q^bnyoF_z8 zO<4d+QFRBd0Z?OZ979?$B~EtbsX|`=tBrGHV`ktzv#VD zxwyChfEazX+RmD`^C^{F(p2p*8!@TcL^(GzB?FwoCe6&UB!SAtfT*BA24Du3hF)_@ 
z152si`+3`qC97x?S^`%LQXa;f%`6bN%^cXAbI3H7Jf<|1LXHv9OeWfsh@@In!9;zG zIoE$or)5(*5pIX96*!k}mWCqRVFY9a0mk?3!7tMV5W!5Gb2;a6$j*)4`wzms5og|+ zW2dSpisZbD)J25AsK~AskWyplrVVzx*-a{nDfO*h3b;AvVrDo^+VK*aod(KC1^~6z zoU^Jr=R{=Mp8>RMX3jY#uc}PsT%^g&1SXq!6{`X!RwgIA3W`&Nyop#vBY?@W8<7e0 zvaE_8YqeS{(zJv?HCq+uLRFdSWCJrXHJFJ1HsKD4ldJe8P!mYBZ32c8hMdTN*#h?V4!Wk+sufEJ5yU{zxc~rV%!tV3O_dOsSXGBySjqb) znJ`mu-V;v(0#q_H@=mpuQosx&($v>jm8#46z9|;bz909Nv$kzltMmK!UR$r%B4z(z zkBFv%E$7T=CGEt8;ZHw#eDd|*`>NX5 zw9$Je)|7`QPaZIP!2R0%)eDqX?@6U7HEVQ@fI>jw-G8fOgZ!8z}Zx_~yW zksKnUb6_mw0Z2@ohszqe77&<1*bqU+%tVL=W`xY-rVm;>m}652M>Ya$$I3xRE(~3% zr(<-8esT8w&prGbj~36}4>RUFdDccf`$XJ4hqIG3|6RHMy?85P94iEfj)1kQ6G5Zx zhC@d-WsBERiipL=`Os9_ccT*)gX&6_rpd=gh(!BKUEPz@H@4We4)#0hxbfbHg`jD_ z8nT>feu8H~%?WTfON(yqC~mfC^+=MRyV+56;o{&^M`v$-rg`hx5a%&q6MQjV ztyWc~jjeM|tAHqZc4&@K2$To_N=Z`6WP8%K?PAd(@tKQy(yZ3CkE%tEQYq!`5Z@l%rNB7U(9KU<^^o;4-Ip-Ijzx|`1`Q+6r&-}!Xzi?5jEh$3NG$Dj>98;lGFm}-tz4z5bOlNHy+lG)Aiv_@njf_x4 zy7}Vp;CS3_*6Yo7yJ_2YDpRgqyY}GWK!oeQ7)vCqHp=Btmo z`=?JHBjTgS4?+m@*-=X4-r{ETX*c%Zco+ue$@Ip>Ftgk{VhVwX%rH-Mj#}Gx4rHL0 zD#ucanVemmd%riwvlcT$+t|KSQL1LDi-l+oA!wCh7zE6FW@?O=4aq^QfVt$fO9EUS z^;c+7)1Y-%!3yM&m54!zfZ6#Fz4s0g%!q@lLm7r4hGr`1YONFk5GhJ%nn?*eNvYAG zlpBQ^K1StPY)2&?i53&m0ufw?U}33-gP#^rhpU`Sb5- z_vE8Ls?)PWucQ48_Yxwj>Z&i@B29(H_XPQ^%*h&DVe?%H?z8;|^z(=M!j^(%Ll;U%fhEa=0Z{|TV@!$0;X{IyT~)mPv7-SJh2 z-miCsnzdI{ZAAgQkq+0DMUr#rL^;(O%TY^oef!;y-2d$>i?KVn`n58=GdwNg*hJcJ z5IZi-X~(R4rddnS63BuxBxRB^`at6}o@o#?d?}dSW&AVGBTfZ0Q*P6W-!o9z{tq*?tcmJB`@WVg$M-FfA zztr7%CR7UZT!90q=|rex_s>N*@K5BUHtfN00!(U31TIP4s(k3$(XDI#Nq?FLq9>2O z`S|?%k6kyO44$cKs8-cRQ1i|iQ#I%onMN(uup+WU;FdrYh!~L-ST95A1T2Ggs&HAM zf&D)B2?YLwpVL2sgMpJ30`P>WC^>;C0Tl5pCSanX=*g88SK7I_Jjf z*aEvK=)S)=%-n%7ma@ZL=VyVtiUy!K-??!+Q#m~Q!aO~=<<@q3e+ltJAAY5S`0xGY zfB()aFaCGG_=|t#KmSkH@0}hU>UzB%%ku0ZY(DkJ`o*2I=Ge_#Q%6jdLc5mXMozoc z4i)=c`*NZ%wggzNsoW>49WkRARga)3C>79?z%;v3RMV;eC}4~tg=`{M@^sIbJ(8(v zHN`66z#-WOF~vA(1?W;gF;V{hh1|ve3D(I(XHg?0bPfz87pdwQ73c$AC#awas4!C! 
zi`32fd&||=563ry4e{Q^%zgXq!%xz95_b2tv+GT0W~&EhLkqJ{KaHK|CrAC8aPbbP z3qD>E8#Q-L*N;h*)~g!k3s}xKRnz(7=mzI4ftG5G`6eI*9g{~y)hW=`F~F2b_(6sq zydOvi0JToBNopnr)c^p=5W!$2L&viQ0@eWfKn1^qmUkFW%aQ6e;>EiMA89>a+1wqs zgQ`T`RXOw9qrLuO2<@1k96tWq_4=y2;|uzevT@YJV!;7M+18;9&iTEPccm0~f6Vv3 z{R|AN0-jxG=uwTelBjb81Y9L6F#(7aLi0o=7i^1?Y0gz#RU?*4JHfSxi0CkE9f!7U zk*0AC#Ky-MH>>s382A0C4b-YFs<)jRwZwMoWr%^C9JI5x3-@3A^JB4R-utg!Z0?QA zLkFF&tK)c(%TC5*jEH`$HLLjmX8mN;*XQ001N*=fXPoNjT!Tbrwi`x7OGR}%HqC5e zvWG}151xs<7b!!QT5HoZOpr?&$8jp{OgRgdoJ%RH*tL>M#r{QqABp9rAJlxY=wb<32#Hxrj)N z?QAw9+2sPeU4OnB+HNjjlb6wUH}vBGinpG<^WedQckaF;R#GxTKkZKMFLEOyLZ>FG zB3i*v%%;!s-uGiQFty7-%VKIk=)9VfT9JXs8EVODs0!dhEEQ_*ce}x3L}Wt(008ej08|wb0YqTpF#V<&K}ACZbOb=tyF8$&YRP-#F{1NeI6;HVm$s{u0!9_8icW|qK&{mbO*F0EOhHKj42%)fOb`g6sHBwCs&^=$-g^cvCD&S2 zD={dl8W_No`=Y9h1ca)lwHjW=3{3C6nVn-Fi>gW?hpfb& zOc_a@tj?!LIEWLG;JvECKdh+z4NlN27HUY?Y?ZS)=>AT z&0aSSxwma91yn_3k|+W~&UrcC8&gUpd(Um0Iq#%s&7!swkz$}wO;u$IK%qs~<*b*9 zTxL0EM)H9XW=)*!wnNv>VvMi8_MM`ls*71TI$rjDuUgFz0BTh;j9rs+b^uN5kB*K- z^!)T9<-sEX<_R#K6j5T>b={E0&1NGNkPQ)rai^+Ru3RIc_g#RRy}i9!>vp@nxVUhE zo2HrW`1IzTGJ}yYct4?%rHDysnr1$qm)Zy4geE#i{jeRh5TW-z)t!i;ArU!O#SK$tnG^E!^B4;*3;)eu-M4GGL}leRIq82xA&NO$9| z>t^fS+DvlJ%znAp7m-p#&!1lK)l-p07+gyc+Z}h~;fKQTuwCCB%LZufQWu331JETM z&?x|4Hk$?jmvSpqF5o;OIVG+NR26~As2TtwqZv*)H-5?&W1enR5m6>$Ca55#dJNOk zt!4H|WY5QrGcz~A#V*!bYt3ke#+XILKrJ>N3@6cC(=^lARK)sz zGv+d-4U0&O5k$4rsRlYfKMx@SfOc(G8&V;nz4;OVN-5rl*{t2|s+tVLFpi`5k;vy% z&Cb*?cC(?EApwGa=K6-lDm4=kUFE z?+FeU=k*K!;K{-MwJXQhA3S}1?C~?7`pG}`^Zzp3zM2xe^2`fA^~uk@b@!V;^0^#(drY$~$krw(d6sHA5e)MVM?}rD?*Vne}Px z&$mqNa=DyfEa%+VYp$hfj#J%ElZN0TGduE#EFxz^KcUB11^^IIGjlFD=S4Jx=)D)Q zngqc)_R(S9r8M?U8wgm{rc|pmZWJV!F-8xdL?op=dHk@9DFpAFTdh_h1n1nz$;oEB za)fWc^M--GckgxY!lQ?0j%WYIzy1ripSv;)!|Ca1t@_cAef-^b?mm3{z7V@LM6|zmaIrck?}%W3IYYAZ^Yi!KeO(j-g&Q~ah}`4H zkC{9xjZ-hgd`Xc2Qv@RvDU)rkcd?W*41=4B(Nv3fs@zIDW2$WyBDAj7M0`1R3f`NU zWAe_SV&_6pd#`ry)}!4xyXnRc&DUh-MYAYZ@m0hd2I>IXBI~KJo22@a4wSvy-`|g+ znG&`q0M+~WbJ5I&Af~&a 
zSO`1G<7QhtHo@mqXV%!Zh?=riteq!{epkIK!(}6c5JKa5k~-#ToH3ScTD>#E7D(B} zYHDoWM-lO{@u3khuJvAmgK^E))v@d2kz3w@@zGnSyMx))=UY&Dypfav4rcql_}TJc zb^1C2?=DWnjR?bf+oZFno6YKPy!WjRs7T&XxPNky*PBr2l~4cJ^EYmstuF4}d*}Xp zZ+`egchL0Y;z_EgL!K`>MKBgo^*%;oA4v>jn(`>QvRZ~jF(Sda~~Q+J~o&g#PNy5)Q*b8J;g<^vQ*C9u+(NnFK2EVL}r7Z}b`DE7N z^z_}PJJ8|Qbv(U(@!il}Jvca6t+${5@t^sLpZddZy?OV`U;gq(KlafpH@hz5wfWwi zKlShIEt`wo)7Rg7=db;(zx7|7mG`cE=H2bX58?S)XYV8^zxmyZw_bbZ<3II>f8r0H z-G6xZ{x?fpJv@8x*MISE`*z+fo1-@l7qhG1|L#{Fp6Oryi+}RrxC;KJUE4py!%@|9 z0ak_tqX5pj@iBE*-01^-^tv`P+CFr0s^iE9w-jovnN11Av-3vy!6@7&OVf!i^#{`p zOveYV2p>2mPyYlhM1<@_p{Q9Ea*Qt4*}m7uYwMkWD=E1q$Voc<=*8#${r1W4?LGR^ zFg$(ZjW_76i`kF;X^pOI-y6rVozLcrJ*~RGe@FhnxQdHUIz)P|8SxHzYRkFy;PnAt(S@psGa2 zswQyB=M@n&AtcXTt<^*oK#+kM(a1o+$iN|x0U9w*bG3glH}QXh167!rrdUz4DpG3| z!{EZSo^^6YFhx}W?G0|-{k1ulwSU2J1B*rR&!&D=&mX*ec5lzuz-5*9pN8jk@1t{E zIqJNQM?q%F=ff^T>tk2P^RmS)%DKi`ssj}wG|1*Xh8egEut`@` ztBNWj0hx2Il!i>xkgZ=4ITT<3WE{~HiDD#!JPs+PE_jq`&}(TW&kYt7EAdv*P3GrB zG?n5+WFVJV$fzc;+YM%T8OJ47tEM$-Yv5$ZMLUyNQtM2>beaniRI6xY0Ngl5Qe!4^NMzu0-GNo9B7#haxLoYzoX2r&j<2PZ z#7g6%fi#Y!=2|LH)3$!T=t{}}ie1Alq+E!&R?VqQiCo^b-g!_PYxWFiT#D?r!|D0i z#l;0tHQ@t=Y4m&koZlPo6wNGBqZIDR@Iormf6O zOXwyxDGQ)toiy z7<|-f+q&E2og*f|$WCL76U?gG&CIg!I=;*y(n#51WWS1y%fWU;YZ6^b$ zREnyGCSK%ZQW$XNLU3$mRb^ud>$EN=sfstkREvl-!D%Tu=dok}VsyqoaIwF1v{U^c zUxbJ-kckjYJ(+jj6QhYve0e}ZQXmvl!zqsJvX+~MI*tfEv#*F~ATeTy5dca}B0?Th zDxiW^$KAs;)|?Zen|ecH2EwU7#M&(Gcoh&xT7hc3F>6FV}yo4=uFYj3<(Jh(GZXw6O&^kLPdegZz8HtFLftX zZL%MKU))m>5r`03!K%u1O++X4l>tDqe5njHa{(J4OlwMm6zSR-A&oh6hycJa!Ovi# zlOmMK6crpJTPaC@pg1Hk06@9)v%>e)hnJPAfZ;^Ju*=O{L_m~)LT~^e)vQ|53P6Nr zSlFVXRj{fmXz17>y$?lz55c)WK$3FDPKO!*$-BH68VKle{4Gk4#8a~Our(?{LJI^X7;V~F1S_4*=>lVX-imNrt?%_Ye;Z6@Szog>5J3KsGZ!Ti+On%$H<~FsP?cH%2~~8v9d2B^+Rfr-vxzaBpP%2q_qKDsoi}F2UWZ{N9QF@pecAyE zl9^fG_qj;Z$Zod_e!=Ya_ZFM&s&ipJpWnZK7ZDkkx1KqiFWQF>@9z4|m^O&$Jg3g*0#q@I1!jtt%4C4D413os7YNnIx57ntUg%9 zY<1na$v26ggj;|8`d~+H2=^N}*93(rz5b=o)6`*o?z4j=9w8oaY!-bvNW;7}~b& 
zx(-B5wQ1Uk8-Bjosa6w>&I1#Om??tw+g+`h96CpcT#KY$RB(BGAp9wWlW!LlgVmMJXbxfVyi56%ov`G+y@TMJ^Yhg>j+^baN)ExBXq7z4DVTXo$vL-JEFeun zTDdHvOPPcb)lbqPoJa(!3dYRbaU-pDT?YW?=jUT8%v=S6bFm5Y#+8~>>teB(&-P}E z`IuJQ?UtQ2%^b{XF($lnd_9+Rc6Nr|Z)#OfKEzt_Ti-y0`TY7z&)zs+o$rS2eBSlj&FbR3Yxbts z|B%L4Yh9e+L@Xm(^RcG??h@Y zg#jln-o(kuIV+g=@%;S!gV-xE1J!BASA~=Hdfv1Zm6|XG8cVLe@gjnNV4}i^I0fuz zmCifhffd8DZ5VNgUhrjqnZkof!ZQ*py zb?9eQG;nEsSXLH>(VqjwNSM1>KNCkK-t*n-Px60zKbt>po}mLN`N?~^iE%%`)4Fq^ z

tAfP-1zHVcUH{*>6Xu zykVH1KLpgc+jFU4F_1$p8L7_~ZZG|LZ^bTYu->)AIV^^YW1&f2#EG;_i3ed1vwU|M(w$^^3=g!=L%X zf9wli_yW4wU-}FG=Bsbt`%nL~|LDda+xy%rKY#PaXaBF?efrJ+(|>fajO&|!^!oCC z341>8+DUtr&bj2;?G@|H&EvATk9=OcnYBT)LeA!>7A;y;3YUz|Gm@hpkeDwgS>tFx)PyF0lZ@=dEmN1^3U930T@$;Ymi5OZ0uC=mb_=9FB|Bs%+|L~Vp z%fzgDaQ9)Zays^Y-rafO#bMpwxpC)>d*AxS-+TS*yQAG~xojPhjHhc2Y>vG55!oRX z(*eZg-P9Nmk(h!Z)~c$kWnz|_V?Y2?7IOfKQ*8Aq!ymUYA%^EC5R$dxAAKw#L4JEq(?V=qwtFzMhsJccf z-QJOjHtqQckzqHtrqmHoqJu?Ls246&#ZraUK!I4;nPY=3CQ$T1#0Jh8F)OhLtx3TE zpsL^`k_ALSt7>-1)2ymEO7 z-fjB3QTkkPm2S@6Nqq8-i?G~_!NtO)fycfcxY*}G*%f5jDg%nVY{$Ciyv2qol2kSW00$s|c~>j?z|oIG8dcj6H{;gY;5|8D2FyA80M7dmz<~(J zdb`Pl2x`%bC~#2IA~g-*yqw%hG4W#?QBEf5aFpbArp<7L*xbPH5fwVQWS<$P&x1?U^k%!_QfmQsuya1(*u zAa#?rZE0$)g20i_R!`?;)^%M;39&G9jBUw!csSqR-`nigYEDXjsY7`m)HeepLxqVa z;Js(Es**}BQe%v57f=}ioFm6^Or=&q79SiaIWhuu#D-|9tDTIaI_HQH30#PRm5An! z06?o0)om>!iAXgwsUrYDN}z^_p8VyQkGYhJhz?Oo%{h}%2mw)uh#h-kL@ZLpMg$id zcbO;-5-;hoMbS_TB3CPBMMSEWYNJ@%Za1^poG=VSQdM%ST184J`CxB9ICp-2Dymh% zIgD*9MO9(qxz}kzZir+$?P-nyxtdn7$^Er$0@$T13{7ux6KBeSYbm7&s$B*NNXerH z=fO89no3EfkegVVoQBbR?;I1^4y!R8acNQC|wbse@ zpPh5D@t)CfAO=E31wb{N01ZH%W&&WyfGDbFs7MZR+D?I(h>J+6wbt7DIE72ARGLne zX_5~Rkr7_bZU`9=07D3u1=9P$A=7cO+6rXv9Z*|JN+y~@KkXJt*O zDxeB8&}g8+274m9*<`aRvME}&9JI+`$QG@TLk|0=e^`U<2wD!y!7xcV>`+i_vZ+?j z=mBT|1RChZSSSF6nyWIaa=i1I_TFpwzRy4Q%>p=B4APccwDymzjI4;fC(qeu?X}+T zeV>ksQ~A=!nt4lVc=QOEG#l+a`s1vf zYI^;~E7R%p^qYsX*_5$9K3MFXyFf%cJF~7|-@AWnwOp#|WHQ;T`v-g&-A3{5@XqT2`0e&w6L^Ru7)D}VCGe(KkL;~!K_^Tv%gzx(0~ zt}gT0_PSpiV#*l^J^QZfaxCX}W{+Hdus)^uaxdL`-=Dg22S0Om>F~$Vo2+2D$$XU*xzq++G+dIGg!iz8N?d=i6 z=H>S8-MdvYdh*Gq$D`@t(Y=$CldkJ>$|dI}HCZK=yuW{MAhVePv5PSt9VJzL8;cEa zHfr_W1FDuHQgSZNxrT#k>0|0Q>v}u^JZo7)=RIPVV&7#!ymayUpZ@6|x^wr|_n!aO zd}|W?C@1jjHk(cCbC%+{m=-e7k_)J1bKD>x0TX7z=IWJ;7NaS~oSbl6m&)OMc5%5} zE|$xx2}CfT?}jjneR}GtcfIt|E8XUJHfqZ5rS@d8GeHl_Eq8EVUQOeN+BoaQ$U5__ zaQVT~f&aI8{4}i;i9jr?8sfNd<1vIfr>uiXSrsjLGXyQUBfslBot+3uHWe}*>>?Gh zbzL`2Q%X5qtj^>K<|sw_gosL9xd93ZF-8DXgyG&T>{H*7ubL{T3aENV0B5OHW<#cw 
zO4pXhAA8qNfAVMF`R+eGnv6s3Uwh+?t?d~Zte2~=ef5vdjRR3woSb;4TeF!YxNzap zcvJ`2iTN~mRxsu!s~sNRM_2rKr|m z^*RlC;nAoGHH%2subg*hwi<_y1EV6aY2qv%!4ZL}Q!B+x6$`pqaSn1`q#}kf1OSkn zGuiyg^+#q~+b_QO;+t>YJ~=swz3lJr6FSkHOF2E!s;WTfXmRSy3EAoCF%g|Rx8uD( zJw0{KnQFE)n~Zn1u6dqKr_;Ol->9l)Hrra9EJ`V#{NyK}dFI`hFJBp){)dN$C#U!B z@0XnP-S2+y?|=F;SFemOTsU|0#_P|0_iHJ|)oOL`?(GW~_Li&V;r{x`C!hS}PyfPn zI{xnWzWwRXe(Lo2Xm@vdwEvcgk1>u$BW4!UoZ{dn7#Nhx<*9Rlh$O3ufpJ~em-?N~ zO_yw?oVYd?28ZmZ0OAm2X2fybnAeynP?e?4TAImZs)p^I>8?I{Znj*6W5sF|!f5Ox zYw9RUsaPsEo6U}nj>c78kDAltlbF*G3`e5DCNQ|4h8ad(kH+KiU~(4`M-0q^g_)re z?K(qcUaps0lSyggl)|Q448&D{Vg_c=_g&ZHcr=euTw_Lr#wul%R`)hN?gheqNhhVa zNxkKipu_QOugGdRV5@4*alKvx08FRDGoWqbbUdl5x@%hi=(-M}>Q@~A^f8UcQ})ex zGI{rVKk%*ZKL0DP&z_yXGdzk2wCJzFJ$zYbVq6g_mE+^6lgHKH!)YuU)xD zqtVf-t!CR3=S535KDm46t<%Fb&H>Sbk*1hcCe!I`JUdw~j*pM)QDcZ>S9g8!$fMoy z-efMTMebIiA~M;7+YK!@b}eMAz=XG`Q)r`ehz`qOS9A^lMY11{aC2CDN-*vMP}Kz~ zjkq!$dua0ggHdW1vu)G(_rHB``|kQ^b^lmKH)!jHWz4-+IDrP&1fz4bE*RY&^ytd^ z5aVLg)%TD3?fF3x@<}P*X zygUk4RYb&jwRPUD7o=#-PO>p|s+GyyCD^iRY*H+yxc%W1e(10Ny)XT#OP}96xAWca zyl{GYI-k{jJQ+3B@yX5kWbb7E?!%8fJ#pdHw_Z7SaWA^+?!n2{toqnr_;>%r$A0$j z{=L8b*|*+$`OcT=%12Kg{o&NU>+*B>`4{e<8@=(Jm%cf3A+BEgzyBZpH%~tGz90M0 zhwj|^#veV8fAz0?a_ci+|G&QV&Qe{f+xDcnP>-uflh$0|lr)PQt9R@xf|f>O+-}QdgMpzx88~}f9gkl|JG~Ex4!ug|MU0keEJT zXaD*mkAzoWd)@QaWIRCxc7RX-{=X9+_~*_B03fRzA06*bx6jSCRv@4L{MV!CnEVsh zuf6`lcYpiN-LFlam|eVhZgX$M6#J9teeFD`l_Hrfk)h}qnSz#5L{yXznV5=U(Go%+ zHBuu1vr<$tAuN6_&pwSoy!zvT)~gzOpR7nx24ti{#ZW~GE7jyJ#U8}QfYTa7 zHPQUwv`oWUj@Sc0wYnFIYZw)06S#NarZYU@(mP zbI+853i=9CM)Os+iUcLLmqso_SFAHvaH3@3%sZ*HD~6`*0v4mdi4lw(bXm0LI?pAo zG@}|3yWmQ&B-VLNC5#=IQxH+LqGZFA0E4FoLcpN;5Tcn$Cv+yOn3@_QBMm|WJ^09I zI6&Co%lTvZ82?+pm^q7dee8=U0z5!Ls;Yn(ng9>Q8;}!}wEM=dn67iR3!B?Jw(#WK zh#l;B(5QXFtRR-xfkG+uty}%h)@WzjL&iXn zthMA&GR9|qezDjfdCn~tQ|7+jPLpPqjrzm5d6TLM*B@HwewA-ro1I*^BrBZN7{+8r z%XN*Mh?G+Yl0~vzP$N|KtjYIb2iwFn=a%x6y z-j~?*33FbqH?8FJJKN5=9x@}Mff6aioH6Iv_x;JyaaC0*6&{bg_erts5+Iixx26+B zY}c#tcwAM!j}l`v%Od8GsyZ0>loFD8^6GubU5R~hl5#0UbCz5RlNb)`?eOh9b2qbt 
z7-LmcA%v7lN~Nyrs&bb5s;Ur8ODd%x3ZnDgZEcU6rV)|NW@C_ZDJduCM}ja^m$hC0 zU`16&KIeQ^Zt;Mu0uKmX!_5hUOFb6@LB!zeVhTVWIWRl+q!3jUnFH5T?Yhp?4v!DU z)i`y@yw#PTOeUcj#VmV!=TDa>F=-umAYho;0MA4O=Lii=jS!3o2~f$-+SE-%9fEU2 zrd=0BEO_q%*Oh1H*lkj7>v8Q|SZ_AR8>yUk=){x(V@iG3&1SPP1OSMZvMB%wf_Vo3 zwE+=nQ3WLfMsfp~B_WD414MPkC?TiRZ8oEM$Wqjb_r9)cM<_zH7=R(_VkZ3d-Aq16)>1^;Sso*foCPV;ot{RQ2lsiyYRUw4X=VXj+-;xAXC1fJ< zE&z&Z$tiJF>tOW<5d}jac1-wS3W>whVR&k&+B$Zg9XrfXREiIQm=&x^pp4NhS^=iY zdj^zJfC&(@3aSm*CCDjBi7~1wvkOfvVttp&;D7f&kO4IWumMd2W)96B(5BDmQc{R` zP`b?o5!K3YA2vxsi_oWW>XU|W-vo}P$s0HrIc8*sWOwA5}q|X4p&h{MDHsg6A>E7 zFo=UT&CG}lX1R|>IP|*=4aEL1T`f*%TZT5x9!Y(4eCTpblu&uTIm#vs00>ELcdgumy%t zheo{}tq<0@#X(;!B1MX-0Sv>=u1G*4B8Tpb=d7Bwj9oqa(~6WVLY3pnv7`DyML{-I z1Tiy015gu027oN00EDPS&bi#LN28z$hLDSvY%z9K)%ZY!0YOx}iXhcsEDD@6pt4yA zKC6O?cT`u+N)jS&x^7d}P196WHPnogV`4midACcaM|V$mwsv#A9=TM^l`JuweqBVSjut*!NXz3y5;>Piw#71XEwGWEA#dg&#AGT%M-+Krnj^`#^L z*qzMo+`X@Qzi#MsxeyWd&Uqi|3W>zjlU21}t$H)7+zZ{B$Qr4-lBg}(2Pj}Iqv-JOh2%&LnQeNLX1JN@CclP`bs-o5?NmCTg^5H!vl z3|9YPO2BGD#6x!uXhtKTGT9MJV2y(cod-ui7)w+!vEmJUDcZMC1c}hsfht{{VpFLj zHSu-r0Mn-P83VGfTmZ#Ds$fMk1jInzIyPylipWapopX(!uUBbd z?YMDm>2@ZUe&k1g@{xz$`$Hf85oW%B>vfW8S~~7Fz4hnz=C_Yuxq0Jj^yoWJ+LOe_ z!5^-BhAQo!Zp~(Qj$Q)5*rie3Oh-B%)${4}dsJvfQVbut%?JA?)$!jHF*c* zmoZBl&2WAHFz36kz4_|aXma)Z!*9IyY6@DkiG7)lJE$&)syjTs<%yjO=gysLyLH>H zQ%glMAhLI>^{MwB5mCx$JgePEYFMvZGx1f(CDv27YWq&h_GmlCgzQ%9H2|35hS&AL zYchp^AfT%3s=oAJdG5;$SXX|w<&jd9mIGVKpF1B88oO8f*_xPS=2_cLp6A^jp zsi(g6t#7vNx~}W08r{AB`n|)OLy}=Ko8P*B`}vo@_hTRX(^s!N^xSuz+pOCQ7cN{L zjc(q&;XO~sGpPvCuNKR5d)qlD$J4#tr{?o3tM$>z@xhbtczQP7df|l^?%p~2&__Pv z{rKgVUO%{fcPrF&L;Lp*Ja&xHVS|wR4Ko^9pL;ZM4phV-NVHYIGF1nabIv)Q)cS}!O-a)!D@av2 zUpui_=SZE)jWRI9VP# zhroS}rMKGAXnUrTDyiKYlZQ9eRVK625}|j5u?=xiTN2$wIo@t_#d(jr80^QW%9``Y!#Z}nZf(qgOS@{PE@#|0}GcRp6*9ld+IEV}vF z({%g!h26bnw_?p-f!wvefiitI~lL{ZGIs+O4y4jR5-MH5F!{zI?E%k^vDM$BTFrS zDN*({R7g@xS7}Urbm)qsUC(b)yn`nu$VHK(?Wb5qPqM_N} z?&|hh`Es4#;-~J6xAt4S1V?VNoBJ6lkeqXy+X(2ZJw;fZ+pbe9 zSh$bM7RXF6J4ELdkO0-0FIrfYv^r?Ey+E~rGEt{c 
zhM6A`4Yoe~Z{+X%Q)7U)gY(1RF#Ji**#(b)>&-g0l2d#4sDJIaUoP68|B+AJKfdw8 zi_ib+r}m#ydOkcR^~6dC;8r=uFquLz6F^`_00RX;QZOw95FQjyf+-L{UaFq?03?7QDF8s*p8dR1JaiC$@Fmoc!N8ELWFP>@EdxLS zV6ZIi`{tM7T^27@a>|m?yjrleFf>)FU;}Sq4#?ijyG7>m?0nJ=z_1%9j#(rhuk8go6IMr zlvXzeq=1xBM3FI;Xkr8eX2ClESS)+M5d3&D;jZgSip{8!u56S}m&@Vf4k1kIT15K3 zrP^T`H+3t}k1IYt#%2_`tU;C00bq`54jq&CY5<062yNdEsQm}*T}3I#kmjQxQIzI! zC62VDpjgo)n}8QK05gfFZc_IEBMvhSC;(c-p^)Va2AK(_m5(t_nlPoHsjsG!xaxh~ z#N032wh2`%sKqt)R;&yV*>RErOlVj+&&+GZ#i}1S!8_mQ1Vp~3x*AEwO`im4m7_VT z#}kxHDydh?>bVL`-q?Dhj1`8?;sBFUEvj18%*>K#3ZIM6)`pM(OD;DOA8Bm z141Y4OgI9VuM}JI&a`+02WF;C&8D%f3Sj_7%mEcGD}gdIlMYam>B-SaT~&<1W7S!&f;nr9X)>uwiA2~gT6CU4ho0I^yGe0r zS2L=BRqIt66ydJxB;~peSyjkQCNozBCwVX__kE99+t>xq_wplT+Lc<9QAIX6ucf>$k}A~Fz-DWw#BUAy`AmMJ=B09~IP$D}?$ zAku6QW6h3mP|;^DRCKW^`4yfPQVbM zPraDcAutl2PkVunY77AEea$p1dZ6gp^_B@( z6w6r&f_Vqbh{*_uk%sO~B66PCxvdM;X_g|Z6gL@mY9C!{@7%n3@*gju7$~x zV^9=ykR+)QI#Wg>4{8w%))abI zO(iMR`PRJ`iCpm0zTX6PC&zbQY>&2PJ2!5=@yNsD_q^-FIZFsz%k_N)5rW>N>yC+w zA^3)TGkM|mH~;4U?LS;BPUkEE2|dh4yaaoal+Iam;~bKIqlId$C$I~P;wju*+v zq;ZwfTKdKMCMPMb&MPU7GxL@kjch6r&1`EDHqlmH%0?zyx;!j6%qS_U6+-~eSa}gi zY5+Dik77jRurPx2!^{q&_M_2ku~>wvDjJ@C^5d5-UA%Sg<$h)NZr?s#-&<{NxxfPS z*1e@F`m%1C3DJByJ-@xZbN}A$O@G|fHS}oN9)I*5`$zjR^_2tWoXcAJa&mH_T#ZNb z_3FlC+&pyYp~tVjzoUX|pZn~ucI%^uFJFm$f8&*JKmD$U^V+XACnn{%+W@zcPRws_ zU`~0%Z@ubLC*al$U|>Y@;OO+vPY)pCXmZJf*n$xt7~8`TDeimq75AUp+TWduM;AjzP3 zKoVCRG7*?rQZ=2G8KcqAB>=S+Z`(LDhDp(URvQ}uZIJbuWNm%7v4b~W z&Du#!<8hd5x$O&E=g+^V@7rm!bL-AaMOJt2t-&g^W?t36qds-P2j`o0*E(PMS#xxJ z_uTFtIhsxzBI;7>k!Q2%kU(VQb)UmH%qEk;xl^0xWM=A&y!YtO$dV$kv$F%DQc_j< zs;Y`&UDs{igb>E#v53{RAfhP(00!JUzx(tv&wTE4pZoHc|KRO9ul0I8*vk4+03gTS z3<>bY&6jT9x$&V7{_qDs_@R4u_kZPAzxc{4uYCODAFFDY6TJU@KQtQE=P&Hsym|A9 zC!RQeaVpB;IN!SU(u*&B_x9~uQs6KA8-MwshaUaz3txZbrI$UzwQJX6T6KA?v?{j0 zZpVG!*OL)@pR)|T#C?ikHt~Tqmm=b-$}*>%*@cm>N+}{|aXbJN5s^4xLNlGzF5m1m`iX`d4InD{k{b0c4j zY%*!bS34Kh?Rbw~xZ1w7gITB&o{YTiP+WWGd?=O6a3bnmXj(cV^Y6YA#d9JhxhQS>EdmyuAIF5 
zbQ+D;``PP_qk9!JV<=ixclYndS({Ie7dYmeHvOj8Togov9p;>dMP?wCT)>(3 zv0DPNDy&!OXnbxpe|R~%iuE=X2@W(DR=1vRMRpR#&IM#=1#7h}bStLd4`y&4yWhRaHEg?+6J2i4DO>0kLB~fAB~DlXI&dz4-2b_b2|` zZ0iElTeX5B(Bj@VpZx;Ae7kqOx`r)7)dn3MB?(+4Ot@rf5471U$=9)sINIvg$Mece z3!U?L+GLBo9TL`k-mWY~iFe;r6<_tNb8;a}wqh~%PD+;CO{%6X=o*Y8=gK(XN$9&0 z?@hSvtH&4j-t^uxkmR+ilQlC}&-&C$pNgQ>bI(+#F4U}-t{;xaW%+8edf6>c`{wc+ z?$N%FRd&C0ctNnX#z8(v|{_gkfT)2Au9Z&qFzy3Goqw&9U za`3C4`@R48^WQkyd2Dt06T4Qt_g&L;Xnp!K|Esr-uHAU!@BEox_{A4){bz5;Y-^s! z69|*&-ZV$X0+LV^K+%&h*60vbi-HwIa0aDNby7}spS08=f4}Fjc3W5}vPyoy{)ZqdFkeLikQv<{2 zU+VP?GX_<4&V>*RppX6j{{9k^+%GS8@!8uiUvBLF^I!VnAHDFM+0NYyk4-9f(OD%C zEEM$MN(~!;>W}-a{r~BOjzQ2=hU2jVloV6DO3M|hm=;n26B^hH^siER{okp}>rhiY z6x5#m{%zLtFvvkwBZMI*kjt>1mH}mhh^UB2fPof=ep_vflGhF~wWaXgFbP_MXdt72 z03cZ*i}UR0Ax^!XZ0tpVV z&9?7Wn~i9`zc?l)%<=T#{${gU62%x}F3bL4wdtHU&%D~Km+KXf3w2f3b)TXNiO8^G zIET&w8Y!R*1Gxc)JfNP@5HSdF=oIpzpRej?`t8SI)YQ@eZt30xB&%#fT)5muH;IW| z=!$3Dx@_RcxiQF?A;-4g^j#Mbjflo$3Lyxqhz*`!W@a`8Lt}tp!v@2m^mc#%fa|)h zszd}`zhUysY#?S**FlRR+GevE8d+DXm2v>8=q z7Gsx^I_JpAP#J}YM2z3R9K3BhHbX@3JTrG)cU-pf{3&(8_pjIKE|irnmV~}#w5qDA zT7xSpIkhQoM%9*?#hCDcPnAO)L>XOn3WL+BC7w4}ZZA=H!5$DI3EG6*TwoXGmF1qL66#y3|=lAAvovyl+AF!$phd}zvUb%KrRu$LI~awq7fi_7fbQP60>C(H6t!O9(?e8D1S6v9B`FuK=)?MEZX7J%dWM&}nLODeuVDHHJz`+q1K~k$mqxE{d zYBvf-&J9kI;DVYA-U-vy2uyf8;RpZ3^wqpWAHA7PX2ZkCogQZcA zzOHG&o|xq_z{O$O)Lk1_n@tt!;nS(AaI!eg+SYXtk<-(qinMKuz0+^R=7@eVk=p>8=l1xe|N=60m zy^+hQ7%)4}%m6yH{Gh^QG8q6JXGpXMoAeOEaH|Xzn!{2*$fe%p0lxCav zA%t;LIp^B$1c-;phI0-Lj2$8pLn$TYGU(YNB4FP8+#)i0@=1zGX6O8nN;hF7aCBA) zt(K%DDOJ@V5&H^Ni|Ww%OFXFd_Jf63!`EuaIt)J*VWtK)*(HRO+`xPL+iI@-q~3C<#f5cvNt+>{PF#)b3ip4`JhN9lFa}~ zobiKKhy*xP1S4YQ++ey#7_d+XgLC98B0}g{RfocJGZis000A`^%6>!*&_ITDoJ~bV z&5V(%&{VEUSX9-F%v3>9O_)-j&+a7zMMXp;2MCMAuKi#9H~;O6dr#eZ z>+WK8_tu@4j*ku|v$3z5lf@|#_pyuGRz5V`3?Mp0_KvV98dO!4v7{`Be0;Qi?D6-Q zjJ9_!U$}5-wtL}Me&xS-?z_MDj>q45aBy(*trLQ2POvqfZ&qzaV$veXOkEWiY-@H& z0IN{H^vbvIAKkkC@U`7@m%sM4dt&Wq96WiXQOaHAvE(jG>+0ssJGX}~X`3=RPe_I+ 
zscbeIBV*>aTcdJ`MY7ftn9SxNyjiVg)9Kc1uDO8F_T=HZPK(XLNW0h)OM(;#uWZfN z>*a2!t%QdzTv~Un#k}tK0|hSV*{{|o>-EhH1tV?OyPfq~QO9t+Sxeum0zo5I@9xs3 zUn4>Yftgdvj)NOlRiQ<{7N!hb)jK(_>rgr3k)r8j9i`w(Y73(+H9A@@z#dfGqal%+ znwr9Zu^w8oz|1_7ff3^N_O_lS!qiMu^024`Zc^88ZEdZVYX)8|Puk6CpV#xr1xoJ1 z?tFW8ebcSy^Xc<1efQe6YbSSaAP0xGT<;$q-zq74c1I`6TxCruy>a3Ub1aMF`x)8Q zHO&=yv{;wr@^Ei!;+pX{zVnq|`pti|?V|&^+v2G|_r@!Wi@cafNe8FVZFVj_YShTR z8xE(V>dA4KSXpO`U%Q*0U3UnDsQ?uL!v_(3_qGGgegGhVh-oy|0$$mHLXJ`*dG$WH zlz~lw3$hUd3YZuujw&^9W^8J)l$?ud2(A=kD^{`xKov{HkiFu{8E=izz`E$$4D7V7 zO65h&KuT6UD@FwNMC=@jYSALb>|6jB1Tp1)wyri~N}<%2gF&n+t*Aq%ZNSpin$SSZ zEGF2T=;n01yfq245}MW$_8aZ*acD1%C-1p%p{lA}+EP*`0go~Fp>qAItwLyAMWYa$ z_l|PvQ%YUxnHc~#n@yi05oxxzZL4aWO7={Ino>r`K6vL`WrhGI%E*CTH42V6=X@r% z8kCfC&N&w%0-!7saUZzoz*UsExcAWwHj((Ub^_ux?A^~<>|?ik(pVvLu3QWYALB|a?T-A zT~`3AmH`wIkZiMBkD49xwWBg3#!b7uz3nw1%Ej~NPL2+m>Ri_~>vq9D*0pxJL_?~a zVrkoC=1S3;oO3Sc+^0C~q(!pxeE;4($!>Rddv|-+VI`XTzW3~Qx3_9?E_g%~Gga+# zZ)c5Lh9keVy`xe#o0TfmbzRr>_H66^{rk)1vT2&#-JKlMpo|xj{lojO{l=>U(`h^& zDcJ7rwh7$7e;?FFqmgYklC$@oeKCgP`bhe#Rx^r+RODSy&#C_j)u@BBy<54Wx z5i5d#shJ@pHSN3RK~8y~88UMSe%SP!9Y@JK_5|RFkb^>2HGsGI31nafWQg85AnQ_$ zt0g(E>$+*iKG2&tZf?z{&%EpD>3nkQ-rZNWfQ#*}bHa z0c_&~V^%m>k!%2F7(@yH7@~8uJ)ff)p)aIq_RJ^29;+usd9|Cn;=C0cWGWlf1^-oX#6=J9>O{RBtjQ0dDD#VP!T^AO%GLG=o^g%m|bTOeALk6=vpK6wC<4 zF*yVPBv7!ccfR@)Tlde8>bJV)@rzI1y#4BImh0&*0sP0G`pR#7_s)TTY&1Dvq*bcW z8thW#CzjX21lo>8Pv!V{#G`)Q%r2ec=v9IF{`3b<^WbbPVzw%enf)V}jQA6n;p zTxiqn-{G_( zLRg);^}efAci&raZ|h}mBO1+AwH5M{OEeiRE?l+a>wfv|^U!X}`abQpvulf;Cnj*$ z?tR^tep*RsPrvk;-@drL{jT?X@VPI4>a;(YUAprA_k3ciJ0E!82mbB9_&@sTkKd!R z`Hz0_)Bo_c8$a@-YrC2C;}X}FUrSg1cfb4x@55U^v1(6tp0Ipwxi~zUUbYaS9+R)Z zlmVTACP4)n+=W^IaoGDn>98oVk(?n;{@7L8vpv}ZZQ`tbQFU4ENGlMElvQ_hxV~`p zp{GCclOKBYFMQ(TTchb|IZg{w;qkn#%okC1gZKY|_&G2l|5aZS{^0Hiv1uB$>h3gM zn(p}V_}MpJe=db@J$jyYuS`=vqSjd#%OY7|s%pg`B>yVC&i@);h^%I2$ViL`21BW3 zRo=eE3}E18mbou0)8>ow;v4%W~Ku%0}+@Nc^qb?Y$A-! 
z2XRCK=ax6Zy>EwGpV?|g%gs?;`-XP9I3CUS?(e_pu-)A~_ve1{C�mM;?0Q#TQ@x z$VWc%JHPu|FTD0W$?4$m{StR76FrIOo6y3kU)jqOl`pgu-B< zn@ms7UyJ@73_DOX62mCClx@*T4S~on<7Rh~_GZm`B~{JU70bkF&2oP_aYtR8Pv#Fm z;l|(@n1LnB10fz&Nd-i*hzc4KyiK$qqS0t1B5m7x@2e^Rq&!d&7>S^i(znq$*EG$* zEFFGUUDunK%%=h!duWm_ef|llahSm%nVvl>OA#os zswx9C1SRZBkIB6EZM#;5sxs%?Y-`?ksf&r3!NiDFwQakhSZ+KX@9fTAd*d|&9a@N) z*%K2IA~t~$0EhpDsCqWs+{DdLD>A5NMWqx)G(Z4?iowLx3W_SBf?XVYt;VN)zlgn% zt-7^kZCs_rlIj`MsO>_a@wfp{?hPHI5eb^8s9Gv&)>SU4i5V~r`}Vr7n|`$3q?D3c zA#l^_baA>WCDt`Amn|68^#}k5iYX%3O<1qjAylJ!oO9|{8$#bSje*9l-=1&foR5x< z#?3f1VQ8=g(YS8=oT@4aQH+roCXMeU!{H$22n{$F9dznq`h&}R@E!n4DLD`2 zy$%3GG--Qg(PKtLCYM13Oc79Zm}(Dq+Ay|9G|5Hej36WerBABn{a_cv;JhOiQ^_)P zBE_5`gFEY&C`3dIeczj*gbYAvHGxPjD!Hla5JGki)rNG#ux>Bci@L6T2&EJw457|B z_kBN`?joW{p+aVcZbo6RE%0(@(t2N;Wg^=4BULpJH8Cgvh6Xe|;0e#rc2p2C4sdTfbJCpUV@(|poDsAX z0hoxHJb*^Ap{j}=6fqb8nj?pZ46K8kZvX;TOyVFonUSiQY7syX112((VIeRnswS$a zqvDIIA)RgfhRP9xEaKD{)Byq++8Ot#=Gx2zjg-LvM8#A@D5#c_*$in=EL6Y*8|GnA za?X`fx~?1UQb%lNgow_WS=KW<5g{|NLjpBXVM9lbh%6T?VhGHfEO$8#e=QLq6Vurf z4Vl0o=R71)T;Sn0bKVa~`nEkQ6IOs`HobP`T2qbg-M)hcQc{fV>0*B^Otvj>L>nuGai5K>tDTd=hpdi^S9o5 z_2B-SB3TQoNC=gQiWm}9p>EgfFs|m~>CgQ1U+y-rbAJ2M zdr)SiQlN)YjV4tUa?V|hkk70Dle)>8nbUNF znPk-Gt`lw7ZB2JB?KD@fct%&V?{Z;wh-Z}^gIZ_=1UwXxlVfIPBt~)st9GrbXhK9o zbe5Tk88vjWv?3~QI+>gt-;6xp9szj8ZUp7&2qAG@~o_9zfd>^~h=^Qz$<_hZfku@d8H=E3aUKORvttNGNETTE#JXZ)0EA~t4Ur*cgRy$h ztSNa=Gs`025keI-n`q9N9EI~+vE)rG2rxn|Dw6Y{L=m8w#bgDNcd1nZQri9s4P`UOYgs$7fwgWZq{dhd8d=Qa?(>0qB12G3Y z>jsE1j+(~In5YW1dbI~|&o1`93XE>OUiT?_?}rVy78_hbgg`{ZZjo21kF%{=>XL{K z6=EV{Aa$sACR@yQ`P?27A08~5(X_6sS6}?z#rJ;Vz3+W*mi#-P{?zY$K1E$lxBUL; ztzZA`zmp}ISzXto(bU#Xx(rt3Tw?6{zCE%;>L#PvY(8OxyyErI=Er{g6Xdz=7hnIz zR{;9TtE-yW=f3Zmorp1cLoH~QdLl4I9~=UtRG4|Qi4R|T{NMPC|K9HT%fI;d|JK3H z8xLK+c>nltG@X6>+h1<#@qBjv$3F2hA9(){fAiU|KKtw+93J0XTUni+)OB#2D(4** zHe24mg-pv;*S76Y4py>)`b!rsu8!9lZGE~TwaI7}kaLO7xwOe8I{1Lc004jhNklmS>mSYOe52riloRAO!lC0P5n{VED_St7Y@rfV%*vCJ< 
zSlv51I(p@mm%jLgFP%Ss-Z}s4zy52#@teQ)Z~nqx`O!b|WAA_8M_zq(H$}XD?P-==cIp5FfYovXC^5#NKv)&~TuhN%dH_N-MI-Nq$8%0G84f!kAXL>< zvZi?H(%xn*C#U;I-`H>FThdg=LU)gj9-qx>M~m3@ZQoR3``p}0%>8N|W3SqQhN?2l zW>SSwQF36XadtS{KEC*Ks_pE>&j$R>$q3ydW51d@ZE8QB%yes~>pDNaFymZ`D~Su$ znJd)-rNz>#ZMsc68jYL_%jL4JN1mOk_Nja0jn};Qv)Rnd)Cx1l7&n^zbc6u=t5fq< zO-A1HS_-sBdz&{V!K-5J_}ZxJSF6)DR#l~b_Z#6|qZH>s-WhJae2|wBuZ?bvs-}iO zNtnkmYzuYE~>>I$%!DbS6eaBSS=nq4KMsA(#ZsoM(tYi#Uv{$En$lwbtuOuK- zL^Po+STef~>(X_rH{xMub=iv}UBSbTGU~1pzDCsZ<0vt@||GG2x32ivc59doT z0}vQu1UR_j42SMvP%|sek+R6!W_}#{N69C3RUl5vE^m0GzGI0D!812tP2y!eJY)Kt7mR8Tckd01%~= zqM|y;(+WsL=$M!_7qbD8Wk5(ogY~qmQGrrunsnN{`MjR&&-Jvz*iE+=>(i~tITA2k z@6A)7<&S>wsSo_fUz*mF=f3&H>wD*RMw7+CLAP1{mH+YoJjbPIx_jrwXMX>;0eUEo zSa+M|h1*B4JVCV_(}mr0v&ne1Sd8js(+hiFvuk5tQlZ>S5kl%xLB~>ZN@+YE4?0fH z`9T+~f!YHyI1_p>L_#wh++oDz9I%78@^%?tN!3NuD#Td%dxgCfwAc|T21O%qTe{`B z<}qh=TM!HbLe5rrwa(iWJMbk75syZrT+(LKb{%=|ovZt{Gczg!yu!{@JBFcdbl6*} zYD$UFjhacZlu~kz>bj0G<{S|{JF4qCr8I0H0bl@ZGP6-8th$XYPQj1BEU^M=<-DYUr%*;}5j)vRqd zelwIs@mIM0f1q)0b-7fX%NRe0OTw&#hi1WQ&m;$+%VlFKsA$6l-w{q z(VSCCNI;&D$i^(%)(wa0#1Kl+xbFLQ(JBw|dJtu12gDIPQVys02*en zZ*P_mF=ZJl55yDzhF=xzLEok!A|Yo<0N}kBG1PxzgEXuyL^{Nk*pUaHbICa)oKfb8 zD1^YCtcaMJnb-pvTIB7X3`DLfR+Ul`m_;ZmUPPGL6;f0yHk9F}l!gv!7e)$XYGO=6 z0_eD|rsE;Qky0cY9ML6P1ONo20Vgy3)4Ut@xVdO9%ERaZ!3dRDQbuAl8?+^)AuGcK z_;$CgJ~;Q3lF&rutd#z2bnM+wnx&~PMM@EJhQ!3k>=86d0T4nSo|s~gL5qpkDTNw5 zqUz49twu34d9j%|*D`Zm*L|OvNRiI+X~P)S%oIfP+lv6Xpm6q)*?_B5Re<2#P(yt- zuSXd^O#?c6b{HZN8W@Rl>WHuELh9CnM6ajFEk2xT8 zjz!MEvnprBWvJRipOF|C35H7Nq1QQzGUC8r3Rp1)5)_m|8Duzxdk1u8VKj0zBjWH5 zfZ5^U;m*#^FdvFBvh$&?`o8aD1i~}nc4!GIio>)3iHXh7I|rT(Am)@b8?mcBl&lB{ zHuML0?{l%7b8u%xEN`D6hG&p~u@Awy6pN~!EKU#e;nBfyJsLGl)r85;_TE>%`iGm% z2FNa4+96eINVg*Z;6@&AIT7 zcRW(pkDA%R!NF`cTP{yeP7mMx^gF-)^{@8b>fuM;@xEt1rWTf)<$N|TDt%sic9R)i zyLRoy>#ww%zQ*mH9j^TLz2lpu6jdoAE;vJAAI8(kgva*}ZhZKMe)RKS`0c8mmc(s) zUv(2xM?{`$&(o5G!S(&J_9HKhqN|fd2%&aCL_!rxDKU1yG|X3s95NCzk~xH;wrSg@ z3Z7xJUNvqsoowB@^(uNA&nF_Ax?xR)K$&s9PUCu3h49eTOMTxTo-QzyE7u=d9UN}% 
zzxDXT*ImrN^{L-`=3UQRyS{gD^V^$9>>Q%R)R95(B$^Yhl zsF{iZp!v|KqUr!tFh+9h*db}HmQs`tz5gS#>DEiHJ%8!q&f;{re|&p;evxTZ*ITNJ zPByD!gPc{S(`nYw_id3hn@@tTRm&!|2H6u0v#TPyl#Jw-avCDl1sd>d`@PpR+B$8{ zA71)7#kx&CyDcSpVr_1|3_w2{U&HeN5K7a81X!&X_ zMUXQ0E|_Z%UR@uao~*?ZJ14pHn@!!+b?{2uNhUQ$z?z95s+3%oQA*Be8iF5>nrYpD zS&DHyX-e^J+oqfcjo8SXBFhgfcw1VHSncRuy* zx*nCTeBeVr9u+?R<39&6KJxJO|LWI&_3|U{D$jRyHUH~>?SHY^bYJ@N7xwpWf8hNe z+1mPouY= zqk#}%RRzKjV_f%LmXe_u+AtUlT$S9P+uj>B&FSf}6jLY%2lo#TkE*JgjVIITwD0@n zVwH17#Al!V#;seoCX>lxu_!7{(@ZAgzVGhezh6~VUDp8cV1pz%=K^+mvI3w-u08f+ zKlYQ4Jp9D|{ll%To&Vv#^*{Ybzx2!d`-gjb7tWu*`0jT<^YFuu{?>2(`Y-?L|KpqA z`p19mul+mcFI;-;vA>XeSu9U?c4nh0ymjlK`l~NpzxLD%FMh}LsMiv6 zT6ZltnoK5x#$8GYbz_7GBvOVmBr}s^_6&q#T2dTrUuXNO;0TA}KyuCv-3+2R^}QPa zc!VaJ<$*boAm@~&gbu_?&YwN*V>QgJ1lSyU*c5Fj&8r&As4;o+fc-Dot*QMh30W;`bal7r}K9=9oJQwz=e?r@avo~Nz2SyUsJ<*l8aogDz{3=C7z zxK3&t2>`XkN!n7k?hLC6L80`>NL7QW&o5&fdRvKe2@vrBs6z1p?3TGXevi0f*! zn|WS5bu7gSXi^nVjO-9m1=tao!4OO~0JLG7nX?MX*>-Sn;S2@pSQr5iRf?K2v!Y2Z z6g;S*8i1Mtia{=x%Z*Ea>HPTjx1anI|M0EN?SJ^I^GjEL_2$uc4(EAyzB7W6#ZH%_ z@obtVl9RdPx6p5OWgJs3?JAyr^6}k&@1u`@_`M$iQ~*8qOP~3hFSoN=c;$K5RPv?c z;~%kCsB%Y#cmI?Be*d^$zjk~}`onqY z=j&B~HJ%>VmR#sh*73Pd|MKD0hu{D4pa0t*`RV=d{n3B=5B}cg_s_2`exNPOwoe^S z%Z16^&E&iO$Ae2dqjito*m*)8dJCDL_R3xsJ)%P*1VSQq1Oy67N(xFuHrSit2cy=1 z4A%oyf_{)Pgou-PREVP1m2W7ZdYEQqhy?Nuc-~2Bh`^KYx_Ak0? z*Y~zZmzhAsnAy&vF7yDT_Ah-6?onoj2Ohmkdzbdq;qILKt73O-GYm2&%z(-+=*_$f&T-;=9OwLSMUIPo5h8mH{QX9QBp zP&^Do_+cT|!U_m5IfjvlBPW@rd9aEc&}#n$Vc{g>+gU~>OE)%vIlS1K1SO{)b~@4EI>Gns$! 
zxj%5=8@Dr?zvJT{{}`K|9=-UruYK;}YY%PO8-M%n{=w5vfAHZ;k1Usq`-k`6ym?bK zO}ECH^3h_wUatueL?65U@TChEbJw!L(f)yq&iPO^EL$L{??iJI$jrLphe^iZx+MYv z9XCzi#UB9XoJA`Ji#Pxv^4rU!Dxg3m!Z8wP@>Lhy#sL#JBJPC=)3hlH3Xn^px!x{Pd2O5%1QN)ad zh+>Y82Zy7oYF*bvluH7Y5JCtcrPQ{q>RDB0v4Y$qLOq)LNdU{9`??-gH2}DrTZiq|@x9fvKwIP2pQq}uYG%f+ zm}Eo(K&YIuKuS3U4n@uePbCWx<+f*LRP)S4)OB6g!PeH+F!oR|MQ}qX4pCJb?@G}a zV}wjZWI%vt&~^Ys3?Ud{pGz#IO+`zdR3q=}9{QSS5<=V5hJaP{rpRs>RSdMH?|c7QnA}iit62t^A2XYZGM>hkGF8qy^qfk=-W;2hBBiLD z{S8C_HcW%0sKG#U0Y)^kf+8ZG5Sc{80A26|SQILv(PT0i&juf*?ROM8%Du{?8q4Q1U0pJWvLNq+MLG-NC z+t>inF@Tw(XgSUVYQ)87am<0So`(4}ta~J)w(WaK-Z?Z@C0CDQzZgn!d04qT7_ssh z>x>@nU*5mW;35ob6u}itIeiE>_v3wTb|EnM1WLny0}@cms#fX zSylVC?N+PRZ0mv}th{Tb0N`S=2qConraO*1+j9UAmE*;!?>9um-jQQ9MI_@h`9@3- z011+olKMgN27uue5>ew_DUxF{q$&h55)&|6_3d!-8fHr=$?%pA-vV^r4USbrAv4pM z;O$m%t%HU9;6ctn1=rjuZ<9iP;vpb;2jYmz^G}?T!vB6xQXE*aVj^M=C7C%ys7wjW7!?sk2W|=BkS8E=-eW<5v*Psffa(@Co+Z48V&t>Q z!VeGyb+BWcz4$C@h78P3nc(dSQwTx91f+n`gPa8e5+MKx3@bbmx?!ESGn+wht zQtChyhbBreGxN>^Qz=PQL5&R@`C-Zr0CioLQVQDOj#5+h-h0o%p|O@|588ChKmmq| z_~Bbn*EIt6eXpt|YqpXA2-x{>a&pqQT~$@JpD@69Yx0qgeDpo<`p|qn|D&(`!O`)( zD;LYJ|K>kDJiRfVjV7b(AN%N!@9dsiucR-l{d+fd&iUh`!(7^?X<`mtpML%?{bld{ z;r`)tI%(VW%{SlJ+L|7&^2y;#Pd@Q3a=r@F@pSv}`2MXsZ;9AfzVeyV)m!g-=3}4u z$X~vF`>t`ib^p2Np8HNQNhvo?O-+?b8K`Kh)hdni$?Ev@Wc{vp{?I2s`7>|4e(Tpi z^@}?@dv&wxpt2(C&2d!|0kGBV=8bl!$Osixrlv;0{HpEEAsd;RWA6qm7*UKpId7(Q zT|1^KP&6$iJ@(jR=XNfA_1Uj5VtI z3g<6hff&E=h2MMSwQnijk&wUs`mJYgdLrtPr=DGKQm_;NW!Ga(Y{3Z>5nV1<-g_pV z&9>@XCevBf%i+--&Csv(V7c$E?s_=y4&OMpyWE;%#g5GZNwT>JNq|p-n=oLoO6}!-#>Zs ziKnxiwr#tAxPQ7h_MWpw6bPZI!f3f#EKWCFyx(@KdfGIjiL(HTC5vc3sy$Wcj;k-;6VN*wCvAmC|EO#H@TzubaFnQr6*(qCjve)ii zPsTnbEGyMzb{U*YK-s|<0RE}*z&~+>o{yamAXe6W>HF4u9nDkisLv*XYRH)YAZN-E zRITmRQ7M$HC?T;^)shPbV^c6w15)*=P*T-)oTMl9wGW=Vwu6*I-Du;1{IKYVoMG~q z8IT-8DI!)t)iHad6l2u37ACX|s6s5<2r62VzIS4}ArJu4risn8426J`(fW9VZen_x zA@0p*ld(TpE!QUpqh=a>AVg-5&M&+EaCsa<;EZTcd8WvU+NYRO>P3V1>^u%DruPoO 
zinNPU@BQGa4k6?mODP6oq0$G8j+9c{cWvL1V2!mgUu?TKF}s{o(==w#cAbcD2*Vs1033(99-ljZVK&{iPDhR|Y>lq`^v^TH z?YoCR_h`t_z6J@)96Kl`&k_YZ#Q@bh2z-TQ}c{QS@Tg5#+c|A$}x^0jN19)0M^ z+{^j%7eD#Qi{E?bDgy>T-oJl;o%UCowSX0ozK;Wa5>(SB`pT)PnK24e8?uj<|W;34qdXAzN*DMN#3PizGRi&!E z#NetVrNlHIkK4ALOeUkzXvj8t@4=`F4U1XKj=^aNjvbh<$g|b88&ww|IK6j&LRgsB zv7?j2YG+o<=AH5)Ubvh#>s41xJ+ui^37N=EgBvMk(4q+nBQL4j+dT)OtMyWxOVFU= z$h*pS2VGUcxS^(TaUSaB=>o-drB$s=OK@(rS?6xEdw#FotY=N-i4iUKeI}}XFwq#J z1}K05b1Qa}whzyJoX%fqZ0R+``KX`HsHk*NI6FvVqE;J3uY?F$5HngP6(?+GRhDRW zcE~VQGe#gi^EBbx4jeOy$$QVt$+YAQ%tQ=i0;YfpNUAXGyhK41qW@C$r~5a%rH;RM ztD=KNswd}mYLhkgqV>q?CTT}en1W=SPo|sIz00oOZ*?a5Km3J{p2zm(yKnBe)zeQ6jR~Ndj9#a=5_3_;s`?vS^=WpJAcDW~M~u26sAuz4&nwr< z#iukvE}&TE+`+xqFOF%wIll3=@4-L*+}=O$RNrrE)=>d)={?-mGot?iJhnqH0Iv1duZ$Dx~hI7WV z-@_2j8_xWs1YoGhN()c(bU2O&)F0ib-|6@|+T_OeN9o$bm<6(EUxX4PpqN+cD+2-r zG*o=xZ+-xR{%`nnQ1L7WHb|J+1OSpCsRl(*jKe8eMO6iCm?D%UX2{I&fc-@e_6`Gv zC)hAfK4U;58mJ&**QZO%o8$Y>W7=oEZ*ye3hXqpWssN(LtJVJA&iEtm|B0Xb(VyL! zOy9b3^DAHd%#Z!ZPd)Yc`@i;$-+#|DAN{FMUi-qAfBSbo`~L)U+q)N^e&&gb7azKJ z?{GSsuU4DOtGBwYTPzmK(}kH$CZmWdwL3gHu2$^WKzc{0rrtYMZ5Xl?036rN&`JdW zlhGKF9FlX+2bKb2QA$$Fc-A#NsA)75J&0JDzsF)`ExyTE1&P_G!cN*|&#yKR98<^r z39lnW&a(>$>&%iNgb@bW*7b#Dd4+u`xmJV_{Gh}oDWakXgXU~!!39vUB7y{r{B~tJ z5i_%hbzN*mbtxrw34nsDR7xMarfCKWN{rEaKbcHo%w~3adb)e=B0^F=0mwxg1c<5Y z`jvr5v62al0Z~-}g~&`DKA0*E%HtsIt=Fw{&ILb^G*p!sz4I+@3N}ZVJ~pcA03Si% zzWS}tp2m6m*e7sTcW(S&*fyxXfOX2m&IJs``l2f8J?2tU*8`A<)S(*a#@>4Z9L!aQ zFpwe3U{zsaBp?(KP|aBY$bgugBPIjKB$7jL=-u&ZQ%ZKuDS#n2O_Mf+$e<>*XeIn0YO6lcVO1^}e0DgFKcr+qJV2G;9vRbb1OW*+oV@%FD@2cf;SxUB+no(6%6$*CiE~VtWsj6x; z8Z}L`>Q>g-08W8YNv;Wf$)O2qk%&kT2t(5h)2X&?iz5NeZ0<*`2pIs#Zphp+r%-z= z1rdYuED;SU51>v^QgY;s5k>$Qf_{g|x~L9~{!)sf7G@%~IgeEJ4o|-jt&LIqTa1>AxJ`KYL2u%$Dr3hz%AszsctQo-lEFvkD za*Ahx(`AYjNR(yJ1P}or7arR5){%&S66z^^ZrB$fq8cE8!KI{Xj2kYSta!@R0f0rj zMpQ-YQvw{sAHvKbI0J}=5KcENr4(US81RM$Rhcz$I+Z#B0PBE>8<0eU0M(EiI!-B< z!S`XTF$jjjFg0Ojk)rp@<jF1pV!o|$7EEp`mEe?4u&>u z!<1E#vkWQRL*_6Hp9m2_2gkHo>5ra59kcJxIv}R^e&{N?BAp%;&%%}d~2SRuO 
z6%~Th*{mRBE+ERlXlaQ05K(3pWklo1STaCEh{Hf~9$pPeDH$08dk7F!N!fx$F^-)K z#z};t!l!E^kuZ`9fB+{=fKh?jsG$&TVJ!?rl?DP95Ni6v$4$|)y}dmk^0Z`Q5Davw zi@67b3|wMFL;^6zIPZgXh&V)Gu40#SmIq8lBoZxsR7Ylbj#>*T6=4Rl)*2yeJZn)^ zB0Hddh|IXDN+}XE0c}iY%(`B#+P0mI&k$*e>C0dG!eqQ1`@DZ}XKQ=9v9a;1fBaAX zm;d!&c{{EBs;mOH~b2xZ0ymzK)5OF*nhY*&_C5RLUF-EI4*qWIU)q9uvNMxN;5Yx5m zZk?LCiY34Bop1l>5C80=$4BqoeD~y}E1Y-FJi5DeW;WUQu}}Z8?|k>$-+Se&hpQXk z_|EU%x^-(do_y%JkDs};Q#C6aR;5T~CzT1)rc&t0*?X(oru_ITNC%r3Uetgx=hPkk8?v^``lE>s=KoV!XC?r%a3BAm_x z!5D+)evvO)249;n<^H~P($b)kHO3e-U!HJ*EYctCv*hu33~DwvH=EJMVzIb(?RqH% ziO%ev-QGTX`_|o*9vgKAM@#*Q4X>BUxg<9xdmsd&F$)?948Rrj@-Am5cf* zfzPq!3;?DY1CTrAj{*GPc;G>-*+3n&Ba>s~)tct>szeNKMg3a*OstB4_Gpp-DT@+< z^W1m2h*1|1$e3UV9FYrgWA{b>r_6FwT3IR-t224gMoa?mSXpi#2Z7veHx86N+}12cP5j` z_g?yg|KQL6uVSB5N|VXVT2kPnRgS4s-tfNO-q@}}`^Y1g7wgrvYu7TD>0~;eACsZN zy+B$VuDzBXz?JxYjfAH6y_g{bd>8IX!^K}3j(5d6PQk9#xt|4OKuIt1Y7koXc$HqA4uxY$= zxNarJ=$u8woU?HD-j7COYyGNgSFIK;7GMp)%+6I>pl$mYV`xTI?Omw)wrA$uo!!)B zP&=ERnN80e&ks&cPWs&SecyGRwYC&aDGADCGKnR}7{_%J#v@rNi3uT$$IW`ZoX?l9 zyz*k#r9b|we@Y~M^dq18*0;a>*4wXKxpL)|S6_JRtw|}RuEVeY`oDAT{H5=H=lQRH z{p+v2_L_B+b39yJIM~1O-qpAN=5PGR0=cg1?ai})8Bt2-7kK&qbOMlXB~## z1`*MdM-~yKA%F<92s86Zp9W&AF%}I73p0zVC^&Du_ugCYEdYQqRt-^A%D|u(A}|h! zfE)pXtDJF;nSE7>Kq+N^f8QIsv9WO~=drde#d6--Ju?Npy}!JD(5|pX-&C_ReO%u> z?(eM+(Kkh~CfW4CxvM+GqZ#ZezQb3APbwm(* z)pftfsVCjqymWD^FXi4iRTg}p{^(X;Sjq`hp~y*(X5?m|Jz8Z+fEa2QpfXrbceftB z^xVnGqK~gCFSO*ywCZ`gqK(b*a()kZ*^BCAyzcs$QL?yd+gV*N*2_iPdICE?w&1Ly zx6W9!fJ5OGa~6>RG&i%m_11dS9Ke)~30YQBO}LNfmB3lg0ol8Cl7-MjSA%wDFfm(! 
zinS@+(|sf;D5?%Ru|dQqr@ay|=nw!wh1qI?oK=iPQbIC_#u`K@d6*(7AQBlOB?~ss zr%f|=W4R~Qh;eJ$7h9m4ETWm&szu6n9Z6yow9O=>sOyfxcnLX;!8_M{;%l#*Mg>>}E1Kt*&iKOJ!QzCiocv0#*WqCe#Sk zR99rQPtM`0>o}XrLU484wcb`xlR8(j`kg2Dz_yJ!g3e6OR@6TK)))SpxBsR#&)D}p zGd+6y>e+J(KKRkSmoL}t?=BwB=bt+Et6nGDW>ySDD9XhW8Wvo)KuEQNqJkibg3PQ) zmWVPis|@*~gGu?HbDI@O0cil24_NjgB2R<}y&)+Xj2(?@WFb;oUtdjjCKo;kcV9WV z@mgZDxxF)q?ya{s*X_xtKKaA@2S;aic2D<7|0CXy$j}D8PkcUw0oJSabwdr9EmME` zq4VFn{_6AT#+VwT<4)EkN76iXjrB$;tx8*$|KYFwFY`fA0Eq!~FdTsy$nAlGo`#n& zASi*Tk_!Hho!tBjbI@?}(CN+-V2JkxJzYNW{=J2Oh=E0htU(x(Z&e8uVX7;zTdm*O z-}+E=(27P>?{03Q-ZlT!7FR0D!^s<*Zu8A9OJM;PC?@Q-FbU zB%-Q)0RhOAjSle%(lea0gH9nfL)PV>W2ThSpieYILjdlR1&jf}`%VcJ86t~e^Y9zf z{ckM~?`}4;^E*$v$?oy~)p>t>PYYVJy7un0*?8=IAKyLm%xJPZI@{d)*6+Xh&hs~K zzhRNyyZ&8Nw=ubRG`}(LAq01K|LEaIpZeg3p53{9^R-uAy?yJt^=>>KFBXf%VnIZx z0s!8+*_dP7GIL73b4~=Zpa6{t>#A9_Yd{Bt8yg!^c)45}8%im_r=nUyLTeQYkP#mC zf`niJR7Dh^go!i)C7a_DOY1ZqMduRh7?P$6q#pH&s{RKsB$c<+61QdkwNwa$i=(t6z?V$(D%kR#gRdDs#@Sw)h>lmVLYCaHGS^e*q=f_ba1<>vKS;|oXQa7sUn#W ztZ{~bA!ndf=01xlA|lR+Eu2{U#mX7aB`0M?47Cq2k0%}gN|BVaDwo0>V~ky>91*BG zM6lK(h{(Vf8rr!$gnvAc-Hb+)QkaOf6pQGq(5av>I%~jej3$Vt>pBrpva3>*&;~z< z=1=EzszoKI0szbmsCHO$Pyo>pfFXc^)vYB;R<_nwvk@65phqY3K}%jrsq1>uY_HbG zg8@?$>L;V0su^9FmU9=>)fq#2LNCoO3OvTSmQk^83TmMkWC>} zwZ>Y)A;?!%K{$yL8HJ&7%q6QKvSc7iDWw1maH`W)o29b8lu~j57P5#ANq|MtV1Cm3 z4;DRbT?r@}LL$~-!aFc?l+dU^*3%>p@4Yb&(G0STK@tl9L?k*$GZ6q%N;$JP))>Rg zUDr)#qX&*mpn>8*WDO2u0T3i(hHf=55h+^cQc|`uG`P_l1BxO!a}klj^usDvP(VSr ztYZkFX&M5%UawP1&N(y#7Ksd6L^MbuMy137HFn=BA-Mnmh_E19LM_Ten;MF+A~<`B z{TcGrht=L7sUQrYwJNAW7M*nj8v8WNMZC8RT1qLxswyRmzyRd>K^SIV;^5RbKtOQH zKT}=D!>NoB8vrIu&KLpdsEu@+F!qM~vxDq2)Q0C70)2xP1pr-lHHP~kj;Di_58u}#(ksT1eOMJxuC1g){G zz(}URgEKPBw7Ra#IXi2I>2>C!2m`CFloFgHB7@EtBM73BxMbnjb%;19cWW{OFgfRZ z)Gs`N8-{b`diCu)x2|^Ut?6WTuy<=dpI^Ch<;?b0<=sOUFLkXI;ZWQ8$Q$;r>BNWip<`jO$hE%Zan& zzxVh5*7siegVk!4%gTlL{^y=y`T0NoEC0?v{6~N9>)-m)$@!*WWm2lZ1p)#))bh%Jgq92-plTZ<4S6RB2GAITKwzop;EJ{bT&qY* zr_fL#Q(rv)&~kNn$JE`v 
zdE?r(cM9jNoo!(=8f|N64-fVLz&5pF-O=_z8*JFQ*UeXM){QiV6DA+*2(1-lmzGdc z>ZEs}si;uu+txr>Pi8(xTHe)>?Nb}Nbtq`+X)Xeai7h~o9<>1a;uHWa{P)6zhGYHW z%cc$?I4W~K+UMBYX1p`o=t_b^*P2<;FmELB=_rOyWZG7pHeca%uL1@f|MdN z*Gv+yaHLplRxN9Z2X=li^)vO8oRhZ4)%+sL>1u1oY+APSBa${+ zm`$ers+FR3sFo{;+*@l!vj_xdhqVb2(RvXX1*fVpr_}Y?TQp9BFQtsY=A2Q%8GApR z%B)jL%5`0@`%aS#UO(2st8z*=9#(7P?4W5fIQ(oLgrCD-7<_qyQMQQWu8Uo(oWpeT z$N%iVb@zIE_QK;gZ@%g3dYKO|U3&EF_QUU8edAYt@mJpe%qK>Z$=!o@U;X|!-+b%k zil{}8sN;!Qtrp%oL`P`rsv!%AxLUOq$)KGd&ELKHx^g+c{SbS!zMgID+}%64{K%DO zpZmbot5+X?=z;>I+<*Jq-)idFs?F9^XLdFLZQJE$WEaarKy|^Q<g?L%iCK6`G&8gk+nUwm=@-t9AIH;gf(dgn(z@vG|L8(;tZ^|Bu~ zPD^Z8b4R!_8J|19wOlT{zO{w`iVrXuRfmVi>zK&kbUGbxoEr*@zV8={g;9o*AtxeJ z6R8c6A&H(jb7r+XAqhgfZkNmDvM6+2hvY?gI-Q-{z3}ea@7=zA*O_qX;^o8Rd(OFs z9=`0HyLt2Gox8Voc6Jtv#eBW!V;mkoeNJtkl-W5KV|RGCkHmnfFKn&*>es$}=gzGU zJagmhxzSJl)Q|jyzwqaO>_>m*r+)GmZr{55;!EE>`TDJm>F$S~`;iZSA}=3cuxo-fb@J70R1w&w^~Ln>Y8(9A4{isQ_#VpmgmXmhe% znfZKivP#v~nGaDh)bGbuNtVk%v9E!6d>BCf13;IcDEhpU`G8>)^r@dOTADp}dH3;;{LGD8H}BlLix}-}8&@aB&OKQxF0GraV8;_6k}mG| zdwbtL;#Qj-GG2L|w!ga0LCR2!shAbWjkmUdqlie%g+NLn zI^~}Xb!1)FA?0YwQ8hi>{ZOiBzF%wV#htks)+0nvDhiQH)pO7+R!r{-nnDO5)>wA7 zI7+4npH|_+2>p~}VHlJ_Re^LMcPUC?PT6ycB_kQ~fs948Lj%e(0zd{WB1&A0Apl0O z$j9Me#0xONjmAe3VdTM#s?^%lW0(~aQ0AU{UyhLXsciM3k+OMiak7u?pZf=2{8Jb5 zzyGiO`hWhNcmDkIH%Hq$QMy2+{#;|az_jx2y$c_y=P!>;b@Onsb75`!_5SUv2g}3t z{OH!zx8HjHi_LiB!=L=Ia}Pgt{_^7&CeNJA@078PNTn5JvpL)9&dje|ywEZoE#`fD zeEZhD)oOL~>O1%L_qR8;9=<$&-0rj%Z)M8qmF#kOr5OEuDdULUOOXJ5Gb zwOhTES?`NaC)`CS>}?=ppjQ$z(P?r2^O_HcFn|bH7^ofy3e18&4t!1pqJaghC;)?$ z?_c7>2n@L5q3NI^078Um*oNb&N)(29D!|xyL1KigP!w2M6a}C_0Eo4;RJLoGFBI-o z??>Gn^HDTVhQVG`QcyxM*nopmQi&Q-hv0=AK>nxS-h-{R>K~eehmABd7nP(=0g{R* z!$@STkP%}Tz&K|BnHd4CF#uEw-@+0 zcFsM7My}WVaH-Cmec$K5^0{CB*+2GkKm5n0XCIl)Mqm5d@Ba8BKl$jx@Be@NU;f5> z*RKE0@BB_5C(k_nn6vVgFaM7ERYB{#{jJY@<||+Ra%FLIV{-1y&h=IM#`X7_Q28nx z?jIQ5oQ)@t<^0a{=%@g|dM!x4FS1U6#+(V(1ixIbs}Q;vi*g+*9+)XaNQ25!1Q}J4 z5bUt$097CoQc@*V?#tR^4Pn+-q^gd&iG9%Q<37)qzLk9LY(G8=#fhm(%+p01MiW&V 
z+GD(XZB#TBo2;vB4?sgN`2Oh>A`Br6Gl`u0Qi`Gp!Fccc(&4~CPCV!~RJCneV=(9J zePykUF{(P}tg{|T+qM-EmD2`R*LBRQz_|#QC=g7e(`oGz$P7<7sv&jkXp~{*rZ2Ql50H_~ft4^)J-- zzA`IEUYQK34<<-aH*$XHGUuF?OI6w|^I*^!AU1$v2}AHzRn6z~;cXC+xu2WL%zcUjVTMy2k478QnKRV4ZI`n{YP$sp z8fOJ@k=u;gltID|aNkikM1}QTG;qJ0X$B_}Bcf3RV@oNjRE5gfV6C4{r)Yqg0qFSn zUW#R4YfPt8LydQ(@B8Vf+1=fB&fU2A_VLNx@#c8AJcfDF=?jr*yUto3PW63z1Ob4v zX4ot+GXV_NoYiU>yj2<|Qj$_)6;vRyo?CLZU{Qi^YA#Fw;Jx46+$bt3<*r{>O%TwwZ$k)4a*PRphP;wcK9Vjr}#k~HAsmKoMx6Vvq&Dk!3VAF zq1O*eTuO|&X_}_-Lzub6(-@3_=xypL_cs~CerV2`!d z7?UI9%&OvSaLzddB5($U{BRCML{HWB3IJYrQdzaMAgCMDR5m6fHft%#drw?^RcW73 zqX7V7)(6jP98}CAfJiKeK!o?t%;}<&MRH~q7}P%xevk~I!jKv;R4IxB@P~)ll0$Xg zswFVQoLLJ1C=n^400<75%Va@`h?Iy33^GBnz)HD@0YVZ2F(RlSBnZ82RTa@#q=81| z#1I)@nc-$Mg_so!88t-K_KB<#83s0rBFKPQHF*2r67{uTEEa=FJp`L_7GYyd?X2E+ zvq>q#TSz$%h^NGbRa|h!7;7vklu}g000OgfPK)MThWq8dvlIKihaqPHF~*3ZAq4Mz z<*Va^LlUY(NPQI0t*xym-}ls;ue}`8s<3uf*TIj;$Z~md>-NdN{wx3HH=qB#E*{M$ zJ3e_&x%!>Y|Lxb`{)P?o$mI_jvY-0+r!MZExqbcW zt!wX1r{n#DJ+b86Y_20R1?ak%MD&pR6e&^EFIkw@W zP(PMHlvYsDarH>pa=saNzB-D_Dji=QpFiBcI)%j|>>N2)r^5|%H1ctKe5N}4*zU$O z)S;@Xci(&a=;-L?jaw-tvPQ^Zv!#kx9^HNKcz(265m9Q6H~p$MX{0U~ijeADnu0s7 znlld4hf>5KSZfRoj|M~oLy)_`aEkx}7&5AgfUF8!Qc9(WsshGyXSbKj)$#JU9tURd z#_gOvyI3AvyZ+8(Hv8cp{$WO;djVU^>?CZ6=Q?y)oL z*q1vulC?CR+FY8$WedkL-Bzy%9IU9%5x@csRlbE&J=gu3Kmkz=(Rg#K2VMhITNh+< zJ}1_u>Qu3)_1-pP1>%$t*s7$q#ab%sOsF#$SuGKI;tuvt>cuKGwR`eP8dD=HpXt5bhJ6$d1&2biw-6FoJ<6C-qjnE98IO&2^ z%gk6!ifeLFQ#d%sTF4plzLY|&1}%c|5YS*vV1s9sdGG3*f9Bu(U%&C%_b={lzxl=sHm8TqU3mXmpbQz>i~p+cDA_5|D*r#|Ak$;di=^mJKLdTuj7qo+Q;)g&_u{c@GO94f5 zc_-Yx2X!^xn4Jv(CntAZ7~SiRld~GLU_KMZ${7F{u?Jq<+27cj8e`|HWf%u=$XZg) z);mU6wX64TzN?Pg#q!$i+{TmR`N?=RO)0(m?z_&p_dolA`D!_u%*IEHoZ}E&?!E8U zJt!IDx_)i|W0y%O=hDm?-{9+Se{W}dHebyyU%vcfKl;;u@YOH9_|mt3;>UmHr+(@u zUVioKU;p~oryH~1|HjRu#oe2CZcZi>a3n(NeAjgVQACbTjz@Kah>vyMvmbb_E9vOu z_?>I-3W6eRZf-zOrxo`Qa~8!Sj3}-i@`xD}3_%r~qEXcnJBJiwT=s1iUdC95s&XF0 zRH#b0?%KYK)2$5~{Jow27-ZN6DfLUcB~22yp-q2?}SW}ePwBCzVa 
zQ}uz`7;}_lNR6^JpsK5GrTyY~uA^zVdh~jIJnNR3O+VY(s8f6M;O6Aa_PMRi>WL3w za0ds68*A=-uZfIx;QNiqnRX>9FRfdY)hMRr=FYfswq4V-+L%r^(3*m?^Zo68rEi!Y zSss3S#!Izs0gb;gqbhxhPFLA!tLQ_4MF&HL8dT zjx3M?5aetl^SW=x)`rU7-)Nw!aPmZbZ4l9jn2ZEMfGWa;3&JdH<*t%Gqb#fx*gqVR zD{9lo8eP|^NReDOedUX}7#J#7G4W!vG-+A7<~MHEpa0u0zID8qHDS{M#GKZ5X^qL% z$p)9U@_6_8r~bW%Ui@okmTxbP@7B|uoKLpTKH`1&k)Qq5vu7`Wn(dvlo15Ep-5|p5 z&IRV2Q%t#YE_6|+)3ap2ELUe{n=qRl?BB()XT0{IkNxO-Z+z#~H}AT7{G%VZxIEnd z`WtT@+lSwS&2hZtaxDk3nKW%Pij!SS!S=>b-Kx2zNc3UUgXX>2pG*2GS`A42{zw+r{PJih){(2?5x{({u zb?akp>WaLHT&!BtL63-pB7!8rlB%*Ki>zpcOH!~x2$Tt&=s!z({Qrjo2Y3BYk`D-v z;ZM08SO<+o5s5KdB#rC3x#SmnhqnBQK ztNXyO|N8`xAbJTvqL2^}lvE57Ddvpm)j|Qaa009JZ zN^$aCUOOUdLa%PFCHLzI3Q?6tQ&R|Xc3BLk#GMl-0A>(wBdCgS0RRKqi!sJnG^#oX zuaJfAr^m3h}3pPwsu^yI)Q&3iLt3W>8PKPob2vy#=b1( zZOIyf2PHr@mduc7szSyv0~!LwljC_)IcD1zZu6Q<Q zZI0e?X60TG6&qD0?1Ncl*ml8{WP*i2lunM7Mp~^lNS7#!`mx2<$L5nWMpCn0*_6-R z{@cz=6jBA>M^Wy*XNug;^?sOqmON;;5iOba5qUS>`Wx+dR@`~CSK{dWiFI4;wUcun z?KXGwoiA@LZ;*|B5m%hELPV+{F?#|VDikfH1W(*+UE{i6h?%eoOB|c6JY4L6LNH+Z z!C=ps$ciLuKqQl3UDt+eDtHoGM6pPf!NyurtEy^T>0|9YDKQIxC3N#;Z(S1;JLksZ znE{-i9C`1jmDOO@eIy{~?V!{eHzQ+A+pQHzK?l@gVQ^&0AWKFOBx2@1mq`e>4iAPf zi2=v1mN*xVg@_!H2eOWG&LRa7MGCVbs`viP#2RB3^A%ZBkHV_$3ahV!H%Ll7OEnb0 zsN->1_eX0bN0!iob;z29i|}H-=nIz|bBTRNi<3LY$47nYrju%`sR5;^svr<%uL&|R z8zzyQH>sz_92$2h`M#sGwyRiIPw<$pd75tKxD`U4}_vxw3x;R>rj|;!p4?ZvZ(WfmL!)VOvZXcv1C<2V?h-%i-$->S`bVwEW)93&JeR&XLIJ% zi-?qx2Yl0D9&^qGOMrlYsg0`QoU42zEEE`N@crx1I~$M3)o2tYIduKHlO#kWqKJi1 z+n9-{s)8{l$E2#>IT1zSimY*A2r>>KxXQ&CnT4sEl{bK;q(YoZ?2%pYeH)!~1jIR=e)TS8$d(KMK!P&R+5wCy#+gcJsFf5%Efm$L zSydf50I+0TWt@^^;gY#v@0k<~8&Xst05K#;5IGrR>;T`(5^Mk!MBH1?2L}K$0K@&Z zJ`{rm6d;idV<#aZXWWn}m`Y9q3xV!ub%;1}9mE_^p5Rch$)D z&g7+6fBR&0r|R9S36rp#oy=RRm7gKv5)=YkNnyfzw{d~A0KX9m_7UC$KLnor#d+N{qKG8`rhhJ^OFfS%7t^O z+K3+2fVaV-aXAA(M)Z`=jYeEGchi~NJYD4_$8G}`k9XItb|(aOebT)5W_97pe!q8o ze6y^V9p-4u%0GN$Kjb8LcW&%dXhDmH(Fhg#ozd#inUyk8fj|nYZCA}A#bwF>ibTQH z*7yRcV<#+y_MDED)mI)sf^(GB3PXVfE=fI#6Vi;mf==pm4!8hL`k(vm+zZa`} 
z_F~shJAbZMEv_lm*o-Ews(qGx07hGKvcZQC`V>jPF_hR>=n`TQ&MIIj*s8`rfX11U5!4AVU=Ed4ft;|O zPWSis0U(4>TPGs(<&ub2T{|Re`G9CFp#d~G#}Gy_MgSO6jE2&@uIr<{t3USXPki&6 zpZm&JzjEo~qkra4|N5Of?|$pMUp;%~!dIUE-CM^;f8-~A<>dJIl{dau)e2MuL%UkX zSlV@&j%M5A$#S`F%(SUS<7VPqyvne%O7alT>|DXTI|J%uOo^$E@?zcYs-`w1P>+a#+#?G#*&wl^imvyj%jO)*R{x@&mdGGztJaKpL_B(IAUDx$LOpiQej&_a@BUY`0 z*?1b)8SMJ(?qmIGwKzJOj_dJs6r}3*4IUEF*4-NiZ_GBfR?E9Yw7DhVbocJv7~}TF_Qqs$@7|qrXLkYM%+Bs& zv3m5;$6kHyyCQ;MHn(;bi^bl-Vc)0GX!OE&Ul@(1?_PbUZC71v(ZILA^Zeeuqk9MM z9i1HSoIP{x`g=zwCzHvfZQHu8x#YpWJk)lhQSH6!`~FK``SR`?yS@sia7kyo*mqrT z0dvl!6j2>q-=|s$G$1*%CN+rYQHT)GT0+44py5(_?TG-qZH^7MwNNkarftQ?+(Ne*R}wWlJKQvu9r^#zJR zqtGLiR9s==qE5`2rUrAWn9F5QHJa04uwlAx$f2?fC{lFZ2cxNgvCdfQO;8oGni3HR zf^gzokcAQefFlAUU3=otRV=)Y7KlQ1#BpP?nQoNAvCld4YQ3_KM2d5#5s0c-N?EP$Ub}YV zeNR1e>(*Uo?B$0Z+nns|@9#f)Zu#BUUztwNax!-hj!)L>5<1YmZt`$sH6)ax<{n~{ zW~*b%Wi&>}w$G%}jMbr?uT7Mwt9mWfD5wp96{HTRL^|RXI_I4AgIe%DiI`KB-`INY zg|`2zfBS16Xg>R$ba6x@N=NE2_<9ILEu|pnfQ&+wL3#xDi3gx`A6NTNrw#oJc|1_@ z4;rEnf^$w)SIgVW?&!isJ!>3Pw0kH>qrCbKIJ zgACc)>V$_NHUNZ!d;4E_>4h_kn>X(*9)0%xyN~`*T*#JP``OGGmr^p`DpD4%MosL> za{q7^%Vd7hif>T`ZW*$Izqw?Kxq4QlH{=we{Mlqmm|ZBEE$=#xKfC<5Tl3) z0RY@L^bnB%1n*T<2l(f}nkOJ4BB6nWZH%A+0Vol27rM@K>y&HbCc&;%fDA;}#d}Xb zbLFew_$P-4^Ggq3{_Jo6)^c^!%Y_SPE`9xLe=r`s`q7UZeC=yrn{Cucg(V8gXf(3c zuG%#T8=@k-T&}P8{bVw$L$fhnAD^sB#3fn9Y*TNXTP~N&N03`;ovZ#9Tg+ngf_tw%liyj^r z1PtE#2Aj>K#@u_$a1XFHU=b;%j<{_$eC!LNZfw=1ls>JQna~1|h#4};LI}qAR5Brr z$K!6bQjo%82~afU6`}N)1~DuW7EYxIm1v9+5N*YZ*HRa)GB)TenK8qvwnda#iZU~y zcU7>BDz-Sei%*!%kNU-}GjD&}?mp6vwxz#!A{S>hkID&kYYM~_HzjEtiA#v&y(c2f zI2ckDyZQkqZ=7XXMFX9qbvm%^Mu z)qr{M>i4}hsgwfBq+p#JkH_of!dg2XJNQHYOzI9aZQoH%ccIOl@%h}icnGcT4Wb*OTQCnw!%d3@o*g~e(q0x`zHwlYYS z2Mh@y`eNK5BUVB{MHEAbn1&cn5oB>lr*X1YC1ye-AYd?pqJXL>(g3261uaF3FrzXE z8k%=4S|UFLT4^C;y*1X4W-gZ8ulpQJ@V532hy++h7Kl|@ zLe^XFy{e`dfq8JYl>*il5JO-nMa;3J)WJQcB5jSP%wh7$SP_jdiDJ9aR(sBv1lXQBhG9R0Ti+ z8UmUJ5+EQTp#>Dx;+!J`fQgHuo~~&>DoNQ=7QL!ghRR(2LRX|b{H41HE=1iv(=m+bho?hty 
zKVXpr02_SHIi-a6A1DKXaou%86JQuB0G#^ahFk?grTd3*M1;(&g@?zG8R&V&1QYs{ zo2FTJ3uCeEj)*YkzNBL8I9Q*PRDQHxMAm*>&n{ftxpR2q%9Sg3?%rH3mwn$&r_)h0 z`|Zzt_Th_W9UY+6qvO5Fw7GYPckbMJ>7`e$-+JX+FZ?coefF6jTDCVYT)1%Y z!o$s2-hTVt+qZAe7sqGLoO$xeC%^jj&(CJFo!yPaV(|yx`0P*o=r8~L&;IIff9^N_ zi@*F|f9m6(die66F&3*D|KMw%yL$cg;LTHyJ$HWh@wcwMc<1gdBB)%b>w0HrXMg{& zZ@ce)@AHp5`s{3D%yKp-Sa%0s`pR$q_GkZF3S9ZgbhJ58?q(a!aXrGtj&J^sKC-cMNh z@4|)tkasrUBZD=AC2|ZBGvx(Z(^r*}VdkNTG1ho%5E02!hDcB>dE0PlPfq4^xY*d( z=tgz11`E}MvO39-y%_~vp)T?&Ylw!$X-=GzPpXW{)`JoeTJ%t|NbV=paCa!H{V;8C zwrPw7^ggFHXIruf#Puj?RZ^onTMZz|Km?TJ)NF?<;|CV z?~9*%<@GN0ZS+a@pS8s?IlidwbV!-$6vV1I=WdQu_MW zzdjTqeeO^8P5`tB-?(w({JD$9xR+je@dNMw;NITeyI0>U5~tIRK9|+9<+6J2nWxvQ zXf3_+%FEM@YCLJ)di(niJ@oM5;o)Mj`0jVVtEAi8+sxcyw^%H;H#Ucj$MAR_+~_&y zNj*`7!{ehiw#;mU_ult0!u^Zn^oghd2&$TLG>8Uugp7!+AX$oLB%_dGj472EV==`N zT0`tK3s+U;jb&y*a8&>RU>J|br4#`LGR9ehdUSMnt7Wnlz@%Io+W?4a=N)(J)p%0R z!fbcr%%s`an$Ae_H{aTCLM0-$@_nCFAxmk3H$BBvoUsN-Rf({vf)Wz;c|2|ZTc9iH4!H!Bqi6`0fU zjA+qPKsXmfG*zf1I}!!Upq0)$oPNLgU>u#HFc!rO5HXOe04S%n$Y}wDgD(yMkN^}P zN3JGLTanGLdCu zmebjTl7fNSIYi8yK-v4CiYcziFmr}1RuS96DLG$t8^(h|>Jt{71XXX<)R|J90U!%9 z)I?UvsvzjyoE^>>=4KL7OY-S+s_ z?YH+2Upsg9;g5abiSO?pZL~SZ%zkvEPbWF3V67O{g)^69pJlOUsp%Oz`$@$Qnmz;lV74_39h1HSd1^-0a+|S6?{y)I$OWfvl=QE!u&tR(bjAi{JXk zfA4R-ox*7U?RIqeiT&$8IhmcG?Y@5F>iOA+ce(M_iwG0!9UjG8#-mB9{PA)1^8U5g z-hJbBz3E@QyJgb{&qXQ^R@5a zx_#r^xwD^n;f3j}v1cy4dgmmqQWUYZB zOk{~larS85*3J@iO{I#Xq!gvaE|14!JPp^yfk>dL-Fi8E5MxkSMTWFhx9%V^6#xM) zDt*q%-YF(f@A_DHAy-vp zO%;1UU_|NF0Dvg4LQw$#qaZ55>$s{6$+DkwIZ<0%QzO?^6*EUwD+;BEv(6aDtXD7o z4Aqx%chKa!BcOEa23?Z;=yTSeyR&sMdfOb|Yz|(l=g)h+*KJ&w6lCU5`-Vt)sOZ3;9gRk28>iD| z)y-2(MZj68jBBa}0EX6{{2}UJtuaFs>fkv87|LMQERIN>dGJ%LGF-hPr?(fJ7AmA5 z#-cM+O6gYX(Wo}YWG+Os>bnP{2jnhJ@L&AwXhK6lKnd3wdiN4FMY^B9aE2bHi5*0NiI+VYiAh zsHhRgTaA?=IwHhA=PYOtqB1dhgEd%(n$aU!v{JalWC1HbQs)3o*L46`RaHq@1hiX^ zCga9>R`u2u_kHYR-h{oTo&@NV0D6C!b)_NkarVu19 zLk6{~N?}zL0cNz`D6x{M5+3|Y4PB|;;5cUeFrEtjz ziYg#H++T)=LljYmj_oj48?@jkKm*ZE!Fw;jS{MOGqp>k2=bTf9!6krbj2+C)NI*!# 
zm##o$2$1yt+Xl!aJiL(ip2!ZDbj+uqBt-3FmvaOV9}EFxVHE~6&W?m5peiaW02(qT zm$F!#lv0dEvX%@0sY3%q!MOoS1X`KFuEj5fIHHwGS3; zE~R7{JdPrwR+DCfNQ5k^ppv1Bxpw}6ZfscSPQ^xuq%e$E5m8iSIO)T@*9=bb!O5t~ ztRNu5s)|m?fDDNS8p?oUQstpJDPRB;dtwm*9#q|ghztX(>;CQq5q@ASF^oA8hR~1G zE+?xD=J(<6(CH~c0d&?_bQaZGn^Gz&MO2jWboo6M5tdx6qHUZHxPP#yn@uTfX=bf` z=-lH!@~J=a+8f__>#Y~Nv?_Vsb?v#cTTQhw9dEwy@;5g&C*Ip!3=sAyHY2(6_+#g` z&j0c+{qga5_FJF*``>=y_d|VQRG-;Bdp?!pZ+z?Xi}lT>8NcwoFQmLcsPFCH`p2KS z^ZfJA|A{~GCluwYU;9Sar(gKRKl&H{;$QskYu~Kuyl4+stM>YPH`nX^U;6o9`nf;y zC-3ZE|LWJixNL9l9lT*|dHU&R9=ZJN-rb{Msz6oZ9t?Kv`uGT!i(>(uj&^D{y>oNF zORG_{IT>wSIP>t<#?EW+yntYwv(G&9Y}>AH-@X}pDH)8>lCl;akH@KR2Yo<{F}Hkn zXM5ya&hwP>Y)YT_p&$M9C;!Ozuf6=4-~O9hQ-10Bubf=Hj?M0?%O~UGkN4FC!5Rvr zY7i@D9Xe&`wGwYckIICn`=wQ)CR#$~S ztt;nZ7fZ>tt4m2aM?lFjsqtL0a0wx7PG>Rt!6v?xLub^m&$nK@@Jr5#!%gFXcYCr2sskXNgA zwtH@Kw)N9L_RChrRWQHwE5GpU`#)Yd5ZUj)`TX(v+IS-X`nuV8s=C}X%|HWT9j@_B``ll{D z_J973uPoDUqMqn_0)5ZKlaSKZ(SW#qo%HR&QRd+$d!kme&&f6UU+`@ z%p)KC;0IOlTVMamZ+-0_K6K^jYi}H0JG#9-INq3!9zFZWNjX|AmuOAq0)$A|_xw1u~ zmQt3>rHCNv)f-n4%<$ThLoTvTg1>*b<^ygKi9tZ4vU~Q zt9Cu?Mt*Rje!6pF1%TR)y3yqX{*plc-UOVP<1Yux`-r4EYu@Gdbg=PwIj!p>o&HeR{t8d`2J5@TgT!TdlmE~>*2V1Rq#pc1+7P8dR(5Kw{P)OvzLryoSI?@gAP*{Ue0 zAYft2l1>w|5E0SYL31hysHCccya$b;L3}#UCoK`0VYe=&fQm&@BZACWiUUNGnKLMO zzjLQuN=lZ%TT(*jwHlp(I!#Ch4Ur1wHF7k$_a%0=db)e%Tt9#RBTxU#`+xq8+wZ;p z{cpVS_UoVc#Lt~OfA-d`TbAhT?lzL#+rP6~A6He4q=2$quAOtteBc<}az{X^2wLX02ktaT4CgYjc{TnCq<*MNG-+OsnQ@1>M z`|A4O&8>Hz_|#9_{GEew{YLn-^_wyiE&gbFVaCGieZOf}0AlOP2T4n^QfVS2Bnn2+ zAdqG)h*)8L(w}U{dt2kpPPpWLo_cRYaxT^wATs2@C?OHCQPe_!rv{PJy%7H4zyAxb zbbs(xhd&P65&+0t8)rk~ozJCPGW9GJJ1p7gKMG!#CimHSF$cZ%hb{=RQLla=S+1O2s zmp1Du%&y)!Sj*8|Vh7icw%v?(8V=P)q&)PK38nIQ*+?V7kTe~p8J&H)`+XszWa+ZNr6%7SziVSTO zwP9j?t(L%mu_h22HY5~G0Mm0;gI1A>MC1Be`jy)SBcz>KAHjNQ)!=9YMx)jh@>?1W z$-2ZnDszocCtCP(1}dToh($rL*s6$^##AZlYQZApjhoE8HyM4&+#4&xuv{+tE{Q-@ zRh17Q>b-SYs;aga9F@x3!eTlawgYIK1^@t6%{hyaD1t=|+U*TrH8o@{>&&3mpol?6 z5soohETCrd@VAd1{j<3Bfm9ZnZc@F8ZlhXp<_vdV3bWdT-Q&r}7CgQ}uUtCQtyi}1 
z`<%0?Ac7?_mJqS8tf#)=C{Z&(RLIp?8C{W+TeHRMJIn7@{b9k;!qh@(YQ|aCB2rY$ z;Knh;ps{3v{bphm1u0KwwGbjp?(-12ImllKVCep+>Tp+{jt5RNb`=PXo{qR4jM*)-AfFMGJ~0 zOYpUn6uZ8z{NUq^F*_SfAOP^emLjWFl9EwW)YkRJ%^SB%mco*ADZ+!{7?i9xL?NuuhN&6^Yy4AQ}ZjSQ#?bnt;ZbfE06zF$#k79uX54W;RHyLIMuRID@LpoC~ix zRn9inTQE6szlkEuzNMvZUp_$E23k1A$-?_T!NWEU+GYB&C!R zJQ_2^12kj-U^bieUFy1ynHP&i(^Ttj&1KnjSy{Zdq)xJOlo<1%wO;s0si*>tGwIeLZxs`#v!X zxIau(9`>gqg@_6a<~$e%io^dZr4Ul#l1d(e5P*=#h-%;U!&hRA8NBAhxh<#yIE;2I zl0i}hX5m3dU{uM_{q$@iD#)Tj#(}Y#%V0?qkzC}|gJLZi!_2CJgad3u0Z>j?v(rFz zqo?m2kyV&kvH*Y^+yekg!&vI{d(3K774$SK8R&FTHV#3MRRNSq28D!@N|7IIKRyU8 zlPuO+U;DuymU9*nLc_vJIsmoE7=vUj0YL0qV+if(;h+1X1di{;P+lT9ZUnHI1y--&hCx-{I&waP|jc@%|;hlufFunbr+o-zxmdSVO-w3_DWN2-r2kQSO3<3G@cmioq|Q8RJaQA;%i^IdhK23 z++Y09|L-DF*Uc-hfAy>1cHIhp4qlgZ@L#Ye7Oc`}%$m)@eC zO*XGzzkcuT!Lm)GF)ddI@4WNoM?UGDRYfQsAr#<5$N^TqE}VPt~0 znwi}~&*f3mtrvRd&18>Pd=L5Hq3Ybz<9aT2&XGgqJTY@BC6#=1ay(D>RylV{m37`Y zGDLz|+1rW`gR3ML0|ubnx^uJ7DfpTI30Z+rg+V2kMIV#TTN}H(yBBXByoX*vCB`&Q z3r`z9+lMb)pfEa`9~Wz7`|?}+?aBCKIG%Nk&UzY;Qe~lFnRl)V0)##UBx8(s)}T6L z@zmK{gynt-4n*|N!UGc^sDKb6C@Lv{q9Pk2QWz#2APlO2f9QW7vIqWfFZ*D^HlnGp zLdi@flQR@pdQ?Q!!PX3Dm|28VVnkF-2yDa#a@P9ne5xw)w3_%)0$6suimAyq7zc?K zCnFqJlW}$pne0-s>n_IVf;d)z5Jq*b#2{#JDv(08S{!(<#3+5HUMfol8Gs`-9$H-= zmHyCpKS6J#Pu-#|D>ZIptD0)TP>7@%_qCzD>p-erGm-jW>T!TcL3^;i6$C1dXbf57 zLQu3yDL5cQ-p`y3i^WQcG);ppOeRe!CFaD;(}k_ zC!{x0)Znk~Yi+ZC!uw&7=Lr$3O5RA9(K581S$D z?Z5o_Klq!I@%9r>Jp1UQPfRD<rsCO0(86?w;je_)PJRTckK$Mv^rgO8c^Ji$?t@@l4 z6|f+pAxB_VEvdC#TTqCAopbB;(z(-5r}ufFlgVU|cM;Li{3Mhx5V{6omvb)X z*meD4dDt|KcMQO$j~1JCjQz%$b+eIA4yJ89p1(b6W_>qTGRvY`O|&~~OayGOR1_>K zH=&t&-_F*Nkl>ws;1WEy20tBq@uOgX_CtJ#e<*taLlo;Yj5n( z;9%ClT^mq)^od56TX5CYv!8tX($7t18?xF@=+N3)hGK-V@p-<-nn;pczF5p<%b_Wzq9!W8D!4R@OQuWt*?Lc_k`PAx~Crf z#Nwb#Ca9V(Tzd9oF~5E5rP?;zR~~-%*8aJj^FQ?Qk2;v&x&E4K;OgDw6Zbx|gNMs< zAy-|g&W#W=wp-Z*Q?IMZVR4n)AY}Deij*~bKtkkPR8>F>8XzRV)5S;Sw3tSNKN!G^ryZq#-fh_b?xFv5Qt^W~ zS!ph4VvNK&j5kh{`q*ul+>NT+I=)yxyj#uAw%7Of?|km?khdneo6E~bwIn2MHn+QF 
zyJ@}ei?;7M`OqhSVbOMXZ{0rs$R$_TiewB3Q88xD-QA--A2x3s zZ=cwui{hFW;>eOE5|ZN@NX^WmOUV0rx6rtmEP{pZ(1<=Qf{z;akpym_>y`s1+fl zR5F*8706l}92%6-MyQ$@(a0E+(p--YJW|R%0OZ2fS=G7-Fc(%N0A^-^L8yUvs=}Y6lqa(WDNmRcNi``oKvsda~6vu^dX~0$%Z%5Dp`&(En`d? z#@(o@cQ&1^Qh_2GoXHAOk_qCRi?JnB3Fa6@eO0C6gPV+Z_K)_0t4tO8SW+wlSTe?r z$c!3aE0R)9$r))zBW4(lsFX4UP-j+VO(~&jSCGhfYeb4h|LXSTgdonD zp*R76L458CH_o%KPY4h4$^)PA_X2;Tf9;k!*ApC+5S-m~2|03cER2ZeR0{;~1pt(x@5{l^7%4Z&IGJGXMoam^7BR*MOUxMnz>srx(7!rs0Gve& zqW9BEfrx}l1#o6W`m z41lXe63u{s7?DI$j)~BT<`fI!Ho!Rq@0`mej&Uj~$OLL7Wk&K`IW3ouHoF)nV~%yn zs$C2@>c`AU+O}tJ3TC1)#yGA66Qt7m28ojtiRzGg7djUrTlF!7#e zwws1YRnO1Q&(6==zVp5T7*&gEskksOZ<2C^z6l@zP$}G7BpMhr^Em)$5mYNFBQdih z#+h6;$2j=@hFU|I7SdLaRNK!T5_>*Apm6cj=0ieauKpk zSt|$;xWeAEZB_r;G=96Fh+!L)&CWSB1cKU|HWM=>0#!vM6(RB=IA#_t01{d<;)=Yj z4cu*8xmYf->a@J~BIcZ9v~4^xdG=0BbIvhu!w*W%wTF8NpFzahnW-!4a#mLZ!wEoC zYXkk#bp!xDw9F_{rZG_jb^&sUWhw=mt~IlgBM}y{EV0HTrX!Ipcr z-LI+wA~OSR$u>q<(SlXAHb^$)Y~q>Odn_WVB38fa;5dcdmSp}*X-wce% zOiU{_N>r)3Cx`%Qd~q?U4mw0eVy?a=GZ<4sU~&TJ@iKNEouUGeh~yeL#Gcth6|!A) z;V(x8TIAB*TerQ2fK13{2w6(K0^wpx`t}|Q)vS&RYKzO@!uMbRN>)85goMl$U|y_L zJ`xfIMl;ik!GOrt<=D1uJ@a)oP_iH*kcJSdL>kSsNJ?c=6_GA@RTTjdDHpdm$Cy$g zr25K?jLcGGJ#2g+AeyGBZ38(=Q|#K=zxU&R-pu~?fA?SBx&Qk0E8DRElGO$n=kKnyGMsNpSb??r#}5_$EWN0e0MfGl7h}Z-VE!u+0_zqPL1E` zx@Jlzhet~=JUu<#+1pJi-?)A4i(mbG)3)=4cQhM^jSJ!EXm6)K{BQsE-$>orK~NBOup%7&NsxEQcAgq3Nuq2bretx{j8rYQwS@gF_z%4 z%zH;+mQ)C)^U+x=H=kx+#D*e7!qU*#O;g~=%){QkK%=fHB}q=63Y!_H!+D?5RMHf5 zYD7>mG7W?hmbo`GBO91N0EC?SMc?g@kdsJKHRsiwuXBR=BTj1qY1jjj&jYEIf;3xR zo9%V^?lR!va&b)Q^lal>j?>D!&@`UVH9q*h?b_M-`TFea91$1G9b}$jS`8a=Y-XL2 zp&432H4`d?W*vt4ayi8zaPVCx=9t+B%z4T&j$`cG)=cUkp>BM4m$N!9uARXu0hp#H zrIK^5{WaCvUo+?2o|)Th)?-12kN^D7{++vjx1I0&$dCTe?OX49>eiD#`LiE?c=pEL zwOe}!&p@GsNvD&qzWlZIFcm;z==&Yt_roOW&%!*-cIo{5jLDnL{>T5pzyFv1;(zk9 z|KjKWtH1fb+yDB@{n5L=_1+&xYT7)sk6zc=Q+@%DS9JK|A9}dDuR05}ukpzjfBADS zed3FsytaSke{$u2+RyM8f8{4%{pPQI$2;El$Ntctzj5ux(a}{Yn5Lu(^LY;d!G^O2s 
ze)ZbHY7@`S&SFkdiZya}@>l_nPtQaoc;9-z*=$lNhlhvQ%{H6Ous&~^=J4?F9^z1C9WQ0?Sjzg|$9d-angiOrL z1X~WkI^&L}?7I8m77^WuuLPYYYdW`edR7h$GOHF*(Lj#KKpXEHA4*ICac|Kt+MXOI zDXVcvmJPuiY|_v~Yn^lKjRl!=vXY%@({lERn_=|K?acYs#n`1RoexA%&=7rn#=F2O zBE_aO0UDq;GiJvw^s@zCShLAML{3glDi~40QcAUPBg7RDsR-UTb-QL}NeVkg_M0)L zREVhYbe$_80s($h%%SryFU5DhiPf#o{-W>^TzUBwtjt1?fz_M z13_kVa(32uZxDu36eUS<=86yj4jHnNqoJEGx`wMxP0dVgcRnMHIBceg7X6NJlZR7; z5VE0KRjJhR6|*OzQVLlXU{E2oSC2mO%Jq-Az-S76!%c7rTg`#Ev~59OJYjrWE@x(2 zRyE>9lqR5&k=dmZ761*b=29xE8fY<5p7&zv7y(Q?p$qKWQnKL|Jh9z10&G#gRAaR+ z_UJ@r)m-{^^$x0NLS~ML1W?f^V9O32T2{3z7I$}C=f`m)Q<=4WYfh%gg0nIzkpZ*! z6!y^Osl+7lgkE^_e6zXt#vg6ImLEPW50CFotJ_bEuf6f6_Z|R3C~v*_%{N~Ex^wRN z=btB(;H~$L`KsQ?^e8-)=VJM?d^MFTVI6r7M5cyNoc zu&k$s9$-i7c=f>-$NeW`J2#3G3QFl28$eKT-uckvDYF5h**0oo%Ozvi{!6(7h`%43 z+O=S~Eb}f$1*+>77aA4?#U$>jjDgoKgtFR6tEmg$c*pm>7T-6y!yz21G0`*!oyPO= z@xxiba5M~Z{_y@Ai$`CwZa6&f<9yj)zY@#pfBB#Nubz76Q-9^p|202v-+XZA;iJ<} zfA(|V@#1?v@c#GRdGpSA_Qtoq@s%k1S11SQqbLxDc&={$qUr&=5On{W<6ozP3=~Pt zbQ?ZsU_h1KWk8@+SPKAVXpW>vt+A_&LjeS#;Fc7JY7Wdh=YiPBv7%ZAQG}MYM-}kd z`s({5gE}>D00N=|%qAB%Zm!7$YNh~D6hO^Tz_em3R8?W-KqeDIFzlyC`>SuxPhXym z-}>?s|BjeK%`{XrVq=F$?84;-XZD6@B6W{?(TJ%CW2@&2;DP$x}BE{_=nLKYRB(Kj7W$lb`z8fBx}bK0Q7C z#;Y$Y$a+XfKBbJgm>Ja(h(OM9nv(ZkH7#~JAUs)ZOcmt3q`|>#XSrM~7I*L7O*s?0 zR5CM*sg%nU7A!@JnyMjUn$Aletq2-1V-YJ_qM4{5v`ui5MX%c@IgWs-EYx(YW#k4LcbTdt`w?MN5*LMI;-DGcxtVn9-t&Q(+${Nt~wSDXAH=M+``Ys@^kk-=kv_DU&$` za#PZj3JdNmI%L5j%q|2c$-oTF21gxQI=P>AZp9~_Ue9|-%A3=Ev+f_Al%t#Z%58_k z-t?Lye(T+TV)p3oBckH~h^k2yn30*y#at2)J9Yw&bxD&9XRniEv*Q`eTfa-riU2^{ z@3^wsO79g+wJ2UV08&3}R*xQ>QSf`$cSJ14#F$&(AfjE6E9xFkRXs5wR-nq|`~U!A zp6d8#+o1!fDpvqNGRK4f7v@o@x*sB{zWt(7mvCi{n;}(c_JxC}SETHQb=Lm_f2r8RQ^_S|F*O7Mn|nv(PzoZl5=1B^CylyWlyZa-2r^{#1P-*AMRd*+duKt+ z0<_ zbFK4TXh<*-E9RIybK`tY#fr7QnRlT|kyNAwYc6_9AS-6_N|v1KI{nqM9d)|`>yrqtnyufYWI07IcnQB z=Pad6+h+ubK+ZW3swx1dROcmHErgW-nHY`GfXJze zWC0M5K%jQPqHw|rWV$s$0svOJ)Ngn3!Uelu?d@v-4C+0Zh)lDYRsNWnxr>|cB?V4Z z4Ko@rG7>u7eoFwL2qIcg$VAyyADEe;qM=rkd)M?ehEOiD7Kq4^ufw4_8blyqQZqGm 
z+4MpSMS$cufCB(eh`_~il2S?mwHl>PZL6}aVME*Wq0KQ$)+v=Hge=*SA2vhZ?ET>P z{?RLkPyavu;eRou)y`t)-7kKpcipdj;ui``uiyXLY`!d7Qks~#opq%YDcJ=FH2%WJ z|KY*mRdVCbZqxPr_~dmnZfDDJIxlL$vv-_xDdN|stDD!J_THbZVw`N4(iBe)kFF2H z0OCU9c+rZGfIVK_Q#XasA}*TNsHd-e;|rqs`1rw_cVBVNH9i#7o!y;MgdC?Rf8g%uGy>~B3X+!(W?dP-RM-R*1?)AR!r)m6^fBw%t{pnBNdh*#A zV?PV)_3+fQw-$@VOJDjTQ?uHPr|H!pPP>bpS6}_a=RS3>VbZ*#_(MLKkb{7Y8M75uBQ`WOQBgw`07J`IN~F1hUD^Cr7vMMAKQp6c z%bm{pvIq;8a?=X6IG*S6%r=h>;%~JsH*mUt5IX=!q3aed;5co9%?wqn7#krgXwK10 z3bIFFTcZwHW>Fg<0nCJ@v(QfY=_^n4UHACpQNJ^Pe0p*^^*l+MO>aii2yL`TkzpE>mL3uI-}D zYJvwdi-?L40wQ{9qL#NK0xm`Z3c$a6{Za>_mnh-wZ<<{M5r3QSz_zl`e>s}?@`XSR z)q2-3&{&FD!O<+G>;O~JZM=B(LkS}Ge%6+Rhy`_ApJttWKkJc(;-+=#a^@}&N!hG@ zDTHp*uEGjJ%9htKquBc07I#wo*7__ob152-NCc+|L}N6jr|NETxP}82Fm?aY<KlPd4`n8w7^peWi7r**> z*Izlj^*yiOIo@5ip>d39ad>$4=9?uIWbS>lb8xiTjAn~RkH$N9PP>+;X=*~aasArQ z{^Oq-Qu$x}<^TPE`Ct9d@4WF++rM~dFR$^rM^FBdo-7Ry8h<04e=WTA&#b-P&8(Zv zeXyRF>1_SK{V)FNPyF2f$$9_UTW_@U*}Vr3h@3>&-8uLl{OLdQ?svU-_ulKr$HzO1 zn+6e~{o)tC`og=yd*1VYBADVD%?IyO)X;?e-CaZ6-QC%Y!{PEkRfl0XU#*urdpo;J z@BQJ?uGr$5e{=r_e(>wxeEr3bp1tjTb^XH5b&Gw)o=Ii)GIP0snPyLSxz{{DX7_xJ9+d2(`MW=}o! 
zR7&aYn|F8S^Zor@6Ri=#pmuh8HcAEnAA*8pNul*XVw&5wRiTt}j7cqCxpFj)QA7lw z>$;|CN=nl-0YCxt-b>1L`%+3V1Y`y<1~@%CL!u#0UDutStqh^=Jn93Ii7L*Owp%9) zjx<9_(d=9iO%j3ykELYzHfrIP!y~HBD`48@SV@&j1T{eH!~A?4&xSPsgvPbL88?Gq z({Qf_RB{YqZapT$fmRfKr8WJjIo{{?|ofI<2bf$ ztE$Ytt}iu2mr{=XtXV8~%NjQ*4yNw>^*v~=hv^s=cX!^ov$$TyS3-J(d2;4#WDA@-J`wp?7nrjx#`yXd%MgqZdT0PRKG$9LaWKT9_(a&avHkbWmh6f z$@6y0+^qFRDHA#t5h8D3y^bs1f1`h4^0T9SC)1PQz~ZRGRK|i`fHq)J9oH%s&3huW z?N>}ldeOMQl-_Kaj^CywS5>UM6hxbK7$el4O5dSx44hZYvh5r}Ro~UbDg?w@pBvR4 zbv^A!1wgGhMnx<+qG})@(DkBYQ}t+#BPAqP)M9WgFC?x#2bhO5Lf3YUwYpx5PHF(; zg@XDv(OeCR!Tj-S%|Qe6Cr=){{K?P!Gt)5J*?Z+{Zw}(?#ZZdu?CeZq%2Hkr?#}%a z@57U~ZiL|X50-M z9XjV8-+BDO_y6I~zxla`cOLAelgD5G+N)pte3;K2w+_xn|J=8Bt_N!xa@zHT?zrtW zT)R$uTq}^B`wW z-u>)*==6*I?$^UJttCKLK#LTMStVmh>d140stll#HAO~nL^&s%RYBW@y6X3LH~X@p zfQtlztV@9j^I+DX^TxR&YXoB7wP%mxonxIZ)^t5t@A{C3!NWL7v2F%8_wJ91!OeD> z&5e`8H{R)(A4zIYzCIj>;qLi&zVkg_dg~j1&C_fB5Tv z{Ttu-`e&BUKS-Us%8Loe%5f{*?iCm}ByaDm{>|~P-+_o`Si68(p%@fJ1#1Z^+XO^w z`_B`b27`LsXu4z2pCKS zun+}s-ZzB7v#)dn0I+Swq8aEIGZDG1K9LPdHTIRlP;!F^Do9{12zLm)#^pg&54rP9 zHqD{G)%&xyOxpU!AD_Ot*}T3wJNwF)KKJ~y&%g7zcfEDzTW{U{rfHfyKmZ>Q=zFIQf$1l(ugp6NLnVbA|WRcGnEuGN&y2h!?jEuJ1n7C3)E#TXsgyo6(Azd z9>7&~XKhonazwr{yMJ)@lds3;{q65g^CP%^U0>hqJpQIk59R9f5*FR7pWi?J+~L8) zzI(Fvl~lnDO-;eTG}=ziYrveaWFasV!il*@Z_SS4jz-`-c~S$8IXPmJ>S`kbR1s4{ zA~TuId(ZutOWTG5rdeivlM^5!8kFC@d$|<1ZUwwd6`BrAc6LrqPl>)gXcQ5Nj2djioYpb{08q8CD;ZQC7Yd;n zwirTCGb1c$DWxn*ss#{=A`-dKbmW{frnmX#X3jYU5fB^(F=Ovs@YRrgnUWf;YG|By zq0-F6WHpR^KU;667^f*F1#-><^5f0A`nF4z5{r(x0KjOeZQU>oDqHz;Kktbsm;)m- z5YS>J78&wTsEnXeauwu>nTjc>WL+sW-Wi)@8HY5Ev7gQM_jbxOP6Qf-ivjS|#*ZP2nCDo`eV=yu$Ry0Gio%u|`3Roqy6p=x)s*Wvq zL#NQG56F&LRLhVetAYub)Ot^}DuE`o#iHxy?T&9r?R>KyB}XZ&29_Q9;Ft)Q(L?=@ zKm<()2BM%uWJu11tYf|Sz4tXEq`H{_h>7~e%sFQlwPf*S$_WUKG>H%plPhXj6p<@( ziU=+ih!MKN>L?&$F+~MUC83x}No5O`L_j7*$WqcY%)35|RQ~E^*(+w=v5IKbIbnr^ zgDNnRfH}v6Xa!L%RwSn+l&5K`>UBf}spO-IOY%fyYN{nm zcFu(wEy`T!yQP$IngF2Q>rky$c|^1T;y@k0>}CauB2PwSA{%)j+0IdheX; 
zpb@iFvZ^_*(dg=sY8Ldi#JkXSUCvkPAyAPe7pNZtL=4WsmTdx1)Gox$^(Sb+44x>` z_J?MsAO%eT6u{SvWq``#a>xjk;iy&wL~E!j0WcyXn&@Tp3lUv{AA<|^<|8VA2!Oy0 z0J)@!II5q;Q_V}kL6V5p9s&_Nppvs>WolxcnwF+?4wTPUt6%!(Kk>`I@-ye7p`$UK z{pM$X^~%vra=6))ezp_i=)Lc{E~~^C4S@)l`Ns9<-+1fw*S_`5uI+PbQW`|eH?0(N zE>vi52v)Y!C4BJkE!PC3{P6f;*L9+%g}|xhMI2Yel8YXnOzbd&G`uq=S#M5!qlB(& zcV|0CMIt*Bqw_I#tkyk9;xaFzv~0LJJ+7N{oFTS`{tLw@`cT`dhLx@X0z_`;}h>f z%ta>MpD#2=8OJe=C*0jV#o6`uzk2;B^tWB;QaWwt&9pek>}5hFt#EXoRFI((={ulDbS(z32HFEY4F;2=wkyP!Nw8!ghd4I(y;?JT>P z+}_?EA)ehmcg`Ih9QI9%Mcur`Qb3Fq6krm~CV(1O|T-*Xwm9rPQpL)8*llSFaww`I>A0pW^xIeE7ikeDt+%zWyKmrN7#8qj~!4VHt1ta{gI2 z{Z@STBbYwB|EehIk7FF$rpKp0up62y`^&c3X)wC+5p7Ox?Cp=Mv-M*;JHPu| zcYeuJ8wY#OyFT#izws;g@7{gyi|_r%|M+KKdGoWY)!A~neDq-Rqkrg6&*wYOKJ#qX zEMgj`Xu~j_ym>ZGX|ddmF+1PnS!_a!h=k$fWYu*YLbG0NO1Q^mXBOJ`e&_Y$^>F;? z^x$as8~49`#m!e(QA^6Y*RXsa9YrGF3Ow&};oMyAdlvB<{4Tz}vxSeAk+>}$!SxlXC zm5o?TO{HdnZ98kGCTbua3!v*+p42;p=%ENQmzWHZw)Dg5_Y}#c7)f@{#hhbKM3l7% zSlX;%1Nhhw(Ta4zJL+Xi%;m9ps4{ z?5{pCH>SG}?;``UlbnZPv%kBG1;`;f)T~7Wy_u01)X|%Wlw6#1v)PQP+Uo0+)d4_J zaodUL))vrpT`A>ky{g|%Rdm+fr=Ke<^8XnZ8ECUFMaI;f93H0&!67Cb7W~=W{zi~c%Du+kIsCLE9ftE`xo6FZ zJ6x>J+pg(%mjJL{uXpB)#)n}XfoNzBIiGmXZO(EgzTH7bTVYEn+eC`EBGVAURG26u z`!harGIsg?8MnLTT(n0{-bP>;$)#eQzz$n+UPM&16xmj2%+5K1nrQVln0}l7Dpfl2 zHvFBL0KkR>o(mC32?8WVz?9ij)fq{-D0G3)lfQ(;6A@^UB8=p`XYa=;mQsim0oWmD z8^=w*v!~OPl9VZ9vOZJeLkpAxQ3RZ#7jO<;&WdG>gYOPSv&1CR+H8g7z_DYWVBZaQ z+WsA1-+9k>K!o&&Hl=U6dCTi@ST1%RJ$ih4e8$XOJG=Ad-AT*Yc_soRagG3VGpwF? 
z;@a-+{>|$*@7;Z{TCHAv?d!+KkC33mNrvI-lTQ}pV&X6(&lmeojJ@1$+x;hRz5eDK z)9Ue}yIRa)$_-9SdVFyHg|o%`t=)<7ERc3YaXk6=Z{%;j{>A_P^Pm0rmxuP?naz%O zBktwBiB3QZ8JZ*qKq^iXBcIMGX=E3haXml(=H6j=@10NH8Salyf0P=B+qn#pk`XDu zRD=PUz;f1{6#)=)p27@%|4#+#6?G|CzHDJ2qO;<_q1OVNQ3$o~&G|GMG`4@$Ew7(r ziLT6(%!acerOXTG%MjNLW5~dTorWdhbnAQk$s;QEuK3oYSHEF@7(f2WU*x`g;730E zxBl*bclz-3ul^_h>5u*34;sSvfApiD{LN4P#-~p9yIV)$jfeMEdu2FS_=4@maQ1KJ z0{>bcTd6*vsAaRHSkx4ZNj-TWMMYBuGe?0ussXUyCL{uocVdd7eN__kR%C|+$%=@0 zX_bRHXbeqJAUY^ZX)-!QRIHX?MKggd%GtIUQBcKfn$?Oan+Mxs)B%(Y>kW9>*xL6F zr>m!5ymkKBK#!+zJBzx8J&*uKg2Fg67hGc@SV2<|D<(>WfEE5AReVr8eU~?OL<9)9}z+pT!S_?@aACH-0+#%J+QqBUi88_}KS-&#*fA;%7f| z=guo{z5aD{=$sojL*KW)3B`1rQXJMMA%x&vI5}UXSlYI&T+K`fiO}fEvI*YB!svW~ zs4;Ffo7#b`iV6i|$0%rOB2o+-A)!+%rIcJwRU%g^^vRi!&Szm~cit6zc+xo+y=&A+ zOGd%HU1vN)>`;alR-MbUth8~i;PI{R>;!pz3ai&Te9h~l-Pzty?6mA5btdb8r47B5 zGOQ=XHZl)mZbodpqGZRlD-FzErA%XqF(G0%>&P^vlqdAQH1ocITdg)j9ts*@(6loQ zDP~3RNP)OzY5>S~uSXrq=0pWrw+sv#L8o$_*P&(ay@5<=V3xL@0cgTS5X>@ZXDx-S z2M({gM-McPX)%|6AX`mkF-5E91N)#&O(CX>`z<;k=)p zpPw)HcGv4wXuR`GWB^vzf*G)J@D!ZibyTUIs#>sI^Z`rTw$-_=U>UUWE`(rA>($z8 z31E!a22YMHf+4bjsz^;UZbH*_Nn(z1*)&N-83im$F;x%b04D)7D2i3)BO=?mPNb%r zSs7vx;sqJXBm|s&PR>+wmYh9DABj<>K38!gV>%`QE2#}En1QaUf zO#+Q2HEJH2(GVdp1M`$A7ZXdZ4<0NI(>Rur z0!-waBxNkYUR&4r9fffXA>Hg!tGsDQ(?$x@=!yaL*`fDrmsZn_qk zqR6DhVJ&>}#)<+c)n+xaCp93z_clvKdb~b+1J8j_i}Xz!9EIRGm}D|gG;2eEB4eJK z(3=S%Dj*XRaxB?wnl1X25&)Q@8WK5XWL5?g$z(tVSTo-cBdC%C0H#U`3WPii)7jao zZF&(yQnRg2%S=@ih@5vtQYi%uDt|n%1Gk+jRir)=g%H>|1CA-x;6@_KxkeDP_YFX- zg`w3Tw5^ho>tcuP#187+L&PdVAAnLyv4~Jy;9{^1h_NSR5(6_+KrIq{C|PrguuVH4 z(JUg~djMl5zG&RnZuMf`gIdYC3ero;=)n;)iJ1lOEB2?9q6h${xzwBzGpqxcZFS~d zQPpWeM917X4*;kZQ}*67ZJ*Aj^)AF790CZcfMt;>PG;!5H#8y=Lx3$0)_Wg9dv*WX zl;X+yET%OYC@ZQOBX3gxZ7a-2q>F(lGXrl?=$B6YI=7&06!1392t2|SG(nS|KTwZ z4>7&++8aKAqD^@W&O7JEaYV;1_!y&+Z8qyW_ufLDkkc^c(01$1FkkL$)@wCmUkIBt z46_9&=rC=Ht^!_t;oaY1Ccp9NPnjALO3}7&5H=<2yxkSGX&j({3tVs%wWe=k%--{A 
zJa*0@p(ssJx~56l?r$D9UAr@1l$6d-AAar&zq+^k#8nl&%NW_ZP-0IIeF@-{K9*ld+E!+VI)&L-%KZt{m$NQQM&giCkU~WZ`^yt zJ5PD+IhvTzWCIYHrjNee7f`g2ZuiZ z#O)3^D_!=e4mqJijAjyBWF*=sc=RpusOj;7+iW5kq?jrq5|ddSl>-10Fd%Gq<7xn) zzcX9t?a1QV5`>yAZ@@s?wfQ^!l`9C&Ob|?k$ze7#ec|Zfy-yrnsz=RwxEMG{4#+5^ z$)w)*9a7Cj$DBY$0s#g!rxp~MBtl9H?;xcq7izlfTXEhsY@Snz`RMtsXhXn^>aP@j z+Nt8{c^8W?PK%Zovyw#YiqAqN2|7$ycwCT*I-)$2)|vP5ylMRPj2L|D3&iO$yWl`P zCSx)zyLx^F&*OB~>GQMxLuX}5nj5CoW>Dw9*4#RpcAS60XZQRE{`i<41Q|Q_QK^F% zFdM>PE*o^DGbhhzTl%>W+`hGP3E0n(2xaZHn`}rJoo~SWqT3zU8vq+~o^|uCn|;TN zR~|lkboc(9o3{=Gja0n%-Z=)V40kX=L|_dN0ya~gPST(pf@?XMxYJR5$DZO^*qzg& zZQGDe16h&q&i6dAyWGEV_~e~8?|k6>-~EFh`{Bc%fK@bq<-6VN=`!7Ch>|CvH}K$z zCwC4)KNn~|^uZr_bmy%_)BWK0{NUd07xo*s`M3Ywzxy+P|Ka1uKk!{2x%=jwyANME zXb*n<)1P?n)39OtEKZX8}F?%y9rC8{rf$Y7|I=%-c z2kF+6KY|1Y5573YV}sMx=E0kfUiHh}d%yLSV>+7O_^#`V1))9he7U+!hU@ynxw zdg5K{^Wm*KudyyZ_Jhl_ zle^O1YB$IJ+U;>#fAIZ(LZ$~MEp=3iuKd_y14RzRjx}t?vFX}s3$-FB+BL1{)V6IU z0It_-Gnwx!RCS1HnkMhPZsny^x49gAmQ*+N{j8m)aXz1)u1*^Y0Lbi3v3B|Zlo$f? zW*oM-F|$$%lP4nO`V-kw^E_1^Uk%-5KxRe*tYL^m3AZW$GppP{1|*^+&U?SVd$hNE z<-0!k5sz~)8pjb4>xZhA(ycPWdt-qMaHMAr>^yPbw5BClRx@&pj>({;ymKX+G6NE^ znN2xm74S^XIn*vR*PeI|4RX%ite1;9Q{`=3*z9JtBJN>QJG|OhO1bpG~7kG+=T8k)2Ve7=Xe@zxQLC%_faB zUo;_5O2r32aL&0HW6pWj_v`iAIVPgJ{JG}lVYu>RJ(d;~O0;4k0xex48WHLA?CjZ6 zj*gC^-bOW#eEZfra!iKMblqmNXwDk zgcz<}fBwm*-Z}4fQ;M6_XNlb2{^7yFK`DiZHgTHGW>*fbgwUQmK3=WQ$}qh3@>dlH zM7(wg>{_cIdB+vd=%OIEKrsV(P_f@--|_qF znE!77LsCXWpyhTHcCle{r;)GpG2Ej2?pAqh?(8EU{?OIEo73uq;`yzcN3Xv2)%y?L zSk4!-w%xq(tsmao?dE5NmcwjOQ4FLaxh41ee$w*)@n8VZuFJ#s-P|i+s>UYCIn;kW z#~xn&M1^$#YyhA*M)ZqD-?u-Xd2?aLNrc2uy|N%H$nW?%zdJ>$|DArg_}$(Q)j_Oo zRnN8d77_D~3=nJ^7fe{CLPOd1M%54i2)6ITq(wBPwV66yIM9q6O)=nZ<9VaL-1wvY z^Y86XUcNS4zI*2#DG;eMHZ$BgQg(WBxCc3T?>%wtZh%^q2bG+Q!)*h?OtoA#L#>#j zg=sO#03e2d9eW4tW;gzdTYc+vIY&5HoV;G_{!w%FgV(QoV z6VHGA%~!wfz%mw9MaT7#C#v;m<^D%M`iF^vSVHTIjKOaf^PSCR zWcDh6$Y{Xq)O&-H)5MI3oF`EwQeItoBFQXFL#t1b%dMQaa-JZJX++-_3(AcPJ%N^K zqovI7;Uv3CGo#v 
zoJCk$*qn9!t^L@%=P7b7=2Xf#)12W4OtfiQtpRxzBW-vAk8r9gQXEkmJUgs+l2~G; zaOj1G002}q6sMLDL%%C{<K-HFV(Nx#fw2@XhSe52eT{R`Rn++RxbX~xi+Rm4pU0r5rWP_tEIf7 z^;1r%-ovkSjg4Dq^d5LlFX!O8g(JYc=cdn_RNfe0AOO$5Q#b6RyrYsDw_ju znhU5AKvGGd2{EfQiG0%nQ_Pc*HmbDUDTH0#%) zlylAp6f^IA(|QmsQiiX|A?D&zcO6|$&U~|l{-=~2RLQn8GbvTQPBF$x z^%1>9&}>(3T~^+6_07~8rNofvPS!6xkAwiB69CC7>gU2w^~_eC?3ZO0yfDW%4T5JD+X zKMrgI;kJ{-WTTN4m9`Dc-jaIsAmAN1=gctWTuP}lHdR$5W=wI@gk7+Nh@|f6TkpNs zzk!I?FA|0?r8xxUw~bv9t!DNhICR@RfIux39a6pIDyw$;I_S&{urQJ%s@x%D60cUS>JeSKHn~4M0t`FrxP!9g6Cd6C!)>$)K)QN7YlJHT}Kju6yru z&N)j60T^-_y|p*@o-#9eaWc+QFaY?*8A2>67l|>OK-bL}tQLS8GC7{52@SII;yii^n^i`~;BiVLn=WT_GdrK6F!tDq znSs6Q#Sao<>jSCuZFgSIJREqRH^U?6cDaSq;n>p+)-+>4kE5y-=-D%tLDvM@q&aw1 zmmsntaUU89%d5AJc6L7e{eN;{!y1N;XSu{S>_Knu=y^gF@-*f#uV~)B^p!8nT>sAJ z-akrq%wAbb0*A`*#P!@Tou_l&dwvMFqnp3b>?lQ>H55w(W$${}nWkxyI7bs{ixwX= zrO7n-b~o#evMtszx13aZUE;VL#ywwQ0u=`l8v6ll7@Y?`UT7FiBGANUrT%n~Q(;*$++Og$dnhe|2nk=jpAR zr}uWsyS3f>b$&O$7~bc@dx=XaD20tZ_M;YtB|g1dzh+cZy=rEzVB?WkIDaJ6kY;Sc}}2*K)I(tBn?rl)si z_0Q|m;YnMRW{=1x#XW81ZOEfs_$}12`idLdQi-p%ADDM?*wweUWj7&KDWGjd>I@0Fa#K zau`hv)e}1NfDRaASOFSxmm$J7}-$A3YxXSx0k>4l6P~Cdv^EoH&5MK_I~D{e<3Ck()RYpo(WB3E0`WI2E(ThcW(eXC0_sm z>&9K7=Jhc$7?L3xbPm872>a)kghX@zBnHNAB!BaRPmyo&9~0)eD%$BV&}WP0}gfSrw^8zEr2A5y^<`-4d?VHb@pPzbami_YD{*y1h@L_|T2a)XDUiGos2tH{03pd3P+uA5GH{fa{tu#Sj7+XpzSG63ICynh|k6JMiB3vsuo0 zikmvzZklEo2Jd|-C4?XaT;LdEKkERXlw@W%o_t1gnx^Djs5BU6R+Fyl(llOWA7iZY zy%ck0p(+>>*tY=|wK`+VUfZR@R@PHyk$}6@_$ueA zPvtFO(YaEt$DD|O7)mJ~xnVA8@~-dN#d7B$@Q&zcP=asOk|{gqy0*m2yqM&yok9Zu34t+SwV|mcGcl9a?HCatBRK(t znmR==Ip+iy>Q6nKO1|0mS3OTH`nE;H0#?6ABCHpZS`iTwt<`>=Ie8}KoJb7`LMVg) z5EOl}VoP>gV*#pEO`V9CH!0{0OQ|vqF4`h?GzAeMz-qMiZDY(YwSCx{U}Fk%z{a0H z6umZwhc2J?vt9PS!P%`Vw}0rn|E-6sduMJLyg$!Tuit1~lh$jXr+hs1zGggIZ8!oz zU>6MyT>#&c+3uo*ejLsY=l+QqUu{O``hLO4t=c#luqF=y{vaQ6(Z*GhSe!c5wuL_kg+l0M$@9d%v2_PFVJ7yv^ z0yF#rF+TZSj}0Wy=5S62bLaY2l6i;cF)h*k|Ni2?509c)oCOSot(i?KCBw}=sJ0^u zyOxqED}b_*0ek$t9g`&8;t?)&j`fjVDQ?cHB;_`zbQ|ME*O 
z?Jajc@}ci4(>i!eir)KS8cQjFSV|GFob&#&5c^!PD21jovRH?lXse3$SZa=*@uXs$Y^H4_iiHgNzF+?a*@!PFyLr`X8m$+e|h!VVH5gTHlVYv zCy<7j_m>3`)ufUMiRfTq#b`a1EGPi z!kkP>zP@9#h63sdlrsiz&?*A^6Nu=s&qN~?WuKvIcikbeh~*;Ag;a_mG>wlXGp=E4 ztz^{Np>|pIU{&N-%S1T?^WtMtEUJXL^$BPuBd z2LOcZV=m~B9Og;8uB+OVx;lb$F-9Y)ebr0ffcFg|ro5>?sU#Ty*tX*(e0w(^F1(T= zrR^wABdRp*&cWeT@Z7aKwd1X-da=2NEmGrcmCU#QTm-88zi!ViTbwG4Z^w1K4QwQL znI^T}4_8xyt8yR3%lBU?G$mz2sEku)Ccxl)nkK69S2JY7icm5vF%=Y56+j{)_Noei z#OwjaacsKoh+FSnF0pN$ny`hP`Hl5F7fVD=6o^&Dfl1r8aDnUqfZ7qe)D9Do8RnSG ztZACfxOOgFe%@{06EIw=q=|CQWZMs301=HZyr%%$^A;o4dkzta=2A+XP;&1HUCw!$ z5&?$LlEE}h;0iK1=LnF*$T4`%Xgq?cq7eXyDiA6{QE6N*3JM@l%*2W{j9m5vCj*gM2 zpo!_cgruSfMtGUMk#ml=3Vy!N9aFn=(Ci<~7V`r$7^Z0(j8TDIwq=kSwAJ`hJGr$T zY-WUN7p)>ytK|La4{+=S>=K`4W&pGmtuq%%uyqd@GncZR!Cb!ncHNaNu&WLmF0V#* zZ?D0NC#Gr*3)Hu&4@}uk*0_ zX3E8yl~Sr(s%rJjtmgO9#eD++oO3(Ni_k2l`UYzEUoT|%-nq-j-#THd=Nw>L>q#z% z7@P-G5pk|ouced{LK}Gd-`+ZQSWH~lrW!b8EjGwz+s((VkE-HiJ5L6JASNwZ1k)?a zqxI=sy7t`L=k~%epjs!0ShHf>ML3G88X0c^eFkcX+ng1FsW#97pn~ksv6Np|l_<+63Y6T9M zTs`4rbQPa0$kE2#y~71B9pNUPPAY+eHw(@qqktKpQZt#Vi5Bf4IBGbk*jb)p6RvdL z?=G(1KHP8mqe1lumk*MFo-{bK`E{lMQ^GxI_78>pbn;-(xI?=`nQ+XS*{_Ej;$Kh2Hp~S--nOctjqV7L#8zS0?}LAFHkh|C($F)WINymi%o9riqo^{*B02_-v6u-2 z2LLW*BBC~U?=9yf$sEu);@Nu2lg?bz1e=PdhFr+$oYSgRaSG?Nr>!Rh*N%b4IB_gZ zC+K!Jp9vcaPTzNM`yc$;XCD{!V)W=yH~%Wuu2=>!R-;lNrkvfhi*$2$Z)dg?F=w%J z#vmIe%gE@aDHbio5;A%3qm5}ytIc%diQCV<^M#kb@P*^^6*-<$tkDw$Cb`C?1|rHL zz#S2F&KBMgy7~lZX*~**G^y$-?%i!}pZJ5McJ0v@%Xqx({Ve#lF~v)muiyJy zzwqgF^95O6M{)!^XRoeY%U$Pzf@Z_Bvo=2V?L0M0&pz?fpZdfv9~~V2*pK|F^YgQx{<**R;g5XxQ%}DbIK1-O-9PqYfBMeZ+39`% z+N)oh&)h%xr~goLixlp}V|Q|PuYc+t!>c=2Z#7-l&pRUe!dJh%SnjM=s|SxB&HG+N zj+VPb)b~9i<{azRwr$&S93MV>NREd%v~4@3v6RxyXONMdi!s*SK-;#n*=$UagGw&b zluA|+5zWpK(Y8&HQhN5;=f-iobLWl<0AbfPh{z7>Gg~XP)vQK2Rs0`SFsqu>$m5H| zs;%!25fQxxQ@nZe_PpEwM?d{jT72U@5j@^ZXJ@B5=e7&8*=#oJoTF08W*jQT$a_zw zVAisiTz2;6S;`n=j4_pr2HyL|`v?Wqz{~?VVl#^|#u%^OxL&=MRU)BaQ;Lev`56%v ztJ|P3YiE{qvl+*f+m@KE0FDKwD4L6oV_H7r@*}49R^NpXPE=ez6?JOctb%%;Yq^33 
z6Sj<{ae#uBs}D05i|_mHAN=KC{l@?rH#o)Ba(9N{+3U1P0I<8*8OO0M*j!>tY0>P0 zNz<~bifSe|OhuVmOhnXmGc!9qJxwLoU0v5M8z1(UyQW%YQ(IgnrE$G!nx^eSjtLPw zb16lNh7jP}Vj=3O3+xc4R}Y^5>a)8$thk}`HqD}h=}ZlgTjyKjP>f5hxM^hXki7z! zZbP6L5sPJJB4k(<6R{#(3IQf3Ai5nanSgE)lN6678*yX4 zNdQ?ANO4+V@0v#qnjUw zS>xgLlv@Zp-V>~j8zZMs<0GOJr_gawSc_uooW;>x%kw>z^zLV_9&eWa^lPhJ1}+v0 zk;~#k084+s;W_TYb!it`o+<#Ovf6YS+`h#0%zi&RYW|y7q;p+7Z@f-0s<(5|(~Yt8 zdm47a<|{zVO%wVV`v%p(Bsfa>54d{(fpPwx>(Bg?uiaUfhmy>ognnL1`d9tdeou~8 zd~`}_Q)}96Im_ zYC8|_BIVO-J)v!;SY3l?U?6BvZq;q89MLf$o0$v#@rfO@6uLpyQE48!5LVMe5$Aj7tKKdUQPC9;wqZGq zE9Xs$19L#56jJeOv1R|>_k8C#>Hq7W{8WnC&3%q8<}=68G|k@L(tFM&H6EvFVn#Vd z@SG6~*d!Sd8^>{+(k7k26>*2nd_Olci6es2>>5fbrsP=7rd)^{-*jfcv!<*c!urJ0 zF+t`qW7mmDH=Jal;QN@9Xha7E)HlsWgF{5iq*=y+)D~T@MTXWnY&9!F5!s;R#+9FApP!$XwC&3;m&<9I&}}oNVB4g){Ez?Ox`-`?J$@V0~> zE}Dr%q~J}azU@@fL@A|GR74hw*)VOKM*v`vf9KEATlZi8+)E#yVs_4{l@!Ho$AMOx z!jf|c!FyjWB+TA7AUX^KBF<*BEqR8pLQ1XLh=F_8=A8Sc_1=%eAR?X}stEzWR(RC- z02eU~p>0x{5YbSp{~$Ep%o0k~+>Gmu8T4%j05QgcZVyrcGbAdW3PDOK4WkRJ2#B?N zU3Obq@Qcd*2Hmx^eC4pZKSL=klWR%%G;Zm}(;2UOzRl^rpzr%ruvE&FVwO@h?*QPC z8pL6f73Jxt-*Nr=vmgJ&&!0RVT+>3iw9C-jc5~IxNWC7Krm2Mq0Awl7LydAZRWPbg zDk4`(YMKTBwi7;f7fSh3VfPoE=!%VJ|L-5K*tZJsJj#-N3Y$5`P-iP{4T{t1yaoiM% zom+~G%se|d{PoZM;w#^J*)E(_m+1n|Ime}*2dH%oRqqQFVSud@3Jt2Khlq^E3P2XK z%ZLIqQ*+gka~V^t07WGbzrCysF7`TFWfM>X9>6YLm5w+#=egdE6##0axgu0no4dfu zZExd#dt<3%gL)0;oQt`(ah;!k_lM5nIDP6je~G-tB*oM@=ZT1clvrUFX1nvn@w8d3 zR`uah^JD7M!3T203{;yds->79lSr0FoU7 zmZ{j30Kj)WdS^rgQ4U)RE$2e(iB!>JFpbnu-&ouD*@D2t43c`{CcVkj!CKbfcrh77gA4b^WH@6+0iP@%>@M4Z|tpAY~+?arP|XvC8A7rFqkxp1&0##Ab2o zpZD?D567TITrbob79b+PO^UCbjQK374mTEXYk!w9)Sz@GATVk;XNRsS)_CM@MhfHO z)A;Hyeqz3}UrLvwIHv&6H-53R(|27dr3x_yjQfsTZY%_*l}z_eW#_bc{;q%GQE6?l zvzg4A1F_OR{LDM-YjZI5aoNG9(BtW9IeLF^%lT|?J=;B&&1o*Da$Ho({QBnbU3cWi z=J$V7R^K?p^ndwZ|ILSQKKvVh^FRN&pZlrLed*O7|7-u7FAa<4+3(h!!(o~_7^w_b zTRZc}MIo=r5iJi)k0RlurPCHnsY=)kEGrge!Xb1S3=ot(fz|LUW19(pD3nynSSD({ z58(5863rcx4_?fQ$TTGp@qh#PhZ*PX6JuF4K79OcY`Ru&zGJ=~_8{$?d^x@TNqPLG 
z^MYTWU3+x$`1RA*pI9Eepzu;UwQTZ7w1=I`-fL|53juV zf$zNckMI4;r+@Kp{)7M8#r`k++5h-#z5a(k{rBGb)|db6pZar)Zg&6P>D}WyZ+`1h zh-b^iP73Am{(JlLm&F}~Vw+(wnc8M|wa$y3wv^;Le?Hv1b?cT{T(8fabEcLGuynU? zJ^S?2x0(4jKJ%&5)k)j80-%t(wi)AEp_rm$pK=)=pDBVO0nn^ps6|6+Ll>vPxsXfo z-cL~;Jbb*`tbsz`S6g=mFa-oOA}mEpeOMMj#5&j{z^ySbiw|sOVnzlvZ2$oP4859K zw$#kLVCKdTc>vMbd~VRM*Xs~yvC}X^N`smCrYIE4*}xYtLLiUXrpM>0J_~cv?c9q9 zne%3J#HyMl*D!#-X+kNd$Js;yplh1O&kaFj7~+&9E%%lUH|zD>>1=(n#1o{=-L)0L8*|qOcW0_8NQjl6D48)&7T3eP znJ;!1>-Bn?24?S^Bgbvqib%`eIhHb23N2cwf{`LP&)fl5Y=4G}xHX&WF%&Z~f;IwSxS)X<{?02MS5 zgGKK|*D;NLHb)0g6s3R^h*OM{i8O%#02!-RN2yW;tYK0CRWxCBjIHxn0-9nXHA)3i zR@H(QEhm+ZdTbgC!F<-d$t8mVs_z>10ZRcgK@tLEG-f?7zO|4Trcj*gQs0g9ek|z8 z8W3X3Xc$^<195?I1ocDruIuH~Pe1r{BV(8?J=$s<37m+OB5m6`=NU*u5NLndsmiPm z&pdPc;p12D-h2J-y~n@)NnI?Qmb6}3W=@$76dpE;sZWZaCUln`7=rE8kK zy;;t2nx;|`5jkJCwzT4_W4J!D-yI)%qqD9R>l+FP#m1GQ^R5lvD_D(WOnH!QuhC>O zi9a#`OlJTfbfnUmJNWST{rC@k>`#3CrBD0d-ue7{4t9=yBNS#i-fF16EUYD(ErSR`OylB^3vbR>q#r4Wz_!5?gO zd_ui!(_F?hnOTHxiaFUB*sZ*^E$Qy=ncB=koCIiB5glP)EayUb5NC$QWK8NX7b_rA zjOY)#%3cdQKM%kV9gAWPo>3xU`B(kUeou~sr695|CZ>!<3rX=!QC|urMPLvhQ&49t znEChEJ+srZuj{B~ieLKt7ayHG zP~h3lK3Ma{8*e!0d=pMiAJ@DnAmWTRAptbVWMpEcs7xvQT~$$ChuNZ?_1WcB3eJu} z0Z3FN7f0lPCL|UR>mWffqb7J{iR+Y=*>`a~8`BsXYCEqGMMoF9!>fmYSwvjOMW$g~ zrOeJbV|*;+NZSU8MTSkBY(w2J=ApeA+N(iA$q<FgNP1AJinZqJuDXD2@!Eu__V_s9hN!Jg~^KhE1 zFuS0+Zf}HyhT7Hs9TyZ8n>G_wJeG`Ftr{IAG;HCK2!1H|>09nTEBQ z5n%|f_GMB^l5;Uqdpm#6z`%?Q%wU{iPD+Ss{&Y2LhD1IPu4`KjYg0^>*i^Mxh5n); zsv+36Bj6w?ARrncAPQi~CB}#-HRlfj35k&Ezg{(j0*0m3v$2hl6I7D|pr!_P`JS6V zJ`ghzLy1KdJb`N(CuC;8TMR(Ss-nyJVlzOBSmzmv-u(@CK930VM&Hy zpk~}S1i!Z4g$GKKBTgMpc-j*-z(8sZjNs*qJi$q4G0TOTCI72l}pG)`m6lMm2_G3WF3x^3nvV#EkuR7@c&Zi-mh z%;$^7ag1YD0f)tO7^kL&TUbUC#K5fLk1%jm10FfMGRGewm32|%O#sxfGt_e z0En2Kw}Rez#SCCj6LCRnQ;vw(IWtoOF(VVa^sGBGRYq{^R8drsz&k>fzVF&uKaS%t z4FEu%5RoC|EJ!YAS)FghTxgicot5V5S5{jJ5g7j_AUDLWCiNs>dNfBClYoMa`zlP%>+r_ulixwOTB% zhYb;RU55a5LL+7_v@wlRrqJ}JxE`ktecR8qq$Igm3E+TC04IsZYdKFtjfb4_$jmh{ 
z0>}VV3;}`OzC@7-#JW;_HO8u{Sw&P0simj`yWGzfpiPQqV7|foX>x%LibtJwj&qVY zA__?lilmu{7#uogW<{~(%&Cpz0Mc~* zbPTWN!|6Nzcv>F4a?~&O55Dr$6?gVVfBGrZ`@zwOcXg3Fn)%ez3Ch&$i_pmsf20#2|ZZ=Yxz{3Icd`3YI_%qq?wp>So*Z zo3k};#vN-McBuV0x_L;6a5CHkaBasSV2y^hp#ckLqGhdcRe?o2fu;OJihaAie@z7Zd_cWInT@RK$~FZwx;fu4cQjZ4ZDqoVFO0PAmA6b>=(m; z-}8x*9L)RIiLM2ZwCPQ{^$HD^^0l|y8V_ug+f=j^@L;D>V~NwrD1Wet=B z?T0`hBJ;*~zBBE$*8l(iMUUjxVN1xsfo%UTsKWX0w-5(YwgoP13;g5R@c;y(YyS?o z0|GHqjNGN)Niz0lm}3CwC|GSo#H+y(CPy)WnEN^KK?8}WR3go;$pk%WUQJr7Tk5-^ z&)%f&M^Wvh4oBc^soaJ;+9w3odq`4Rw>1B zR?A@)yYx`ImBe+xXx&Hc>1#f{(cvM4UPCfRPZc9tL+-X5nd*Zs?o0Dhn)Vu?Wx|$= z1S(#0=uxL?V~iPM&9S?+&QCAj>-xN+$g`!C+}rNzNtyF981HkpAxkb=X{>aGR_1UC zn;oBhe)zVt^zfv?73GoyWee98=huvTaVE^xIJCW7v2O*f|26 z?_V3f{@1(lao4`t@yWmQ$N%iFeC`+j^56ccUw_)(_S4hv{U7o69QQ64BH9QA6D!Bb zuv0@xp+c8xjWR-H4J)Z7%??IxNQ4gR4IC8cj&Ds30En--8SYL55o2pJ2IP#bs_V>@ zfXQeIA&DFuxq;W#j9BL}dq#BG9Z6NeBZuQ*47q!Jb}|X>()q1?dh?^7qW6C7t6%4}AZh`O4RR`Evi2Pk!Y4{>+d455M+> zH`c@Ul@EOD4}Q<#|M}Pc@{{{l+mp+0y!ESZ{=}=l{*_<*FVk#<)JA|$MN31d(S`r{Q33;j^Q_c#aJB2v}9;d?+EO0y?hdzf87DMI9$wpoJKG#~S&F-FKbXOuI0%rrj0E>n~ z5djDZU~&2)0t1PVn_Wj1ATHAZcR#|@Vc62ZyE0Bz%iLlN{SaMi?gMb=?&EwAsj8a0 zNZ6T~IjDu@kDwG>d@f!lG**aLJp}|pa3Eqp z09C1_Cc_?elJVFybsI}zlp5U!A6h*~JE9vkvZJXfk&&MJ{&9ct&Y2#FbMqlO-VAvM zBP~!+1OiudBtQahD6K83y2J5ucQ^#z+S6u2>@!F=Xq=kj!gOSYI~y|bI6ZQULuUeW~9SPud2CvLqa4dRcfn5 zlv0pj#az*VAUVcT;WP?9w;ie)GY*-yGZg#y7wDE5G?GyEk9|*mwO~um8Qj z`|#KPk$mX8dalZ6)?ye!r6L3gJW*tD4In9Zvn67h8k2GYrP7j2dEcxntwID0fjA(s zJBiEhllSqT*>Vm`FV%mCkrM$1$Bhjaa&by&D6KnOozC^LPAgU8UZAgPgu#7`wQh%0 z&+eLnApw$z129H+hu)4Tb~x=< zs``N!@2To`d%Az}o&{pkn1hITH6n1=xoHR?_NCT?8zXURjf1aFb4t{XYX~=T+m}OC zTn`%#c@0vfcFg2n)y*9O4FDjftM#erf?K3Otzc`Q zqA_NsP)lnw?RL8>*P#W*VV(O@bUe%zY^;>q5&LYLTWSyqQ=KB_8qf7`ft$XBu7yOy z2^q>Pgs|(5nz_afc{Mp2C@?`4ym$~}SBnm#KmLkK7&E~=tt zR)LY2Qy`)Y$K!OET1>FotXnDSaO?K@`qcOP-P9(QdA;pEa&ni5N|^x}5kdenMsxrZ z6{s2$zaK%as!f`@a-=HkPQN2j(3VdI!H0EEzqIH7V~q1$YD1!kh_!(k8YTo1<)sz` zCPHKiZocTF(+~sqY9_NXb3ztF0(PMNVFG~2;X16tX&eheAkOY)K#@~3pKDW#-A3kd 
zHx*pWE6hv*hpu}vjfB=A1ITC%kuviO-2ep?P#hWvn284m6t_|jU57-W>ytJB@};dw zhhXj4Obdt;06O#7Nr2MC2&$)mZDG5C9@7Co@pzC65!K1Pb9iTeNO|bH2teAvt7giHI!J;FOOhSBFIn)!W2>bpFh{gt?$zjheL7em#Hv%~3;?D^ zPDbtw0*i+8`o3labq4@5pj6!m%&uEn%eW4q&M~C~MrBe5Wlrc+X9L6iL24_dw5F7E zHuKgR1>v}O9XbI=$pKNmBr@B+AxWiH0SdOx&tKYa#+M22_rT&7>54hL;E_ zg}f#a5ixf+wBHU86AC$~D!9eCM3c=#R7X8OJ8i8=ZHaPV?)!c-tS z&k&7}7fqv)1H>5K&ouTWq82g6kWw%+Fb5~MWpG69=H^Vn-J7~PhH(ABC+yG-our0o znp7dijF9It5-3;|X%5Pa9H2>A;R>uenkzIjcUSadZ2+vwwW=AV)W09}G}ji;Fhr(E z=xPnz#V|%JC9Xe?ls~uyaZ2~o##ns4a20LjUlmgo$N7}5{ z4MO##p&y$G(~&l(?ak-O8ghyv)09EUWf3A0DSLVozrdUT_J0XQtg9S0-7jz9_qz{@V*91ygrH7#>P|82DtI{35ra-W_c7$9N@ z6v8TI3FvGVRva5KV`e;vKHdQ#++2B?d-i}B(S16WX$Dh5EHR2(?ov!xrCA+0(&eLv z^=66`W3x@Q9L>}VPBXU1D^JE0Jg%4-HKe9&rfE~URb^C$+d zO=p90etw!$8plz^pBWF?n?7}`HUaF3T{8D{wSr%L`ToyOjI)e4`kb0Sf9J)0T&dbS zcKXC#ywV$t)6Jx0&0=?D>nRGimgYSn_G!gpaCq-VdE>SGV~yqi{Er^&e`&|3-_9R@ z;qLl;uEsP_EMgZye1i8ge5Ofd#Ct$IkDQVKD2&Kkh!Su7(FAqUsG=mkzac}gh}6Q@{f>oax~f${Lm{O_;c}FUcGqu*1RLpeINSHP1=T&^FQ{7 zKlA8~uRVSI-Jkv9*Pd?f-Ffw|KY99e@5A|O4**~L>esujdwlUUQjDA$stO@@AOF-# zXWNs*;qc)8d*JLAe&ch$vE6QOKX>Q;!~5en0)SQ};IDk;EB9Xd!0G8JGlodV<1wbJ zAluWl-yZ^{*3{Kgh-R|gp4`6i!LNSxt8Oxn$CI;dOu^l2o81)<%%Mp|TM{tQfrtqf zlQl7bxjUd!#F!SBsG~a(ETTwvH{+>J;ugBpTq$=Hc{lBQ>KBtQ5>B-cQMc+APQZd5 zYAVc(oQq5f4cV3sT)<`Dzy{zBsZ{}w8S^$Zx69qApzF=4O-?c7zUNZStg0-rgr_%G z?mm@~qNJ1vNM;o&`v-Y-BcvPm4(aTX+&SQxPNnWX%ZCD|x8(Gb=%7@Lf- z%NWT;43vnr>DD|?%rUGZ1oX&2@v-_S?!*AU>er1en!XWci zLjr5SnsQ)90$Cb@%*-LyIm~l?^ypD-3gBla>lAvkwySt{e&TLr9!n`XMCRxQPcI&H z{Xj9uSglHkyQ_(K(Oc$HU;FBB9ZUPsKk=t7_m4mOkA5zM z-M{@O{>Z0)`S1Om*MIVJANWkS)tJPr!@x|5R7@I*K?sf@+w;W9c31UqWHr$yZ0kGS zY3^1b2naSM1rBQ9z0>s1Sv~#Fdu6xZ_0J1a2bj8*9i1q|)kb%3Z-$PfI#^CI0yR@( z=3y&2SpxuKCpQ;2W+ud?82J4@#DA*A2u85&y1;bRO6PGdb%mYUzuIGu-)jv(vnYdD za|+x!T( zW(0CXV`E;LBF|!lTn$XA*hM&Je-P!Je0+3LcBkR$z5L0R-M}*kaCA_$*}xhZI+%-S zLz%r*)T*@r=x}m|h}fEodP5hfEJ*y0WpTaO9ns);c(d3Hnb)~ zm-`S*j;IBhPpx~$`+?{|>;X>4wtAtSbsDWzm+Naf9UUvMiy9pq}g 
zmeo3oCsb#quIsiZ>(lcyH#{6Ko?c!(xtzA!jfj|e3K^Vg)voIR!@8r6oS~q4L`c*T zubJa`@{)TO4*M8$&FfvGEIRaXyzyaYsE3_iJsH{rsWYTJY_?)-UW)_8o)BC*b~%6y zH&HLc;cHvGOtwQiuGR^&m2q&#<}IZxSdY^_cm1~Sd%mS!$7z~ttzsO4x;RMUNwsIIN7Egh`hCy2S&G;S8{cT zp;Lj;YR-Ax4>xY!x&P?xQfjF)BKAWhqIuR@C(yVAkGvlWbUhNl0tgP^xYX~@=%yi> z6r!|09AY-WlCsbBGH?!YAvut$F5soQ&>nurdOtE~v+Y#XUECee%*6!EECxVCga%Fs zfsmP0u{Nn81S9|xMI>uYtkxR)psJ2e0Rhn59gAad0`VCP`@YXPrO~bq2lHxzZq^hG z31dP;vG(L*@0{M}uPzCPA_f4A8dFx)Qkq%g=m^mD;PaTW0gui0$iR^)n2J+Y3goI7 z#Q_wX8_hBaK?)HT+A1rOJEA~~F#xp1S3v{-K<*G_?I8p%>T|7Vg2W8OL~0HoDJ5o{ zr&$DZ&dg{!5i_tldG%>3wKNNr5&*hA1Mr$Z+5Sr#n%xr;(9SDdp zxw;!zRRlvl*=$9uiF7e}U~y1D=TO^`7hTFiLIqSnbBK`N54}MIz(x87mXwoOE!F0L zn7g2A?s`MPO|?#yz?mq8*yTKAJgBPGd1`Y5fCaUkb01WgfZ6*#xI3FhC=IG2PD!u4 z&Q-d`r>S43a3nMAQ9ECQ{H8LC~F@B6*Zi zact%Cc$MO69fwUE=F$5Uu4MYhk+7y_fbLMuu(gKh%!~;kSW^Wu1mw8f5yEI3Az)oj z5V%utB0`xNG^Vt?&WM~$h$u!{FkN62BPe)lV(v=?U}nS-K-HS)^-k`3IPJyV8IWiR z*R>@89GTR`3>_Q*)Z82qF=d+PBWdM`6g(tpt%c-n7J~!S5ObZT)NyIMHkLk|1_)@_ zOs^5gX3Xq-lqRa`goFXfI2fR+>nd-}t(2KKh6wH+k}kY60CKn1-p4SYc|l;eX6|Ug z)t0QGlkH|$@jTX57gv4fs))5U-Sl~(*{S(zwOOrJ<6L*gs~j?j5t+Fv7z8qKAeMFj zfF*-xG0qw^H>=)a=nwOVG9b2QCHBObj7H87LWmKFur-it^*f1O2lM8fiAYpLBrs42 z*CY}LhXpyIj?terP8}99H#&erMRp=G7Boi!TB3=bxqIC~;M(qwh>UDr-JLi(^={7} z004jhNkl6kBVw zs3^@%C%4)q>^o9Jn@7S%krY*=FmuSgs(Wjt908#3JNM%}YY2haV**ei24GVW_Xg;W zXwvC2J9-v-6+(azvnc?$Nh^;nR2TyIAhi9a+@9P$ZJpOeSD0_`=5`HfzWNeiSZ&S} zVuOIWpA~_^JlsBh^B2SMCuMwk)o-uPK7I-J5_R!(%kz_bc#`(VW_{V-nYK6P6kdAs z7NmU$0(`UGy4O}W+tK}G1JwqP_VyR4>v|SwXZieI$A@vO(yj07#~;KO^5%HczjjBy znx>lLodK*IF5jy%b?a_*vW)_Mb*IU`k5Bnth<*l#2zlS0UR^x#zV9U{VUZC9DGt_U z5q$Cfd#KW4Ox=xSXK)FI7nE`H%{LlDNL@fO#>RrBo*9nwTUqaCeUDe@Y}@z3-O*NSieZZfT`>n5-(FpO zDGld-`v)uLe0HOgQ6P=T4xVWqs3a!^5O(*a6f#6aVho7LTz~h_(N)N3WFqcVxr=5! 
zI&l^&|3d1U|Gw+k1u;9a0Z27=4gY|vaWvrt?#Yn2d+5RozK?jH$C>Vh?Qg4 z4xL_-9Q#S!9F(QOWb5K>g@;u>tYnO+O^VU!L57MNa7=U{JRK4O!|}LBcPsPVvzsQ~ zug?LX*0Km{LI^}y>Xf=5La2~X2?J9{GUwx?LtI_aDQ#{em0*?nyQeol@aoq;`?&1~ zeltv8dwQ{(U--yHxZ_?SLrk08$|PK6-kOZV2Ooa)KfZu))#b=1B0^!TI%Y;MS17w$ zyHPGZS9f=7wM|+>qSb~sK!?d@vlV%_qAo{{OwkQ}fz~S+AOr_Sb7~_m&t=Dk*Zu-T z>CBk1HW9NBI5C5%HIptninFQCh7dAhh}$mRO1wWFOLKr2Qktc4%(El$EEba{rexFQ zrDr_3~fAwM6uJ7J|@bENuw^s9wkABPl{LcUO z4}Iwu{?^a`H@Cm*Ge7m~f4e-IpIq&4-MqQ(hg-L9nZa1*!{I2VdwaAyJhJNHb1mzXS{764_xMd2 zBM}B{A`4q=k#SWr^H3nv*4i|3yf`bzL0T(+A<(AGGkKN@b(U3s6TQ3IO~a64I_>)b z5o6R+4q`gQ3EJ4qFfbD%#*|WP4IsF4YgJWLKumMaeHTk94!|iULNRrT-QJ}UV@Q3BhnH^ z!4b&OEktlPL!*U$=ZKC7g($@|Z@Srm1y)yW-*#LlmT^!9_7{$Pm zh{(~+#y)1lD2Q6E+GNrkJSA$d{A)^!V-(3e0UvUOK-D0cCsT4WrU`HmBdM)56Cor; zboW3M*I2B%sWtJ#B|Ald_2%U10ap;CrLp*ddtR^9_Ca?ayZr3E@+Q@GjJE(Rnbs5q z0SarQXK%bNgg{6pP}`N0ueY0vr&pyELZ3xq@T;q<+H&7_2&|&jgb9d()$A6!&DPwn zt`2~qj~$rC?4`-!@x>BM$-IQ0rbny8x#dS4)ATicJHEaBIOo&TWYZp3z#ftsLO>27 zV`O5XoRam7NlO}QsJn1@hYnw7J{F}-KJS_Cyl{H|-KTHA`bK|q_{fL8{lEH~f9=Jy z_^M$*A(APJIK$$Z;vLTwQqc`BuYP;_-GgibAnUuq%r6fOADMrS@ z9F!B0NmE1a;9oA)!$0Zz?{4Crb$b&Y#4K5t`_SsK*qHL<(?PgrW-|rw#6c(sz^L4- zxcPEg*%Zt`+@$5y{eB77269RF9;FXnhPCu3C+rm%feAArGO#)mcR^z@L9tLUS472F z&>Z$x$E6lv=157101RCT5fBL7$<55)pB|AT;4o}eTWe|t2oaID;L>lu@%;Y)*VZ3D zzk2VNu;lIO<}~J_^OnWhn~EXc@n3Tw$^4T9%Byx?>F|6@FFk5LKKY7IS6!kMxbZ6W?>PlGB5Xd4m%@#MYJL!gFtjF;{Xd*QL22AbPK;aTYsk@vHxno;NNP{Cl@bzJ% zP?s0DsY6j`>|&o-0f=KSF7sF&5>aT1F;@|5rmbl}FucW+q2O zQwF+jvGM&Vf9^QX)admw0-UkDn9Ljz*P`NtB*mKA8JbO`h z5jjj#>*7g79gmYVnI?6Y5K_Vf!CNV67MW5?@3-%{>ig|ofF&USh^QG7MidIr$sDzH zwXjqV>&8qu21E!7ArLx$;GGnblgW)ZAZlDNW6gnIZxs zz_rJM7FsYE;*#Uy4HyH(@QiXSW@dsCnSsDP@sK$6X%ne;_X297C>EGOt+fWub1AFM zu!u_*na*NiL`1^mElI6X1i1(B@mN=@o}(bM2q4Cgx>D*q2~r5mrGXhTcZeuW0NgDQ zF<<~hBq2-yfB?wU#K5D9phFHhgbaJgsbpX#24rw^bPzyDU2sQO`uHBf zIj4a;np#z}CSpK@%-}inW)0OSL*GK;tM)3Cs-x#MqE4G&~c>ks$fz^F|8tE>f=l(AQYL`U9N2gFo<+{ zxPpw=6pa{}`F)<2HtM<#A&Jz|lnfB~8Zhxbc~~q^TrBJ=1j42&YQSU~+z2eXsJjCL 
z7=WAO_0PwUr(yu;9+z*JnGh|dv?%vgm6pLC5-CO=WGVk(X9JPem!F4=^ z8mN)9rSndRVcF+15-*knqNSY7oY7E=Cf>NU$bCnrf+fKQD-wj5QOaoU=$tqVea}F< z!wz^MFHi`%wN`6efWt)4T+IoUZTW)SK`jR8y6m2QM?Nr^v`ptmd#Vf?HX{zZwmJA& z-ac1vyi!f$kcg$2ajUV7!& zj^Fd}zh0wH+NA41VIEuMytllWUY&=#hxFn7_9MFf&@^lgT-*86v+?yYo$caX>*HI< z!8XUko8ce$GUaPOr7!wN`tJ1_fQ z9mnFW-h{VT`KjUC##7%tN3=E90FB5}zVX5fpZuQh`O(LZAHHz!rT(i~tG@Zp>%D{# zWu&d<=P2B0b7)szzhUpJF5do@?$M3%#%~^8-aq&4T2{$-XQC5!$ep7{sR=X~L??v^ zwB%7Op;?GX2)s-mml^6mRu`BUVh9*nDnoS+isIU}-`$S=`-LX9xrxE50*ZBFahZwx z^#&;{AIc2K4G3Hn90Ay?#D6~f(%i$Y0Q>0n06jov&P&2WG-C@yy{k^W=c*xHBt@P zF~r!@#*z+5x*8_e)D2~9Wer!P-EmJ2d47ufTDz@u=ZrWQHNG|K7*KdJY!-;{c(tAn z$6GJXd*xXvubxzxtx;HoIB1G&GaMesadaMZIIq%=^|52K0hH^`j)bX?XKU2KE^9nh zYft7_>E08#BO!M9WCagz=?(x)>UQ0?TD!hWsCVw(+Eso0_;G73#_T346mw7P>&U$d zk~{f`1-Lrd+@N0Qjk9qzcrp!6?dtG@-~A_^KAxU@@uzP+7dGc7Kk_I4gFp0#KJ$P2 zEB~**`RX^IKj9~DH$3h1xb}9a$r7*6w@+<*etgtz+Fq;ZihYe*qI>776{Khxb!8gS zDgsA{*urHuu{vV#j&q0`@$Ar~8eWhZgAf?Htt)xg*J*cvv>}^?!r`%Ucn9Yr zr{m_kZhz{#e*W?|fA+un&;R{ze&Ij-Gyl69;oZk??rHw)&;Q~Vzx;(exALY-zwp!l z@E`r^-;Q9P_^$7LbouJ+H-7aue=chMt-t=ppZEiR?t8!g2hMM-zUTY?;OVUwY}`G# zc=YQRKP?WISNk%zr%x_#-n_ZpobHF?lZ&V3V;+)P*&TOZ{NisteDGfG!ZaV=c~WSYP(%OdHV42IC zz^JO=6#$8Gai}aXl|>M20OZgik>kl~9f3Ei_3_Yh7mz3f>bd~{S`|cVt(ig2JyUXT z2vp^OoKnd1Tm!LzXHGE%O6;PgX;Tr=oTAu$DVosV-ZB9dc5cc$2^ML=Yl z=tmn6~ONQ?Lo8R~bm+AiHq=|cY^~t0CmEIhT z*WCLiaiSQ6Wwc2|9FT#tj3H6A`8XFx&aq30hUmFNwJCQP5Rqh_XVKL4B#KTo#8~Qn zu4TO*I2j^3%pvJ`oFnBS4UqylBmiFx3(JaA5Rq{%?#Rq!c6oJ`R((j(z`MRP027(# zr$pdjn@x`a$9amGH~|tGwAR!(Frt$!k+i+Tgkg0mXY!@+sE%RU&kT>=Nb6G$i9&3g zDtE2h0-rb;z@Upal)({H3{`1~iO9iw)xU^&M?RBNquQoF_!cdJaz0n}Vnmp;0unC{)XH_vkn1j!N6n!2MIgh&q`-@h4P zbF!*d=5lZ$a;V3;IH2iz2Eog#|2_S%@xI!znTjz|v-L44B;! 
z!2^ah1ZBqEd+_irJd>ipq5 zPigP{s?E6@Qn$K)|9*^dI$XsV``-6s+3l`Ys}(cXIuVhHn^~Q%l!h8^9{T5H`(hbx z9UM){J)SXs&Tr{{4R!#t@emx|H&2#A zI?j)#X`1I*L=aL{GieS}=wfT-WW6EQ^$=2*L}v=Rzle~}Cc3NX77sTg2CbzWkJDiv zH%JjZqV&O;Oeuswrfy7O3joEy!H}z~^+c?bO&7^`gS;N#ogCKt)E^Hwt-z}9J-~$G z9S5y9L6yj@A?E#50I5GaZ6FtQ^vR~Z^urc?Z4o9-?_C_m5%SnUn8Qu$`wpOxcVjUQ zAxIT8Mnc3$j5!A+lM1WDTw6*Vcu;HT;;2AM6rC_SGh-lNLNEicC5{&t@v!-V#dHOT z%FGlg0Re!Dt4eFSO{tjz05LD1&RT10ZJjs!%SWdt>)fv%K6o4=J^#W6{5X#Gep^4L zShbciskRV^h|FtD9DzlQ2wH0JnijA_$F82%S(=nus7kmnl$uzngp=E@T3R z%(z&OmgshO4;W(PMKrtYNvaB>!h*Z>$Q-2A`B*2Nfv`<8Fd3pXBZI#0>s%%&G3OM! zBIViWLUT+6PT(<;gB{1G%rRwjZ!xl{J5;7zi24F;l87Bt<$j%8Uxte<*bOb{k z@-Sml_2V=X1U0XsV9mhHXz3<85fG><(n_>BIA8z(b#-?n-jD@Zu)4tnrZ$N~w;C=% zh{#k?4b8PxQ)T9>f%(+@dBV4rN%+DvIIT3w22?g)?^%m4{|S6ZE0Pn2V)s*k55 z$9Q(<*5%csz?p%`8(9ghMvo!H#>p&H(M6&TKxF0sb2lF9sAkAyW`wxnfC!z3_30U* zovv5RI9lDC@p_=CsK|1~#u%Bo%|%3{VF48papOQSWWrF}tO97x;hG$~{4Zv}%z+79 zz<~@S1}7(s!=@7vcWu|KW@1Jn;>`0rFPv>rT`&*sUgq&xveWxvxsG6JO;lWMxp5X3 z0U^6dTt19cM2yShLYiT!RmT0e0J@jptkzW9QM9B;D&?viyS!De90Frp$OeI8jOm(| zJdFaO%!(Kgf;9tx5L1jqu7}-m5yCDWriJ0G+7K)TGWA;L#VVm{%plU3h(jRP5JTT} zA~KGXs%}tSQENw(IDJZZIVzdy2V63ew z09Zr-n(8$ZeuWmgG){-Kgc5;vW-GLuufLGibV0tmd##OA3?U=}5m8y%+{~Ox5d=4O z2O~!(BmzWMuLiad>J~8#FuQx%O&k|wi>YcP7IIW03d?z2MyZQM#0l*h%|h-7h(!1- z-z%u7d4m9vqKZ$owAR+!%{Y#=Rt_n~2)u+^liQMoXztdUsI`<*%E6m3`;Y>A6K#wJ z?qK?4Yyi-80m(s(4Ku~g^DM0&hEhs1$+^QwXoyOsR#oNL#h97mvkW~gv*nx@biAsn zTdj4T>XY)8P)+OeIleU0C#8Eua@6g0GIT>@m(=&@UR{|qJF%nIMrPQW*E#iFYprZQ zxY?dGFc$zOG3C4^1Z@)+4;V2g2f;S^49XF^sawV-ys9(U^!DlTV)KCD*6F&v7h+c| zclUlk;`4_m--xrlK_5BGhk7ilnjPi``BpjqGz3#UK3(0o81C}=Y^6fSt4d_syKkJn zvYI9NRimgH*Ae6PtmLOz`bnoGEd#Wi(78{8^u5F+aAZ@knh*Pk<}?7TNGBQR8G{Bj zaws)&<1TI6n@i{L<6S50>;?OQ|or?fqZ=@ZEu6<(&WY6&3xF9_Ue`iI2jh&FaWos48fmK6;0Q|eV2Y$cC0||-9DIgFj zqZ0zS0xY?Zfno6_0#I-O;|0!){JvW7A1{9bV2;TU98eJq$y}=!A8R#4H)*9{D+vL|qwJ(7r_;0J@zBSigH9k=rZSdLtCq{zvT(vC>2QcJBWb%o`SyjH zO6I*!%Ij@8Ki$uVbIRE+FRP}!%^@7M*{Y-C;pYCzT_t7dZwUqz@4-OIQAUj+ciTX* 
z5;wPL-m#_Alk>7ZOLh^em_c6kiVmSQRh}>Pa#9QJy0}n$NQVd z7aYS7c7s1MI{U3d=Ka;_SzoBs-BT_T-Q2!S-}+P@?c<$3hP(3hGyhTwmbW(!IDGZ$ z?dQAA(8=sG5sw}o0?bNxXIxqHu^+pso;_FTMZ$}?>5uV}4sXwG9PDP-^S-?Hi@)@D zkH^E^mp)1t`I=uaZpZd0!UcOC9laes_3IC=4%YQcNvF~4e zc{{%L+H2EzI6Xam@5zJ1;jlefZ_m~jkM~{9^Hjh5#n&l>uFI=6K6m%#B=+>_)BSk) z^wo!(%_cHzPgYIF7hd@Ag9i`B;||e3{GkuM@y2VlP4C@*?~nfAzr8y?e)aX=dhVsO z2Tv~Ed-CXyf9D@uZ|*#J_sttG-ue6g@V_;KKl%rL?9t=5fALp-;p4A-=MR4S2mbos z`fDC6B%+Yqzy(#^Kpc#j$za)|5F#@p+hV>(T&#}1+~Q-A)TLSH3Q!d|v!W*IGl5qT zv#GS!8iy29cK6YC1}0T=7Xj+y%PR&9xu05@6kF4->lVaN&Pm7dP%ATY@PJH2kpm(g zOYMyyy|`Bt+oL%O_{ygaaQ000t#Kp+kwxT>l$QY2DQLZqI1 zRBu}ecrRUD?EcQZs}xr;JJVQKt7%$&%jepspTu+IO=$b7)b*ihLDU%6;-*xP%iMoB znhvO97We4Gy03fmX0Cv&2yVd4*^KIu=AFj0Tr&({3uX{8aiAvP?A#fExd%2wy_%nD zzeyBvZrHdF$yYw>T)ja9mT?s#G8D>|n3=P+9^*7u!jy(JGSylE03!3%J2WGf-nPjy z#Rej}8y^qTu2|f~o7m&oqqm0n)!R>BqqF9lTYS*2JPqy@POcCTj;Gi}k{$Okcy&Dp z0uC!oYsazem^xLETF4^?F{`^K`B;gFF`0QFf=r8+ic-3nZXs%&t}apJ7z0B%z1<#oxyY)hZ+fZEot_Rtzjb)&vYnk`rC8YqitEQ~>G8&?KUqKd z(suXijW`;XV*)<;K(1FQAY>N6hhKg4_S^5?xcRxKUw-)R*S?asxAXcmp?~t_7k~5z z|LE$umww_WfA;TPer7no(X@m^g_G6j?KXu5$AgC0pQF@5OiUq?+*k={f?2$#*&T^V zo0Ki~=D`1&hRi@{>dbs>00RZCNAxr@k{W<7yC6gdgr)@upkcxcu)Mz*oEKxlzw#P; zPfk)(odE_LDq03>4F8%=2cUpvPGog$JO+zSY8HW+Y60sMoWxLr1`EtqBH*+31mkbZ z+!w#G9dq)v!h3{{ z{R$+QK6ZX_s`DnOh|S>?Vi#b9Sc}oQ@BHMIw?k$hFE-yV@n&aoWS;nL2xE|DWXz4V zb6Pj2;B!KSmXl{-Yf?=yvSkY5hHkFV5Ru6dAVj?GI>9w(6|cp9M$TcOP&zWYH1oNO zAy)Aq(=o3@$eYd4`kVH&e;RAo?}}btJ+xw(!?0P6wOx$U#x@>Qd}K_SVwv@5tx#pe z{ply<_V*PSS}QRPT?n}~0fp(vu1{AV?^flx`K>EIGOj8GURi{?-3|}-w;#bK#CJ}3 zK0bb$;r{ucx9@Ge-@yJb#iVWj^z6GQzS}A=5wHZ^gieZt2O=P4lQ5rgt7s|6BV2eS{Bvz3ReLKaero)5cl@pd9q3auMF_Ray_0%KsR)&v>1 zrz+1E$Pqf5Z&ojH_02@vt6FZKhBevoFd;+=c`j0G&6zn;ohNB=wdzD`?D`Fp`7wkL zLp~gKckcMpCs)UDgV0534hf9D!u$EA;26l+a=Q44u5GMedM|2I-)KbC-Tuov} z{dTBVW2=%<3Y_M7R^|}mQPfYM#F=Ssvp1`5Wl2n8(8ZJykqAladewQe{l#NP<``!ePMN6_ zkp%=g&etsoRBR?8cg$=itxBD1*QdT8ptc-1FiWXGiHJ&>2s@&%@bDJJLn#FSICBOT 
zH!*4IL==c45;bcIO{yaxVFfd{s$z9Kw|aKEHnZb6qBKN@7&w5UJCT7emU?O|hd8fl}8YqN$3gtB~>{Ja(xm@;uL`!pv?h#>f$DGD|FhtBRQc zLWk5ljAP*tatMx~+G2>L>0j1Llrd}F#-*#V-Cay(_zS= zi@CK{%bY?OVnm~f`UD98nm}d05NB{s!Kt6k5+%+i?qXIE)D6Ia5dffy0~0YJE+%j! z2Y``%YCDt3uKEU{6=owCXFv=gES-AQw#>sga&%Q4LkvUe++9Scxv1*mcM&r{Pkmy> zYLkHhq<+)Svxx{XYcmmn)=HzCbKnS!+N3q@QcfxLU7sMAQq+u*MMRq-lQ~|y1d!AW z5tx^B4+aM$Acm!32Z%-My6fko^qm7H5sAS>qd}X!QKAOoZ~DRSbpFaywHVgxobm4?jb2umj$jjk1w+M2iKt+ng@ z03s2ENYV@)6LP}PpZn`4YI#ZpSu z>Y}BT^?EI>0fHbHnu4N1m-E^7oXVB|uzB!^Bxx0w~K+FjN$GM*Nn^~JX z7VwaB)i#%@?>ehfqSR`0WTe1A>cBvaDg&Ydh{qT?Qb=)sJSORxqN-MfW1CAUM8S}E z<7DP(SnV&b&d$yV43SDHs+v;@A-GyVio^_zj@)4_HqBQ6NW>IDg~W@57MTT{1(4m zk);YaFh^uSi+K|fMCK6`L+BW5vNrn7IL{=O$+HQh&HijH+m>P2!@1=9V#j&n)&6U| zJI=aG_|nsu>PDUr69x!5ZBA<7o!<7$Ct1V%l#V4jS`NoLMmu8YYQJrLAfR~^ss_8& z@$__VVJ;U~N+nGx3ixgu*S6n;UZ^V0OkIWz$0fW$N%ako$H@d-X2WG^~h zLq=K{coVgM^5oG+KJt+t{>~q{bL%DHTY26;c=GytkKR__-|xDLe{gr}+b_><2Zm&0 z(~Dtz?RopkZ+_r^{kiVEtnM81dQ0*7*h=vp4@cSpfV(b?x_}O@;2IY8$4 zL+pl*02Z&|NQ@AW2{AYlno$buWatnXOr?RgW=;A2yE$Z9xvryii77j5W-xeERusDs zAmvuqRSU`(XSIvh!kwGAIUz8T0~}lc=Pn&JwyyJ#7?mjHv?9;JT5E92mhT z;3k9BwrT1$kDh@!cLjMi$oonmcnZdDW0{W!@066*bMw?wNGr~&Ze;H6;?wpy!3}gi z-OOMf_u<_uai_fx8?`69Y0MTjXV$N-xS#vm)8-}^RP)(i*we*J`c`@J&G)A6-S>`% zo41sYlv1->Wc6-8KHs&mT*}Zf)B)yIJsis$mxsfFPj7$VmbVTcKYH}=>o@qU9gnY$ zyZ`9V{-@*9H~-2{|MkE5*8Z!=1=^|;pV63@_3*8 zR(HqiDAL^BDKtk9E{-~FvWcw^Ip!aw-AfAD|(%l~^n)K^~q z)Q|khAMMYFzxr4I>aFK)fA+J#^7cD#ym;?Um1(`=P|rVb?*kwE*e8DDH$L}`*It_s zyCH5vMP?Q;W*LUz?Ch+JMO9a;)mOj%8{;_Md-0`|atPt{*G-?8p#p6e$*_Q^+&&q`_E{aru!seFWH~6e&zq_VXy}h*;ZkIn600vK1b zV~la0=YYIgtwaRSVuB_ka_Cm6HJRrTkTDc=ppM*z@F0lc+V zT3s4A9K&01ac>ylD94FYpSENyQsT)fD8yEg8P^PDDi=-F$5Rd(!nLVLZ4i3O{^(LDUK_GVMLOG$br*dEqYT_J1 z3cygy?BE<3?Hc0Xh=k~7&8*g{Xpy=_?(fproxz!)88DbLBcL)mD0#?8zzl9+-c;0# zi70SJnw#;aj~D>WM57y61yNA&s_IQk8ALmbk~>Lg>x|VxsA)fG-|e>F@uy}3v!T@0HclX@GEF-3TWE;KVjk1>W2wbkWMmbLs| zRU;CGxOkR_Zc*GcFgLRlVoVGG(xkK^rogneZ{qBM11B;FjRsMD?TWUmS3h)od$j&I 
z`w>rcb&tFKi>T@9vjf#MKZ*5nZYlsCIx|R|U1{BKt}YKJCuiHu+1J1R`puhXyjp+i zJHP+M4}EHP@%ZO|=I=I;C$C-HFZtUJzxZJMAe?_><{ljL`n>IG!J0CQAW#eu110bf zD6R8`jTvGjY#@##-~z6+M3DWP^>5a{?#nPZ6EK=O8UllYJ6Lc)aHpjm4%fEY{~w#5 z-{~ztCUA$4O++0`MAa2s)LUzwPKgwC7CQu}%w)6)!MHrVb@`<)-T3xC;4r;sd}{g3 z4p-mzjsM$8sFTxWy6K@GSf+}88(DO1g$nP4XNw8u2^Yc4(@nOFe13tYc2%O<e5 z^+}+cp&cQyU%vCfAD;O;+8R$bn-^|u8;9LxcDl+W=Rnk-Pwut&!@a&|clPJs>YOoj zPa%frNUEiP7wgtnlJ1mA4}Q#ZqP$X{YpfWCuCHKC-AlvfPQjUkBZP4CQk34MwqbqC z?Mo3t*9rE;)9w95osTkJVM>(g)C}ubSDPV6L&Vn95s;{<9FNBoK}`@Bg(4EsFhs&I zzIW*^A~4Stk+-X1ZlW?fS&qzvNDy<%nNv)N$m`zST~r*%VcmDDu26O9%mNam z#6*4H#~5ot%vj9`eUZp@4Th_EOm zL}Z?4HM_jJ61N!RYPC{rV(RX5nU~xcB0{D`W=nv5>H%>Hgc^o{i0a&8jQepCB1R&D z7(xzl5$Ep7%!C1ni4d102x|>POewj?_jhhfy})RI?C$2q%x-R`;Td$A=o*m99L(L^ zYONi0%ayQb+X0}?WtIBUsxmH0(#RokY^_mX3d}J8fGr9-_eD(8#9J+;6y^o8no>#_ zAZ|Nt+UAA|9$;!!YUR~H%(Iy?#Z`}p#ofKFx7%gU=?Fw2khj+6S`In`(6gL(SG`t) zC7>l9HUu^^)dr5l08FlnI@?v%{5lqYwOZ2H=owM_I;x90D$f@4f(KvjYjsx#!|ORZ zAucA`Fim9~Co>B{nPWoJ9al24OLrI3LjFZFC|txPjB54l?o@9g}XLYP|}a!6el zRmD`L0;UjxNGqkOdg4Gtun@(U;RJw+ySL?@TR!AcM-Wr3wN2yl370QZRS`IZ>kG0= zjBY7J6P>0Bh&YBWMG#gnFdJfyF~+cHD)H|527ubE07%g}=FrE>afh|3w|TAzArN!G zPk!Q4Z@u-6`;WdJle3cnuKO)hRGx;lLg}4gb-F!HBQzg2D*~!hUH8K@P2)5#cRGf! 
z+zx%;ms*=#k4%|ZM4moX0X{bdIL&^~V44lkLnXvDV$7AFWkR#>&I8rsk5Sq<(jw#LaVXmd`djMlbazH?9 z_3Z2f(M2@Im{L+}90-@DXW+mA5y5P(jUa>&oUlkMr4Z+s^VYc58Um^*Gb_TycqpX| z-GZdX5CWM2lQ|J%Urz~nCJljZ<*k@%)8HIe-Fcv^=}AC4OWXIJ>>iKvw!ih2`m$|r z$@T?5|Ddi;)r?_=HZdlgXK&&G+t5*$5Mod?3g{G54jI`;EwciLt_KqUbi@=>N=a&S zS3sr!K!nDK3>*yH5!97Vqcz2rRf7NlP+%iuJ?m_hiHIwN1$tv7#MZA8a+Q%I#;$AT z^>~HApvvwZ2$==EA%JiUOyjJIfHC$x##Bszj5?&pd#V%7b8_#{-6xFc#=7$v;wBTM zRz|J|1oM8N;qJ+uTX>x4;vrwX)0a0k@Ms;65!x8J7ElrcC62Wnzx9)!_>cd?A75== z`10p|_Q~P?*IxbH*T4R2PoKP9`)`4>AA5T9+eW)Ofh&2a(Jr3jw+U{2^#ebyv95CJ ziMMtfj5-N3$4BX8(0^0t{`UGO9uF8^%gGRconT2m1z3kf;7EqPnC}pr0l8Ttq7Vbe z$Ox)^8+vtvNdv|iXjiI(MPv-75#Yw{u?V|!-&4;)O7BDJy8C{6eR{J{SK#0}c|a3r z`{IXP*ql}4@V#AP>|b2AE|;ju|3S zL`+?bkinYn#|4RODHj$-Ei?$zn1N`O9er93%aynP(o ziEmsFr6^@l8-in0#bYeH{yo28v3qRW>BD!@$(`xy0f`aDb9>{|9!i_~v?fRxcBUpg zhi<&9-qAEZ{Ia(4%K0nb`S1Mr3m^V?pnU7>_VL{(|My?{8?UcF`t-R!8;7o+-Vvp$ zXZ7&z=GC9R`Q9(!#xEcKEqm{0;@$69-}%;wHgOenRuls;QnzN%07^3{Qs>$Vgg^|A zNEm~s96TTdWHtjpa{$AC@l7LqHfHP0%}ogekpKl%00DptXrU$&)z<2LH^};fhcf@v zjgS58-+1+OdII10nQy(L?-d?C^yj#=^wDpJ)AJ`ZzVm4RUE%)i(+B-<_S)6E|K9)d zPyf*${v*$IH@^J(=ez#aU-%#YC-NHk;eGZ%dKaUVCk~ z+dcon?K`*5-hJoooclUWs`k0x__gQnJZE0tdiNdkIt*P5o$J#wJ^F_~`!`>I{$qFU zZ2rg(|B>^P=NQr}FMo8qxv|-v-@E(4Ter@~y8o@O{#wkmPz6?-9#{ZfUG>^ljijd9 z)UE^g7SHuUTY9$Zl_met)fNniiqEE?#;imHWQZ7uDA6kB)>?|G9LKUdEI+gF`?F!y zr7We690c7BJdhDl^=ognUWdTC`#4W!obu3%TLlX_)>1F_my!5t=c(&e^|)-~hBYFZ z3MC%rB8Ow&_bH`$o?B~O*O^()*~|h*ZOzPvVGxmMz9=0_YvWwe2oVm4nd3Nlf&gP& z<#ut^oqzM}+dja*i77M{0BcH)5JEtv)OX0=^7ZaRhrHtOhEA^?q$JqHnIH2-5XpYb z>CEx8G%vA?sotd_8Hk8B3B;6n$+&c|lu{{$i9<*sgf0&$rQ>)NLI83K49g_I0RTdZ zAp}5dO+Yn;5ON;6ez3mQYO3zETCIm}1ptS`AqHYx&j+W+7wi7@6OTK7F(G7Zpr%?O zWNM8#b<`oiv0b7$5_v#kKn{RGF)Trk3r51Oqa++0JI3XDB3$ADSj1JW+pJ8XNi{JB zAS86x+L|9NMGlEmbRumC2!Wu~8khl%IZ{ToR!VXAusQ<Pi1}B+Y z3UqpMK92LZefrZk&Tl>c-0ffZg`Yd_#+ROZ>G8X7y!y4@+&_M(hbwna;GZ~IpB-QQ zDI1@hzrcxlytMLo+8Os}Es7a{X}j(-p+Ko)m9_$BM|1}TR{{kP@boYELH}>ozgfQ@ zOOcf$cu-djz!(ez$l|yKBz(W)PY(Ed^STky-QFJ`AmS3?J2e;arN?1r{>;D!(k!LG 
zG(+Dk)4adAfAv%2ubmyfKq?PTUWC=1W~T~y81HZ1{^c9`14#A$g zd3H7LUE6W4BEpdn5jpnj&3tG?xY?c{IuW(DM0ny0_fAWhQy+5dOPgBLGEU4)fe6W6 z+#H$Bz1C`G?Kmyw&cwl+##wcDb@k+_-?-t9?$@1nGXwCxk9BG(B_a~3X4Z8>N-|B; z5>~#v9GBM7vm71+faUjy*biAmrui5{zdps)Caia7%LpB*W@s9Afl8)2;2?*<<-7ta`*k=uxx-^krXK8bh28nR;y(f zu@DPtYs_3qX{LzYT5FBW3^*)}+U5Q#wYqz4ZQT#ch-Dl{ZF2Mc#yrnM%)6^YU=|m2 zQa4~)b^UHUCR*$ZzI@Ni4VzMOL=iC)W?qg+Ra>hfe>ywcfI>RbUJ$FON-iMQvK4*=#Y!5IID)Cet*TICDH4#>h)d4Rl?%93299 zyLN=As zVgYZhEJtc)kvK*+i=`A1U$(@3pA`iFRIj`C?&!_{a5#+1kRSw7F)>Yv-2h>+Ph~`( z=TTJybJr(#59zvfzMPsRD(8L6J2NftVKWn{Az*6_073|nnPOZ9*eP)8oHP?l6{<(W zSX(6r2q6G2`dD&e6K!qgXr;|!TBL!CIGT&8G%*hN{&W+FQPEu&p2sz57IC?+yRKVC zN6!Ms5SdN2v=%7>17V2ax+=I<qsFleG~I@FvZ1-P-j zJKZQf6$j!xEUpJH4M>WkCL%%OQZ?(0G%cK1H@COVBH0OXi_p%Pzn=C3zA z%#47{9GM6)rId1Jj{A#C1ROY~l$d#z(o8XDa%RRDqnlQ(0LIKIaGBcq>U8M(hYufC zpQSXhnG+9TeYQSbZ&!Kf599vf!-uuiu1}0%9LMc;y<8GGg~Qw4;D*t6??OVh*CBDxMgT7N!M0!#*V@M2I#!aC6?oZJ$@Q$Q%lH z@&41do%(&XXctcj?%PX`w=Z&kdw+A&uTDL$8)XpyGK#Tb%NGYChZrC*0=Z&i*MP}M z6%>>hG4S5IO%cUoRW5C` zBG$k>G;oK8jH%Zl0>&*9!i1w=s`0f|K@_E3KZhZ8e)sH;B`JLu(ukl<$Jl?UP z3{M}8PVwk`lFujzgP;{O2k5~QzyNUk{VqZNC0hS~xCa0L2n+zge>>9GR1g8&6&(@D z0nA)2LR_AuG zW4(e}SN|XO{w(OW?Yi&7Ml+kW*6zOMboZQ#+o2~K0_=N{k|>I@mB_MEQsl&rQ+6Jl zN>x&+%0nLWlH?^xRg!X*Q&QrhV>u;7ZG{#k(h(aqAPImV0bJnX0vESC-Pi7BHM1FG z z19=ZBuHRQ+QgCHr77so{Gc1!@NNu)BhiRD4~P3|*i$Hn*;uSI_KaS>KiA zt*HtT6Q8PJ*nWGJuUgF8W!qi)!5yq?n&21Hp_ru*_vJ7`e(lc7-}{3<>uZ9uHO+48q_hE|31{=Fk7{P!MMzfB;avBX_h-8C`9B*-tw_4vQ5qaM&E0PvQ4|>bLUy%i}mc{Eg_|_OJh9 z>i2m6Wx4(RpD!0bcKd_Tfa>0_j(+`%?|T33<8}PQ-}jln^q2pSCwFfxmaC>L7mL;3 z{iSdI%zD>Nt+~?%i%< z_w?-Hg9rCihllTO&mKPpb)Wj=XKtO`-K>v4^x8-M=HK{B+rut$ce;FVzC8<_E0&X9 zYjU%aViq3E!B50aO$nI@nc0VcwW=W!lu`)64A5v!MME@n%nl`&8O5mvCK=Fqa6qVF zQ%*G(=iIE=y7R)lTg&zN_UbSWI_r*9onsMUyh%u&5&Sp7Yen*uaGF9JQYzIBb51f5 z4-BJK&&06_#tYZBu1R$`8~5`+J5JNQCtGyO5CWKv<7f)wI1&Ie55oWe!~2J3)i!Hi zKuXovc0@E?@2S1B(!-M5fUMTso*un${ws-CME3nbq}1$;3$RbqXi)mN$;($orMdb- 
zDDPuzgwe}I!@3ai+>Q<>O1=(9B|F23=y?OCloDfHE;Sj3)dqt^|p&%j}*IKF8$TZu)B^Q<2co!(dR+za`D5Z!LM0DO`6%YcT zBm%@H)x1x)ma0Gkp43nwXWjx3R}^RJa_{@Pc;Wc%_0PRchL67bq3u_{$32Ys>N<5@ z$h)Cdb4RNsft6`8$3oseJOA*_A<5=+!5(4O{>$| z3f9Ru4CA2``|Z#F!%u(eGgsG7Mbk8mPA)F5>zJ*Uv|L0Z8Ly}QDxax;fpdHI?MHAc zZI(?PR#pz2%VE#pQB`d;s6IIF5KShvV&HFlV?YMaFoWM!ReX2+i$Atnw^~MVW~l%) z;3_GgHVQ=OfRN^W?%cWiPPgE{Fy?H%=R?3$t(uP6o=y0Yw|=^sbBFVU3s-|v3%vdJD(bs zA6cZPo8ZuP7`l@@C2X==N^q3}f>X$bz}^W!&e?!N4CuV5Ai3aMtvBjOMj`+ctx{Bk zoQG$-vgfS=GeaOm14aS_2MlOZ0l=6=0$FOQ803k%C(Zqf#S7oY?>U+tF5vU4?N@^w zn0a)5oAI_W+9YhHd9mk@r1sUZU7(lP@a^N(^5`^4J{-=+Ltl1705>AVs75g{T&e{9 z(5vx_i;@Mq?Jyp-E*q4zH&&vP>;3q&pO)>>>8&-4a`E)-p}CW9|M>0nG@bw2;p8XZ z+kX0Y-k+4Vp*U}~lpr2-lt`&*Ft!VaE3JdM47KI!i_9nc#S5XHN7-rc1DJKUc6=YR zy4t%kF+{U4N{P?`R&z-$qZBIHm8w-Qv~l@U-n~_HygXZ`%AzzLBxtVc7u^x^czLm# z_SaH|!*%G6yOfhjZQ2-ORJ3s%**oWgYE{9M$D7WRApjhmEDnPJK&`cu8iHp=?~#L( zs+x-nOa!&of)$z9?PAIai8y#Qz@py!u4&JPksNvN=amuBm>CdfO$HHl-Llqvb#=we zbH9A-F)H zqmKE2WX_>uS~RWFOdyfy{9I?xxz3pH`Gw}L06gP(#29DgH~{!2Iu~-zIcM)0M60!` zNXeqAcB7tN)mpEwcL2c5snoet>eLa@)zwwb`KC3tvl!!H8i1UP!+iMNY&K~am&@h4 zYiF9x;c8n|+O~}`s#z1gbFP-e#36*tSwpBQ$ieUii=;PJ2FX%qrN5TCUM#0+8V{qY z#@L#2ErooT`axBF+l*uH7zx2Sy73194#VKtwQZZ{+!P9uOUl`d)s%>u zuH`u&d)|5>Rwn0MjIl^P_O8~NrgAtOz)Y(spokF60$w5mv8q{GBLMKgRTWYNGZz_= zGlTcOlrlG~a?VuAIj3ON6cG&-Q~=ODn>O_9;XIeoMMX@b_w#%XAt;z?Vdl1N>r`$Y zgH@2YS_L&AE+DALra&mB2y@<>IU^z@BNY`90TUrN2N^&GL}tVQP$-p*LQ+*~uC*HI zm@nFwdpo!_X(G)^=_5Ko}gUl;@Ws=TRyGfTf#f3j!m-bGZDx zH<+_S)G81$b18+0OR2N!eDy(bAbLLE zUX@btum?huT+$SrV`j;M4iWgU+e;|#zWv^7FMsGy{p{d1YfzT17?m$LC|sl7=c*> zAu|9$RWMKn0OyHKF^hWSxY!_bp1oFzsbqn1uE`jZAr%z_fGVV*#OVC&0uN2D0vH^H zrU+Ij$*OUsv3P;Td2QMvq>j;f+MryHmC4$+fYyS=JbRC^P3w+^to_8^xn&z_soT93 zg~sdA%2r1i9SDGAe>lk5bt#28$U0OP0t$nH6G-C$T--nLyM5GJ*^ngGGroSm&1Zr= z(!}11hCDfdFyl;oOsRTPdUE#dmrpO>5d;b?z@vHX9!#Ah(Cw9y4 zD}K93i=iGC6dGgfV!U&-d+*!FC(+kC3|a{i5vVqRj1o2fL7zYV`5OO}?SXk9DBw+v zm;h|Pk>>jC{0nA`1fVmMzN#n!ASy!Z1cQ-j$iTvtSPWbNlzr-}!{9qF?54vQDX#9m 
zT%|b2F;K;BFDSVLkHHfSq?PN?Ga5w?WL(q@nOw|RGXpK1H}8Sbm_`Y*C2&4?jGZwd zler}0xB+I92`!V=T6HLjV4F5@2QG?2acGrMMbQ!>#-(2axFx!V@?OkPT-c9^LB$$t zK-&zW-NHAgg=vw-YR;$?zo6!%)k$Nlaklmn{w`$vnHzj5^6 zx5eET#;+cYZ?EL*ceZD@>n)hhfALrTgC~y;#~=CmH()iU(fdDdpMKez(_QJUT%AbJ z>x-+atERG(yYJlpH@#=L^-@|M`|+LX+j991tZv_!31nkf&6OsdB zs|1L%2xI{nwwgjdkU8Q|ME0b2 z2yYm_>%6n&*?(Ndjm!0PqA20O55Ao^k)B5<~#XG$}lJdeY z{=?7f^xg|o`?G)i`~J`m`~*|@`s;6g&&NO0G~rMGYk&65Z@zx$clz7q{r4W#T2(Nm zRBC4A5SD4|YcA6?U0+}CueUkpSyBG<$<;9QZRY`N2w}h9wr$%F&@GSe-F@-ldlx_V zXMg_v_uu^ofB#o*9p8HT=;>;y-o>M%7hZq;^?Ud4-oJl;xn4be`nVs*4}a(*7grab z`QGpQ!!PPafu^!)5Y&YF~}T2)AMellS>)ot7^IfEoaJhUIeI#zw(B&l`7Ktv2HbzW8?8 zRDyhdvD&|P?`WNykL^kpU?op38nXfx)~XU!K&nb1A|!R`+NWL2yA*B{AQ1=x88|{S zGNM@;2JkExlN>w84#u2A2%Yx;P*gISY^xypO>#Kt)ig6%_$UOr>fq z3O1mb142h=5s<1t5%hqy4OnZvobn>DLDNf(|QfepKJl9Hwb1HA#*sO|_VdJ}L9zQ0q-#fU1JYeEf5cox52jXMeza z8hkDtV&=K%F#Gq@IL<$uEz-bH!OS#hCPQT~BxI^mt4N_Zw5C>jzJLvxzeAF$@wL?VcHloR;hZ9Qtt_GczBb9ygsIr#{t864WXY zG7}rq%p?gcN{S{Rm9>dJwu^YwF3x)(2qCD>;RlWPen0e#T%^FWn7-MH>YNL~i`Xo) zoeM^0Vx~>g09ww|qHAO00W5?-46Bpne%C*Kba5zkoZNc7T)x;e_xC|LwpIl+`m*7OG~oAUu04IOk@F zsH)bAB8CQ1su|o|jkb+3!k*@QZ!(x)s7TduP(}c2nubF(GnE^I?=($W3L=UM02EbG zbwrOIJp#g}YZu)rhN`N;djh=PA8M_pmDvRs0U!-yN@)rn09w?>hqi4=4b@7@gH~WL zFmw!NsH|oJfZmB!ByWf{RRYhRZVJ1zZA$G1(S$JTpJvF?%+2vW&ei!gcD2@a(bZaq zVVLhn-&#;LvU#-rH<{Q>ozM2<#3q1IT4A@&r1an zBVx%ZG!cMoPK}&{cZtAsU#8hF639Tns-U2&Y=npmX2dvuiK+t6rG9wBEM;ahB{Bq> zrcq6ZC-%0rm_2~ZU6$EJ zO#}>x(apYFsdl~JKWiu|B0q0S%*h;cEW~q|nkImo z_P{hvGbDDoSa|QRxBEP1X4aw=ODV-b)%zKL6g+orgfVN)2CQbe7E)=NaI!i&zq-ai zMWvL%s)$NQt(grq=bU4VCWR^x+O5^*{{8#c*O$A)uvjdPkB=WdeAw>>&*AyNb;<>6jL`!*W->^%_$I`XOiCNs6k`mk z%H#p?_N}`g_`u7DN2%rYt)u(@=AZv}-+BL?55M-QmtUNkrr~D##y7q(rQz1eNznpi zpn^m(`WdSxBFv;_i>{kjrJKjB+4Fve_dZtzW=DhsMdurriLf{Xq5aS^QX5+b7-IxA zVG63Eg_&nw7^ne)VlFk0T8G-_VpA56ktVER^??)643GJ>_m6&XSiTS(qh(+AfQgJe zMaRfWtWGr8(E>`fq9PTdNAF0nO`0$_*boC4W|>2t9I+=x@WgY6TPrgwf)Rp(njwM` zBYR^~G#~`eRcu`p>qMP z%VOreCsD#+Ri|!c4rnaY?=%UuE%FK%UFG(CO z=YdGm^}5rFrLq=;R0Hpl0f8zE4 
z%}1VDGlhtDqbj7?Gj&s7oHrF<1S)D~JRd#)i4YLMG&HN^Kuhr^HIJ1{RatT4vLE(3 z)w){c<%a!{k+a}3H`DbsmQ3r;9j{$2x;;z#T^RM?Ls=e)PC*Ev)fn;-r^;vzdoZRR zD;hOMj*a(aT*gGf?F%NM1%NZNf>WYAIy9<^K4=p&B2Y+1C{?pnPZ)^`5I}LbDp4A) zacLA4sW`{HXmh8QMaKzz(iDA#QXo5rlns+>s)^5g*2=C`MeDEP2u#g-@uF@m?9Pdc za@!VXuDlFycjte3e|e;Q?@_sTDetf3EG^Hqw%5G)_9y2T-l zx0l!d52vq9<$&oOw}5_m`xE*9CExz`HxK_hf9#KhX)Kepo7F)RVns1ffTC!IN+>GT z=C(Yu3k=|h*?Ws*0swOo9RL9Qv*`_dXFTs)%zsouMMDF$IXUtM@aWCB7LNo>>=}F~ z6egh)a-$|@UXm973f}wg_3!*`+&zMLyg2Ey-8$?a`Nh4ynugVrlb>FE?f?9syZ26i zHBpwM(|x}FEe)^9Bz%Ts5e|UV_edTMv`8~4(2xGeo8Nf1y?yuY-MeqT`DPx6$B!T1x^;^Hf%$Z^czJbix$Jhk?R)RN zH%yxMFPr()EoDq~#q}Jdf1u`>#y5R(}^YH8ev|Wx#A!O&th`f~Wx)DFBcQ-uuALw=j;Q1BlBmo1Aa2_WS+ea3FOt zM&HCDrbVPy%^ICQ3_E7_#G)Xoo&yIaq9QfA7|^5hU2H7ZQYvI{Cd_Q2fXZE$K=*a@ z(NpDGs{`{*j8&x6oO8{k>Ds1gO3F>sc;;~&q7Sv!vKA}m7)Y&_Qme$p>h$Oqrvp;O zvb{=Dxo*o;;a~YHh*q$~pBXVHI$(*gtx*5UZ!_+wrubI|yb0bEY^E750W1 zZg7nTh-~PQ*f_*2XhH<7CqpPA)c}~i0uc$I%o@VEx(0Jty=1QDnmj5D7$TpEpf=@=6G+eQL(jQ59Syb>|KIk8jHz#xjIN& zT6X=ATTcmroV80^AMFL4T_V`ZXB^MA8s4Cfj%EG*?OO{S`*AFF%%x1FHr0%2J^;@( zj_)jXzpn%~ACjL{I&9|uK0i6n6DH@}Etje)rHB*&1Y|%YWWVG=$1IhBT}98qGgZ`F zrA31x7%i#hY4i%{xO1+J4wXj zYn_6d*$eZ`pC1T@VBg&}eRurJFgh2a5h}1^1tSAeC4_1~GMB*#5eQ+nSKb6a{$t}i zJL6}g0#-mGm{TF=hwk$Tpv<9Hk8nyN6Ri5vk32oO|8(`q?dq=gHfhV#H;{1GB>FRnaZ|uk3r=t*S=htQ{RdC%7xl2M*RdOo>Pd;ys zmldwbJ46sER@4!J3nDU20|1y=@X>jGt{G3Mgb=$`Ty2)X>q`ggY0L?z@!lhHj7%WY(63gx-Y|B|)tf074)lH`bI=@}V^~m<1cpqyVXwG#uKt z4Z+PdQy&}vuyZq0zHyBYjEG%qX3>C*X|r4bz~#k-bFQS^_+SE{b+!(NCQj4TPot`~ zZCi3Nv!dn{W?ny&Bck2V&tKe}3VZYDD22#UO^fwvb%SbgXBX$C6hUI<^?Es_6l3fb z%{cWyu1G$ZoC13w3QKEH+Mrj9&wuM*Ks1r~+bo8jya4$gxbB1Run3Nbq* zRFhI?tq&2gcgxkvwUknt-~Sw2jfkw~96O{rz`#9&_0N;GX`1KQrCFA7d42_i`&~cq zG|T@@m62Qw&JjZJ-aF^el&Fne9BNh7zUEyT!HfVM5-|Z|MN!ka4_oFYM9scI6=Xt~ z6J`jQ*+7VhW;X!X>@b*X=aP%}Ap}47pXLqvIHs{5=G27wdd`){5JH7#S|3q;#;Hf5 zWV7V{riyw~*9HKPQbbI1Hqq2-a4sy@Y-6tFxrAA6oW~;MZscF{`^h;IG9nTp8k(9| znP)yC8sDVI5Rs`p?@>AD$SFM&;Lrb*-au)yh|D1mB62t!=2qY4Xf+#yYEjh+Ky#bn 
z`ub2xnG5=6XrM$)M1VFomgeS4&I$nD`#kLfJ0@SGh}5~qpjtx+^F3&Wvz88lX4A&= zGW6UKooD!*AC6@6#}N^S6kTLsQ!uU5q^b}?mFi-kY9JznbDst0{)hAIX;uZ9bIv=S z409^;OCv%lWo9~g*DjZ>DO9aF~kqijKiTfRmlQs1?tdG z{WuNNI1KyE=J=DJ`rcaXul>!xkf!UCqvNB^T||C*@!sY3X+MIO^`dh`&;-}SrE6@n zS;e*)r`)yeOb`PDCL~7`D2qa|MQA$qDvF}f2NA;%0KjI*m5Lh78zd{jMhw76(Fb!g z<&4Py5vP>8)pD5n;c6=gZQC{s+wC^G5ZECQ5pR~8qt*J}Y5e?`e&g4^_;-h?&r-&z z@4C(tn!s#oL9$ZHYz}Z@MWxnCo=#3qhGA%8P*qRvkjHuQh^h)GgPGN;K6t38T1A8b zJ+T6-sUWa-W`@s_l^ijGrILVW#F#Ns1(@f&bJ@gA$yN32^r(ZCiv{^?y9Ei4!tuM^ zz1l1hM5%P>s$T;DR%3ORby6*!aN)6{vNr-ik8>`-O`sE$JRuVXXU?ZmL8~${kSAmp z^jYG^975BwBO*mrM=)lpg+*vL#g%Fm>_DM5xmrz}hd^zy(VI|T z>+W#x{5QA3Qn=HwdHq^v=P}%wJc|@CkJ5Uq#97;7teFEdOatU`TsO;q>%aRy|6@P> zr~m3-{|~?Y^o_4P{k^e{(Kp|E_lv&t_xv~Pt$#n`@{`AxA9>~Ommin2i`zSR@KisJ zo5jmDT~$<5AY%YTA(&lC-#z92=f(gCLHE8tg?nIL2n_%Pbgt>l8UaM4nK@)u0qo|l zcwVE(P_%%65QxatgbLyvMMpu=Lp3xYu_}2OVHl|kp2Z~9K}Mr)y@5t~(4c8o35oEs=D*-maU+AO6I+Xpi3fg%@_;N*4RY z$J||$?NvKmwP$bO`u=Y8!{x>LgFa0`x8I^^M85wH^24pn<5fp zjE-;Jx^>v?Z{4~T8-IHGU>N!*XYV?PufF=~<>l2o?|gf=JG}Jb17?5c_Jd*E-o1Ob z)O2xuMSD0oT{yp=%J1B}^Wd$w z-YQe+nohwGQL0ETV2EIbrm6~@b7e*Vt~C>pbEK*Qm{sTF(_9>M?1;z#R8>%@spP6= zR-7i**pOB+WJK)y-XTTrX|c5GV%u==<2Vi}dtzemV&h6#FIUdFz8_O9-bF-ADF<@w z84;zFVHn173?X1(wBo^6Q#H+bN}@wa?3|5x*77j2Lt3p?Cc3ZtS+cZRtq2eij@Fy` z)Ug{bYDqHHHg>m8A54PRLw#~lml@BO_dgh)9^wAd8WSu9YcWUM#s;&IZ#XCsKyamw zfr_-NBtysJ;wWB~?%VM4{*~`puUEAs+r8P;N0iT@tQ<83OUsD=RrZW(BK5yS_f{h7qR<%Dr+No+$%Q-pbS}W|XJuwpr z)J+q5=f*sxy>J)W&^jq6i|v)Q#USM>sZc_I!J(ijxDBcblCcN&;DY#0eR5S0%^~v4 znIZ!-0|Io0h%k3~QHczl0*27lJfn20Lvl?3psHX5E`awQ*=a47Ytfpu`X&544Ab!qC?MDd%t#c)Hxv_Z;IsO zaubqTWydhE6pG<#J?!zgJ3Y>4@8z`TCU_>0%2j>i32ENw!?SajnE;BKzmZnCIrHpB z5eWeEL2Lf+8$34{qPgfA*RXR#N&BHsWm>IP1yG$s0|xd8Ip;t!9YK?zn(J$JW)Xs_ zG-@h`s?IqFNLc}hUCYin6)4#-z{7_RgKrui7mL+)yJs1{{5zj(m+|!0W*nE-qqU0$ z+7e~|?$tZ4t>vnJGE8Uf{iHJ5vNW|o4d5(#XNbZG+W3ZY(s8H)F0j!;5Gt79clS-- z9shC+Q7sj#fhnMB&8C2C=)IbuX;C4bnH>LE2jKS&#F;^K(`z&$Mi4-)^%+MQzEd7T zbgj2t|LXOx-h28>PwI!h@YwmQ%vlO$!7&1i5I&rX$7Ea7) z!bKfrad_hu_|d9)(c 
zrs@OlFNIrH)eyB?Q{%WE=z##xzwLEGb+wuf`0Xp#;Xu7KTbU2($RS;ZN?_#?;qC94(1z~rn+ePP) zo`qhhAp!zOQ9~u7X_`cYGR+#;H08x&frxn;mTdsAG4Fj0x9{8?>lmp)saa}PVRnH6 zG6i;?7!_DR)RjrBOjl@6D{LIOh(1@1AV}3D7(5i6glJ)q5{h6k(jk)oK;nh2aD$%zU-o)tX~) zP1CGa3ubPcxLL2-Zc%H!yxI=KkV_$&4Y6SM%qC6*0L)AXrg$T!ZsrVLL3F$A0Sp`< z8_ZWyos*o2y>G*+>AZ6@hOyKtU@4`hX<`W8`;^iwWi^0hY-5bhxin1x(6-&Y(VF2@ z%jNR=`nn&7`OWm>;K;e)pW~GPwQXY);*j!O1XtCZr`g_whz_vn7C47XBaCU9N+u$& zQmlvyp))Y_=)D(_iWMDH5Cfq=gigQ!(ahu~e&D8PJ}cqqhM<4b9$;oEQly3$y!TS7 znZ+ikTCN2Md8V3}89wK)Arb+q3qHgfCCc*=eIp`f%+yWeH1t<(8{1{uts11UrwnFbR>8>eeBTklG^bf8KKoENpGd4I5NN5A=V*{3 zYG|N!&f7plW>l@txs#KVIRjaBhRMutfB~SWBeMDYq6)RnQGiCo21<;8H;sf4LW~}y z6|qvLc^Z(1`ECscv+t)?RaFE+f;qGU04UtBOhwB4LFR8{UQx-w3`C{Yy5H4YEmspj zL#skWVuo|L!fZ;I`y8c|l*XoA1m~IAvR0KMz(~v0@_M_UmopJTBb8)YYE|}FN@3=% z>&)zM=;x-;(iFWNyp@T2_yg_vzk)7apBIK9ob-wz-sQ^6uH0)~Rh*Me>w$*SHs6eBu867bSBV z_lWMbS3Y^V`M}e&$HTDOZm(|Nc@Zm#4rxr$d)2zz?^8;-Ok(V~S;S`DE*h}Yd(N;BuhZ)heO*mA%tbyt=DT% zD@9&;<(0SJ{r0P`zWU|YzpB-8sor~!Zl;?s&Mgn8stVr;QgukkE;wHWikM9WoO8r8 zb#0CqLBw$ynx>hbNzOAEk&!1~NA~WBtT@#chrV2&kF-y8cKoBTeC6usg|Tbesx5f% zZa?j#1aPdLAkI?#NV2&l zObM-)giL7WL=n_6fmydYuDQ72ENGo_9a91;V9VV(HA|qlBd^C{DQS6m?$a)~!>#ak z)!#Eb&Q0yhNtVXZP&}G~)uK|6s^dAY$dc|sm;_(C{js0>i9hp?e)HG<>M#Dqf@8CG zl5b;psDRzB%gaq|?4DxvPO($R`>QB9a&+Z`={*!ChSX zZ*v$N-dcB|9g^>(p3v1ZCl>bI1|8RiJY^?zD*azDdvt`?zRByVD(uL6u@! 
zKnz(;U9ocgRW%H*3GD*HqJXPt=E9=-gyiF?I?Z&cWoWAMbPetOFkKgCr8y;%_4Om} zuLXIywbY*K`L`GJdR(8Wx|Uc@ER}UnM-82A;9|?SF4iA^)V=)l)~g&}@@402J;9Y_ zJ{q?lxc>L!@YObvbK&8`&%>hnc742VKKkiD`gHsBi*Nj9Xg}&{f7|kyf;4_w3a73v zJXKW%Ly#(f^U6u`OgarnGfjxV*d)swV5tgp!;YHc==SF`sjQ97y02YtuMdG(<8x&eW#}{!u~uS|4_aE zN86{r9JUXuosBPi*1!74v0T0L-rv%1O@I6EPze713opO=>IeVbf9HP;Sik(`Fa6FN z|LA}8@BMqPz4A#S{OiB?SN3Uo_sOH{@sLuwzP|o}ANYX>_wGJ^{P-XI#^1kn`y}W5 z+AH7t{=>K4d++@_cW(8=^$YiJt=H?XeeI1}4}|S@_jJ3zDz!g4J$mrqLCWKyf8yQg z;d<9BeU*O9hh@7tOxNG|*6+2QJ6)gt8-MQKK0AMWd3pKmhu>%$NSUW;%C*o;jZrlb z#5rdIn0*MY%A5>_wN_*|@4y(`GZ_Vo+RO(dVgoWQS+rItB~QahL|BcPNvvvd#KHNR zVLFV96^9UV4#X@3*ZbX3+Y;ie&Hx})ok~q9bs>NOc}L#oD#}b80yDcJ=d+lCmQo}Xc{rfp-4h|so;0alUqdX2MutICYHot0dR z#R7}eS_y6>a4;up5sgrRsaq^coz)L}1@D?_;7U(>l$-dBUycfS3)Sq(9AH7+S7@~(T1Ed~=aV|KHV>N)c?r8Mgz892u1i1=By-YjwI z>*SCTQB;SaAM#YK#;#qNc1<&=jAKIYN0Duwn$40pQ}GH-uj3uYMH|niLxt+VP4h96 zW0f4pB4M2-re+uh@5|A8v+CBjZ{PJCP(xqRcy$5ExJuV`ss~8pwA(+Di`~Wcy@99X zeU#hqmh5(Ex;lL5r+|#yRJAui?;=UASt(6A9BMUyg%6@=g-j5_ccTZsJN_jY1ucOx zl5-5649OUc$%jTtHn}-G&}sMXR&(DZ-H4Dr270PbAYY5psWZIa)`AN5T(O@9N2lL7Oc(+RV(ToJ8fK`>Ii^x zR%bO@QXLilmN{Rj6K*)`U= z(DML*WCprkc_{^Zz1qvsa*_+zgu=9(Qq2HkCu2-g5dn2!OYDOwG6XwsD% zcgJ`sAy3)^HJzI(yq)5;=M&9Du1N}!+SSpq^!R?tYi2OV9MHw4T{X)? 
z>6uw&&Ov1nnL|paQk|nQkB!aEW>A1BP3Ux*CaGCLr&G{vT2W-}6zaZCVU3c>psGy!VCd1Fc`rE$u&bY1rh>7W2GO_OLb1SDFm7m~|3 zr2a4hK-;#rPhSG8WA3})QYtu+Ot7-ezA`$HJff3D7aig-436BG^4y1LTSi1Pt+g`I zlqv#*Sz3-*N+Cq{UIj%o&k?{kfk`~;f(ez43~toZwN^2>cX|W>+wJynILwg-QnMpR z298~Dv}n6KC#Q@w^}{TA_OZP_?D}auzqlZxobxoK#d>wU-|dHf=F&_#4a4A^vu$5% zZJIFG)`1WJn&zhZImplbmxz9>;MUQV4;ZoASiWM8SD(nuZAG z4T-8A4*Q%-l}tnqA;xx^hFQWq+aLj8t{~TG^xn_RsQDszPi-5zu5-_*J2%0|A|ZsX zTQZW=Dk;y_fhI=heLw600SVeBpvPLPngP?WJEUQp4ZaEnpsD2fGtTacA~Uoq&Tqif zrfDj*GIOmJ$pB!t+Zl06NkwAxZQIVjETxJ`N@FR70GG?DUI+jP*?T{?Wai-l5fFiInx<)*;G;qnE3s)s z_5ct5&e3|+?*~Na7J->Wh@}Ccqv$J2 ztyu?)yppMoI+&S@uGBiWtG8FXHa23~_x;(iE379z!uA>>WQ3*1#<|d$cQ#+3C+R;Y*O=DgEP-iGZf95 z8fG#ZYO1+3;F&ovR-hu{&>WG9%fF18$t`XGql zi4>i21g}s zI08LL>R1pAhvFnA!nE7ECl8lt`&jP8bzkHs(l&S_>d76}+fG_4u!5f8YwMyZJ zgbp8k>3jYMf9`+kiGK69{vB%T1K#uILT(RuO zcHJJos905MuVziBp*ffsdtIJ(5YsquqtL313C0RB)IeQu*({j}GBL;4qKj1xyO*+1 zja6L7YY+X@l_J=OxHLj0AK0NOh=LV0gZc^qC_+L61w&;J*4)$_y-#q>5lp5&#G&an zp;FhR{esib!Ip^1^47GzU&Y$;^yuohK63n~>S5GymC{qyFx6Ay52r7`_{xV~+WhR_ zdk4O>`f-2j109gnQvE9O7E4VDA;05D0y4GXJo($lPN#)qw`o7{`d7a2*7j^V`pm`g zC&$&tSh=Yar^Fl!Wp$yDW;HNzjw6C&b_g0Vi-G_EfB;EEK~${+XX>GfRRmCrVx}&m zx%s|fcpeAv&$Q3>Ux571-V}hF$S1I86B0cl+=PcqpyDR&4>T!cYb~0PY=8P>tFlIkN?OI|KXqdW8eJN%lB`8@Z%r47*iiYeEjH) zh_0`;r>D1HeDKPCcm4Hmec|E5_cy2Owps0W{Z&6aeslpwFWi3d-G}ejs-cOW``p)J z+Z-PsZ#E0d#}6L7u!!#L@*?=t!!TTIue+{$@8Rz~eDuc2>V;gV)6>&o9LI57`LJkO zBEm9_IXTZvj*)V%4BAESZ<2f*f;yz&eG+GE%%H9aWmNznRliF^(=-H7QwGpBcmtWH z6csm{quY1yoL!!sjYGFwD!8$TYuO=Q?yt*~nkIPfOq3bL(RLa{tZABxw(Iu@{e!$#>wVzDnvuzw%c7?5D}=LScQDr zEnUZ2Dgx|N6;0=3UlC6m-Yl@yvy0(Ex9Yn^+paPbmx(MVl)(j>79 z-Zw+3;Is+eX*^s_a_3ID^|6yjS7k)m~Wix@41)%l!+|oryoxj{)H`YcDnra@hVx5zX4G}#dqM~~5J9CA&)=KOZ#E^`b zd~;T&ZcpoO1??dOCjfOwast8L+* zZ?|8)(;pbHi~_C=@fc~5RI43=8>hNxgCEOeis%CoBTAZj^h;tVg&7@6sY7m&YOO-G zao!X3TC@T08&}jtdh)zHt?xT~@^!|w%46yG( zpXSZe@M1Xr9@9`G1mw*jXE=;V<#YF)zpcxvxyZ6L%y<;!1v`Ed5& z@!vRK{Lm#gYL_Pg)lXV%t}a2&h*&rj0PmYITswKtLfIW~T%D|JsAmT~J-UVU8}$c1 
zj~~54Se24olik#+h*rXmoFam#0}^(C;0tC11%tVW1G7vOserI!kI~GuR;^+H42bRq z4D!72bW?;ibD(I5*lK{{#mrP&fCS8ljjAfz98Nmd0nsnFeLW3RN)`M4Xr{EgcbTT; zs;y67SzmwTWEh9OxO)qF+KjX~_|tIq+UncewE41oHE4s5-`#wVo5H4h!`0oQ#$g%> zBv2Yi5OLvG%EdXyszcp1Q-jd!KRAKvrv51s#&w^yIu zu0Iv}3#)0scd{5s59ZWcT7HlB$Z5|b7-}Lk^n`wl>SKuNjr*cYYB&2sDD?|JbN}$p zRkOY7L&%_$QoA`_cI$L``K{a>onI%7cbIV;w;eZ4*Cu(YQ{#~l!#EBah#DDkBBGSd zG-;`h**T6Oh*t8<3jLvvE;ouY~+@9KWgK$U@NEm{F;qdKXKuZANtL*U>-pGGkb zKGc#p^4#nva!_Z!2LOQ80x-J+u}H0^#)yaxfmqXY6H74Hl-U?ef(wY42wVW?+)TgR zj@RCM=bNflq)b}OoA*9y^30BW2nzru$HsKEyYkk?HUQZ$4a^Lv-uu|ZqSE@9b0&aN za*T6J!;bG9pFg=8_QS1{jZqoX&@Q@MeT*>;dnxL?Z<^pdu`}=Z>D8Eq@&5gLi*7UY z`#ZO8?XEA)bhA9IwMr?0g8{w&_=$*|o}M-;rDRLFA^O0bJN+=VZ99Y^G+s)2e0?p9 zV$itYT{G|3mdkGF`=)7*Zl5mOZth#W^Yp?8m$FW2h(0uJfz=Qjb>aIo3fD#$1nAG)>c3N)emmwYadFjn6RO(= zCL+L4RZJ2QdG@6gN8XVuS=y#$ky1)aPKg^J0KJKnP+59s2)PzTD27!90G9=Y0MW9Q zRLdOCK+ZKx3^er#9g~qrii@_Cw43b6JUcqqtWJ;7q1w1;yWmU8i>7U(a|A=5QvoI) zeMG2al2Yor=IE%~Y}$*93j+V2q6r^M8M7k#q0@7$%t4%l##qwL;*t)6NM_y`MdXSJL1D``{e1P&GEV- z#zyi`6`8QHss?p)v?yg7Aa~pnkasO8rj#f`HBrL#<}M=k;~8LHEIQET_k8k)zxR`$ zxZYm8_UcE^e)}J?=ak30w;#Or>WA*!y7$%BzxeI9zVhJy{SUqLgCBb3lV#GJ^X=o8 ze&R=d&T-gmHgCQ4R`j?3+yBa+OKE!Y_^Tg% z^yuN$ei+74pb!C4J@Os^qIdJ-)Dh3(yq;(p6Ejbxc<;Nm8OH%oCuRz4MI2D;qUBsn zk#WR)eYoDNk8{o*{r+ORSZ&IrlU=;~o z=%bZRJP#2JttPC%#Et}sVKG5j7Gr=)Ny$7QRt<*g*lCf&!F7#01X`@`Ph2r*i5Q2qY+n^3&C1q^|2m%SWMNjYNayy!7?0^ z7WM&g;T0-QO6;U%GxMmmVlD<;(9lQ|F6w?shzKZUj;<+I0YM~%u+Tc$c845nain=T zNxRNCVH}#log6QAY5C-Bmh*9YwYu{z_7)*}hugi?D(kfZQLWYcn37_kEE!Cas3|V8 zytsVjvmgA?<)Zn^fBQfF@;hG=qz0oT=m?X_o==$FQqNsela1cxX>wo0KRdm4i|uR? 
zL=J6grl8=+fkGu!vPw|Q=(`Br|NQu`6;1qKX9VxER1#IY2}Cn?YFb3JCSs3F&J@uh zR5WYy$Z+L{A~2au&M~KcdhhE)(}mEqu{9twKyv1MhHTNb9L+2v*t#__QzV2kQ|`zs zqAHju^SoQD0$x?CN>x!Ja1n??+klbHu~cecH(6~dxY?0P258_A5sg5+bQ4b2RWM#_ zyX4J<(GW>W@xF1vpYJY~i#ui1BiUmAjgRPWV7NSWw=WBghPS%gFRo4`9)J3?pV?Sy z+xGRh-+t<_tR6TsO79yThccN%?Cwz>8ISZP!co0fC*mSbijhd}r*{=G4i1W^qM*#OX5J8T%!@rzHw?U(BB ze&Y0jw41;9#@BfN&hEh3LYn2S4;`-}jFnKOBbP{)2no`(ZdJqahf=4D`Hv_pXR6 z+Lll0{rBI$efQ2{y?XTI@hqIV-fctcUwH6>E->nSJjfOQX(c(Xq&F8 znCNaebg{`*7mEdZsUU#V6|2eQ^QirIOb*RK(vTe6*Cja0H8pdDS^;V{-pJ~%>%O#qadwxdMLZJegqltmYc^nCWay8GI?$*u3E z3bhhQa$rYI5RqE6K)K1vBWxmzh_O4=f{j1vmVVLZTE>0=(Ze_n!_Y0;E_Ts*&q0CH znAt&$v2EK@$}~;0J>1OlckG-vYesIWGG>RNk1j6PD*-v|53AMYLm&NE+dS#ov#ssY zknnKL)4{89kCzWcsD;j4T$gw(_1pd7;nnpJuNQ>i{X}b{wFZtQ)6`?!oYk@lVI5sm ztThiHunEg+5Ol1F1|S0H7@T8a@JG*Dz}2If(g>10hLoOXD$HEbxrmT6I0Oh`7^Viy zIk!LT9Fe%D5C&JsEe{&3A4(n%bnM31P(I0*#RWsnU2|xcnl>GH55S@XNI+G9vK^tb z4j;a+9qQ0sBYR})lU-ZZIo6~i; z4UwpUs#1$ejWIUOVqSmyzE3F~m!l$SgbXmPWXRLzDnU7WKdt7l98e0MFb5_-xR#q+@y(f_f7q^E?KV z9jckBLj*%GWi>{&y2Yv7GM5g+a(M4}c=Tdd`{6Pi>t>Z=wRN%PfPJy0D+HV0r4Lw| zfDrUGhwrS0roQ*a`xClW(1y}9cz~IvLNupJ-ek*j1aq}eN~0J! 
zM1Y8>0MV^_JAjtuN7}dL*8MC0>E`fcO^^YZxqGhhR6hS~F_Q61@U9P-ys?ztpf{}A(+IG#n!lqPaw1pWt#{}vi#I9LUjNW^z zs?blP_Z|_Z;jrz8tP&ROoc{>`A~KF6)OoW+X2|}TwGK=~#Oka9CP3#LM4JDvS#nE6 z{jjgo2ms#2XTor5V!Ym8&+~_wV~p$dT119nh|N-~R1vR)$hD{xDU*SfQcBYXGpjWd z;r4nzhYfUH*L91rAK+Q^XwLcS>WYX@PEMAKu1~wu)6-$h05(mNcaD*6@Zxhuf>|!3 zZ)v-`Bm&dw95sv96OYp&gkYvm&L3ZHFC5WwxlDPeNQlVZH!(I%b3MF&`SkJe@p16} z&aK-@e6hXGA~8Bu1wuqn)zy04HcgDapT?;lN-5EYp&uBD2ujMPO=iwHx6Nt>EvA&N zcUuf}v^eQyS}&F*3sISOf3?)WSA>eYUyU}4#fISp1<6fQ3>$8l_$MnqI=^gM?Rgb)ys7~1Y=eu_Bf%vlH_$DGGJ z<3YS<@4fdodA2tM?q`pn+)s|3^Q53-87vz!x65v|SndxKd5++iQSkb-B9WtuLqR_{H4BAW9~WKJcJ82!C_ zcXs&QcPgCv=F&NWt;E)g&OfvC=59C*oR5 zt+nragE|KehY+-iLz+{#idxbtD$Y5_J~DPb_?ECQ{ghR5*9MAlDpgdFz%!RpYORV8 zqi5!-_0SIuv-f_zUW?d`Maisfqo#_8YSqL5z&RJZBO)DB6MP8H08N=(AP!oqZ!XLt zyN>--G?BaRj+QFW?=b||b)6#ISU{LnmB>AhWB?IzNX(@aHA7_Yy@};KiC8TCrJW*PO0*Tkrkx@i8%rmT4QjYdIK$apP-6520&Ub$Akh5FBK^ zb9(FDci;6fu2-vT&B)WOlhyDx05oarW3YGM`C5N{P^~}w*&koT%||}+k?moB|Nett z`lY}7yYGKaE!{djzPh?fFXZD++i$;hb$$Ni>>F2C zSGkNt?&5s+=}&wXP(SnOpJ3{knnzb}ef@8LDb;5y1%l)fLYUahY&RW1GLAM119ySI^GQYR;ee^rxrGI*#KH z|L~7^@15hn{@4F10jw9x?e1w+0i-5&U=si|i$&LUP^{LP^I@8@F$LEKrc?_WG>dM^ z2}Dv#ChEz{l$|s0XigMt$Z4%vr8Zt`20=3wF;gW#&+G$Pl||dmwLYkV&Mka1Z+@1A zGs@m%$)_GS4~KWFe(Ts42ReS-e*Em#50vhK=;WzJItN+;Jj<&>G(;i-us{TW0Hk07 zY~+BU*4hp4K z+Ia5@lw696l6R}H=*Mx~m*W@jedUcW|IJ_dYrP(-r{!{aIP`6RG*ocxe3gO$g4W<& zYcV&6?MZobDVv3Dpf7~=R;fGof6&K~{|1adu=YSL$q_EPMi8`A6UVWO&EjzW*b2l5 z&N)+K0#iv-0)M2rQl1j(9@nK=lpwkvL!Ov1iH&N-AcWwBYMEj&Cs?XVWo%mpwpta; z*bzBq1b|dU6-}Y4sN|W!QdBCSAPJ!`g8~sLpcse&6O*D6EDJF+B8ZlGsh)#wrqfoj zYgU`XpAA z!f*S1dH2@Oq~n*vG)nuyir2uAYW0|jQCth`L34vdM4rGgxf@uJ*&h%o=%49OLj*Ph zKxcsQ_SH*&yzfr*s=s{vYu|qK=U0H{-6EtxTTXWeBgzTUtOR5ihoIty?d?_#%?I z;D>2!+txXU1~r%M)pbs_X_~j+e*5nI7h;UVFtlyET6WW6Iy<{KIy!my=v{;Sec$() zFMY{CHA$WcEmO%%1PrUyiVTpzQ2QyH*<#T~=Oq_28^+NG^xhLWDVmEI0)ct+s8ic3YSW2UQv$E2@zZ0W+c_?{7{Q2BOsi8e%pxGa*7|s8wr~8?gI) zBmw{cL>#3$&`pdYvRtnYhXD;ligyl~!ECl;k9h(yZM952Q-g@Jb9Ok$H0_+n$~~(s 
zd^8ZU(mChVaOx|#sv^0LX%ZD5qnHjQIrMD4rUcLuQi6)@CzXt#g5hvj(xvBKy zSSlh~!;zIVONIpGI5ucm&8C#*^H}4(p$^k%h-OCQP)$|c4P%thJ2D*7aOii4C}h>- zo8S7@_G(vag*h%vA=-kKBxP7~8Gy$r_m^qBT=d_(SAH+pb&vNI!Hg)i-FS@cZQxZ6 z3?)=2u?11J>b!3frjacw21K19p$Y)VfQncVC#|gF)Et0esU|hJSVU5)D!|~p=iq^$ zNTF^8S=4#KT&mujSVTr}wZFs=HmkJ1A8lMeJ&5N^x)@p=-smy47hO3;=T zu6SndOOGB(u8uoZMKWS$U<0`!hgiLP_wG23Gp!N;n7OLvzIS0}%UX=_dcQl2+tp_C z;K76I^Q-N43$QY%c}i(2>*ca(8Y%ibcWxLaGnwlqXsWeFXCbt&fAz~Rz4X$f$L~LV z`gGH+k*SO+r8GArVvL*3X3Vxd^xOTxEl-YDN6YA9Icy((i@VV;@5<_aUaqk!#+71L zI-$UB(J}=Y2Q7-MLYT?5)!Q?et^gn!5`KrE^4;;>@z3{owzs)iVrF`*vXP;hiCGfM zRmlL2#GwER6A(EgB47goLNo?^-VrnWCzcy|QUwqL5rHCTfJkWWis9DzL!YKMPk(&k z7}5drkah8b`{vZ<>-NLEU%Zxn`6#4f$I~>J$zsvYrJ26J0W6r=RLnbt+N6YaoT%BY zIy|BHExrFfz1Z{Sm@1Q1Ad*balGfF3XhF{c*o~R+d2qtBz#Bj#LqtOo1Dby>Ff%6Y zIrDu!m%?`f80`5U8E;-L`lgg*Gl|Gl#b~b7W4Azp?K!2(PmTZJlkvj2&EK=TkL>s} z`Q6U*MINt;KYNuaud^S2HOTk-H7*Q?%K*EK{` zYmN=C*XuD)L=ZyQyikX6cYgJF<(ij{yY-6Da+wBES4An3_Z||?WcD&;nu}$smzS3e zP)!%BB{NUc5M%V-)ml?3i0GLKF?eSs*Q!}_meTmJZe0jX^)%I@bwWgA@}7wZ1ZLhk zA~Og$bnF43lrr0i5#a{=E(&n-%Q@9K0%4wIGr4}TSeV%~O*bLHDa~(ty~tD00KrPNSh=XZyFt+f(EfW^^b7>3JjVo%MY zYZ{kxhU}KBRo8XHFy!3d?$&n}8)lv?Tdpsj-Z@#Wzy0>RmTS{CzKN(d$7Nkyojd39 zl)U%s*orXNYT1Plh@frT`PTdPH{Ly7u`Dl>s>Y`~EO*c(bswH?t#KwnOQiUoaP8@)G&#PruN;b3Q!kdK5G{i0Anp-!Eu+I#J_p5g!h|75$}MjvXejFcb)5wpv4mQq9CmtvaB#>Z~O zbD2%FYDG040wONE?XK%YBuh3E@3Cn*l7&dD0I;;c!*R?x6C*RB1?9TjPo|tyH39<> zB7>S%1;r##Dz5GMst50?b1Wr--i_;+>s7CCT9$m56ew-yBQHJbv>a<&CbV^ z5&f_aMky#uD`mvVq88)jAle6wK^YS7iz6^b7kh!)9vNu zWz#fk-_O(3cO82_?2bS6dw=39U-`<_!^`JhdG_}COMmdE|InZR3;&P%55I$?CUo`m zdUthgX0N~g^}btco!@!qom%IQeB>j2_pwiZ<`aG2fA?E&y!_I~&(H2`*8RPEx1WFh z<4>MG`0Cew;}?GMKWkcM2g}^w`|jSI=YHffzaJ4VF7N-f|Ic6gxj*!W|K4x?wTIUa zm%BGGizpzn_rkcJMo8ou_0Agv_~_B2*!b;sb3E>MhrJ8Flu}gYd5+F=Z1&R? z5XTs|+ieI<%6WG@n1XX{$_J-6=@BMM2F@e1Z^b6FVNRaqq+PS~S82>eypM_qR+s8! 
z5yUg#Fpq8Q9N}iQVWN^H*OG?u;E(Irufo}WoUZip4$!*77@ODer}o|LV>t88DRpty zhCzrY)kQ~gW7+y3hS4DzqpD$5GDBxz>T^m@9$--ql&zRkTAzrBR690K8G#7^tYk`+ zL=lkdXkbOKQmrZp01KId z_rl%-0+2;Vj1iy#VwRhg}8k9W3-Py2M?$`l@cso#B} z?em5YO!H`Ac}5rlGA)Z?L>}^#kJ&Rxt!C()D~B}gh9CXpPi(fQ-~84WXWBy`tvC*| zmK4@KfHTwJsp013XJ7T$rJ;QF;TJ1`)D$K3a8L)NSZjsvuLu4S&i8CRV70JkVlM!* zm0SdJ%>>Z7*#7W_U+u~l zihSs;dw=M1`|N3031)*gfC8WbRiuJuU;(s2Yc`E6*d#DRm7B1|@Ac*QPyMPh0}xPR zKteOZ6o%oMAFj3Dy7)%$=hwrd^%FZieCx6O{GHc+!GHCy==Me8^V*!kalff?-#(Q0 zE+B0G%9nrrJO8tv`Y_`{40N(2!>(!_{Tm) z)_ne#e?#nec6Rp6^Uo1c`ucq5-uail{9C~{&p!KX2;tGAM|iz!`qhv8=nw6W$2Z=1 zqt;4}udc3``R?sIPv+}kn3(nCENAxpkS*_`|$C5@4kDMr+j*P?sR-| zdBNO_=yNhPi!#yRHr9Q&EwL5gC_Z zhmONOAg@A`bA=nW7KjM$CO z+~Rs2pw`0?`@3vlMhi}u*ehYRio+32ltC4I6-F>JRp(=KnDmlEno{=7gI9|i0>^HrHIbQ0^O@-)m`b0U`o=3KHKLrabv zgSg;s!cQeH#i;k*KysC;3s()i_s#|HJ@`0|qiR(!=Um_S9L$BFVyCxGAHH{gzu&iA z)Awr;sS5cxP1DRwec$^K4u``c+$(j`s_dxQV9AHqUjIs!@l&7rboz}_^3kDhng$j+ z1wbk#=bV6Ff9o|6IJ^72Hmf%;ufOx&!%gEhuYWr}yqI43k$USzB`rM8J*pcN)=IGG z?^wZdO~Eq3U2|ekfdoy!013$q!O8dfp8oyw{qv7-aAE2iE|EaYKtRG^;0jhUFd#s` zIJ5yk5R8oxy%D211pq)Gg2h~*|72QrM3qWQJ90hnoEP(4S7-W;I<|5~JzF zF0aEUd3x{V-EVZqM-KN-x|4Xk+Rg5{+g;8%=PV)$Dn*x3>|hf6><~8Q8h7$c6yNyA z^0BA(;p80BQI#MgQbv^}m==i$h!?H_AQ1u5LZ65CL&|TQX`oDqfC_@B_Cby^!oqd` zU9a~0c-Z$B0|2nRbf#va6;;4YmC!&JMva4UZ=Z*>7Uom)iWMe)1~6_v&-{=HmzB zbnPs*ZA(P!^*V&GjG?{vevDD0D*=-dxGaJ2w+hm z5b~n5H8ewee^0hp1i(z33(Gp37)s4mGBFs~QoAk)A5~rOp2y?yPP{`(o;^CuIj12J zkprg@n-JRAFxWT@^DrKE*Jc(%qpB`=066UT-uro~Dil4Hlx7iC#hV~pj6MwG3;-rF z4Ac3o&Gq#)Io~#2u31Eg#e2`}OO~8!2(D%shN0^^Q&z(V51$4XRaISia&g^;=tCRY zuItx*zseUE1$0XD<@J@BJ#+WjrXdkI91e&1csz{H-n(~ub#mQ2omIi0*2;jJ%_au7 zxNjY?h~%6#33YALwTo*|5uQGMn)A%DIXPL!;E8|(kMqEQC}lhjW)_>yIj2@@t*KNW z!!pmURmW+JZPRs~YK_4`1|##{13=D|n8~}Gvx=aBiK+;wwXwnNsi#S_th*i&Yc5S2 z)?Hgl0&ED3y#s)p^P-1v&Mj)Bxn!A_*B{kXs-#-lzL`tzeb@IrBQ+tK$i>CQG^Va? zyRP#ggb;Gh24otlIY2z0b@0p5;Nn&w$VoF_yhKqR1m#;T4Gsg^p|ELlns zBFa>ldGV2v0oG~?M0B!hK}@Y`%>jAS_Jla@4?cF&FxFfGI|Q5$Q%$*)0ww_vnHWjH zsupJKVlNJlc7fnv0=a%0s%-aDk5SHdt_G? 
zfRGtynoZ4nHdO(uwaz62qB5#k<6X`*I)9vx-+Ap@AV7fcz4PvC`t5erKlu9F$K$bS zn$5}jaQzfOeb>}l&G6psdx&^_d4UG~s#~hKT&Gm#QVOUB@4a`q;GC0lEMkNd*|Ea9 z?*pM{U@e-zp=*(_niiE(WF8YDcJ1mFzICnWQP-{x zBh6_ivAyPxJY4+1^aic2-+eE{07tWoM7*FQje34sWQ(a8+f8pO08nZ)vSq| z3cv_>C@NZ44L8oFnGIYupOrzJkh+S|;tJsVjq88)`JSx@$dj1Wl8{{EJ$GGZu4x890bJAyOW$H5 zfLu^~Y+MKdeV*sJ7ZAHwILPL};{pqLCc~6Da`Nk~?CIKnzpUIs~jmYIX&vno~8g7=1F#yQfh_ z;&#H;Y+nwKPanT|hVR^=w|srzIx7f$cV`n{7{?KYlrE1Kt5?4Mp-+r(3WIApX!d(K zb>?eLBDp~jXo^iwBs>=;?@}%n!TkCBR(pIyv$X4DK9v3TC$?uFI-zhdhjeWzH1j?= z5c5@#wGfPrLw1fAY1sRGDq4Kt|Nq4ad{2%6ASxOoA}Fj{@*_Mn_vE7=Z=d_Oj(5i6 z<$YfV9Il_d{Bzr}G#6joMZtb^x_funoOG!>MCtKrjVBkg|KI)x|Mth8xqZI+G*OA|Dx6JinZwO`fzr7>(ih9p>KZs8%xg2 zYj3>Pt@^903o~1voFM9)^X9ZqyUVYC^BW&~`3JuG<*z}kz%Djj)Aq-C?7GeqstG$L zBG0f;l~#S*#s~mu7O8dV=z?>q>Kw0q)3>hHI^Hw@e!W_UwlTBuaD8=kwM2`GnDYTl zQ%VA|#G80xCU1&TtzV31Q#{Rzd$czCOEUZHpV_ec&q^Jvi)%AzN zVcL)D^_mdooMPLoS8Kzgi*6iw8q?L`0*uU7Q1j`X+ntZc>BvmJZ4gw@8s9B3@y3{s z$8ns+EHrUxS%c$Y80K=URvkCU+Qdt!t%5i9Q(9Xviw`<3Cf(D^T8nfI2ZtOTLXgUJ zyslGTsB1ttr<_VYPUEyYtj|yT?IwY6DMeHj#E}Z1$EBx2$11g|)Le6_MAY}abB+mf zEenB-naME_s#ZWkL^5FJx#T2?47#rK(G?M7hoxXCOw7b)CQwicBX$7-x2{6t)f{AX z5KlNf6P-97bA`)P{G4caW#Or9F5JoOyXBeW*2iR}S(8^ACJs$;n_TB?1<6%J3;|+O z0gC8q&g26`Dy0-q0z$1+G<#CdfSy%I&2mLhS4Cn_RjXnQsw!1ObWOLKa^}#sZH#Kd zQe7PRg{LE|+&B$a>*ITy=r2}3+@u3=*!VtardmX_AZY82eH9qAGO$i#e}WU(W)<6f zJlc`+tTH1?PcqCw_LFPI&P_hLfU8m9kcaDG7aKaku{tRErfD*> zsj=syXMGU9Gk4 zI|96Y_qoRL!}s3Z?JtkV9?sAD40U~CerpdMIySGlg$@Tu>v}@V5Nu&bf zGas(tbFHci z{b%+E|I=EEsyZ`t41`3CyhNoK{4S#ayl+HUWEiT71_H`}V3ojx6oVrMs|8RTqlt;s z`D(vHY2@M~r}KjWcJs=O`PI|ww$0x?z<8b4u?@5$!{ED?j&BZcA&!LnINl-dQ@euF zUpXI!F((Dw1R3ANPd^xM{ph`~pAT2>Ji7AU$LK}MZa36gLI_t^7t?6-JR1Us2!^Jm z)>^5)uy!(-UzrC~NRlB>q+NIIVm}$uun$y^Er?nn)zSB3i?ySqi0Fdfn=*)$t z4OQ~RlOqS;w(T;??7Gefq?A&M)@o+Uq=QU#VMH$>Rt~gctxDu1SIr43b0lVBW;Bqh z>+MOZC68k<2;Q-GCLpzj5N^h@OVSi57={o^%}Z#cND;8635x(Q=ZuI=(=_c()E*I; z=@O8*@L!#Co6RQ1I1Iz(TtSO)XX zl~RV|u`W3(05;+h4z(bWopZiXh13{BUXSe{eO1&G_W 
z?YeIHZd+}a+Xy1la#qzvoYNsqx3*{Ps?Rkkm~&`~^#*ogi|s&E zxe}1M8_res-un=jaF~*_5Q)hCaA0QNxGpvv7#7WPDZ%-s31(PIu_^`#0;&KERh0}3 zOK|Si`8gxiQl4BqcErRGf@|AGt(uuB25`qbmr`;qh!*H32t`T-^C2|OY2WwLl!jr5F|s45T8f$( zJA(I@*36ty6+zJgl*TELWhrXLA+n*EAVY9gl}d72PL+>K-M++rmQto^^4`Py1IndP zm#V6j5Ur~BTx(s5`S<6T$_@!V5rHZLdS<9{+#lM$y?FApV)C0+(=^jCS1rHJizlmfQ!5Ndc>dXsZB}>h-F@Nd)3;I{zWkNXKYH-? zdJ}&9$N$)BwR-u*kG}ih-RsNC^?KF00AwC0%~KPD8mU_Fj*+TXMncC1#-*O0o<95h zi|h5t<@N5Jciz%@_A#R4Qi}IJLndYN&JaXZi`JSg1O$*=7>S$%Fmir4j?A7pB898# z1EP8FH0x@$YPRi*FT8{RCFQH@CzP!6l9Goa!iZhp5TIFE;tH1TiGic@CR$1vNw6Xk zs_LO6B62ZATGj)Es5hyIYN8c{fN-2=53c0ufPqLAJi^HMWPWlG=m@+FuMOKL=RY)c zpKaTNYXfMD&?`FBj8@GOa(5HUf$!%EEk8_60aQ^HR8+I%iPKEA?#HWX7>o06y^d(b zwAR`=Rxy#AQ3<0E7>X7vWX5J#)vB0?IbiQd4TeL}YK{yEh?SihNQ1>N06PbQm06LM zo%pN_2#(D=6$2=c45kYG2eljn3L-8#Xr!B#_8+K$GXsyA@&OvK;H-&$n(c748XoV{ z{?`1)S+gIUoAN-`34vE-rYgm|h6!saF$4mFy37g;4APVwhOTLDpWpt@8()3z@jHbp zvbDi=Z79>Is`ujQ&;7_B_}hQym)<=*c~~C5{?@z7(5_$;0u&9?-@Wzt;nVz~*Y17H zD4F&^-(L^>SD)|MdO%@;TmYgm``~@sG^%U1tg6Tk0HoAfixQjn<`jHafNM!4)tQOp zdDbV@0iugx$;+#dpiv9r6d)DiNr@DVlBbv!C`;!b=%JUe`AyZ`!%f6G(a zUk%r}2FDc4bAkDN zv@iaSOGy944T@OyJ%)gb_zx8f5OEyGQywaH*Ylud3*GsCK3raY8{~;=KV6z_>~>@M{OvDFx+rn2*{|Tvato zeEZk`-sipRe*E|R?2rA}XNU3d-8Wvlyn68M?|kj{y>kwJ5l_{sX0}@K)60wK!^c1V z@z-8^Z62qkS~`pauNj#xudhD$qj$gi`s;7M{dTPt7|g8ZLXMd!ORXyV!_aoCJQwfz zlb?L`+i!idsLgXGj;qxs=Xsb0c7Vk=`1N`VgteqL#OOnr=eCPdp}C1oH_zsxTdh~Y za|of_lq4y1GhfLeIGl zv29lzUF5@XRIAgl-?l-kBuzxrt@_<@pNq6zEUI%ZkRj)c$j$+pMIRLGI1a8rsV=s0 z6IWuK%b27>xzv1C!|fUm!A5TZeOr#2s&#$0X~k1X)ppZVt2V78TlC|uZJTVRnpV?0 zw{IKM;o{0f&rY`8dNr3+OD0f}BA(3jn8%{(8^>x*XcuwT0(<-*5cck~&j3K4=5ZWL zDIiPa3oc2KAvyq^rdiCq2NOdHH`0DYRj5*oDLTFc2hIdW&m<^-Ibi8A=ja=?AjHfW z%UF_LJhH3HXV&l5le>>$GwCh#r#v^lZL-y;76b`6fb%9D0xm`@<$y#=PMi}6fTUH( zV3wk|h=WXP)+|ye`*PN==pk3Q@GN@#L939_3`X`U<^sJFicIoPjV{2Nf5>SS` z$J$_5vy|c=Ki&IP^qY9%`le|Ju~axt!#cKRRTV^JzaIf$v++&i4~P9?pI`Dxmn`5V ztkz6e+>#s(O4s{+n#-;8Q$%zw_^LTq?}(X}bk~JmkaKn}n3+f>qTP7)(U*RJpj~w* 
z=VwX;mzPi8e&_ATyd)6;KoMa8?>!OeoSAfzG3i_pb5d1BaCO?}vsaB{J6_jj?Lhl! z=jbGaTMk{VU}j1P1Zd)_8-p@=?|=c>z^kzkTTS$RM)CL0Kf8y}3`mg-P>>YR5DZWi zP;6E~sYNS+3Ybb2W@K_m-T)K{%z*(^!y7a5vKv?q{^R4Cs#;blH+TaCG*C4)PT=r3 zSkV$D?Vi)C4ED%vc|NE-*V?zVi97%D9`zai26ea6aY!Gq4P`$j34ua1BcPE|CT0*e5t{SF}DhC(c=A2>IXl(ELlP=T@^9XhuiB!(vNW4&|HKL1DkxBqhYhs))D z#-Z1pFP(ZT^HE*Vj%RLCokJ<1MCF2FSfPSdGDhbJS;YB}<`L0W4oT9(hgW>Ex_!18 zu9Ek@>pEJ7s&o=#thL_iI&a0adXTp57290FDT;G#)9?5D-SMyxGKtB|N}4@W8={i) z?67T(u*@Y}MMOduyl1kcSrr1iQPR!+Myd!S8e$|One*x9;jgq`Q!FE~Thit#zK~CGmXuR^Iy;UU)%O z=Xu`m_lxp*F3$vyxy~jw0+mM?e*nVRh4Qn zvS6 z+HSYTmZllWMD)qy%UWySwDUX{U(ZgqFTU`?<^mLl%!{N|0!DJE5 zL5MNs;pydt54h^Lo@jr)+h6bcHpa#y(=d<6;G+ulZ^X_}@|*)uVO=wb+^6cuQi775I(iLusN=jxmb z&VxaolbD*BbL3)NwGEKn@CFR90swerMy#o-Iz)i7P>j7}@|o)FJN=2PO)^5fLMiV*qH^ zD^%F7)}Z3Z%xt?}8F2v}vsA`f^E_wonSv*ezH##~S5Y7)=h=mnQjzi2`8fqR?1z*J zQvjfnjfjJHQWQXQDrmY{_kGu;x$O6Q$AF^9#Ebw~%@`p#?}?g|b=&Y}vv$sHHY){V zrz)(fU=DyvDXLm)UOZ$Bh+x6_Ro5`Hi_XXX`f^u_lq!OV%nfsuydRETa9i56O+=}+ zR!}6w5F86KHw`pmT1uLyf@ID)sW;hTi^pq$93Uc~5hEfw=axjZT#A~7NFhcuKrE3V zHgTvSCE~6!Vk(2GXJ#tFaN;mKb^uVbEb_>uL`TF$Ol)Q#W(>7fl1hY&=%=QHWE`BT z5`jozKm^rVO`vbPJ~q=dsp>pVtJUhx*=Y!2JPzCKc6)aE^y$-jJwJc<#bKC2d)l?@ z4%a{X`+n|gU;Fyi{_2>9i^EmFS%bhgU;DPq^5z?F=lSrFS3dRgKljIXyUSntmA~?x z*S>5>pwU_ab4^R^n(gUr)tbctuS9n2c$jwoumAMlk747GMRSPw%hTZa59^UvS9wY~H9o8QS7$G`Q9fAdfN$(PPf@BQ@e|AY7M-+%An z+rzMLLS%;~3aS%`nN|b;{(68IDimjOaeaBU-+|GhGVMYngu{5#*Q+Wadd5WsNJ!o} z2guIH7@c!cs;ZvFMygdcbEc{g`YM?LYjpFRhr|4M^JKd{=~n%ej!NKNoafm9h{zD< zGA>XA2iW#q&ROY(f8;}$ry1K|YHVnVVrWPvnuoMqZ$u2NprKZ&wI&f#1LvcQU1+sd zDSCrfoX4iH|N>4{kmOea)m&N)R5398%(23BVi##&w+>pM72Oc zFfoiMq!mOBJ#&cQ8)Luk31%S&*0xv+GMXcb!5Gi1q(MW;nJR2G@ zLoiJV|Ng%3gG#t9VCyjEQqzQ&$$^6vfibNFW1=~K7Q-fSAXEo{kcE)$NuPl>yQ1BKlRFoK9&@lm+r#a`taB!yI`t~e?D*QcA4)oOLR?js|bbzRqW zor!{4*Y^~7y1bfm0;ahZFcdLWwQ5V@wKO>)I%Xy@Gg@LU&8$lH=#g%~XtmbGeolY^ zeHVLGonX#$iZL#uPY(X^#nWZ-esy)VTCJXc{&_^4r)=60cPWqT0KuKCS0`(jhk33! 
z11eZsLM{pwQmVr+5E0x2jFM;P3A?6o(UGTAl6RqNq7PgJ54)j@G`qVP@ZRZuJPyZM zG-F5sTn0gLWk_Sk2%)caavu6s0|0r-yZy0fRTbCx#{1pH{$#b@_UkM)o221js?Kn| z>eMV-sZiS3dLTwuvP>zl_nXazh{kc8=2@*SX;jWR5s_SOW-$V0w(9%$wR`402jp6` zrc99lV3r#KB@zbD)&?)hIR_%-ys59`EFiarT9eBYf@`LJ$#C0cHX18C?aL zhIyV_LvrXt6Ji%&PH}~Z)^4@dZui>Vnvh$Qx3ItSEIxcq&usVbQ|aqpT>U9DSK26K z%T^5=%YM*|$%BTH`b4SHJn8{&fs5 zN1r;HD$+cr0=_Z__QN=}Z5v{(fK@m7~ zUDv6M%rp$+dVO;1^wxXty(d+cEa1a9FtdvR;l^~isMUbAD!ur^ho3%taB_0`&O2{R zHIH)+U2`+;A!^(WrjLjmlEqe)Ropa`IgN_`D%FW*`)VTtTa>o{WdEL7QJD=*N(B`&RS+-{4l^`XZ3R`G4egzhkxO>{ z_1@inod0`OZ{M1}5yme-9!5j7rlvCHq*g=Y`@TbmcJq>7qN1VEq`6YUu_tYHr5JeI4Ybs)zrO>vBwcDr%%++Y(&$#zWN z9Ztjg-v02;<8tc|PfL?o#k8K3eS((LaG-vJ`%lEjFT||vo$tJRkaFpqi_tGBH}gCX z!_c;^<0Ijbc^W zCjbIsLUf{33nFMO>^7axRpz2{b(l7xt46cs^INAWB|yzZ8F87IIOmR+MLIg&+|me$ zsFghM>8h=6BBF7Et_kFvnHfTr;=QlA)&(Ft!nK?FEg^|eNJf&JIw%szYURFNK+-I53B4XRN-g7Ah5!qqDaJD%c zhGCxPMMJuf9*}GrZ?gEL-e?aO@7=}4#bO$$wGz?$j9ffk4=(u7cr#LfQh`vNqr?6> z%_FZb-*rTBi9aB}D7{e!W`NnwK;rAZ^xd7n{wx2P~?!@0)zJk1=vl5tJe+ zrBX`Gl1m0P@8|t~|H2C|oNYH|_Qo4;Jbv_6jBVF-0F+aTG5SER;gf!ohbdnV5A!>W z@X9mKo^MaBmXp=#lf(Yq`|s^`yVYvdYTLH)&iNT%n0h}BL$lrxk=4?5-Ky;Xz{McB zOw+Vz(pB~BWP9uMEWi5>h;HuQY1l7`?WL5aY4^iiYXufGn&&y?LhRuNz72*Rs+LlA zyWMuxMaN?n=iFws27qM(JI`}UDaKYzQ^~m&1!M2}RlDA7be<7WN)<3o2|Tr3*An&| z7ptO(Nv%W_yvGmEbwL0P32S48^RJvKWs%yFkH6i718!&>Rst$IMmoa)UoPIa#L0CJG7;iCrMX!c|0S zQ8wqC=Sr#yLK>>~j0VB+BF1vgdGDWn=3WS)l(b|$p<358M3joOvH75u6I1dnImcE3 zNUNvgX^T&3xHZzd&2W>sSaCRjDGWURFot=EVBRBF>S?>`?5kyX?Lq)JN3d%sWzN|8mg zZ)Tp*As4lrvzZaIzX4fH6r57 z#+&bKHtT9dK(b_;6%pqt`L0z2R5LX1J(8&^I5NhZ#Y}?>3{XocAgr`BM1kG%jA)uB zPqWc7VP@ytJkQfKHBDe0BfNlQ~S2 z85uMB$j&zkRS+Gbrjk=K1V&O+Lng;m)HPut^mtr^Ysf8WE~Wq=AUDQ+PlV1WL3BPC zitv0Nvzv z+Xq>T0^-#eSUnpkB|!jmUcf1biX7QTFwz=SDIIxd5In{x!4o5TSjg~qinJ^My*2wd+#DllrOM zhkkN6eW7m~tvl3#ueWC1mD{ZhWp@?MoH;Jj!N5D0<8f1Iq&l{~wf?TOm6Dd6*yT9A zndZ8p;JOmJuvVX_s=IAwu4c5N)KnL}K~S6D-wG^m1}qKXzqsoGG+45J_3!^L%6zew z-5u)RgZ{o<`e6v`lSd!>Z-=6?eP(*;bF_KhcOSQR|I%0{uzRcHd;9fA$Ib6);&brm 
z7u{56JU@i-*|t3l4^K}|zwqT>{9C{D7e4jL-+O&EeD%w3zVzJ7zy5_^`VaoEf95lv z`l+A)qd&J=t;VMVLUVO_VTv!j^dVGx=bd*{_4M?lZQGM>`?Ej$v!DI!Xa9Hq$N$6e zcnnQMW@P@xH@-oH+wJzr)8vpa`s4A~_x<($s_8cS{n3Tkw*CI<(t+hB91o+9jZp|8 zUSD4~fqa0bX$WneXBlJ>?@#+f*S4pdljHFiJjEEJ4rWBbF84(E4@lCL!ii*rjl^H@%)s!aheF%-bU#`LXRKrDa zAB=+!!!)%as%nU_NL2(=Yno<(A{g3!zfUQhY*sEb-~Gya&h2jB@n>gePai)isd?c; zC)704?m(j6dqqf7_DHddb1ZN(Jffw6%sHoNo>L;nAclx3Ptz=`{xpV=wRLW+l2cvR zbnH{@yR&A!I_$>DfrXlexo=Klx4~g{P!=Pq_gpJi$sBys#BrXi@kPZdq?(e6nFXMZ z{hUj?=?z${RMIqM7HfR7*=&|Rd>qFmgPs_cVqG;XU1DIjqyU5AJWek7;N85CyP^kB zMyfS4vm;03h}aYjs20vi2(jduQ3}G8N=j*(XJ59_1>h_IhFlR%g!1f~e!t&GXQ%z~ z9MfTrZ|-3}dBzKwf@)tN*PM&i;vzTf0_!~ZM8r!qX!XlNEVHn17X6AeMo{cfSC+}`fyW{aV&+{_fxCzA)`M`a@ z*=(xmen>IK&3bEEuCI4jS68rsoU?OOYZa+o*X5j*KITtHW_W zPKRQoH6D-0g|QYwh%wIdypRts)4^u%V$h1c!f797soS|tDiP*Z)MX^a56tnB{D{ymNzJEv@Bma9lL1Vo#3>-Yk- zR9;7&fI~r|;M4q}P~IC2KUTlGV?8Zd-3v!~#4{R5Y-1Y&1yodK2u(b{0Ok0e#BW|c zAGYT&-nzQ24_=pNnAT%7rhR8zZChz}a3Cgg4h$E)1R^d15mhy?4+i|p0|)>Vjr@(Q zd--|!1Bh7uehVPHza9{&M6`_a#H5O;2#7XdwN{YKV5}8NL8(kQnL1IJj&BV6PuxRs zp<$Xc4ZeDtuV+7I-mpGnsR`*dJ{!kpQ15bB$N4I|qvw-a8Hcp;mmjM4?}Uq|yc#5| zx_t-ZlhEvpN=XZ?w{7G3`RO>0DW%=|a~ItwABN98^6v^<5fSl-9`?;`fs^mTMpYw~ zxkw4yRks~=4^0DbyV|PFhqdlm<;WUJ0UxMw4CH5kniQmH(Xm_cHCo{i<~cmS+3aOd z;+*s0a9A{|?BZgS+^p88xB599XPa}K0|h`OB4RKVRc)HkG)*-vr4YKR!pxqX)~Z%P zUDtJ7a;nvNz8UXU!{trXnzLf-N-jAS@`kR9O>hoHN}bZA;(Rs)MIa+HTLz=dtm>A0 zp+(*AoEy`)jQdsnJkM2f(=?3_`~7|?_NwH0p5}QXhPG|nRbOibrg4}B3<(!)G6OC^ zg5@t-3Y!?C$B?I5D#${vu2}Lc0;j8RJnjMc>S`wjKzJOczF(Wb5*wl-wblhqoO7-= zuUfiw>kgrGP5b2X$v7OGb6wAAQdG8xo6Y9zWLxT}hLNFd+ouE?>%#O zd-mYCKOT>L(=w7E)>4|LS#Sladbr-*zIAK8-42K2csK$;G&{e2t8crUDyZe0D_Ru| zp#hkyNnl4^7aNOBmr_wn^PHx6uEn~p%hNIEY;1^*5MzwTfGmsmzU#)Vh#zxoMY;PD-g`_U$T!s6}DQXYDG+wv-YG zo2K#Xmc>t!>WI73lRS=X-=GH{>1H_uoW>Mlw@l{=QKW>XiM|n${eB-~1hi#{JIxa! 
z#uyLBBLL7%E-@nkE<`gUt(6444@_Paz|=X)RD+9$%l*lE-GpdLtF~J-@TgV&N6djM#gzLsGe234w6%%IkKIPB&m zMAUap3{h(?DV=Qk=sX4*hdI{_=sdf=Yx=Hh+BVl=x4V*>Hr;I#skP=>s3?(NB3_sK zIWyL3swx5{m!s-wzdqZZs?{l{!{Kl-j~qM@&pDIYN#C8HoTO<~FH0`bD-!uPMw1!w^P%Dowz6jQh=2~erj0Qk4#R%Gcg~q15qS>FLyeiSR7CX7=UMKOKjCk#)O@-bWv{+wIFQz4Y3*zayn~UFW^OfB$|e*`j~%H-Gt8 zzy1~Y%YSan$K!B?F0D6h7(La9tc)Qs~@`Uj0?Yqvm(=_d_hax4VlH}OI$sha! z|JrmtoUiYW(|&inl3K(73$p7gCpB?Ch}E;RvsXX%>3K|fl1ER!H4gK|RaCL#@#wvG zuDQM%TpXLGo#p~+U3;do28$PucUQy1i^D@^=GNy@3?arCx>XA(CTUKB*7Q9hR#8uM zT?Q9CDilxwV2{y70I>Jf`yfh;WH-@_4zQMble~&bL{m<+R^#BkpNep7i&|W74u{<^ zm)5)M>+8tPs$WkD72!AzsZKB7`NZuzF95^4_usj?-fz~eBUeRMKI{(rD#F1p>*r~j zR$bp4SI5VatkkV<9rG?tL#?c6#`wPXh=>#o$nKq-rD?8XK3%WRx|ItY0Pa72bU7UE zA6x_!T7WcpW{R%NAP^dkK8toOPO}guZMX;N2XS5#R{Z$y4rNy zOg6N>xqSMRm@jwx&B@7BlJD+-J3M-H|6ltf|HJ?BpZ;HZhJW`z`QNYP2RY+iCHLN!9h`(Gotci6l_7hh=Mn9e^Jjt>y{9)81pFc>8owG5z~|=Gl>D_ zeEhX9eBQaR*>3%+E2Rk1ah!@lL*~im<8|A%ecuAn1blvecRU;$&$VU-3EbAINzh8J z)mE#P5Ja`>`mSq@sY6(=+q2U(Fo5IBF^we=HJ^TNyA<>}l`J*q>O&ZZv0ZOE_6=ec z9aEb2$6RU`q6Y%tGz$eJFQrW6oa0yvlw7>CqDev8*cE^=Rg|(`uS=>T64;p<2?ihc z*9X;u5dbKe=9C-?(ygPXeGq*tU^~RJB|mp~(5FW9F2Si8bt)ovLO4 z_Iy1IwQ2}q-m8cyTS-GFw)D?Sn*5>`G~ig;WTUVdceNCU69=!MRkI8*7gh{juR8HuVPF1CM=bNy;db>+c-njLW=-Cd}eR{G# zdFA@_mHu#OF5k3m6I=`Ci$s9xVLT~VpX`4O|JG#LaJ#Kjs`*$e0RxQQ`?XsI)ad;Q z?zgnNLxd0B91E&K($yiAd7gb3SF46Sn~rLwWJReAaJ<^#dVTlq-Q(e58~ReMKMAYV zW`B{MJbctdX<}cidUB3wn1@^I#;gMFbESJ{n{k{k9^E&ER20lR?`z6wnjELD>)PnE z+ALbC)-k>H?sp$Pex0B;Ar^uzAYt04c-{oiu~tcjV-9^6Ww=;vR`Wb>nr2kh=xeQ* zB`TGUqPWZ5lN0wF6CT2Jhq`+WY%1%tYGksGoW{O&P00XTa9xz^Yc2_?W53SkB$o|y zC0vr{>X@il%4(-l*HVx&K@k!$0-%b{*+1U(S%DCYt==T~BPn?Uz)+8aS*2BI zw5xL!&eqkWu@J5%JWO!l%DgXabURAV<}W`t-lr$;Jh}4%hT9G=uiVR9efbz4uiCS1 ze7BFUc<;^G(=z5EvCA%oX4wD8oo}9IIj{BGAzmqpFerf(akGhx1g1|-Q#>G{_ z*gz38=K`55Xi6dyGt(kBDYwph4?s6`V!cTc5>r3|W+YNGCv_157`|Vy0}vvC5`c-- zT0zw@np)K~CF!tI87XphI9pjUa+*E0BOtEt^}DwYQ*O7g&jp78sL!yzy|KGY+&h_` zdwsAo(cm7v(8Cwdpioo2hiwPLlhvI-_jH3LtXS2J4;&^UT+5IsLesb0DPU;Y 
zJm(6#P!EL&T`N(or0iRbO4NtSF6;<`D4H9)w4d1-Zvs7?i*y^6y^e>0nvHK(sV1iC zJO}ntbV}pG7INgBcPi?c^PGbmZDoc25Mv1Lc&QJTiN5VN?$VUMwQE^zEU(KwDidIKwx`_omo8?>9ZDU0+|9QdA+O97DTWt>$E9xLy?N(Z!UK3U;wI zv!Kv*ojVP?-7e?snyTJes?HN& zy4qiDHXA0_#8wr~*XKsau4bukK6sp`dDHbL>x~ES;K*DXn}%iGah{odDv^7U-@fY%nHhKv5nJvNkyV&E1m8yY@WDQp3ZSc#UP=y)U#&YybISGP=Q)FYB>$wQ-G^I^-sHstQIPyorVoy!TdR3BNLcTnjlSM+9hEp#qy( ztsTcYYvbFJtM@+VG|j{5_RdMuE!a&HB=BOEhpYhPU@keVyHv_tiVs{(3uv^UYGy`^ z%zUHj6~PwBF%jibN=dMShJs=B%mJIO>r>9doQ%lK@{|Ci?HfQ*Dptj*TXCaBAE&YA zQfuv_YhsMiGm@FHrw$wPH0%e1dUbuR3aVgIV~pOxG!LFBHqGHUb+=ckO6+ZlPEJlj2*=~z zQtI2S0M}ZFVH(F-q%bcL=Vs_?twxKA@urs{v;)MFhu9Fc;l16Qr0e=KOPTqNCS=b} z=ER=O&4;17kXPKxT(5^20D!Pa^)6NsnkQqlqJUCOqH8EsW?u_xRWxfmzv`N*I!x7r zn`o@uE6j0wGVKmeFE4-cvp@ad?JupXfAi5d3cVZj?sx8AKYI!zb47tkR}DRQ_q!OV z*>p`8*Ufo8j!$!9K;D%<{8RtZ?X!E+l%9L``HPE-x8MEN?(n$ifgS(YXFvDaw;%i~ zf8tNQ^r2V3`mJC7);AOZ#^Z#JlH?ks`U9367RXy@udkl|(&v96)v0QAr;W#@_Wg%{65c^+-~!SJoQuJ@Z`BeQ+_QS#ydmmO1Z|uyd=km5XgE^C2A{TwRr8 zdA#|(`6oWxOpE?3}h;m`ZcX@9gVy3 z!Vp9Zofj8#6+>LFPY&CX(y;EMfz~=U?29pkE_T+oU377_84OFsu3KfO)wnfZO|v6U zK*vd?&a0+R2%#!gDGmV8D=JtH>@4@ikB9!c%BIBtE+53y4YhABMmI%p>iev_;;Df+ zsWS)OCppX~{QM97;7@+@8^2!1ci(#ORuxgyS3mWG_n$ndX*#66Yq1Y<|KR%5v&Y9b zzVYstnt9HbZ--|6%&lAC#>dx3Ua#HZDz85J`t3gvZuygV!X^D@wQ`H$B%C3dt|}Dx zhbVk%?Xa6f27WI2;jHGUV-e?OnW#K&I+S?A zF!Q|0cv;#MaBB*dS6j(Gu7=3>^K|?2!860Jy{O;OdDM70u9_ZSeddLDhT*}T@zy4% zC-WSh`**(Zo4;CKlJT?8%j2tCKNmOW7vbFVENHByR#SjlifHk~hKh!z)EiZ`0{}AP z#oWGw+d`poTb&0a000A1^4@!93Jn1+Q7sDg4-j9!fBwryRTa?|TYU(g9if5f(7M#T zx?VjCC$}neSY{7<;0HyX?p4mZx_$EhHi!H3&;G!-|J|Jr@80^{y>~y)Sd<`@d9bY= zhHriGuXk9#`jv0~&}TmRAOHD(uWL77`No%i?~ne(SHAYG5G;Bg4o6f`RV2z~Hh^FG zm0zCc`HNrrE$7I2x_9s1)2C18d35CBDyCG9`~8hfPZgOR`I-e5oG-JP<(&8XeH+?p zH46a(AuX)7EVc0qy2sa?LI@$m<8kOZLs11e*&KGe-IGU;=30=+M|(` z3IO$e`QiR}$hEB3Cjz1>i)5*&%&FvgCP!w{245@}%M_@n5RlE2skU8Xgdlo$zAZJC zf`?%M0Ik)ia73m6);8Jg@_QX_wUFnM%2h4q09;e{3PP(q3BNUEGxLs|qYzvvBv}yv32&fb2xfx7Oo%{^$;|Scl3ALvYRykB z$sRg>jn`XS_1LaX%?vw5DKbKT0V*XuM-M8wWPDdhO%q@CQM=5)3>y|{dG 
z+#lBcc7Gg=Q0$0&_uk!)4Ab$wCy%C_?wo$erantLU2o6N&ae1-(e^&MczQStvB8qj zA-4P1UzqRH{?#z?LF=q+_B&^WEq-~Y#kx4cv;8O*Ut>+%CLV_Ye ztujJ$Y5>(SfJx0LRctm<8C3y%1RyI?MGTZx!v;H($*h{q6_fd>v=X-|(S%1UP=~N` zXWlhUZ)48$co2{foC~Z1Z4A!yc$}(L&u*S)P+Q#-8C|;PBzYk zRElIlL^E~dYORO;u!zA^nww_bG>vFs=FMhpiYb*^Ylxm5jN`F_3!Xyo0IDhg)^#m= zDy1MYs6-!*$BBrVrUB6<2VkjNOlr0&YTc2zmKD?h&y0Q7Hwr#uk;c^vZH_zG3m}6jGo@i2P6&0~s zwQE}guupc(`*j~269ea579UB@S+ky;oGkG& zv1|9kc)D2wTdCDCneZ@<0yp(`*L7zn+w0>&MH-F|AKw4)hd&IUsm>+k=wpm=97n9B z)>>-qx^9@KlYTSL^Ei%WF3t_pGN-IydNV?|svToT@oo7zm3MqpG}*99JN!wN}ZhYJ{bfQcCArBD%b~y1cwxLZuvYp60Xd z>G3c=y}YQk_P%BB*Q=AmaBNqt?{0hpsv4YIisAi!?_*d@2_ixvR@K9BY?{V#Ktz+9 z)?ZbX7<2&zyEa5&skIU!0Bn3OA}Yp2%#2DRqO}65ckDTk!x#ghk1;a)I+KSo*JpK&rLoGUuGFLI`cs zgud@{n%L1e9@%rhTAA4}jRZVTgL7`xcHR*)JL2FRGhG~Z-g)1*4rv@S0RrGS%s_3a zdN@pTnH@q5fdEW7)gn?>tFG-<&XFqQRAOvV>f)hZ;$CBnKKfytUw^W{-e2GF29Veh zun}?N*ULK+LJ%pt-A<&aD$_#2F+>b81|O7AMdw`EJ7!lz#(A8^RHOn>uHu|y7Z%ch zh&ZBJ>s;#Mpjybu0FZMAfVpJvJvyeFOiL}Lh!i2_*oAi?y$`laW0Duk-Ba_|8l=S+y+`MznE+l}|G)KWe`?>Iz*sxr^ZUD`8qskN%sH26sK zl-P6=!Zg$pPOq;YzO#Eeb)Oo#&y<%|F#grsFl_thKk||1(s6v+_lNn|Zq`z5IvgW< zQe|QWc=+()>#u$H(W3|3?e^*AMH^PP@0{Md`@-ebV=;a86CeKW8(;gIf9tPb@85HT zyWJHqxJZtr0W3B*T^~50VH(pojzuLjezo3o=+R)gW4oJJSt39TM5;iXEoD)~1=D1- za3_d-49klzB7hV^6GC8S^9lf--A$R36A>->6ACxa1ywD%E>ZH!5X;PpNUrH@wSiKq zY0f#~(ZyGS+bUM=@c>uH@V0q^z# zkFK70Ja&Tn`RqaagV(Qoiui6_d*5^*Rfg+HJ6vzKWloC@iiwGd$d(zb0ua&fid(!% zQ&3^TS}au|1tLTu4qfzIYduUU^4zainGH?awgbYPZ@Ae&?7a6OF8O}tgt!orhLDPqM!Cx)hq zpf#m>ePQ#|j*WDuMBs>6KTvlQprLpI@0u3!oNFp+?o;u#YT22kN)^>1RkVu00v6gS zqQ88SKm7cU{OkXne`7fA|K4x@=c^S1V&C_vmg~d*co;>bp>DnDV|{$SB^7GWe3_9?gI`%uSZ%tlNHoTp~^pJs_K8l#lUj@MOY620GZkcJ~$U5 zISraLYbtEu*jL0X)d7&B1yF8_VBSPTK-8DAuR{#^v|O@ zOs6YI?SC_M=SnB6JDRljb%cTz&^h*w3=yD+)LK2zf{HR?HB;2(m*iep0`xs11EMJ# zgCnh#k(e0`Ztl=d}rOM?K@Ja4G<1cUB zNnaf#io?^#UvEqI`0k%LcK+_+iN`~#x~c)%(Ek%*g1u0j5_z^wL{zy@jJKV4tT?FMVvsIU^t|g5LDy z-Uo_^OpEW^%s^F2sUl0$GMCwyaxDPBj6s<|+ar6uf|pX>8kuy%RwJ!(vOrIh72E9JoSK 
zsvNP-K_e)!_0@nA?10y-5~Py?tqBSlT|ClLh#;uL?i=1D{0bN14Ke+ zRgFq1WoeV#QkSWzYD$U7Em3XDxL;KXog?ofnW0ocFhmnA2$tr-Ox{OR-rRZA46H6m z0!xO3nZF626*!NZ-8@e9zPJ1F=p4n^-)^_}+?~6nLKZ3YYPxp740;ek(IRGW(&u-6kKp3;sDy{ zQW?9(bxlCBCd9GIG$+Tn+a1scv3j+CEFw*4JvZ|-m3$-!$NA9u#DCVVH%~ST0@zg9$VB}{(tQK z*ROV4mfnYrFws{}9=X}QV{2o5Xt4jx;L|Z%otS%WuP*}O-za#rA0+^Bgj?+h1abp*u zqU=X<7-P3lTh6DYRuPeuoa+a|&_eo^`;T|3Z}?LeUYzA>z<4iptdsK|fq~GZfodzY z<)UP0wo<28?5D1_BVw|j@WFu*py`?b0yQ(W)zq?12h4Pht0^E58ITbmqM8DMH8Vo1 z;1Jj`3N})k?_s$UJsq;&hzpXzaqIDd7E0To^==8@JE_}l{;l-uZ>IXIAAjhQ|NHpn zd*$*^@$;uqesoX1r_`Oh*nj)3`?d1LEkEaQ_WN+<@a4_BRNc<|r~Spy<;7kd?ERPR z(a2DIn0Rw`*V&hd(_n<0O@e}{0T3!96M})EFd0Hn?u@#o0^EGm;7!k_z@TK}OgjTo z6l0xRQ7ge#004jhNkl_O)bIL(>-l^ZYaTqSTS^9_ z)C~qX##olT>c}gL-21>jh)5T@TJz1zo2$oF<#^{jN$*}Zfr+RiG_z${eDEG= zE_t43QM1|Rr6A(acV%9@>p(4qxGeeNV&A8@9sG1WtO-|Ln+95Q&iUYLt)3kdc;~$L zqB@r>L^T)hecyHT=+Uw)d0Bkd6H%U*S?g}I)!L-giiL=_+pVB(@uC=kIfWXY3 z3?^~i@uGvxZg(DUh=2?j(K&~P2Efefbv?+>)6_oE<-PaX#37Ua1f07L31Y08%>azd zq~@Z6jHIeWj+qTqOn^|$)QpkFW$wC;YelrkUX%$AQOk@nsllQ*cbYxU=o5x&A>0PL$JbCmO8FMYml9y%d(=c=~IPaVTwI;RJ zs*OE6sg02Ua5Hq9VR$Gi8$U$uMXMnP@0gJY%93BcItdh!HqAM8;o@Sas^|0h*|UC` zGlF!k3qGxlS>rf9fARWuDqRY1J$jOY7m;zE&(qk{YD=vtp$j31U~8r8Q%dP@IJ~~S z3y%A~2f}I^$v0Jw5dg-#%yX_y0l|n%DXj`Isy?Xy3@9k$IC6+x*R4}SGl(&+1Fh&B zB5G~xSZ?i^VCKlq5jrq7+JwYJh_i-?;jQzj)YjaFMb%O3yKr~!s;-gcD)W*;970-y zapc{P&6H+FOrD_B0s!P4S_3pgAjdw2(v%u8NJpJODN^2e{OGG+`pU~+y}fA%-T%UP z^{?j5mCJK_f7m}`XTOw!|NalY_x$5mfcmX(eT$iEt*y1kyT^|nKW?%#Yf454V2X;zbfKUNSlVa-F0h0nKn3%O%#FUW`fRR7}=pp7y%quu| z6`3MAW=7=3x_)4-$J>yCiikEfWM*PQKvH{sfA`7r%3Sd4S&W&-mMc^r{hGf4QZt+8 zg=rA8<2)i!l7s-k`Bp0Xh~8BxF7O8*ytf{~a$dSF8X5pHqM4ak2r0&BrRBM>cN}~* zRgk$AZq<8L;egaE7-B87EzzNx%&F6>(AD>?W4o-oJgqA0F zzbq%99RQNi13$>n09HwQJ#ND%#_e^yDoBDvKCovrle~ykLY9;^-Qb9wbN=#LL{ubs zbRn!Zi&bP$sjCA65k0%5HZT|2w_MsXMs{roWCXObAE}Ee1a&S5LIE9y0aI$NQE>Z| z#E}p+L@{lxkdY9GBn08cCT5`Q5fC5-A8@lD&W+JUK=WGIw5k^X!h&Jfs$pBfo!Y6m zR@yZ6Zb>SQonbHlgoX&sKnt76&wlmmfAWw2nZNco{_>B0_}xcO9{;gF@-KEfeD8zz 
zfAcqg^X8*FLuK#gc}B!M4@kPq=jSnh`<>soytw!iKmQBA@}1xOuYc>meQ`f+hT3+U z7wMm`*PnUff#L)?5+HeAUj_Mz7vdkgPCjhl)snubjf$(M##h*;0<|Mj5oBj!Z`Qz% zq^hqyT>n8IaDE@xANuhC+3&pGKi;_l06M>%?xp2mz(@)xA`0N;K_dedkp|LK6~P9Q zBMuh!)wbv5bsfv5-f#O@ux_fA41ODqGKm%^ZF%>vZRLAI{=uid@z($B&;7@5zWL@~ z{IC9-yVu>*G=8xBsc(0mx;bAzIsX^we0LMR(0KfOv?*TB@eL?#vAMaI!=j5v^_Kxg zLNje@4{AP=TYc7uC}5yw0JQ4-L;-s?V{5F99(wl(3@SX(VD*zM1tHNx^XgC6@4AQ_ zkbwe-0veh)=ercREd*zYRaK)8!AP6BOfw%aJ>~oF=WsC|KkQ%E+b{q7w(*bN`QI=6 zq;(g#+!GYEFjAM#hy2~Y=VO{mIhWb)@BY32`Ty!~{dfNlu<`oh$uvz`%YL&xoKL~K zBF&-u?eG0oDW&iG7&kfR()9j#6JuoZ^D-mYkh)syI;XFt1wvH~A$WG{$Mz~f17Yk^ z&846yytu7|sGbnYB~U;k0y0GJf{WmY92-EbmD$%?cNg1M zq`e-9ih2RCL{2bd-#nZf`BEoQWg}w{1SG#&qX5Y9z>Z=9aOHel6m-YIp(|Qit!qn) z5qck1rykc64st z6O%}Fj@TiAh2V(5%&Dq~iB&{Yc%bdCN6k+(AT|XNLC_a-VZ*@eP>@ui*6DtlbAA0o zBGvz(iZ zsA_G|T+|6AI>kpjl%5uYO=%{0GUspEv7V847w4NV=D)SE?|^Ry^HO38uJ3y=TIRXb z+^VcD$Ogy`(XoOVLhKS)B$my_JLh7I*N-3Vb~^wobzv7ZhiMw6m_xUAK`*5Nz`dPL z2Yd4D?bGqd;D7m-e;F)%<7+?n3m*%uoU~~$P@gV zfBxV38-L?(yz|a)&%tjtJrT`wQIm^{i?uq!H!P(|t9f2T#0mKE40d?d#qC)o+-6z43`o2fhduT1vXrNuhOCu%vT`8Pv(@5X)eHvDsU7gwEq zale0gQQyO3$@9?Q4n=%>H0Rew42n}@ z#;IjMCG-wE_FDr|HpOOADLbbU=scu6&bNN+!r&1Q2umsRJZn_~4#ZvbeZQ%74Qk*# zX8?2|jdNxN1|;X=&>KQ-Qs#NB7Esl6k-T5I*kY|Q)8()g5%FPVeM>V>tX^}?W*P!) 
zwp|)7z?Qn?B~RO3*AF43o2K){#YL@B=EZF`NY-ir)7`!wPlptT zXV0E}{Nnl0Z(hE92>@?Dds3vacVgOwZok=^nIkTxAac&tOn|VqY@;~u5zskDWVy^a z=adqmYgL=o)%O#KhSW)G$_f@%zfK6D>-!MnJTK!seoR7@1n zt?<%yw4kb@Vzz?4#mp9m4lQ;`n=}ywaKOC_b+z7`s-j3;H_6zLnHb4L=XqY1WjdX~ zETvRg0HA3#H3AlG&QTZpsOaNnbGdzadvkYppHkXxw<)F7a3QUNsHvKoi8iPydhcCp z>Q|5p!+zV>M;FguO^6U>jm_| zIdn`9-)XiUTakNTOYNigX?S&idhy|pYJB~4`Rbn}fFPCL0OKweb&fD#l$oW8OIrToJfiPZO zK0PgCmV(v}#~Fx1M5#@+vh&Oi0H$TkrxHU)ME1beM>7KzA|q>6P0gyQARs<)Z;a5n zU`N-i?EB&q-^rlu;UBGN>==!kheDXykf5osze#dT8# zp8%i5Otc|F2%d;qYqd58WTfE2dC4)Qv>9^F<1%~iiMiCwqG4FyjAbq*ed4CiWhNqr z)IMoHB65h$lV9eUL>I{oC3e98nl>}gVsk6g(j2m&X3SNj>$)-?^8W4Pvw!(^|1;Hc zDksdv{3Rs_x@v9ajnr-q)?j>vXV2a4m7cW>xg)r)&Oe& z0zzjnmy(EzwkD03*wk7bTnJs_7`*qQ8BGbmv?;2zB1FzP&&(F9i?EoUMfL$)^nj>} zBBrDu1iCmaCHp1-z>4If0TtD4txnBRXy`zdK?oTfm!*Kh2k*c9tzZ2=m3-K2QwrDb zzW>pCAO992W@(1tV@N|kP1EsscDC(fx_*Rbm@c+&fAeR)d3|wx-JjmM`IRrd{k7op z$EUOBxV_+bI_1)YA4*l%CaiVd{7=}oXLNqv{wZ94=*I)_iOTkqRoaNyREJ@};OpI?wx+7iM+6pW3=a)#h(<(A zh-wuZu`X#lQQyOI5nSarGsrxjlI=a=q}7kd?eZI&``dqQ_XCPof4}}Kk39YGM}PM} z`ip<}R}XOc)*rc>zf`*Zxjns54g~I^FmCALi|(Vp`^NOEH&6fkqgUU5e)%)BA11m8 z(cJRVT)*D6>-HVY0Bmhe1^{PBXbi|e4D69rGN~X4n*t~R2{=PsaUW``2=owXLIV8h zaNwW3?2{V?*71@WvvnbET~~s;m8sk;J8$s$KROx2!}GU4`0e}M&-yynYJEAN+&ucu z|M^Fs|F=GP`_JLcs~FvB`&q~zZ*SjiX22 zg%HdP8JI{w5{ItqmczL2^_?fj42aJ8S{rew%ejgWyN9Un)Rtx3q#l`&$e@0l1Ysoo-^CD)> zYAqpz+^T0>8wKWN3_euTT52xMImg~97$PQgKw9S-l4F2UYI0p<7P8W6O#QMf_xJa; z%6K|&hM^x65Ei?t+QEEJ*wP+$5jXN=p3V}?5Y_m!Ozd|e!({ye1EM)5=QKd!nyrl^ zmi>O8VxMy^r2%7W?W2!AaxNfZ-**6dKA&^WA%vBuS!-REl5_65uImyaRw-+8cFl3G zZj04SH!ibt?#YuUn@xW>9HwbWAqM9~aamffWtCbKpw`w95rM&5!=`zp8BKt&bH0kM zYQUXWORYaqbZxLu9JsQo~l-np&MFjA_kw7 z`_w9>l!o3pWEaddH!~n@qt!#lB%*6MB^eP|AoiwPkFiTjtMJNWjKTBv@~YbmryAdQ zeC@73btClgxX?Uu&;@tPcw0@C0>we1YptOaY;{*pXIj#HC>u3In&xrXZm=W$=<)gc z&+^9(k7t@{){bM>Urp_faP_M(i4fUOnuY+t2n~@4!3a@PFJ_AmpysEsO=B%tLxLu! 
zd0e{S7+AF#7#fPmvP`FO{%il;fBDv%pZeLK{pLUV)nD1}8N5id$B&+V_H%Fj=)Lc$ zAXE5LfAUYCj?;dB{k`vf?<-&V%DeA=|L^_1zjt-D`^GoF{M+CC&OAa&$+iM#tP3NE`5 zqD+kruK7Cbp=N0{OT$_vFQn?3d^dmq;yUI=06+jB076^is7Uz9$J|fH13z8AuL}_! zkeZq`T=5v-7!<%5i~tCX5!~->0R}R#6SLf;HdRAKVAqFw2Poz{taU$qyvgr-z3~rIf{sBI!m)l1w6B05K0}C>e0fLIr z%0@C26%YU*CIC=T!8NM*0G-oM>Wcw?;^?al^mjwx zCYb5iLMzQq-52D%!kS69b+wM(hR zgq=g+n(O_|Z8vNmJ$e+K=a>*tq{SHLd6s4=rRdW*Ez>lvha@pgDM3}Ure>`zU@fp8 zhT-z^@^m_ltqv|0gRnkGa{DInUqQ@XyooX+D?^2NoKC>#z)W^Sc+ zy+cIjLI|NXS!3iPVr={EFz5Qg2OkwFF{KnEGpe-^!e+C1AZ3fh7>Dilqvy|eo32kA zG6H}VUN5GOD3=)>_d{CseFKdqV2ynusHJ$T91pQ`M7D0re@8&!Sh!QyVOUZM|MAATs$a|l2 z&hyfBovz+_#04MxKugX5*mYeu^qbA*a5$*gvMkO8PkcI`WAxs!sI6Ci9OqK2bB@T> zni;^%Jn`Z7?)r0|Yo)R?Ep097Z3dp$%x2Y<72=6y74}L2Rc#WS?}nZmHq-SQB4TiU zp66wp`b`gxro7ZSckKI^lyF&c2+;tVh+xa9io_kReEA6;FGDG@Ep9N3ux!O9dNv{Rt*IL4*Fr~HpT^8Q&)GTJZnwwd@nY!BBzH@yiVN-Ka4 z$eT)Q#2lELBev4aY)IXZy2ELF_3D)i9?6JEijX5!6*ELkF*!m;a30pmbW;Q{?`a}6 z)s8bn|LQQ^A3pfvo5Sm4dg(6`%S3cB{iyHg;>ok){p;WO&TqZ-#xvjb7uQ$c{oxO~ zq4&NZ6z`@LJppte~_?NU2X*-C>S!acMk_oGXw+x=U5SZR(7tbN(GOR z(V<~*?7||d!!Y<5rs*7mM=)k=B4s{HE72$NRHb+yW0#~Y>+A}EOtp!Od1>Q$b8#`L z1V_zO6&PJ2X|2T=s}q8pIvdC1Bs2HB`|Ajs z?{qQk-uhr8G30DuIB%7E&K5C9Dckq`~hRv4p+0O-oLRkK}W;(`u*&YP$A* z&vFK^*n4!o6a>^7Tq8doEf*0IV6GyBLX6~HPi{IjNAB^Hee~BicSj)p@YVOnJkIkR zLI}zG6j!hZ5vi2><*>B**>1N>@#PPHs8jxT|J6VL;`P6rdie*x_E(1a)wcbkm#@Cz z-q_A#>$R+ZKcOMVMGDF-xJPcQ2m4n!5| zQdlr|<>0BFVmRu!(Szyv(do6%ANcm$*TZE$J^>o%%~#+06aSN&=kHA43;*5g58prY z>FO6Zm#xZp-gbZ&$!gv1zB~Ne3&$k>>gHS5w?FvN8~@T79w-0$di;KMpRL=N$|hsd zstXcJu#@IPI@ z+Y%8Vj|d3JU}%Eo2m#wB&?!o5Dm71e=x2XP{TnYn`a5a4xx4%sj+^f0m(HgNb$q;k zYkA|#efuxc^n=^0f39ACEgk+--%b#ol?KBwV7?2in>>hXN< zTs-7rX7fBx^Sm-}Lzlew(##T)^Hu87q*htRrIC8ygSaK5R-uuo8G?c_Qj8HD!mO$~m!?QWSgLvybxlM- zaT>>z(v(X|3~GQFLuk3p;sCUL`wZ5ve4i9-t#a2ijU*M}q)hh{y~8 zKvYc-4b^IGs%ip=7z}{~5zG)&6}72W(fY|w&{W`|(1o1yD-3j%)6E`;@#|CfI%q{} z2%zM|k%4;Dp5`2s)gzICWMKny=-G{R$>%c}#TbcZ0J{J1W}2o;cJ=dbxoOPWDz=Ha7<`qhDPcFM~E_8!^P0o?piHmi`ZZ6>x;MZJJ<8O=Q)o?;5_KX5HOmS 
z^T%VeR&`C7`4GCah9qRJtw}Mn5JGTHq?!t&F(m*nmGLy4j`!o-z?_?MKg1YgKO_zg z%F5Sqfc~3*^vf?^ez4o6PkrX?i;G8(pFRECfB%<$`B#7Ka`VHkk4nSoI6is&_7}hK zWrE@9lSj<-^{;*7c(~j2gGxD`UT?R<*T4Sd?|$dKT5FYdcXxN3#y$n_-8@Z8u6^H& zY6^*n96$^%#^_O6liJ#cAHAR09K!D6k?Z)y%`5bHTFy=6G>(paOfiJ;(FY$o*GC5x zv2(B;0-NMEVa2|_-tu9Dw$pjM-JS1V_3`q>ANluenf>cKN4RvKuDdH~!HPKnQ3PN^ zV^H+LAuwSfLNicXZ$ly?$3R3z>+Qot0EE&2;HN3KpRV7hr3ys29tIF`ol>p;g#g#e zDg#}=ufKQG0VHR{0#F32peP6^5!AznYDK~ps$AWE=QA(=*46R@L0T@7cGp=~cclPc5&&yw{H-|-c+o5*ZAWX0|Pd;hj7&w}M zl%hFQ=^KFo@!-JVp-O$IBG<~=94e}c3K5YZO59+Wll@y0+gs9Y4IbFOS!s8V(=VElR20L-Lr#l>iW&Dxh8dsO>+i+loF=~NXRr$ZfvA%xIM_1>dlSr+djA%a55vV#7HO;=4**LNM3Rt20$1)-ALt5>hg z3;>CERq>eF3@l`FXNDxGt+)^QxWE;vUB{_gH>wdIE3j)!yV2LP(IwANPp_Nu(+ zoPX@)T6Y<>)|_(;ff<)&QdP8JHC+OLmO=m*yUn)m?(Porbc&%hvPzkX|r{1muopsr&0 zDge;OG)+^MM(_|8FH`@V0}!rrYDjwS+#MAVj2mfXb@IkH0zyk<(~oRbgUIYuG^=9mbH zh`ry&u-;Ks1OOcIYS#_q?GxvqfC-3EBf=O1f5T9Ic#@U_b zan4`}jJ)zc(abqsmLejx=9mJJGh5EnwBPp3Uc_pZQmUUZS9!F%e)a0rF)!I{%w=ji zl~$WNY^us`B|e!doQ~%e+cgXWB2vH@zzh$E(}Vn4n}~`*Gm%dk=8Ry4L5YYoWp;>b z?}hW;H>sr*W=tu0@7E*nic(o0CR$pNrmA+q=-J3MDQe(-WH4^-G=n(iiE*nB zwoyh(Da8<$WjT)LCYCn6Hd*~=9DGV{X%G-OIpV0Ts#Hd|THr-YOmnLfK~ISz9?AlX z+>iIa_2JEL?*GjF{_AgWICaF!2S;qfeYT}tKYcRi<(K~9FWsFFX_M6CFdg5#e%yC_ zJdcdn_q~~gkS;e;b2XUqG9h89lL46-vqwaWWj>$FI0*eNRI;Z)@m5;s82miTUdGCTg?C00zdw=`o<)6~4pO+BX zS42d10JPSDK7=xukSXvwel<{CGe+JMsmS_RS6FjvstVGW-Ly>0ayM-EA%YnY$0piZ z6BTA=7kG8|19&kK41tYZHFc>EE-~R!gx0D9{$zUw2xj02=E{gpRh9`IcPTBcLaWk> z)uMSZBVi{d$`H1(5H(Y&3rnCFo%3uB5l|X{6(UB*Vgzb{-Y_6yh$T>~1XX0IqLzoJ zZSJ5cv6)#F1=7HJI2;1+N-5{l?WXI!+uR-Rd~X=M#}Hzi&SRTL#6Cv9EQ_)|zP@bj zjk8jl?mqpgFMR&9-?%@V-uv*KcV2w^z0>=?)t~7nzWtjw?rZ$Um+0{7cN{FAggML% zes?F(4(o*QV*k&j)A8r^ul$+%SAWa@yB|8ZEV6Hw6(?7#H3IQS27j2=Pl9~^zwzY# zp&t)CM8$1=B%uJ}>VrUz}^(7v6mG`M>(DU-_%=e*3w9^cUX08q1gBr;^O)PM;kgKLvyH zeWfssbKS{J_S3+dATA{sy{}*W-z9f$3!mToa=X9J9N1smGI{?<;)K>SGx zNl;Dk*1>ei(XO9O$AguZ{rGWkyO)3TfBS{s{x9wLt;5^@&Wkty>>EG)i`wq)Uw!-j z3xAH@`q%XM=9mWEZDsqVbbQC^S>467ETE!`4sQ6@|IL4=7W+rP_R9v8x*_M5a|Ojs 
z+8yOQpO)@|KmF-XpQdr$F|Tz%B19n)mc<&e;u>e@JaU65b=kvK_-t30fT4@qv-0wO7T9&0t zVbk}j5A8i7ZQaW>(`Bwm;Jpvv5JZd`u{3gyz{SOiHBGKV2lIJ~sgE&67ttW+ObK9F z=JBx5{%Z4x5}fj)=jYqu(U7iGm9#d^Lkxrs@jk(M2n))D6hTptY4EvVoeR|BoZ}F~ zL&OJCN+BfYNJL6*hzMkBMcb;R6MOHys?PHS1bv@UN-IRt%$(yG*IHvKrM!Ikveuk@ z2q9=Q5h%HNZ^JMsKwc_{R%xx85o#6kOYG2xRO}+x4b-{X-9-$DxaL28JaQ+ZT+2hr za9cyP00>puvMi>^>_YI~Oc0uCGZ=I6j7SJ-sA`76c_NyoB4W(Eo>>UlIbseLFllpu z%Dx6yMHp&oHC=9&WofMt5e)~0~iAXN3%od!B3E3&AngKI< z_V0iEQ7d_xZ|`q@`Ey_Re1WO!FLL(Z`R*@&{_~&r-uuwkqWAa5i;G7$w=ZA5_;7_P z-rU?A4)>cr)g}ADZ@lr;hjcidU)>#J+W0P%Wkw%2sc)^dCdYCrOWyQDo@Z^fiWRhe zH-Hfen1O=5x_g<}-<|K@eDlqws;~5OpZnbY^5O?S_+j6tW~kDz-?<)7Q|nC}Sgvgu zVb?WMT&NdaBD{kqPd`Z8^UbHn_lm!G=W~C4di!fVpzzALLnAbEz-*8jksyL&Q(cWE z004wW2#5j1OaZ{a$R7Uv>H6vV{ac@$4OW?m5E0hB6rg~CnIHm~n2P?M-HS@DndWL$ zt*RIppn~Y&^GJiB>*o)*`8NoEw8UI7-PhWu0Ry2EEXvs_(sP*zx0*Ej@}=`%dQR{D zp#L`%E)kEN8*ZQdi9H_s^v+Fu%$uNXnm$-w-2L!VUwFpBp+j(J!Li3u7J6MA;;U92 z&zC+wllOjje>ueq#R_1`##%v*A53Q^Qi;e+tW^UrZL|sm3ac=H3}R@aK!}7yj7aO4 zWt{}5Dy>dM#kHFNK}{QJMH8-Is>*0)j**DBSvm(S*mN)f>UQ_U9e0F^yqfsTn2Oq=7}Q$VueBO9`k$G{;(Li?Zzb~8p>%)(V^UjSySu@NdT{} z9$#NvFVo1RmsdM&{{H^{cs#OqDW#_3y-$5_bfqMT1wmRx2>V~ zeoF%oBI4=PRNI_20uWkhb**((z@{=!%L9YaECgd-D;c%#`yc0ZAjrX9a0}tie0y@a~Mib#)Jv-;`&nHzK`o2z;0Oz}# z>+9<*cACZwVRT*vn4Q)(mrP>Yt4*L#TCE~|*DX1p=c$hgX~pE4^PU(Lz_ar{Fmowo z$prw0u0Kxa^ZDGW{`e{}pT=oqq$Xzez^z)AQB^}osdGe1h;P01)~4$TaLFe$=e;+x zHRk~UL==5c)mEw}W}|Fui56=@z|nalTvgq*ivj>V^O~Q|wVGMU?SX{J z-~gF1I066;&Xj5u)#jL2CBS^z^DIr$V_l+|Qy=13m?R|6nW0phTUeV{Ps8Ik&aIGv}v;~kSDlQK`p z$;?E|tgiF07{EEV@o5+aB3kmaOtXlZ=%ybI^LgkJB7#jzZEKIWN9R3w@1&75OQ8eP zWy&dZj%~ejT5Bn#zVBmXFd6S(kEdG=$eyg69mr+p1BcTBIcNB!=n()!Ohj1qaC;X* z=(?_yQgUVHT59l7#cFA*GPN3)$RPT#9+XVMd*5}Psh&@hsxHeSBF?j!Ez9H_#TdPF zt+loK_~8)&!F!(|m0D43Mc8|KVDw|_%`M)0F=(L^ zAVOV3Z{VF5$&xGJs9Dm z*y+6=O{)K%2Fe;9xB5JRw4B)yE)B z=Te+=LKTy%DCAvJ#np173ix9aH+#<@pw*-aJ-9=4&7G_OF6UfPAtHkZz0eUmA~f_s zRjP=RN9UYpM~vS4T$djY{?~-Zy0g;tBuU6d1hgjC*L)igB9SKt0E$pVv{oW?&XE}T z_0IzZt7blg3aywF6K&ugFqEg$u 
z%b};UxYpJbHEUMciER_Dtg{J%WJ)P#_`@LtYcK8qv%*fjA)&ZnOVcI<~R|RL_rFQ0wANaX5Klq_VC(2pp1vxIgc$k@IJbrM1{me*rd}yZC0StMJC@b z6Nxrpa|4T*Isqf}MATqGGJE`aJ72nX%K(*Gl})T^?FQX<-4{Re^KU=-?8hH}_;>!{ z-{d5XCP({bHIN^69RaQngbl^9oSosTIh>8Zc>VM=m%sd1{vw5i;g0&<*FO98|Hc3G z-~ZWXZ~jOB+5hps{`k73&mh9O*I%!L0sf>!{y$v4WP?6Y8ua6rkH7Rcr^`2Q|G}4D z{U83$v;Q!0n?gF*QFSSQsqWR_z=2Y`P%KF1U9(ZV%efavv&J*UqGaGzse)1ddAOGR|!?#CG&)+Y< z{QYm8r`rE~9Z^%=(Wa&4)O~e14(`5X9wV7Mhr^(Ua-GUzw59G&CJmS0fU2?_me75= z)fd-a4w6yY$%m9*0R8AQpR~yFCtO`{X!O9&1;=7^>F3(xc~NPby}{kVO5f=_f9yZFeEFA4 z{0#0s_uG$tuDkp7^z54#^R0Bl?dBI^H4n!ZPkwR zH~;u%xBap0ou}h@ zlss*d+Yjk{KF_sSe9fIiGVjRRpP3%)k(pxhVrcXjp6ZfrpSZx4bN|OI`?rt4ltcF1cotbr{tT z`{=VNd)JTSX`GHN7cq*#tCqg+*}*)|-H@Df9;xd(LLtIO*ZWds9Oq>&X0Y9E!P7WS zb*k&fAv^NkBSJ-Kh{#=0Fmf?a&KV%X!p(H8=F7P=L!!U#x&CEm};y#GuIHr^UU_Fl3 zT31Bx{r$Zv5PJZKG5Y943xJNx%*-wZXf5Y#6-ioat?RlFT+STWG4p9U=iGu%))q8r zRZ6Mge286ct(isdeJ~gDxt16AK9_zwblc6W?QHl^53ru-mSw56a%3`6RRjtlIO5it zw8k#f+SYM_q8V{%l1nipaGn{pHmMC%NWps#Y6R%nJ(N>HGZjQ++hAyw3Wzy@7y>aH z`Dw1TDk6hXZK!4fpul;Y5Yai#t+C?;TPw?Ev)M8&SN+lrTwqS|Frhykb~m?$#zLv3 zU6&pigq-tYgx)_JIG(?k?moOPRXLV0WH&zYc5cs7k*yQ%+_~A(O3%EFNe5c3B@b9e zfR)E}I^R858cWqC&JiM(QlvFvLu#HO^v*e}(hQmpgdl6;2Slqt;BB%l+Q7iy>a@wl7~k-|hwq`>gEP9$k2oa+tG1 zTx2=kp6}16)|xgO$NA%rU)o; zZ~g|kA?*)UKmO|TT*rTK^7|8iYTCa+M1XAU8hhdZjunU;J_vbxR-s5TU6dqOFfUKm&%~JGhVnHIZCJbcIx`CJ&i? 
zw`dy9FQ481!`<|L4sR;^J|DPE0bFVGWaiW-juz6<;)j3h=8IpLuRe3Z{RZ=U`{}>^ zZufJ~hi`0-yH}}w51w7ydyEFBMDz0sFP`jw+h1&6ou8kV^KN?`d}0FYR%9T83K7GPy($@fmRHocWI@?Ba^AE8&qH- zd;rk>*pPy1O$3a_#Ks#oCI7M5C1O-aeINoV)h z@bWRf==snv^agpp3&mw|#!p?La|?6-8N!54nnIZk=DQsn)i2@Ng4?y~rln||(w42A zR1*A_FEs*`8?g;9uX;}UbQVt!4Q^tZq0y@UB^ToBeQ?@>{-sai)mSk zs>T?P$K&eI*Ve#$=UgdeoTrtHkAY)MDzLk}xWB(QfTflY!gQLv_k`@D+iZGfTvfku z9Cy3jiVpuV6&4X|E^!ksE-oC=%B*t4D_hrYiWy7cVvrD>hdCu?Y+9Q==d^x>=5H&un%z5o9E z(DJjVj~MNo=iB39Dj7Yy7-UfbGtsCx&F2u)lP6D9wag2;$cTYb>S9^ua$d|N^<4^4 zpi%J0yZaCVp-Gcm3wzeOs&)0DP{Hxgbvfs|!wCYdg$*JP_{5xZRUvk`3MFQUOhh51 z{eFKjcxLXpxZiBe;P&=52Hx*B{Jzz z15@+OIUa@q)Nbav669JKupFk_*EcYi3ztSK%uH^jx+|DDA|i%QfSeCzRYda6doQIG zsiiank=i1K)o#7rZjYz)^XJb+VY}T&$4efiQ8nLMYqARXO+;D~Ro#34#?vQPS62*f zGwej`vMeo^ZNHiFa({n3kF$>{xwzUO)ylF+-*++et+i=BFUw-)`-@Eo!I@vDbm!BQ zixLJGcN3k8HDi)4_FMMGby|pE-5aY4BCi13))W-cXdah&T-J|W41~aCsQ?&U6kBh( z-$8-@q!r!F5IBTTTHS63L{yVfmKCD3x@Ap}=i z4I<>c6Dg&!+K~De^Ic51Grc+2`D_2`GtkrRhcd*?kN$dJew5E&aPd*U@8>p^X>_2z zcgORYz>?!Y*y?0Z;}E>}Eti}nU~uSm+u?A$o%6ZtUCtx3ABL23DS{#NYcWQxrAs?g zK!atOjlkG|0amCD5F%#C4$;7b1O!!;Km!4T5s?8jGXz*e8w}WWgZE{{pslA!t$H3u z?;H{V(CTdaF-TBo1rQ+u5+DMuXDjD|s7ez>HCkuGpbE@L2I~-Q{Q&UJDMFU18G;`e zVlJ-aL=bc_SAX-#lP9&cB88DaHTdYgpKDDqC`eOjOH<7AbS838)q7t>eGEj9bFEU; zG#Cq*io_7r`=#V{+7utP>9}lycOK{I82mA&blR%7MDzs0&^#GB|Omny<{&rOd!CcyeThQc0x;%hMRjB93zk0f`8l zLL)bsbLbretJ(5`)(9*NvAMlK!DdGZ8W1XD>eW@^H7^%x|R5Uc9*L0Jb&fb1ZKa_X-c+{+s`|zxZF-{@2FO zjaQf3aI|6oK!Ao1-ud-`^SkMyIXcCqc))tt#dL^Y|L*>a7t?p#g`*;=Ej4V%M_=y_ z-;1}uHr;$Le)>=SK_78`AJ-rH@c=>)6Tt=yW@-kE?8q;8geCzm79)aruU3F^f4st8 z&l0dsxYTX(1$Zjz5J3)ia=pLgeE7LX^1uAKKlz{hrT^ytIeqQ*=l_hFwW1U$_UJ*- z&otgXJ^rS~UF)Be7&D=OZ>Y-6R{(?-J>ne-7_rC<1TAa^#Jg}6ab_|iN@&o>&4ZUf z;3scad8lncKn7qy0D`pxgAu?O8bAXBKu`eXhuHub&?iERnF;*Vnfd==2^g4qBt#ag z6)my@Gq7N_)<}{0f^8g2rHum&OW+Rj)h{9~Qa_Y_pVHUv8xHM+F>4*v%T0E>{;el(UR+$f`1s@7*Eih;?(b`)$O=~4G*)zVx93x)zV8pG z`)xmnD3J#x&s?*J*sMa#K>-0E;9eFUd__Rdm0 
z9TyM=?Vmg%=UBDmQph>)J5^eytV2Ha5}`62us5#c4bZF{(Cg)(<;nnX2q#GA1e6dF%UN2+N}LJZ6ifF)ahC zJ@O02_W)WDAA5H`mD-R!m6l!LLn%$TXLW==crqwuX^5VX$Xeq*b+s*PZI3f#(&!{& zGBe3dRSiIzR9T{EQ)#VDWn%KHRRov~0BEj7LI}jP)TJtT@KRu&Dk2U;*HpA-KtVZ9Og45t@TO!@Hp)DyVA}fHur_ov(fiMna@e+G)=T4t|uqa??3(R z`^#U5yU7l7)F;C*FID4aANF*f=dsM7eV*TOz2%QCgNiEUb}fRIT$f%;r2?zd=; z{6%Y%hy)Ly!pedql9_Q?WXhRYQ*xlSL|c|vr%2?M@pilA4nb;59d`+ez9y#czVq&W z`?O0J&n{T%XMCCuhxe8;is}9B{N!;9$@wmR|3}~d_IG~^6p5wDvhiKt_oq6I%lz^4 z4^#Bka$Cl(3&9Pi+Tr1~1_CQhO6=W}tLuMYU+D68V=2L#)rITzp&*BT)25qRi0S41 zZO+xl?$!5xyEN(hewozj`M~VivO4d~NMk#OQW!nPeOFIoxyf}HWGlO4&6jjOAa%|4 zvApVTpBrq!eJt(kx_egqrXzH#KG65(TabgU>fhD;E#;#H~%`}dN=3-)( z0I*OYTQyoH6vV|9P#>sZXq3@bIsgF)#p-8h{Z|U2_KPfecNlIW;t{ z255-C+fK?U7@-J9s%<+{a&hQtz+FU%ME8 zb8fGSZL(OOwgAnz`Je#PZYl89%ZqE}@m|u^&h9Obm*ESQ((8*Wn-4uss&D4ulx-g! zfXzOAVQC+PT(|9_9N1qajFaci+PEzBX}oY@6G~kwzv-U$^E-dE96zu>{_*9X{4i{< zh_)F-WRSYVWFBUz%$gYVe58_la#)&PTrK@}De78HL^+0;{AMo3zM)g7tkppSX_T!5 z5h<783oRW0-QSx-G__V;U^6yQl7iAG#95$gH|DC}Z|0kuyAN+a|Mrum-4vY%;@k|9 zUykQP9GX1)Bg;FtXa77rKD(_i_uq6cpUB~tr=PiNUkY|h7t!U%SI2p9AHw-IO4@H> znt$t0KkDBbfBoC@XK(T6!;I)#Oo``@3(Qm>hkWWBoSLcB+>y1R(VKgF_cQT_kKm(6 zeJG++YqOwY_x$eA`JH-TPv?3LewPpDS_*Cgk>lm{llJ_L{kYx43NUiWx~PZ&0ym6F zbc;Y`soP=Gw9ez9na*=Ya=YC{YEwu)0ok0dy~J4*R{b=a}5?qLZRh zG=&&Ln8q`k#OR>T3*dMGrKBr}KSYDpZLBdo~I}QU|7O7q>n% z6(FkF6$i_*tdtKCVdkOhQ;7HX_hf!9**S6yR~J1f)*?A)KX?L5DQ)^>rt@@i&Rt#Y zLCDOKccs-@s~WD#lxILfC7W8VwUR+#7rakCt~moop21rK6##)ZpI)pzPpvg(-&$*> zE=wk$?XU^n6XNUFuTRsw-ELpKeqBida)@oWy>tj-dcMC;De-A4O9oLe0MWrmiK&!I zKpt2YNjQwljE)KYvuDpPw}Udix_!OxH~Vx^mW;vOpQrEs`m4)F!_%ku=!yw;o8clv z`uN4osh-ZK(-6{|7f-sF)(fcCrrPf3`62`|s7vWnhXzDoSWnCBz0YRM{?+*qV+=mc z^Ld5pcy>(+I8rl5bZ+N9rY`ntmsN!ZjQgvh8nh--s&j;BrL>%D3Vj=^5U_KCD!KN~ zJ=yH{sYeQf@3gjNjhWLnb%BsHq?k{OSOd|ug&57c=($0R{p#Iu#H+m3b={c3A*U3p zsd~-?+wG9&C4gH6=w=Na=3HWoRx9MXO8f;I5;BuHT3QvULKI!BrKrM?w)#-J1R)OM z>q_NptvU|OU7o9m5tBA;Qd(=KaJju~xtRCn+)@juh3EwdjesnKluK)^sA`BE`;bdJ zo^uER1tGT{dq!_F)|NNO(Bxd3s&2?_d1r$G0JUs{V9-=h5zUy0k=I{<0UR=~LTm-d 
z(xUS~7rWgmvj!K<484EvqYtgg<42FyVjMBm5N0qh>O2l{lUqHWPn+#!Ywd72+#~8d zF893eQe-Aj@126xQtFbgA6IcFvYV=MFR@Mbd+gCTv zWhmcze!BSPHFYrk(zm|#D9N)ouJ6kEcDhs50FDLAsj%xh7gWKDAfg%-00kJAkE!FX z>mMMm%DU7dLJU$X10Z6@&PuHaW`^EW6%+`_%vNxpk6tTR18la^sEHWKEnbJO)3EnLvP6W!R|snq)h z%*gbKd4<7v{aRc4ovkQ?J)lq!K-WSoz_#k({E#Ba06;)10SN)X1Oz;)0ufY|5Oyqv zTumnt8n>}iRc%ch00bf+t^^)nW+BF%G;dZgo+B0_D3duix5dD~8z?xAOhj`3@&3<# z{`41)GB6x>xP2{S^WZ?N>HYZXZ~UFVl2W?9e)96}W!w~UoF}OC z>usk%-F*7C!m<1O@zF05Y&X*fwSMr!`l=oG+JDtO`b;;l;b=e64e*fiTGvUz$rcB?3jn^#x%t09fasf6xb?f81a9 zKlrUG5!q@$CjbIO`iY~?ANuh?*6bXmb)+o#&@z#z4h{!AU_MhqI>p#59P{9lgN!1g3A4RjM z-suep z?hofTKmFKmc89wUmCzFtg{voz-hKB+^I2|h?}^~$%a?1@+FBzAak~#2jdm+9W&HoIzpbT`CbG5fGARBHC;==kxjHt5@gqxy;LImifw;zNA8j zyLX%mwU!Wlm8Ke&8Pr;BxeKwBMMYwaIcG=QOdqOTKgpRI7&9B#c{;-ch*oPAYklAE zH@$bLP4=5@-*t#c%&`yWX0S6b? zp?6*u6N^3)uTaruXvF9pBcWNo9Tgdg5md`mbQq&0tEw8DcQKksX?1n_tULZsVtNl*$shZG;FIrq@6VC}v{r{q-h1b1 zZB;5(&2$A;8-kdv!C_44AY(kAKfrkMdv1X`AC}SX@AEP>AChw}qfU_XN$Pvu=J7|* zzwdo#Hs?is=mulQs5=2*0Ph@&bS}GA+w0S*wEU($?mK_>lPqnnT?uLDV4hFTxsLP2 zhWA5nWV5)c+S-I{eV?j<3ZIvSx{f-cmZA^Kxg5t+KR6X@#C;l=>I!&ms%A9J=jYE~ zsnCTGD(vZQ9Mgeh1F)APx(?-N%VNmX=iun6_3zNHNAYp0MYX zOIXg3!qcUD`&hcJtJ!wY>@)c{K4pLFd;VW|FMWA0`5rGm%tO89XT$rBCO|KG=B2=~ z2|Ht2wA~!;w;_qi&G8GCeQpGyN7Z|2NAw zUf~z$`;XeTHTLjc(F+IY>|o}>-5_3{{V<|0 zAehfXK1RZUessyFt2PA=@89UD2$%!}!0gbaO*iF(SC`m5x`QaF!e-={a8JUt$dUFuH9G1qKH z2o!wU^t)xwDnLZ51z1FP`+Z8CYi*4inptoT015yi#u%3><9KSV#N<;-gpM6=(@wz5 zL`prM&nd;-{t{509*=d(@#=Bcb+gvi+T*9!f9y|w z(M-?hbJw?5uRlH=%l;xT;9F0gZHB=auJa5c8t1Gc2q2{bfD7Ktnn-lfDT$hts;Wxb zaZ8hVsn1HZOhsSFok5x z-1ps+t4n7HjzJXEJUdW5ozG>-@p6lfO(0OfhzuB9SV=od+WYRICzc%%No{7PP4@eJ zu0;(9pi5m~m*=HR35-;AEv{HmLd@(`#Tp?p@Y-Cmw(PdnJn<^Bi&$`;(V4X_bl>|$CeI^#HPx7($RwN^85&NXXmoMjk> z7~^XBCq`yRL`@Zp9P{eKT~o6}>>LwONHOPm%8Qw?#-ORLfOkYXuqo<=ZEA+5yPhvSmjj}y8vrPb!Yesd+@ z*HV*nKBehA?zoW9mWe>CIOjZ&kDaQ5+B_~Pg-zGD)({|iS4z1%970Oi z8ZrRllqUopx&UMx!tr=tma+~S2{?Kra#ousrr?{ZTGQGP)H&B0sj6s21NJUraMAT$ zH*rnHtd@c*$fZJ+(t^`E%E_)I@fQ@_vRL#yRDZ&OeH$>_6b 
z`lgk#kl~8Dqu{&fXLRGNHbi+j#{K}U;{7{)s>A1NZ^S9AMQXTTANo? zwMj$9)vdKgh1RoJYYo)a4lqVq3&2!CMUgRtQ0GNWgXh-Tc^b`ZSr&L`n{xo4^kAzx zCPHE)LINhm6;Vq@4>Yj#C$2?qD^|`CGckxFk|T$Ry5fFXGa_InqE_pgV&8kbpwoFg z=NafR?D__XtsRGp?MjOZ!2`Qon_(lDTCq8=1OZlLwzcY*ky%7qtMExWZ7l_nW&);Q zxz*Mr#^{M_rPrqu1De7x45$0UuJ5)N@#b(w-<7eoydYpDL+VVsr6l9yPLI3thTnYA z`+AyXIo1%m?)KN?I3K6Sqx-5GF8lHQ@cbRu-0|7Zx-=Z8=3w@+%(Iy5iK(St2HCv& zpuBuXru#l67v_uk_S8K+S=Wl5%f?~10m$ZPI%E2Sk!pjPu4z7dKt`^2h3BB2fs9N| z4GaxX6}Tbc3Q#g81i@BwT?}1q6e{l=6jdlDM1-h$%FcZB;nAZoW2(?Uw!pIh~4KKKn*XatACC4F6doEB1m7rfPBR*jr&;|8&j<{;}(yjQ0IQKOPudr>ZJV4IV0fkpbY` z%+rEYg}W@7m?*??neX@zc?a{9_E(_k-+RME_*+2TFZ$7_&`Qq*-#;(qD%kZcF`G5JZ|MmaZaeRG!eSJQ! z!B5Le?T3zPoyVD-BleEFCdCmkLDhMi;<>bo%g%c;Ro{2XJ84$3iAYL;n4M!p%((zz zYprT!Ml(21r_24tkhb)0XV2&~7d09$z z!86fHbY3@q)&voYW&m)`r5HMhBGr2ry+@K5JOv6S zI!%)d=#ZGbbB+w0b1}wN+Uf2fAjN7m`G2$bXTO$h*?A{6n%S*pH>Zp4-njQNnaqYv zQlzM2nG)oZN>xCW4EaF@N)TWe@I(C>ezN}u!w-gERB$yG458$bC971hWU8Skks>KE zliA%SZgi*H-D+kxh9B06n@LJ4!w^L=Q8}MZ97M#~d!M!D8gq>A`+mll87cwiqPzP~ z&(6;Jr;oh%-g_pUrfHsMT!d?A_72T46@E zSZhYKMG)D=n3=Wj8WG8P?mF+riz!VtN;RbH$Tr@tLSxC?A#+U$8^*qUdU1aH>FeWR z49K}ek&>mpxp{MbzID#MeDwm9pb9BCZyWDVLsZfx^p0gX4JoC5vo>THnT@s9?hg0X zcs7`Gu31_ioDcIn=81ejgsgxB%;?b8zO}Y><2as=l7?A#@ORz(VpDY2-%WN&S_VQGtzqJbHKA5T;WiA z9A+#`+8B`%t+Oeml%`TxRYfH<&WFIRIqmZ80gfdZsLmvo=sU;LTx-?1pXcdtnv2w0 zfNc_WebcK_5j#z#ZMR8sibsbh@7qU@-R@YeRjtv406b2Us@9y7v9w;DS;O8?v+7^J z{C>BFDyODt<~TC=^`=!wSRNu4nl3br&$&ve!*pu;hAd98UYxIuF+h5Ids7fMT{zb6 zK3q)TvY1o4dj!KHd)!z1z4!m)nFN-R{Y&{_ub21n9qMH zXJaASC>t3RY86NBEKc`~WL$$5D?(v|Rq82i4M&Hwv&vjWRJ7I#jOR*=djR0UjQ{WY z3Gn0fKgC6=D5@k1s0&;d0V@9MA{yi3EdZ#>RiJ{(;y%zy+?aT?IsCJ$!*7P&OGDLR z$Z%HkE>A~eJo$F$+9CT?#L2+wEujx#!44^ab1vn=x1XzSKX%_3{5Oc3-C@_~`(ahq z_GTp?e`bF1h5Lp=Z0~P;6Au0Ap0_7D?{$Y-xh{EbM@2LuwhC@PaEhQu+s-`% z?~rD0rqz??RE}wknaQVERst5pqrm}xwvm?;{R{qux8X8P{K#JaqWz{TH|wVL&0IAZ z4BU5XZ{BMzZ_~*qe2>yh_n)S{;NwJgLTk`Av7E)Xe?0zb%ZDzE9Tf6ZQY>)-70g-Z7;9A#m7;OZMW(Z}v+Z_^4D&qC^JJX0 
z#*64YPu_bGHP+=)kxkciKtm~o3H!6{`F2yJmK+g8M32Wop-2TLlH!}D^S)_)gNP+( zRXv?ffIN{Dc{a|essVm-a?$>b~kB7s0bJliUtYx+7+pgQ~cAXE~^=8v|mQ6}2mOM;j5!q}u0I=Wh zEgQB50CFvHPKP*IYsYbX2yPbi&TabD>sK$V_f2TA$}kMlNYaW#*bM*dw|-~6UO#zs z{_53`;QRh&ayE-G2Az;?dH2TjExh&M6rt1jB5MG3>0hjSowe zh0J}^s_HaNZQCx%hX7#6m|&#BI8POnEtOK{eT?I@vWZc;i`M?fYt)l#-N!hz8iQAxmxBj^kKLS@boQ8Q+Z{ z+O{pF98RYsb+g$_(W@Y zMe7KLJcW39{`m3Lr8AaMLvXpo)oKNVV~*_Is_XarV-;O4(TB&DDuCpemQle$%OWsh zCL-$FcC%WQQrfl+&Y$*25jh@@WO>{6HA!bZBBqiJQY_gRvot3g*OZh7nXI+e+M-yL z6e*RQ5iJ`uI1HmV9!y(eWw0PvO0jIo8p9Td=Q$Z`fe?s5G*yW;f+!nAa$3b8u`|wa z0Y(v#(jyAw8FNN!!8vqj)65bMukL93q%j?Z-(3IVr)Pg`%g&mR^-#MrJltG zrba~McX})$s;NGhxdrLn9N#_m(9uAxKQ7Kt-~OrerzC@ARAYbJN|gKIl6&N@pdz z!^c`b|H=57z1in{25pz*^SZy2_Q~s)FURg_Sie^@P?FZKYRtJD4=SCkGd9~nx>SyM z%fn04?atzxaefRq=JpBM#(SU8Nw;AcALN)nB)|JZsE7Rn_4Ehw7}e!+(#3TGDk#86 zq^K%Tbpi4qDpk~4xCnqqiqW^Te-_|7NqmILOai5~9SVZr+4}6w%Qx1zROUJ7G}cwW zP9>t1);DF!1e{~FeEwN_{jJOYJY9Ur8^V-wFE!dH^wJ_Y^a@#Y5CIUx>csv5>C0+a zATzRHYY_~QMXm(^0O60QUEtqv{m7386l~E_RLKCL=8~|gA-E!Y5*VVKh1g*}DwImi zu>arx>92g}<4^wH+uuHpl4kce@;9!{xjp;S*bPa>^Uy}Ku5M+ATbRc~b>sxqARAy{ zH3*BijDlSu2x=wCfUuxE5dZZq^oOjgAKGMpuj}W>>yOkDs0NWxNh*-BY@CA#a(4f_ z=JmJ3@OdDhyMNq?2~w_j`fT;{yR)xpzxM4VS53-g_`6RIe|s_?9>S)NyE8pajsME? zKk-vfpMUGSzw^!Gy`U>uAlIwS6A`(k(nkLQh;_@n|no=55EGaLPJv8o{Aa5|00aX~+=*X!+S4f#Ijoa01GswF_<-OBs9)M=W!E->5S zGy#COejLY&7CAtvgj$sjQz4QZQ*Z3{>|%`bCoevq=Gl91t)tmUrja<04G zPE||IqNpfjIE0|8xfEvh-V+g9jxm#pwGLI7jAR59fntf8;6y~LI7bGoaw=NT0aDEp zYf%Avj&HFEN0fb5b1Ii-fo<5=Ll7J1$gc6$P1E$|?frJMzPfyRIHVe9sD;_*I5U;d z2c(o@Tt4UFFgWMBt~18$Pe()jILtYwMT#ZWNueA~2i&q#w7^s>R+TZvT^!Gh5~yLU zf^3acEu~Dkr122bXuK&^iwZX?CXHq1%?1F8tz`qmsZ>?4){paIPeddHBzhO;w|D}U z1N=Llc2!m9+(Ik_fLiMkpIU2O_3a{-LFR{V0wBQMFl^mR|Hx(9_;z!Y8C-jEJ?_Sq zjMh`;oLXm|JcT^#+n0mhVtYA_yQUWDc`9Y=R1LAQjdf&VN{N`-5SV!wj^k~48DlA? 
zAzb$wY&lNhd-Kz@y0GPTXSYR9n&(_Jl4lbdpfY7-pN1@%@211%!Z%GvQzkLan^hNL zoD>X#iwSBL=WWiJ$(B;a@g9*Cl66>XX}kV-Jmy%Fi1U>4%*K!7m{Qv9c1tSdQrxFJ z0g%?hNL|+ez}&V1HBIHyX!jRi=~ov35X!55_p7}7RdFAFy!!G8AAD_fb_M&pwA+ui zX74S}DI!AO_f6Bp5|78DW#V9^VyQELShtMTA%r0LM(%c!@=Nmf=FH~Rw(51d^^f%f zl2Xc1s>wHd;{b=Rzxq4hO7URUar4Benp86TL51+XTc77tthMCrSmt9+y=vp_0jB~+ zga{mg5m19b#42%-Xd}>aURusTpg*<`{CNH0Eki)4Rdq4i2r3H+{%h;BsR}9pvZ5+2 zQv@W$p?KpN24l<@;v+-^MI>Gs|9yS` z<@&2H!#QTy7Iqv+i;wH->Th>{F3PznAB#3e)0)nkx4&_W<8R#@&H_ApUZyk*lMS6) zH{F!eOzf>IF#%EtVMsFuYpflnDR7n3%dB2J%;BoV0t^P63Tc{6ngYx+AAGu?-jZ=s zE-5mgQG-cp<0Fnqa}^7dM@#XdjOWvMHhe$kG(eF|b7^WxCahq-rn>8zc(D#E-+-3maq3~UC2k!ce@7>CG9irwOK}*erjV&eLynZ_kT5DB;wr!0u zITsONvYvR=HUN-gQq>Rw5sfLMp?RK<$K$J<&(6-erX|CN({bIl*4q2~`|)&aSG{JL z=ed@$+m)KdH71tqjE^xN4yRgG3AF-~MMP)wJWdAb;_T7^7__h7zL{_D+f{Fk7lrfl z%bV-n+c$5zu32sRVHy>*ZQGQ}{oS4ScGdTY7-M{Uo2)g=ydU<9$aKHocTID3c78k_ zrRH_tF9)O_uqzZqWvLlTDNE0EQJo{xJjZby*XwQ9b)^)kaDTTy9)_QI`qXeqDc$Vu zhcw5OcKf|^?%CC402D#%z8jiF9xEc&yR(anZq=vL%m`i6ns@9|AUaLsRMUPq)?7je z1bi9?Gfg3cw(Z7wav_9ZQ4xSrjHlD-?8@!$@US0Gr&CS&qmQ0!&ztceRxrl2&<+GK z&AF(oHtQPmLOLOSP~hT%_RU#UNf`+kX(1&DO95H;tCUjA$ye{aH^z8o@9Z42YGr1T zJjFOnW0B%)So(dXL_{QDpjSo-ze``~>WnzPlq_nn9|o@=4; za5BbtW7&Ab1r!V*It>E?1m_5F;Y$O+k6+&X;10sVYnV z9kEdaEg2LSkg_#~l$h=Ou5_0oP{p#H=NS#H*PEsHXRGb|-Z^)DeJ%8WHC`H{-g^R! 
z^Sq#yyRHL(``vDTH~>J-nGG6-C7<0IYfaO29kb0+5OFEP79sr79wk5|VrFK$Y|#tA zKoOkvCQy+&&+{@)AY+)2h$?8zX`H4h#Ugc?d-bcHTpXs67|u7_)pi2_*Y|g|)>=xM z-e{GkX`VfMMhctI8+OatD#mDy_10NybxyHqPiY>~T(cnK-SK$0+kfSw_kqa(TIN;P zGtTpvN-Zc-M6;<$Y9Tae3~^}t*M~P5DCPY+(H0Q(VXzg6E zo=&G{SMROb)jW?PqM{bbS|5j5KoLk%QK(}Cxy3k#0&d*lc7~?RG6*SjG1VCVDX{)M;D7|YFVI{P%uqQzfxx|bQok)!(ebJit z-orcV007i?ORF-XA!Z^&ynwWcDA!U-Q9wehA`3AX5vx3;Uo6?4%dSgEfvt$B3WzSG zumz~JkL4QZ2+55#zK2fDl5kW&s!wQO+e*A!0U6M1oSKs;WqF>{OKy zow2oOjFVRI#-=iPw$Gluziv8KQ5DyCy<(kZtWKh|7R(P@^WhIG!G_*lQ4hzlcRyN13@vR; zMNu@wNTgb&8W2Phky0~MAyVgj9Eaq>DLn7fcQbIt;?cwkCDtd8o__fJBf~D&>Z}dE zefs?A{(3l^?$kjRoyNNEHWrLyy{X^Zb$?#3{+OWwocDA(7tXNa@|M8JVz>cOLPAg0 z3B*5`&tXvk5s<2=Jn-ca372yL0s#F8a$Xle;17GF1>lRL&X4?fV2>wuWWkUqR3&um znVC;jQdV((rZ!D5b9Jo|oqzY^U)$Y$>-q89N%^in&U@*cK5l5 z``__%jVX?+-~Zje{`T$7pZO2|V-e~5-VhGy7=q2oEbhMynVB6svb4yt zAL7W zRuru%SDQDhm1DZTz9t0c9EyTU@GjR9Q&}`wF-8ZbrdUn2h*dMO1!QB0fDv_RXKNJz zNH|rKh)Za!F*&Ck=Rqo;hJq@_QsvTnRjNIv}ra);V_&^9*3!(tyeZQ zjFiS9=PUx&GBXv)hKQIEaY2)f<7kY@ITO2-Vv1>@F%RQuv+103UF)p%hhv%Zm}No+ zK>`hFo{$X@L@P@T>b*}f;{&h?9$c}A0FZND+#Ab5t7ZWOBur_3U554EpJY=_ zyNPy>X{yR|bwxokQWhi>k%~DX$^GX1s^j}!9p5lH zL^Lw5&brIX*5)ZeN!+H@2h-{syX{wa{Ooo2(*u?3)LI3=w{PX{U<nXgm&f#L+p22*WVsoJ&n5CCgl-q$Km4p_YEt5Sct{&UM$l z|EzWR!` z8%tIIi3}l1&5$RG1J`1zp4tXfsR}YGGk{VRGf4qfR01TGCC`QQ0l@R)_2c!2xeyVN zgs?!Zs#P=z%z6G-{$*u{%eWMm3-;moy{SV^aHQ3K`^VGzy=TKWyYYKg=V=|SZ?q6p zCaUUEXxubuV+}LMm{Uq1KtxCgstyJ;U*6+;)xt(jp*XV&lM#~?#D`OM^m&Ai(1~th z7+2vD+uiV{wr6hBLK_-mrve<0$0?>w*Y~brz_sfXP{GwQ-`%Om6d0l8RrYy_wt?fo$} zq3hPjfE6;ZB&gs44srTy&a2J#gR|GItd5&w+Sgy3LNjq|xaOf-HKpNLQ_!|4&CVji z4%nPDr_<+>URyBhDBbx*PUCf?U6fNqCcE*DO+6;;w&%{l{%}a3_v6Wl!RWetqJjey zyE*&h?V;PxKlT3Onv?;rch|u6W_MeN(6+}s%m)eH#!{N5Q3F$f80WG(A>tHwtF9F- z-m!B=OEqlQ>$UY_5SpfGT2Dmtl=l1MIL;q_^nnjH#s~nG%{Y%sWKGjF%zS-)y;`k; z^8i3ZS65e#U}b-Z)Nrko#%q#+ny&y@BC3|lL zDu_P1x>)4lOZg$7ahmqW<1mdqhk~_l*2_oay~h#f6nz_*85%$(0^Dpiq;NPKoV7Q% 
zcfM^dw`bnBA~L5~R4nu3r_FI^KKuCP&;R^K*7I%|m6zg5rb+uOS8URpLCaQU(N~-Gg!8A~c5h0=w!g66)WJJ;`UtB^$>)0o2L2pekX6K2|-VJiG(Gh zlxJ44##*MPTM1!PA|4L=!>QC_k=faoZ?D(uK7>$eVzSoSzVE84rZU%@Rpv#-g_u%W zZ_mEG{nE?N-%Q6bb+bis$X)0z`p%L;#8N~>Hk%a@8B6>9=@?_J`Qi1X1SrOs!~O(> zB9wDPL?WXC#wATKaTS_4@%4e7tw>|N8nDk57KN#V|?~ z{Px#=^Z$Gg{|U&eXXdx0fBL=o_W2n_dAYr;cD*0O<%tS_EG9<65E&`|3sNkM`#)LIotwdPv0R28hCBDF|0#)!yi znrn*7Y-65h8yagZGuB#{i_=+gI;c>O#3n++0FUp(QL2kao-$&t$qEg9=7u_jJ0f_D4*_) zqp0<;@7CM*@VFcQ=|Eo>`>-C!Dhe6vRkpf&K! z63Wb40RP>zYO9tX3?cNLWdQ+17mFW)f+8XkA}M5tm9c^f2!JX95fd}9tHuq@Gl>lo zAQ39mT6^E0PN&yz-hA)7-$f!L9H;c+#S6>^NgT7)V3aMk$KkDWk)aRWhio;=Z7|PP zNom>*VKmSwiV`rIrL9^NRns4o`1;|Ei{d+fjH-evJd6i^BxeKf-rE&Md;|=7~qHY zO++jYF+zkt1jRtYB@q0_>yO%cXjLI7f~bOuBO=D#lmDQ9@lAaB|4!3E*=|E(J$_<- zst%iRvz|k4+(dv{jclKXz4gN&`0QBn{q&i|##DScy{2`;wIb+xy}nvsoQCPY`({SezI=Ig_2};I4nQw1wr}6QMK%^3GdknRfT}i4lXC?@Lk$1`FcXVn z$rZ79>uSxl)?>+SZBt5y!CGQq73unZ97k`%dc8Rwk0G>kJU)7SKF?F0BPp%B^=8$b zhW)?)3;)5}>+3m=*LSxm%~eY&1priasf<+xGa6@?Xv@<$k`jP0S!1cH#H2)2G9YQr zMMPFji@-IDZ(Y9%<2W9N)7!Ui+qQjleqq@{sgkQ9s|?l{=bUq{WC4ORzXBtHs(_Tj zxp?n7PzLX7tt$8+E+~1J#?$Ha?DD+pI#qrBdR8gckX!ds^J$p6cLx}2$gtH~Vos{s zc-yV}m3HI`ip^;P<=t_tSp}6CkPNX^(p0rjKrprDIDs%qRw~Gdb87Tdm}gUy7@qXt z#mD0g{7Q&#_owUo`vnKIe|qI5P}GDW%`65%F+1%sGV+ zTp%{xG_m)dnF+volhS;j2d-g?(HQcMy)~+Agy%Gez{Yt+LQnybT;?=htTt?%qUJhh zsTJYBT4Tzph&Y#=O15kaaj`Zy_fD&&L=;D;qOv?A_2KJfIAn~0sz|iR zM$CiIBZ~-m7_JB}v)2dH4zn{^lSN2Sbgqz&dHUG2t?e34(=hHG zx;axv^62t{ru{HaJ^FFTi0E7(G~+mWU(e4j`?ZyWoNjm25K@0L(woz#SU>-2^Is9e z`(KZ5=UvAWFle1|%+{=HEmCtzch}uAqT_7i&p_Iu6u;v3^itvVH4r805% z?dSjg+5UG|`Mw=r%u;jtmh!ibr}vL^SucJ%guq}m8Jw^eiF zODsepJ*;dFF$b`qh^9&xP+b%rUGct~X`Wah@7^deDGHObm39nLkCcrAH8&);sC z+FmcVow0Cn*$I|fvq&{9=CYX%W8V;fyt=zChGUgY6TBf&s8#>~fB;EEK~xe|rD{b` zVm-!v3e>8WjCZ2b`Y@05-BkM$pZRVk`qu5K+#KG!ytuG@9<;IPb*-yD>~oq24(-#) z45f@}SH}}h8?~mn3}g|<6z?k>lXx&O*8MR#(_F0kVHmnqs|LRG{E6+Fk6*rgbALGN zI#7M_>h=3iAAw+4ueilSiJW13cG)(o!*m??@&5jPwOKcPS5>3PG)|?IP2X+SeJLq8 
zYYgpnJEF2$tuC$_%LH1-akPkMPcP2T*0=Zfgj(MP^eX|V)>68&GbXxt{Ma&&2%6Drxb&+OJm&zF9~yc=ki1dzF)1{Qp#}`OPR;hv6RHt#+*;nX`biv%@z$e-UI3V zao=^_db^oY9LF)2!i9`WJd$5h+raiVk#%Nn`?2RAY=|OGN9gt+i^YmQic%nx>RO05K;( z%ry;DTCdkBZ%`Tb<{4N_jq=4prF@7-GxDT1$qD)EuLr zSyb&JvS6P{;eVns1hKZsUP;}1cT1u7& zYj3RtP^c^+kj9v_KlQ!|&U@>oIbngeYjUo>35zph>60Q-t%Z#ZZ73<{nrkhUi&?sl zfJJJ~53b?|2{vKVcc=X=S<|)MG)=~s&1SRT?^Si-S})TM>;2M&2Y_565~ire?a|e0HKzywcXxN= zI4&~tBGou+$;2eJ3R@O|<*a0kK~g4ERh0@rOTf{R?7|2+<(xAziHHI;O|!f&5mBj# zh(Hf=b!S8swKUEzj1Xi}#0+wG+&`dt2~$ai`T3*ARq+1)e!t&M&Wgy{#nzCy-|e8} z^?HrCj8hb&ZUw&vhCHQdAq&4-ycp#ks4L+kEf8LwbnVB$LVkytaaWxkRnxT z&Sb5sPMV9XhMR#Dr06&d42%`ER#45TL{zKhT1zc8)iUL2o~9&haL!57)HH1##@pN5 zT&v@Nh#>?KQAK96_^^-1qgIisF-8%Ql8rIv=i95Rt0lAO#fwijn++mrsmyG_rj!Tq##(RKEtk5r45X!Gks`Su^4gz0d3^cm z_PX)D6h_fhi`1&HOwJ0bSYwPbl#HS!DmmLF_$liD3iscuv&=dfK46`+h z-JAtW0U|J#m>Eff50DfpA*+az=f+x2$v0~vf@MO6W=SVYRSdCnX34V>L(VyaFug)yr zzJK>Oj{1Dp{;Xs^KYVA--@A-*)H2T+oM~AujIQ6`7?x&wIc8@|e^5J{#!so-O?vK^ znsez>|FBo%-)cRqW_hIl;3qDx!6@<)p0;>fstQO2W#>gfY6V3y7F3H!%DJ_TDvaS# z(KX>c5o@iWW{&g4#f73M>1y50ImH^M!%V=2t7X%L?q`1b8!unK8q-Z-NKwYyU!9)( ziFN;s({VKF@T^M>nSFthA&VNT9)OCLf|d0Trp2v5%biF>%Xn<*ZxH>+P6Ys9VGlf< zY5o;&Hc_=Qu#mqv-Bnp zH~hy!_kK%r_OU|Py0pr^#A~M)j$qyRZDkm*-!#AOFAl{6-I;cGY*A z`*Y_5Vd_#=Xgkf7+25{z`7(QR`X9EoDf7>!{ddMPNcf=h*=v+EPsQBJ@Z@Skd4~jl z>X-g(Du?6oqze1}p;mFmfi5vv0N5cD>vsuo?ftJCSEm4?$~EU8xKjkoQe{WJgJU->J4r3+`zpLV;$ z{i~O6uWxUevF&}Ci)s8)?mwu)XUPoXIHJ<1qlk!rh0hKA40IkwGyiAOrwq9DMMPH>;^}iXZ?} z>|%^HruOWdi1KD@ttC-o=zP6CKRdxE?(g@`>r$lw5?Q-ELlKp=KDxTNJUjpX+m|nncd^vT)b?>3+ID|^ zGmC^(n{v@A&8n*?sxr>;tXl!{cp8|QofRx&%z|39&hylCZD>QSB2agSec$)JZ5NH6 z)*?_-iN-l*uob*vA{%_L#;71!TdOWowOTR}5rVVkbefyS0;6}#1{g;}gEM1Mrwdfp z1;?g)UQ4Y_$S7-4QH!Eko%e5&|;}O_M zk+JXFX{;s79Ls9GR&8R8%8eFIW1(!$ww-gSdOaKtBC_bG7~f&p;s5}TuO3}Ky1Kr8 zb9aAjt?B#zG@n!}fMJV3bxtMbECMNT({y+Hy>G((?jR^c6q>MU`-yf~Sq$$hN-Vu& z$YAnV4d15RE1tK!Wit_uhOy+&KL2cb-an6MUrAJl3O$0Pt$sZbxJ&BV*Gx`;Fm3W8w>d6M1j0dlor%B9nU7uIApklrU4RSx}2*B!t#J6?1R)b9Z`)bvl~ffbH4ct?dW(+iX65F2_=vkMqyj 
z)0-BU4zDqPvVIoZM^9|Grqdk6aujS0Wj+q0=3>zV>rfF0(>x*Xwfch1fVO|sf7I@N z=lj>2eQyj@E$BV^O%$p*Fa@YNAysd%%s%b=d~#k(gSK|Dj`QJlnFcIS{AOOAO@1|x zK4_xxd+(+Do=<06D~xD(W3RfVWrfz+@iaKw+|Gw%){JR-_4(QQ0&Pt?I_DUy%-nm+ zkqml8V$Xu&JAjlWH1U*32Gon&X*#@BBsp)ld%6wSeQ>@`M3w2AyIo1Kar{wtIrr_= z_VUSx&on`)!`*OqeLH=2yrwo&2pP;!?C~&XMq^t)?y{koFpiN@J@1-Y(`VPOn@8_o zS~dkQoqPWLy>T96DKV9&7iaAB_4Vg%s!uMSf}l(DWoM^$gOJAgWRp7QF3)>=wM7L< zxp4*qTjz#hFvgspUjpDTO~-k5#sp{Dwzb3&viE+s+ciy-YdIbd-utU3kFB+!Ivo!d z;c~kN{d$%OIgv%fm2OYo}>g z3b0`qjWKMj3qGcqh}PT9`Fh)VAEybKO3t~KA{79a=yM|S&U?q8yrgs7+}!L>$F}R! zoCEtVw1$l}R!gl_bFNosn;^~pbUYo$fJRk^c>>9s)@}Mr%ZL5$_)mWAH@9b9+v}U_ zI|{CGwr^U)767DF0tnWPF#&)lL`CntGq#k{HLZv&`Uz$RkcFlzB190JTbh7H>Nt)d z+BJj@r-6w`A-HCF@oddB4NLQ} z>pJJcq7W~oFj)bWg{mx2vS>*)mr>{Q?Y3%3IXP#nZwPQ!Ih+neM1~Z#YDGo>NI7BE zhpzoQdN3;ZwriM{1pg&O6q)9E?)$#=?syo6X7W^T;SH#&#KM3m zx>#OfPD>X%#hKVBDS%Z~M12TvDbWxyaD_^>%sHkx7OAHciKq=t=UihwK`XfcK*rjh zoxT6)5gFbc4%T`Gyxgqk7`^wZnse!!))DTSN8Nv_y5=3tKTi)m5;CXcQ{Nz z@G08@XvtW12w1Q(L9Mf@7;>sAAXo(v7Y`NyA5c(pNQm~8gj6>d6ZC45JW%( zQc24{Tt4*<2XBOtz8hJp(k2=nx&KkLS)DqRW;;7$4k6btuCjUQi*dB5pQfQ6_|`Q z1c-u$*ctQc{>`VaKK5qy^vO>V(dl%u9a*NDQ_(pIi3p*o5pt@=+Ia|p%Ol#)(|wiH zuf%eQu6bRKYle=M)Z}E!VXPTl+m$H_9ubfHaT0g zJ4z6(n@-h(8@h5^@)gvRM{G;M{Tn^o{O}~;_XQ38+gk(=K0aNh5kHs>Em}ZYTqL6U zAgl)lRa|iHiWRh~ifXZq#wqzt)J>~z5PYqf*)e0P<>vEGGL&OH8s`i;W6Tt1%l((0 ze(>q{zW-al{`V_uv6xEOLZ0n=rOn2O--V{h2@WThTkRvJj0F`)$pBhnCS9^!{>@i? 
z2-AWJVBm{PW%}W`c=(Zm62Dsl|9a5hkNkLm_-d3nv4R7P!#dAjJG=Oq_utdI*Z;-$ z_&4Q!?8oQ%?I(5n?bTP&t;J%a?U});>@wj6hmfldIx``=9A|{FQY`F5l7p$L2Owkw zrUJEUtx%D%DNY1YNs$1A6<9N?%+~%ZU#FENFAiWSe1Ffc=f~@h++yGgy1)q;3AJJs z61{`t{q_p%pLqP@S6v?RcpRE-84}-rD{p_2(D`9!sXZ1UxH+D8*B5``(cQC)&;B-d z&GhImAJQu~z0~|(JO4IOwYJ$@T>aEff9>Y_v+sZB)8GA_@9)RYkH_PezVxNtZU>@E zog@MdT2?SnQyvX5Sf8?byH-Esq|Uow@;uKuD>E{SR=0R~ml?iuE=4J&1n0HZc^p$p z%v@4h_p7Um%k#6R`~6*`>R+bxd`drUjXma~g)UTCG%-p$^A`Aqyfo&P9Z+QK?X+);bIWfLJevo30H8eAH>P zYUWg?IRU_Fno9*^n?6)yoK(uW#E7b(D#oDc@Mb>TzAB%8zAwTTm)mI?#n)4OOO~*R zv(7d)%@HdQV5vyXiPZ8yuR_Cbygz4Ots#L92;Ki32)_F%nurE~(V4SlKySVq&1CZHQ<% z9+K3y?{EPO)S^`s0PzQ)sztH2PzRQC+QJ@K1_o@5F-GJ;M+Tyx6@ZG=k{1OEsH&~d zHU6UtxT?wCp3F3goVn&?sxQqpyS&F@k?AHlo2?PKLK#mJv>#6CjxA0;QxW z&7$nRH|*k^54-#BykE0pafvnbY0V*vOJzh18C(?3vP37QIL29m#gcT2!$4|$(>lXN z%W=G^nm|aZTwY$LX-@ITwp4XQlyhu++cj;@xeX!bJjRm>)LK9J)Uxw!!Y5J7(h(K-ZwE0(>OWjoC_kN{R}R=${=#> zPRE=P{o~W1N8PPWV8Y$JUpsp54EFQ1tLqDUh~y7ZY%-^4K)O{UeY;OL-x7YpVI9zT z@R}eQq0U2Yg#n<@JX2OMuDZ5ZlS@S)gG8zfBKQD_g&&X4f4u$>7a9iSDv+zpS_OpJ zF&k5Ukh=;1@J{DZYXVh4;fF)igTFr3^;q7f?xKA3pI!IA*q#3A84e!1YgrwKPcF?~ z=b;+xx|JE_%7=YCK?P%iLClh&g0XB2zZk#kqiT8Hl+H|3W4AH%Z#&Z+UKtr1f6;7D zYyI?M_j-mF)^}kOOel(OcPMo}0FL0cG2}81Q%M(39!;k~l)>`xG_-ANtsMq$DHOMl zBI~jH55M${`{CDarWnwcTs?|6InM`~iwQ#}@2n^EQc|Skdp4~tqNme+EjcU1?ke|> z(5&E8=+*Gk@OQdEB@EAdzuNRe%Xha&VK~V#mQ-`uNCQ%wZ9z-|)B=68jeD^nXk%{X z!_3$>oxw;VjW?&5_rqz9@p83s6{h`8wfKJZ+5R({y*)QqW5Ug7>zdG?Z79dnahf_b zZ&Q4G+RwMgaItxow~zacwboc_nr3u|uf6xtv&#!FYHNAC>3f}D9q(l@ZW9pb(dFgg zaM-`SdH(3q8-MAx;EGY+Znv-B992%8moGhiJP-Qm@lIY9|&lSh}Olv2|6{4AwZbIDb5u0)!0Zknd+TTx0Wbxr%^ z>LLU<+#W>Bl%`eJ8OOG1VvJdeapv;!QUp^<8D!YqYpFyuPm_oM6Irury)(udv%0u= za`nV8e|CG5bH2N~I~|ThbhbIayu9qXj>?=;B7lXjYpqp;RF(w72bm%)czOf{jpMwO zmxe461A>ZH5$7!ch*s;2H7=!Ot<5=?Qc^9u(-E1juCB0ZnrCM$I9dviT5GKmoUcGj zs7PqrX-p59MZs@2n=0CeCZ!bTd8pNUKTVUhezVy$p>re^G>;tcj7>tN~6E+5?&%m(L;16ubRL`H_-Edd9^#+b3jaU92a z&N+)9GXp`Y#X2XdpbJ<_Io0WnKf#MP!}wo@D#sYo$~RMi-EZm~ze@)22M 
zaxL=|7pDRO<($TGVru~K;^LBx*>1OO+kWRe-+BAy?Pk^1qN-X-jVT(}SmUgq=Qo^=rhG{q+PEFI0H8%LEXy{sF%Hvt57Q27} zRMn>ODV0*jS_>JJQZmr)c(}j2xw^Pi!dlDi{oV02w9Yx8i?fRcAGG!x*9^n3kCSr_ zv`WP^PgIqNf^|_S#+c>|C{?TXOA0-mPAA%|41#D0&UxQ}0Eo10OMqIc4<3lJ)L2Rd zDWDHYxrofDj3Scesn1U%9vPB>Fz};OxQf(HIj>*-KTps9(~J3yI=82@&GoOGz5NGn z9B-9^+gjyDc-o=b1h^6!Iu?UJ*A|h)XU?l*=8WS@Y5d<;D)Rd<0Sau!&a72j6)&MRdYb1h{GP7MKpv=5b#Fvc@iHkx^ zh1odg>k>?C-8@b;ErCmj3Iyowa(w*jfdxwJ5?okE%#S&Uxl#T!qYlfZkg{ z$vNkoy|JowoKD3cNSsGSi#63vyPC?;B5Sv;5>*RAm5j&TG!ALiZXlgt?Tu|*ozB|e z+Oyq|_x1L8TfemJnKYo-RT~{}j=qT((Ua#l^EkS|b~6-lZn%0PThvROThPLmKRG?NZQq(vLVvoD& zC!Rn5&d0w=mQ(h&d$K2ce*HI_S3j*Ed_CYEO24;5XRJ9L)gS>QB5TDWm87CF(ZBhJ z06*0CSJk2q7e69G{r!I8AHfL${N66Kd_jORMyL|9feE= z%qocyu~Ke|5zwr_z-U+j{4572s2G-NsElll8b4O?HLI#I4gg9i#t<{-Ii5m8Djq9K zsktaH*hbMZ9#jk#0Dwx8*+%vS{Q3QF=k?FH`LIU1DeEon{^yskS0?=*UVm6kgtrcL7c_U8NZ z^)LMF&;Hz3e*Wh3QybFTm+6=P#Gm?=fAIhL>7V=RXD`0L=~vgUUx`LhUn`_ktFjh# zR0Znsbc2Qgpp<02vyMf?ST04UX&*u$W2_0@2LQ}@@122MN~wtGY#>DM8pEM={lE9~ zfASyw>fitQpZNs?_QfY3Gi`qRcmDDH;nmIX`Q7!+cDsJ_^37`1efadf{r=W_fBxm4 z?L+_Dzx~^JsNJS>t=%7YXl$O6u?h%8CdSekm!}#vX=iP_j-okADLTwU*MYYV^RVrM zh`4sc%o=B7OfAu}2ho}Z1)&l%THA?8NiB=0Y6!%PDG5`9ies7Bg)EiGsFa!|F!=%o zc*wKVGUqARvRSR$#!Jj|)^Uto$Nj3!IkO|{aP0#a-E7;cb%6kb+FZt>r(F5&e!ROo zO;N4&HPyE9M37Su(VQnX;JwMYPEm(rOfWU)mxGr13|p*9dq-4eSp^1Xiq^T#VAwZ? 
z!H)Z5DPToJq+K<$lv)j8@RcJ}t!Tj(DW3c^O~@SQ1ZYz}t=FyLGLGYB-R4}Gji}T@ zTC}nD>86|FY@AI6QY<2vvs6=>(B>kgWNU5LUm}9DStN;OHYKH5D%xt}G=sOtTuX^D zO9;*V-En)?ORb1hM2C6y-iP4llv-PicK}cqiz@>$t=4U>MO29hBpYKK+EPkR5fBAI zwF)UVs0am%s+L+leDvh2PcDD^JKy^L&1-hfqMgZJZmy@;Yt896Ly4N_)%MIeQ)@mue?)}CFzgR?no`c%^;>2X0w6<(!>rZ; z8I7}dP&uSo**ljB=2!vPIpd&Y%}v|4;7(JjbJdcLyJP5Dv<4B4F=fuqQ`MAnCJMWK z6p26algP6Y2kRitGL==8%=M6yB6Bhvr?_dG%d2w$@o?x)rx?+a?FFLD31@>QO=^kO z7e6`qr%ik{hOPzxfI7rgUZLeGMG7c6X$Xxc<1Lb^2Ioj>ng>B?mE(<-QT=;DU>%_$Y1*KM=5edoAFTTy>8>3u^0mdp!F9|NC<|)(ZL1LuZbslxhj)-m)`j zYCST8=?IHsO`SI-)l(dtvlbAmtUJ$C*qooPPkV2!-|pgpHuW$VtrYUiP)~!NY`ekO zmlZo)nd0kwGWBS*mST>RLUvktLgtGzk&t!8^1b%(w|?>)?|<_8^*)Z_;{9X6ax+#C zg{0LTZ%>W&`xt|DH}|_S&YLETgA#$U05!&4>P^^oD@3e9vE-ZkUEi&Gb|U5GdUttw zx#mX-}kHcwlA+g9|+o*x>VX$W7K($*VlJboIRUg{P3f()VtmNr`NZi-|fEo zl`l`H)9stro37iojgiL3(5%mu=`aogr+u77!gP0Q$X@iT*&kmGyH#jA?@B87)7!T< z*MRt&-+r-Ltq#c(pvfY@>*k`UM7?ddrrm_&X>WlVw%zuTsvZtUCVQNwZoPIrI_GY0 zZ~MM?=+S1!uxfqd7_e4=yOQVA_&7Ad2FwEsJ8KVz!^QdeJ?-{)yTfVB39c@B=gmCN zZ5!sf7(-3dsH%m4h{s_%oQ_jW0zbO% z{oU^3;-Xe**yNmi6MPqpcjGjf#yIC5dKRPraP#)oyTI%Z`x86po6vPl9~wsR)>(sT z*cozGkl8iCW~n)6CJM%;g?G5zgaiP%Xcbk8bv}!F8*t95T0BJqpTGDl&eQq%xiJRV zilZq`<2+tmUViq;3juh%JzpCqIn97zk)plH!^$GsT5B-0>HFXK&6n>zdvvkBQc2#J zG0ywL{WfgdRUjfF0{{vzX+Vj>ln~=ly;^r)j-jKk|=y#2jPengz%tC6R(b zW;CvG%$%i0HL+v>sz?=TRk7p(2M%_KGpMAJ0IW9_0CLU|Dxf;%i?BWpyU<)pjHJqB zbBV+@*X$B7TehZ_gaEc#DO4V7-?z}VO9eH}V;@$v*3;?GwQFOHwY(T51yrh5g;33x zpZx?gmzot}2{tS#M-^in0Hu_SF>Tv^K{HPCZ1JHbxpahuOv-Y<-+Awyf`WKwC6^du z8#=T+#PRjb{pYVJc-D7JcZy5;->FY zj;OTmSEtjl^`Y&0XSkFM6F4!Uakuw(B4VAZlH0!5Bh3QEl1$_0pM6>x35a5G#EM{@ z-S78J--*;Irlx8B>G0N0@TH3GsY3gcxVN`}Fq7#rnMMp`?jLb0!c(Mkdv25duR?l2WX)^bnB< zTOXzrJMT*=WU)#`pwKjkm{X}7rNDX9JZZZShi8{-05e4qlS+~OA*EFIhZE4T>zY!^ z-EIgW?B3jsV>Ik)Ej49pZ5ak$wS(qTi>sh9JLinCsC7yS3!V03Xnb(SAfdu3<=r^7 zA=LelM77osoLc$({@!~(DeU+6+s(QSemC43V>aj+NX=@$KdqYeIL*gt4@7In-PzUk z-CY&PP~u#)^<=QlF~!=h``}tjObA&rv9^YQ_;wnHv4o}pHYC+ax$NJwjqf2hEqcWL 
zOn0@k7PuA!Z|w|O*3EmZYhE93Y5Q#Vf2Cjg$OlgM)E++R7FZkt_g_MIx1?2s7TFOoDw2rc2$IdfJAad1@8=_ z0wW_X5;PG(5l^maRRTkdh>XYxNLXtH1u3MWOEHd!iU>pkaL6+vN{x$1(U$6vEn8V4 z&QfA#XDn0%1&by^N;$D1fRb`C#;6n(NktTutf?f1#W=1|MI=wv7-O7~QgY6zEJSoL zUaa+VTqcYtQjBLnG32Y{oaUwt9W@|g4d<9)E}mO~1WHKe{&;6>FpfwglmYer_g6hbw zq@7|8FxB0ly>FY9$~4OnbM*bC*4Ye0+m1dF&Kurt^Yv%r+ZU_n&(n;S>M*(p}GL|j>8iBiQFV~qi* zl#ZrhclT=2_C3EbQ#}zOi=NMM55WafQV6aTw{mB~yy#K+dU>O@P z(aJez=bW|ni=px|#!DJ~1)KWS_37Mp_1P1X6{S zOezHw$T1RS6_8>`SC(@cDrjL}xfKCZqlE|*Pow+gPa6Y`n%AL9WoqRvwY8OYHUcETJ_&?s(Po!+u9T8b)L$JiGM2nmi zesl>EKVE+X7DkrybAq!+$|j#)cZRH) z^Ln+jKV!-{iFb$l4zJDI@6OyP%acjwwk~G^rrfseet*AR zZ%U4;HH2U>7~{Y8m7o6PvyYGa9Y<0UYfQV@lu}}h))`|SvI31Um=>aM@y0Tbbaxs`+r=x(_fl$T@af}~Xty6vX)Yqc`^MY3 z`j6Pyj1I)V`A<w8`o0}VI^cXWA zjt$NS8ASA=Ydg}aK!k+E6=F^`r&=_nWGYftLderhM2!!qQcCI0&O{_M4Z8`M*&6H7 zIdsm)_&^v2Vggujf6kkf5}~RTXBhy7)1mMCHUuUWkY~A%wo<0JU)w;X7dOpwb z_U4|*hd z8{ z5Kw@HQ~;J-EU8sMTIW=2%~3=IG}c0>mX}mw!5>N6=8xBp-dc2Xh+>Hg0}}&Vt6Zug z5)uodXsIB8iWU*=aw_KKBtR;qD(O0wV?J;kE3BNQWS?d`{A~FzU3`OJN2NhQ+ODt> z<<@d^z4LcQ1eJ2hL&eIzZH96ls98_qA|dsC;vO)xrhq{|U0l$waJ@U#UsQT+&36F0 z{r*VKI`0TcRju(1I_4=jpKE0zwzf6EXjq7eM=e2WFw9JS+rHV|4QcoM>hc>OT<-E| z2CUgnbN&1_oys^(5b|`ko`~9g#%k6hv#6_$gEsrnS+P?ZT%CNmi^rSyuO78+`{dc< z@x_b#+uPv$w(k^;s>C#@)~aQWMMS*!rARqV-i777B&7fV0S51#b7$S7``zKq&Gk48 zF_otERpt9HKE7JDkFOrT|Kzb{XW2gK&SHtS8a6ZxL)Z09*M8~CTejx)>$f@QZM&NH z$D5m*?RNY5_df2L_N-fJmG8dz)OF`G%0K#zf7IF74zZ`q{NxHj!n=6RkkHW!HK zeK^Hw>wK1qh&kuQ#=*=1-EkZidH80tLE`(}p_KC3XP>of5#;RrvdjqpoO89-w(oOQ zW-fxe>-{uMSC1~4d5W2+c3tPasYQ{=7+Z!h=DBGaZ!8n+b~~tUd)CbJ{OPBkdhbHl zk~QaNm${bRZih%uA3wI<6X2X;RjIYk^IT5H1(Q2ZQz_-*;=&Lwg@LNx-QSDArfowA z!I?!v7$Gf8+ESzH@_LL>7J{5}Dao*1jA9jZNtRo-dn6*F*4mn@A*`iZ8(zG9p;AK# z&N&9uQl*q&U9sG#=UQ^ooQr_AZTtTF z?<3VX*Lj|qxoMgh<9@$ColZ+JQ)Pbn@@Bp6S6#b=@_Fk|$5EwhR;%FYbd1KBv-Ujr z4v0&NO9i_$2ozKeD=Q+o&_HOG;Ph0A4|YlKUXq0#T+8@Cr(2o;wN_DCYT^YpI^|hF zjdNYolD8X?!*F{c&S2{x>r?02#s&Mt_P{7>9ovjm>H@dp;p-2HLyeR%ThDKej?*7WVM&XRI4cHOTW!_EC+%46$WK&&Xhno6?PUR+$< 
z?e6C(Eu`+8>jI80r7ZWXb4~!~oR(`z1wv^1eifRQz2EJRNrsrpVK~iG8ivuj&@`Q@ zj>mm2QHA5_P;1pH!pR!bb*;7b>f$nK70~_hsH$z>ozi@Y(>k=?kZT+kR>G-fQqU^S zHL99xk!b|2%$#bOrl}Ag_qVVIXADlml+&Zj^Ld`X^!`_mr}66Q>U264EiXR*UjxL5gLwu(g@3XsI=)DgY`{yu}b!t(iHI zcH4pZcoM)8@_vq8m|lnO(Ru&jYkK+{_LuMb59z&9n`S$G9_H#^{*ycnGS>LO0PWibza=S`iEr5P|42OIXscs)zzEXE(sgWQdsF!QtS6 z-tpjrBs2i9fMjv8R~U;3ibw!e%e{{ZwNwKJkpQ3ygr;kA-OuxCwdyzh-J9!@C3Gew;gkz$FC zlSS<>++p4`Y|@lcdDB9k#*^l3Ie-eP0@9Lg@$afmRzg)k02Er_)k;Q?2w6lKsSTah zTvC>rEt{q^QI9G-+V&o?u|l{SQ*wId-RwFRfea2D&J<5(A4cBQI)>-d`+s`szHCfV zK{#BGAuOR)%eBhzz{dE)a#Q^O`6>P0F8cBqqTg$1co-7o^tBcIrJsNQgN^=o|L|A- z+kgIIxB5C<{dAKT*e9_iMJfxE#PUNx^t-y^;a&zXG!|UU4OEq@AP5^p;PfxQJVXrO zQxOMj1t>4cHa~d5|6}8UAO89a_grr{BiRZq7FDIFXrQensS8{Ps7M{OM6wP!o0=!_ zv?72~gh;GGE`}=)Y^(tfg+vhIRB{nw*Jw~h&P6JKwN^{XaS|zIn!v-FC(CleOHl@Z z#NoL4IXTrcxh*xFY==+(Sj^Lkx}M)fWYI$wHZE~$e=uH?AFuz3F2TSmvwBnNWw#oB z@qayRo~^(CH|zc;zxZ`^4$gkE+B#3!lyTK|D!DQm9gWMA*|=f696#Nj-ppqo%<%~I zl%^i!bUW|9^~pc`(#xOy>NkG+w|?u_ZR`H=ul?G4Pam0L#!;0>MBaP;%zJ-*eT|5# zesg>U8pfsgQ>b|{9ZkJP!k|wi`x5g zum04RH>>S9{^CFU)qn8o|LoUS=j-7xyB3$HCIGNEhzu%oPIFAwnpM~LeGgg@afz5r ziUQReTZKe~(Z;--TPgqw0uc&O)n)z(wMJ$lWAYrA_O$oj5r@VL0#vD>1W15B_`dJw zVQQ@jZPQ6#N|91iEg^(hwGEA`Xce}$R9y~)OL+u9wUkt%umy^$y4tK=u%Y)rIu7%j zSJwj7P;-BJD*`~2av4qu09qf;)+^7o0X$>d_S?f*pU<1)-I1mVEKkEpnH^EX2AnIX zP2V_Uh{%+j z10Y9Qk#HGVTWfQv%R_4cK2K0nDmiN@(lidywC%dMINI(Hqx2kS;?z!jW$;{oZW;S0l#_+@3nRO9HC;~~h zP@M6!Gk50RYtA*-_kH6xevKumsT4#;M09+0@#Jtgq;Xo4=sYkY67UMjoThPF<~c3K zV1(6Ve|=bC0`ENs4@UDmPt#NqG)=R{D#qYL2wJK`s*<*Sm&@|>>cTn42xwZ;vVXV> zp~-11*+gVKJ?tNDw!0xj$INS70qiz?->+QEx#WJ>+FVr0luB~m$26;JG%b>IZaN!= zPO8&l%x-=6opaMPdEyX4o)>1`?6zwbB?r&Uv@Fcb>^GY}ElVw0>zX~#g%G^wb*f~R zO3s8~4M3Bs&I2=xxgGgYObE!#6WdbCwS{)F&#!6Zj-nSH)MBL!yXIl3D4C%Vx-37qT7LN{emn|9 zAxF4rJai$|bh1+x+K4o027yQ+%u6}y;v@6>S7XBN<*7+L!1ldF?Wl~N36A$$pVBwu z5;M0?h0o^O15S-Y4(tN6m{lW3&bzAMA+Qf^+cNXY7!W0DP(e&{HDLBBO_xLelVAGa z?%{|!_Lv*61(&X|&2v1K`kl{So`gMkQCnyu!)5XzE2H03J?WIZC#!~e3}m(BxNWlJ 
z&1Uo7d+&Yn$tRZUPki+&uWw%6pN@Usqxr@+k_sqRF;M`pMV1gk^xioSYK#O$)XHf& zj)z0Xp^NS7d^=4i@_u`^QIu1jBl}vbS=_cg(WdD;*ZRAM`~7}@8pqx4d|j+^&NnY! z5W?^LiQkrU)|#)jXC=+kX{-)!Px}n|m2ZCIcszdk*=MvZ8t?E zFRAOBbU4;|@s7{>okN#H(?5T=@B3fa-kn{ZF+r*I<;#~#NlQuWUDGtj z<7j5DulF%VcFjDCSzXeks(qh58Z(o3EHFX&i=LL&>7 zjuDF)8`)BpaS|Xyiy>4gD6;#zs=Ba%DyD#5+=UI%N$U+Ef&MG2$=c!i5ZiU`-wYeuk#QMgR zfMVOnR_t*j78OJkC%@?pVh8}MbBKtb7y>XqdJq|VDW%q`V5h^r-)@F>7~8p2b%0eX z6LAs8e7V`3b;I4h(W1_K15j1R0ahm>(kwNlgowd=Qw0MyK<|mjK-X8kqH-aGv}7Xk z-Xr4b%e11;t7p_IV!pVz@ZMivU!P8=#{eoKW_E4INM?ox^q4^fRdt<1th|%RK~a~Q z#)nhi_paZn;jld`<9s@u1htr4oSje8WL4&QcI3^B-#D%?@@h7<0!gcl6%mQl)qQB3 zPsjZ!#see77)4E0+qMlM%*)g?4Kj-sV`b)w^`M7y*)XtS0COy$T8h&(JJ&fk3_}P( zO_T@#zPi0c#IA3CaCfJkz)=jbNFBg!-x<*R{Wu*D4@Ty__u!#Q@IE-dTC6)_RV}q5 zyW{bG$!1CjxF3&Q!xy{ld+)q+JRTi~oO7u~RF+bQreBU@TFUD10{|-xJUXwcp>0$( zmHHOjA)?r}t?P@}{qZzqdHwLP6dI?QF{m}GdKIPha zCNnG|03`&Q?{3d8u7;tbw3ITdYG~U{-?+L=o@K^NV^jzj9wa{pcY?7H{R;iW$Ng6x zLf68wxxend|E)TGyv_LxUsZf@J?4hHvu&I;%+p=dG{bhNxlDP+JTvpqb~f9xEUH|z zAlaIs9b=qR27up0_ncctgG~svNXl77kep}{5f;u`s#YJobFQdpNlTtpRRr4DAoH>; z0MNEAfy=ds!Ro^Us}+Ko8C0q3!eXXitJZ(T=h?Y6Q-t5dNPzdOibSZYr4|Daq}3(K z%%XRXQ6EGrEUvZIT3u{Rgq=g8V&I9J5W&n6tUeu9S+FVyFp3pV{*e}-s&Dc;SJNR? 
zG{H5j9?Ro7%{d2zjP%CkMFKDfzogNrZ$f;V!Jt|}O_PwPhueqXIfNiZ85tDsZtnck z*mc8dth5Fsu!nIRYpu<&%QJ)?tVFr;ON;Ogy%okz(W|B-Fe)8qSW+#k8eO} zgh!l#`L`~6>}~5xZ|d*-?&SMlx}LuCXRrId-!I=hUVR;m$JcjLx+io#251`&k(t*l zkRLrGy*-QpfRx&5Frp<=L1hubqJ;c+nIxUalGw<&AWSIWAOSEv4hH@n*#m!P0>(*5 z1Li4N8vsEhX&VZdL!3kw?K;ez8(U>Gnq9LK6-Ofo)Y2$;wZN-3*e%T$>i0S0!l_eFHc>6FKDoI(hjrmHF`B@tPdWi)7Fo5!*70RTiRDw-+* z2N!Ft<2bL_ajm83SZzYY<0P>Sd`ziiGt1dIa))`GL4$8|US*VoP$9zE+4f?9yNBc5 zelkMTxrJa3vFn3}Sc|IKk~oG<+a)p=La9>|i7K2A!`a!Heg69TbgTev?B;PQqHA}G zXe$F~)3+&ssxo3;77;O#b{LkTwdS^MrRM9qdv;+MHr~7ae&6>!vm2**)pI!KYOT9n zzXB6TD}XN|rPd{#%BsFHvpG%3Fl_s2nx<*m@Aq#zLuO{?)9K_96cks-Ijg8_W}qfm z)hZal8#~=K=xS{ynVlGjh?HXMKDX3?97X3aqb2{w^=EF^HvNSzA)hAS zdRcD#hyYIH6nuC%-eFz_8uX!RthZXcdS>=9AfgKq)@|4n2&;;SPN&H^*K~ufkB5gP z%}c3CYBs@I)F{;i0Da$k@29k+Xy!-sw zIGxs(n23(Yec$WV)m6^*{{B9M;2fnipH8QyX=04d(KwDP%Ab}6U6e{h!4wIVIFwv# z)rI!McI#XZphRx0%V|F&(vz#pYMM*Q^CIlKu50^E6T)d3Q%b5p-ifT78VC*7QgY5E z%NiIMR?)YvYRA<=UC22mRMesnSXC5(9dl^?+GB?hj497z+1g;$bIl$RnvHhp^F@AA zi;yY|opJ+7g*pHNP%t)H!=8Y^6ih|Xz_VN1lA>Awv|2+90A%_m;N%PO-xu}YU{!lu z1VL35*FWmo(u$Cob1slG_xP>qoT;HG3Luz)35cN%x|G&sx^&anFF$yee(kD!_psre z4&b2p3Qow^-k9qqkS9*d*ym!v1;8UDM%=Rx0}n)kSUHiHw$Ol32Fklfv! 
zoePjq9PI2_qZ<$(+G6^~)?D*a0uZYsMH47iz*|7i4n>P-iQcENP{3)LRK%bh4mUY1 z-m)$L#1 zV~iiX_r9irMm5XOZAx8M9EX@ottk}~3n368nw3(}zylGIRuR$a;*ttF?AoSpyQsAr zdLQG<``2qUYs2AwJkE1!n&y&;(s+M=ZvenlR5ooB+U7gI{u{k(zx?Ho%q*p3h(+pd zd)~D}T^_c(T}tWt`kDY5=g&7=yo%>8ymNWU2F3K4<{!NHR7B3tce~h6r+Hdt(_8QT zjcA4gya?`fNSW4Lro3?GodHM9yPwyWNA%wFS8z0EvG|wW{Ip=~m zjGhQr9=)ilA%I_z?xpY>HXNe7^|og3Qpu9@c5_C|r)haO9?qX$Ms{X4jw3U>5YRbB zQq|=&zVqbi_G}kpSTA{;r&LQH8vuATrbnEnNn{}+@7r7|Ga|5=rDdrpJMIj5J>IO3 zV$CHlsn)vGC1<%fZ@qWhc9^E+```V+Zg=_g>M|{}BW#)mL~B)t4n&C@BCq6#6{}s_ zc*nS6xWPFh%B6rIvp10Sb(*YO9*YVYtUQ<|L}90@2%xG`Wjz);?~9hh!=8wSVbENf zw$oCzifdy;?0mfFH!bk9%ctw?r%0}~I;P-U2;uJZ@F+oEZP3*mQuF~Lt~>HdiccvW zk4H1}Zap-(T5HW^bq3E%+NZtuzU#WSiKUb^_p9&w5JF0iijDQIuI!~XPk22~uKeG1 zqaR|YV4HsU^u=f6@#JHe=lT8j-*5ZQt0-zPt+k>7B0A>i{QB?^(Fz)_!$kvotHyvI z;cs5F7d4AjEj2eyV?fTiWmy~(BdzJsph}1_1cc#Lxn5-McnIX7Sk|gc0Je&FRtAQM zh*ok=Rb9^0GMBj_hhmmeie^m+Aec%iQi>H|fU0b;?fW&6dwrcH}IxoJ-Hl z6Vzgg^D;Ol#Z+saCROp?0lVNl5Mo1;Q~{!M6+lX^L?MK$u3t@-&bHg*{`BGdAO7y& z@iTw@ulBf5!fN>r}+e9C)&Mwn&ECh9#Q7os~a&<9mw!7E2H!j4YWuBG=F+~3u zl`J}(?b@ML)e0rVE;@G3sgzGf$;GNHo%zDBEcbk(U%mRBpS}CjrGGAyy2H&A{m;(s zzq@<*PR-XCpxZRwH8Ec`t%o=jn{?4(6=8dKnkJ9LNMHa}yldE#i%iM}f`}?0S=SHU zTI&M4J_ahb^3&f=xz;7UN)ajKNvfojU}exVx7?f!yT*ruv6;0)Kh5)U*mH1IrAU3u z5>P=?1Jf!lcmyZ-n{0g|64AGxl((iRwbsUa1eA5msj6U76#y#MN2b4-DHuMQsMM6z zRP3?iBO)ep&LOyzQqjCxc>oi8sF?}MrumXflWSu|Dk-p7D_-;U#ni982J9r{fhT1{|#I1&qyQ-md@Y{nfsrVzTu zhm~_#Rm>zt@5!aItee6rL_1BRsxDKmP@1OMZO$GJ_oav`&P$eDP;r_j)qJ)+9|k8y zw#`7N2?)viFm%f-3I?hq_H27TG+{hWDh{v!p!cn=IXS0;Z#F=l0l`QO0119fc&6cF zS~1xg6U~NfOiQYP9iXj8XcvN`fChV+Dk8RLjCx{H$FuP4 z%U56f>5u-vfAr`7wY&R1Hy^a~=N-*NPodr@_1Og9C~O`%g>S5Nerwj-*}&V6)sOz= z+kbOq48z%!S%l(iDtk3jvlB2@qYFko!8F0S$9qP}4yy!n^^RilaDUWtH2E8go&9srsk)#i+0 z#O|zJh&;}loH^gXUp*HPZo!$7^H=E^Wo(+HMdYcTU%t@Ir^oM# zH-(xzaeMXRnj?hbo5s7r?Q0EprCB+s4wVslST#zQEd2>n29fd29 zIoT$b?6i)}MGbD*xsNx0h+sIqs5E^2y!{s6T_*~p0-nG7gOAV8LX3QSdq2-Q(o9UX*0yb%rWsH3%D9QqcU|Y1R|?YVX1L0e&(6+P880C^AVu|o+qNa5 
zWy#|>0pi)&*>Lvc#pj=X=i}e_@JnC&jqm=gKk$$J(?9=@{KB96v%h$@d-C%?|MUOh zfB66W;U_N;!f*TL```RsKl$;;pFDr|;{JB}cmBk`|J9Gae06^D!AI|X>BHw$=KIqF zC^+(frmDIW7emv8zUxG!l(cd#eG`VZtF>~7<2Vw!rg5Oa zotZYFMKnh0W3!&FoO2E_#;6AKJacTird@K$Ilp{(eRgr)x-e@3#1Mj3Nht^K-&mcp znMN16W#?$q4MW?B2>Vzin}n)_h_U0_yN8FHgQ#`gP?F4cT*gz_^q`D@O-tUhcevR$ zXWQoU&mPDTI8;JVrNfx+=bQbsJKH+v9C1oXOU2;Z7@EGjzumKo$*UG#Rgz|sAj8<`*FWiK?8jB0>Yqd3QI1F!LPpfwAT7`yW7m|r`P%L@`kuuY;jh&P*VoU zo4Hx0&p-8+5KPhU9}Wuj$@|Z4k9*C@rg1%1@|(_cv~eFL_2F zVg^FzJvjs+6kEX!hMec!Lm;Y$1wt;@2ANR?7+ZQ8b!61)eB zxt1z+I*qAhAUlrf;^KU_+pO%i&1SRG(AJh)J(jvS=ghR}r0?B0rWhl_4oQ~fkQZ#* zz8@&KcIbDtPW#ibinVQ9O4;xCO=!o%Xe*_I?QQd1Yn|tLea&FR%uUlo?`y3q_%0&E z(6+6Q{gO^n3y}*kj^h|3yWrQhM6x~{_RwxzF5c!iI0!ha)l4lm?W@1(-uZb9gk7_6 zCn>vp3e6KUGb1sz^%P=&DgZE!0}&}Z1S(h^k|XeJ9u*Z;t*ByPKn25 zcIC8#`{1&cZ7s8+0T6>@G(@afmXd?-9g-DU!{NOT6{yzAS%wfpWG3IV{6Ig{y52D$B67rpOzc)Sb5TO_!8b7?kcnt;XV0FnLoM}u zyYr5lrYWVk%co{2r8;txP17{xT0XzNx$L7Oj$NA`1OTqDuTT3!^kI8(PQm@nzxg+- z)iy*W80R^JFzt`B$tHrL!)8NZd7N*qKTprEfZ_eir?&KP*Tt3%tN0W+7HkB8HWYd1}!st7*k5mBNK>tpDgb4+upZQH_{t_QHfWJGj% z@q9@q=WSW0lI?I9&G5sI-Ya=#1ZGqLQ1IR}x|9+)768>X!q5Prs?=IL_A5+}$hB4! 
zjqIFr0s;U`D|PpAmVk~K9?`lLi9@1CBt7`mInaT-I6EJ@0g*_|A=Nc3y$TRU?>##d z+ccf3BC@if(Fzi;!s17@h;sqGN0gGeqKKNoZ=(5#&N)PEVhaH4U#6OU8{uk$5DtR>`U#Wipu*y<{bE%XN!be~FFy~ysrfGV3csTDj z>vPhOyV)PsIHI>`i-<5%2%)61Iv1@p6f?^?BO=iot|dUzG$3ZPS`{%hGXhw0ifyxo zyQ*s1r|2Ro?%IBuizn%rN-3GZc}H50<7o{$ZJNf#z|8C&GY54^YO1A)UD0wjY(b5U z9`c-PjeRR3Wl87#W)s`GB=OG7^o_K4y~zO3VYnK0wbmHhrs)_^r9d%a-w%Np$T@US zOq;q%Qs&pUD-Hbdp_SDk8%!ZBd5wei#5rfF>Vk{xOcjlqreo%LPKaQ{Lm!uA8OOV2 z;l_vU^RuDbh{%{l9p=1b%{ApyIXTaMXaYlnP66h;_MQ1CCFYbp%q=zMd+%~EpL4ER zu*k*Pd0@AWBscxeM?a-$Ok>Wq775O$R92^yH%@9;N_k8aH)@-Pottw?b3sG|m0DvH zQ%Y84eMDnySE%&N4NlQ1Y+_49E<`dwfJ%?|dDSb3h$5^yAR+=oBtu28Hw)ccUiPD( zk1{|NDXZxjAP@tf=_7MF<@`AFLnK5rbmY(h7)BSWX(`n?SF4QE+{Wh7$P^tinJIuF zFp;iH+Zz!B0wEA0ulb-#C2CY+zkRi0JW`n+0~Olxj^esaaA=5}^0KW@$pB zCd}+zSe6XnTHhLBQ4y;|6xycdQc7{`mVzrRmujxhp2%|d9K&8BVJa!REpVJ1h@GGBI0&bgQtsVV299Jw=a+(N`$($tac zvd!3S4CAckcuwOOb_t^6Riy#OKfdw6Dq2|I3Pgtnj0lDn7yt}RtO5#ynjwN-E;%(# z?EAENe{B4r{_u%>vUN>$>3*t=(FZcN7Z3C2fB8S9&~{^;J1yS10@4+w8Rp4|Rk0L7 z#Pu^{woZKii!AmZd%d+4T3^^(rvKYb{6}L65OI%+XmJ-hq49WYd2GFJoLhjMb0VlJ zjI?s{oT?}QJe~pGZhy=abj$=^C>8;$hhPRAvqxXv3hs^YZTozKxj{QYE+$R^6e0pt z01xnAG9LKRYg_l%wIZdQpgIm#0N_L{$-Jwh^03U4Ya&_$oGcatfYjG{tRhH#aPP^q zk=wK5%Y8{ffBI^>%~cOeR&1BWA1WyM8*a_D$vISFGS=Flc&J=EQB-8~+H(#>q#X&> za);CwWW`n6l#pDm_?D(&t8bb;O6u?nSmBHM`zfm$&n986r`~}O=R7yd9%48Cp8q)y zU+I4R|Muj&e=7adA6dE!S`JEnuv8?9+Gc*8iIP%)=>VaB|933M*gU*E_n&u8pf)E5 z+s);i9(Mge*lxQgXWOA|`rr2p|HLo;ng8he^^5cK%PQ0D?e$v#Lk-E{Yw;eeyd1VcUT2)07O+v~kYM1Ga{18BEwRxVaWI0VKr4X7XL;_M3GO8(eZCpJ= zqw|8Is%RD*E!k?yYpd36hSd%Q)Wif75G?vYSS~fyBkow(Tm2 zMgvEzicCO&s}V3FIu~#agkc79s@gPqdD%Np$Fa1H-*$mX4pCjx)>^yB!_bG|e88sl zs`XGMvO1IMc}h^1u~pb^oBQLD>F?yF&QcBJ_ zAgx72B*vI?n&#=r`T0E0%bb=ar&_Yq)jsER8e@z_Dg(OUODT0pVAS`UQj(diS`&B_ zw2E3rWHVH&pp^)kCiX*{imJ>j`Mv9gd7euVX1}B)B5m6`ALg{I*7>T{p`+Cj*93I6 z&;`WDAp{Wt0wJ$0+#3Nw(fZhRuhRk)Lqcr4q}eMO#~$9Y=irO0mGya1rqT5{&jao~#Lkee<#=awZ;%N%{E zuHa*?0|Ed6u0}U+J^suL5!iFCIW#fXqN-({RJH56(=?iystm)hq?Ak9?RNcklTyk# 
zJ0I`w_aQ_eCuZawn~uvYqAOYfgliB;UJ5xr&Be2ur@SU))SBBaIHLPIX{bhlWm!xlsji;9EA`BI zB#g0(>~zVgWUU4+HXH&lg*L2nqtJ#nkg|aW0q?-h%!2D_S=byPLCWvXfBC8X6@Qc_b7Vkk6)I#vt%J+WoJ#c6RLf3}2dGYy6 zMBMhBq(XobR&_5gw?0uHHJ>TYB0brU=F~+_Ns)~&RfYsfRNtgM!-JUrYR8jP9n#95Mga?y1m)|9<}WI*Sf)ojhY1*V*mi2@-I6qUvY2biUnQqU|!-?iO({9|SU z;_&E|B=X2@Um@&OVzMmD%EeiS8BNpWG98Y0L%Xe|rj)L(uIA-rWog@10dmd^460_C zjEb(=gNlgEZbio58eagw`e^lSXNEav-}pDj6jBBJGtr{D{t;JTa`{Q|g9(p666k*L6eRztw`EqGwl1!3fbm{uYM>qJ)HM z>#yVJ3BXj%NL2~eSw+n`gRZNlIWuN{OFWWNy>s4e67GxSM6;3#6B0vGBXltUQvp3Z z+~=CjY_r*1?6$t^$~>!r8MbYAJRa{4_ah*ZP51ZS(L0w`@HNJUBr}bK@3K6ZV zM6RV*x4C?_vV0Jc*{__HSqn4QoW!b#6;m-tOCfZ*7Lnq8TuJDv%IvjNL)hq0u2sli7!M6#w0ZHQ>Qob%y0vVqJA0yjPgiK>>A0RYrg_0we}1_HP| z-#PN8sc)OUA0YMl_H}S8n?7&5_PD>N(46g_fKf{2>DBY|i>qpyrySenVLGLh2rc*l z5SMYvQqRuLmQ->cy`#SG0cl>!X-vQ_FC)$2E>CSe1-f|s?mwC@e&^;ay<0i>f)7iU5!$SkO5fHvjgYa z(2nD1Wi_UQ(`lO1Vu)f^tBM#47=S4-n%bL2T7LdjBcRRIGb|CqJ<)_x5DR27X7 zQ1q=!?c;Zg2m(CD+LhF5u5HV49&rXoyM7~S;&6%5rZQ2uD3!2n zU8yw}1ydA{2&e)Y!2P)TpcThKL;(Xdg6aiXi9MCFn5bhmg0d7d@y=B&b;-xN&khVP z?(yBT`fKbQI=?$RD~4Evs*Bto)7|y{SGMu1W%qMlQiuS0ZaG1WM4|Vvj)fj0YTjg2 z{Fbf%f&tH4H2+7xg^27Uq=j_~Am|+o&-T9kWL_AuFuD-Cu46<{tF@Zw&3+$aT&GZ_ zlywFfW1OA$mA#>o0_TCPVoZ+w>Iw$GX)?6Fh}qT1JYfMcBLMml?ZDrAJn%-jfNxCw zfSV`1d%aA#kQpYDC++UGmT!b9y?XTt(>pzVfcJOxbON-v%hU0k_`*+1 z{q~>7hnMx)*Ig;XHHQr%Rw>DE2#Z$}e7=q2ecJSw=i~94Zuw;6(!KXRo|eQ-fKt*Q z{Doim{&zn9m0$VKzx1V#K6wA5|LyFF4@& zWs~#rm;cIN`m=xb|HDq%>9CD|;!pfrzy9mr`pVb7etCZOr~dE%?$yQlXD>gG4Vm&` zKmNqmzVYJ4XF#?;K4i)0$joXk`(x?4j**0lBO*W|VCT@3+m#`xBEo^YHi}quE!mo( zKnk^>tVfV-zcs^EUcD>}K^sDlT2f7_*80$dV2aC>r?kYTdHVEu&bgEneF)CyS|m%V z$vHwKPYeKc&3vosoI?OrV8*6t0D_87xn{_y);LdF?AnSTx`jlKS4q z7_;bcpYQMYWa=7c*&?wb-KX3)VH27usfgrSy!S*U!7sh2nY$;*1tzxIcp58ssa){7qu3HqGi*H{3C7Hdka{ zrc{I0syXNAL#>M`O`{+hfQINI?;JrXX_@A3=r^0q;dBH8=UmBRR{OKYd%uo(%*1=o zfFN4l$Z~_DbuqD0FCKUT+F%!1Z6C@HTD`f+9v5!(KDdbBbGAa+AR`(=V2ooo-SgL8!!O+Fm$FSHb0grKh5@Q4 z9AZIqrEch8j$i5UW%u()4`|K{P^?`>JE;s&7bhHL<2+V!!OM&V2mg 
zczm&;hu`&n`_5(qUA#H6N-}oa-LM=|nrU{IKcaKptL<~4Iv5!smem~D^*CeLn zJf)JI^N1L{%Q@FtN=oxQr)gdZw$Cr$*=+~Ul*TdV?2v0NjhK4=;Q71XfAQ&EDI28A zrx#ClJJ0Ub{rz-2ZFbvfnh@Z47`=}rW%jO^7}Mpmr?ZsrfA+)k-Px<#hbK=iNo1NP z0lR$mbkTa67X;yw8_%D;e(6I%GAWgdXTZaBS^?8qYnm6!nqa)Wz0II5gldI?FQs&w zAq!9Kz~a|0Uf#dHxqR}Z zX_}H{nof!kLb${Mt1jaNOlyR!5&9;kGS{M=^C=}Y1JTprP;1$3wq&;MJL?M*5fD=k zs$wQ9@C^h+3;-ZdRX`uPkiXfxAs}Mcb-;8u%|Lj4ef|9E5|O90m>H8Vr7*d5NPxk4 z=HP;WO;U0ytqW|ps%*8^aU2~#`XZ`o6B^OQ3{@);x!{3tUY0pc!!Y!l!N)+34S>A^ z!WW-^Qfp?W{eJJ*zyHCrHg?Oh7}(0AR|Tf^^>Y(~mF$qwxiy5-J62Uv;Wzk~YIQDn zA6Hb)A*rbuK8A3>VOjTWxl0P1!lefQA@D7{?LOq7OFDAx31DWimpJ!EhbI zcp#k8vYxw2N}wue!Ey9HvNMs>=_JktM?)Bz)~yl&0GOs}Z6eq^5Lij>u*wYqT#Q6C zrxe#OmPu9H5S94tkYja9c}t&M`NFGsh2GfRtiNmJGp*F0J~pZ3rIzh5to^$uwki&3 zo@W5aId{H=mEeqzN(E2+qfp~pzT>*KkH;~F){{#)0qBaxs;UcqN@;K|=N!G?Y&IoV z??NeMpe|cF)e#UFqE!a;0%^s_mSwp=9!klzR^K)T;mg2wZl4#(LHtD`2k7}ua(Nm5Va zJf$4Ttq*ps)f7aeR;`xEY#qzhTuKoK*fcGuJ%8uP=Pz!SW#T6!X<3frbSimTd=s`^ z&y2aIWtpaFsZ|xQi@w$>W`Rh`A|fHU&Dm}T?NW=Fvca={*Iqokd$?b6N?4&5K->(& zuwXT`tu9gY@4B= zoDSo&_VPYYwbs*V5)pjn6d<%8r3>4LBwxGciRoKIn#5tAYi zpb-?QMq&_P4V5(jfl`Z^c@kB1MEXWUwl1fjk1L^q0RWO3Dj))&5w7twkN!e{OU)~# zBj-f)ICp7cD`MlcY&I+^p!RUsr&OA@uT`I1Z9>x=Pxp#|fHAg$Dp|T+6G9+BAj>%y zscWYCTxwa8BR-u@pvFF!RRlnXbDn|B2M)nWPC3of@zm|QxEaQIa*kqbUDs+AKeV}2 zLo{v92OsJx&xh?1rjtle2HsYfv2T{TL8c%-O=W3Gc33F zo;I(Z{yyM$ZCorg1XF{&=tY)?RfhPMw*Ggz-~X1XAGHs%zaysjM~VMVPJ)^h2_w7C zs#&dP$lxQunsjD>fcogBx7lvj^|RKxZX2sI?nDtd2x%~RUhVyjLP1in+{RloQbVy<<2=w>N9{ADw{-gWrS1y}>?m7IWAH4dN+sh@FuZPcm`|#nP z{G|T;yW^MMKmDpdeR{vo56j(fmHHMD9v6RKSt%i8nf*u0Ec9HCA%L zQ}DLcnXOuQS|vY{15Fu_%v2@wRB?#55ZFi^LEhP3*c4pJ+SOn7qj};{_0-(i^1}*{44*` zmp}O0KlxAnp_|)3iHHvmhl{iGWm#@-Z<8$T(jq_zp>I2M4pd4hsmkgBN<=)k-EQ~# z_3MYjex6TE&U>G8Eh>aAI8Q`khC-}pDa`HkP*4gGKb>W6>fFaD?d(*Y5ubX;nx zs%_JFFDqL)I3BtV(bg=&7-AbcGt;8;lpLXRuBM5@mOPnhtyxq&Gcng%VvHgZoM+~o z=GlM@rfE`dj7&spa8loPYa(ARWwmfc)mpNusA;X$xn?COsVdegxvboLW|mR`pzj7B z+&tA>Mlw1+EKL`D@PLX$T2YJ5OJ-MF*D#V4JuJ)Z{pq;RrNY1`_wMrqE*Lf~!vLP+ 
z_)s5byg50~L=@D#BPsQh@BUyv9mZu^PW!9vS&Xr30kQFT?x*X<`YUjIJ#DLaQ)SAfmc1mhSrl<-zs{z!4Qn6M;P()@1 z5tCXFxa$TpI~*QXpug4{g7eI`KT8J-PVmwi!qH-u04`HFF7}*5$#s5G`Mxx8a@c zx%CUr9AjL+yGHZXT2mfVSzUmV zb1sC|2T~9KtSY)l+chpQkU`g$N|L4q4>4?V{}V5_|Io|LM-85FD+IA>;1@3mT6p$M z@g8skKw@a90AyyWhNk3$uBZ^Ca!v%_JK>+^UMPKk(c; zX$abY9B2|cX~*%&->QGu;rjcfw;Ur>!zpmuZj!B z2JN_VwIj3DYOPL71}iF&oH{*@hb+~%O;PFFmYF@XnXPnVCXbP%y8U>9LgN9U=F8#S zDI6##*3fKs)En_<$NSrKthE7m8JedAWa)%#2*IT(-hFa-{k?Yi^xchiXICWp!mbXF;`L551w3E$}5m5YgSkGz>$nH8jDhl_IZRy~??A;I$dvY&P>UdG7&q#rwSX&Y>1v#xzaS zIF6T>m#Q`{Q%b3bIp@|qUiQv=uV4zc>3b2$^HN=jG4_2QV?e}je&ZYW`vV#rk4I+T zZ#T=b6sg+~+NMb)H*=&fJ4ExQnzdzJchQ5!{m70*;V{AaJZQGGSt(lpR zX@1+=6A|*GEYAa)K0>!ZZ**kZL1Xd-g!jQkO7@(wcsz)E1>@AvEBu+|!*yS%*g zE`nCIBr;D^b=+=uyCPE6Pz=?gZz4MrZJb|S`)kc+hH55?qQ-=5o;@?ys!LAMhuv;h zWHGb#P=Jo#zUBZVs;kpuszp}>ylwiXebj;&NG_5~-?x$r0eW^m@}kuf_I;0zL68yE zpr%J#b0Uf^IAS1M-LY07V{0QzKG)?2ZFCy#d>k%yvmXg6aC8IV#O6%qOCNlH1%=0o6^Jn922+jv*Cgga@<@)%Lb7f}FbbhuKDYaHZ zI2>mq29Ve`L~dD@6%i&PQSCxBv$koQ%`WE(YF*cLO=DKiHe00b>TI{$Y`^}MuSlZf z@ib=ffwLs7mU01d{iXq+-eX=fv`tafIR>@E&D}LnmWpccUp)En`3FPOcCq`;=Rd5R zMQWP%`_qwubFQ1s=KS(fOiL*yx}L_uas4r~tF?$cO9QFC2#Ve(*c~=*wsS_@x}UQl8Ro`qkH;|5|?V z*3_9tM`h@`XHD~sC+~SF^9bgsYT6$kQn|m&_X~HtK-YDkT51Ky+lx!j?*9Hhm>rif zTbasi03OK%`nJQ>_7i_Ixs;i^u6yy~@S&Jp0svI=orXgU4$4K_JZI_b!O?NyVhpxXI zhK7Aw7Ec_UM^GXN!Ij`ub-%yQOXD1R*s90A&LZV*@AZ0JDnC379;yfXR{_Ze2;- zbzX7dJjPv4?J1|g9=wdp_|C;sOZnS3zkZ_~Ui{Sc)lWU03JyUx7t8Tz{si>}#&avn zTL;3oPs^h}!9O_lqho{h3%3w+5GF$qH3kbR2&%@;IR&Va4S@U_DkE*%uG`|{z@B+k z(nPBjEmqy71tI+)!d`s7m$v=QPu5$9E&(`T+PS#5DFKM7h>!#K=3cr=>Q*;0U;_dG zK@DP9D-#)!h@@H`8zsW;fAVYp(hKLvDcrM%*Kqdv=XV2?n=!xKe*M=^{(kq(bkR@y z<2eAQF?{kXkH5;VentwSAV8i06hHwAye0G_W10Qmb|*~%fY4j-%`$fYFw_wM3Up}D zfB-_m2qCg5qy>p+b$WdB2CNrLfB;``4!@|suZprLcmRl20U#TLX#h1a<{ovj2{z7e z-~<4O%{*02qFOyZdaVGP(u`mr%671eOT?e+{^tKVmrJ3BNdShn>F>wu<;B5#*T=Ko zYtx8H_j5o02mj>1|F8b^PyXJY{vE&jKmX7E6UP$yORkCE`Qi77Ya4?g(b z$KSnuxIG-_KlA_n3;*mt@rQrzcmBTr)Bp8fUY1XvT|NH`zx0(Gd}`fuvcE6_L9xHqnV_h*S-` 
z&33%GeevQ2AOkzY8X7jgEmIz1IE{x*zw1i?bFZq7DF0$t`FGJT#DW$+sr0mr{Mx07x#`IZy2Or#&L8Y6wgYr)3sZ1C&zNbb%^`372^cA+%jIkXrI8 z%Jx2{T#=kJiy@ksNU353w0azyLKR6xd~6Wev^3GHDyR;9CFv5}BY>sY=lFy%{bt(L$o&y-cSmhSr5*(428tMs>Ru=|Zydd)>rcAMdYo4ea~@bC811iIO}yN5~J z4}cBxzIJgk?)3GgF9f}A&c~Aa;2@X#`(@RF_x+}pBm#ZYEz5j!{SZPpzu1%##&K-B z)NDw#b=;P5OsODJizecTMn}h%J6R zl%)&L3^~AvKMSwc)8>%D=AQ*$dn|@3b*AuKJ6*24ydveiWF7@aX8R8&GN~?_pkCLT|d8S{?gTOpQfRmgQ9B9BrW+~HIdUc^iAUe1WXn@ z0pyzO%w0gkw|JB9E$b@|X9LD#K=s{WnoFyL!i7J7rSqS^{@yay2YhxGDYZ`%emKpy zic@HsMq7TkmD3N8)2BQB{_gqk{OOb6xQc0DR&mg{j1gmcNCtM4F{OkH#uytP3@SLD z=7c^-mL=(??IM$RzG++QH_x6t%ky0Fa(8#L&-1(IPhY-#bzx~O> zvDUx*o$obmyxI=$onL(U>Pb*^-ah>3-IuSfzx(m`kN0=;I6Zy-&h_mL-0!C)WmP&7 z0L0KpsmFP|+u!XrR}IN|8#kM63{FaU{~H%|s&UwyPNx{--QCT_#g%})xVeEyXwZko zXTJ%}Fbw3~&CQKZL`1C%$OOa|-1l!j_b#^Ep>6v-FOVeHY=|P}w;d534o7A-K#|Yd@WE7ltNOC4CcK_nV%T&tK zXII|QlifuKt<)993V;Dj$&u%<%!?U1;)rBqfjuHFHM6Hu(%QYRR(>K9V_&29iY>a+ zH2N4%WlE)7!@Q z=bFc~G@xB*$w(UV7r-nUI;^2>aT(Yq#g!+^ywEy%#EL^KQ?0Mt^* z^nANnCtFPFT*x_l--yB*c)Nc%u59AEFC#<+?7sz?BVmEjvg6sgB$R8{sQU`!+; zL`Y1?@Ms()m_-~T2*el(AJOU-T?mh(DM{IT-!{=Zo=X{P0rKy*?;OX&aT;+;+~79! 
z003&^I35m%QfluTUqP7nr=t;G@9&Gqu-$C8+gCT&Prmvt04&R*NE{;o5E@DqFjFDo zoHG!jXEd7fv_BnH%5&Cp;NhPjzS2z}FDAMTH1Wom}uqK^$4kiqTE%gS!GWXAKo2y9`U_r?;_e%=GPAo(fPyO zeH)we^YhT2Ke637j*G`BB{bXahC@2mQ%cjUs%_iPWlkx1@B6+-1J$Z5XYG)4&ZWfG zE#r)2)%0}SXPGxc7uc1lUTn4_1Q$Gdm#6XJ;lcZumo!z4F{07ym)Ec5P*Uz=;|$-s zx^j*LtdugQQ`@$pnLVN6W;2ZA2q24;;G0^dB{T&f2yHW#w3NJtgxedB|C)x$c%`lmTFpo-D>872d2ukDgaOs zt2H+vc+VBVdtYshE~vT4N+%LAGeZLrA>wWrQc8#8mrR21jm=d#d&Irv+N;fyir3E`* zHWDu0ExFqdU*1jM$!TGv82T9F^UEhs-uv=!_RjzAfBC;c#M_7OUYuP#dGag-0(AfQ zAN&{p+F$*1fAyFDI!51G=Q!kLsZ|;9c-(Kdy=f*2j-whju|1CS<&zBl^Rab3mTw()HeZ|Kin)kAL|6tEZQ@54R5w4~*k)eCIcQ`gi`!PyhCx`HR2w zr|<5sUw!`iLJ+DNR;rfu#Y4r!Sk@=hasx!?d|P!nlCOcsK`{s7;CL$)HIEPDPcgjwwh}c?KqAi z(lpInv#Hkt#1I<*(3C`~M<}aWVzqXCR590je}7LztHnXBWxbyOnnqE_Jfiy8G;^BnUcGq2&CfpDfdGZ*+Vlm z>S*(UhTp9f7CSoRf|iwJ7F=Ym%Wk(jola(_oHL%L%ZsznwkhW&k8Rru6kr8X10aW` zE>)A&%FI4KO7<;PL2EayD()YS(=t0pA-cdcO;g)ALIZ#hnzGE}>2N%b>V<8vMQY4v zL+9q ze0Y2FO|73@#{0`>mr`#wHJ-zz?LIp&AE>o$hih9^&+Op>#r_x?Md z|KR54uIu|I`emHj5T9N?oyY0p&wlu&FMY|!$fhfAci3*8K7IcB_3Ns-zrWG_-TCgU z3Ep$?*3??})z{_LSVwvZ;)+(w+g}f^znIRc8F{U(kU3VI%`}_OV z;b)#_=iFw~GjsHQw>=-nxf+0HD^(Fn$pByg^E6S(A%v7tnR1Z@T}G-U&w;~}t8Gdt zR|r{W%(bd>p{gv?+_hV+#QS%^`%QX%LZ-2#|?AJ0@N`a71)Of{(54TW6bjo>eH4iIX4Py?FNYo%42R9D~QC zS*nQ$*a{Ch_Q3>1q-)ysqTX&^kD0v+>yKSStyaoBGv_KUG_@4fT8c=iMEu!jpXHi} zXw9a2>%tEJjc-=>c>vHV>#J2&=K_G$id9RkQndg;VAr*+%w~uoIwDep$gUr@rp|#t z10e(^R@G89>8!QR^PEzuwKm}TzBiUK)s;AMJRX~-t0F0-RYidSO=E~S&+|AfrIZ*W zk}gYmlPF}XCz6>Fxkr+_nX10UADwe*@+fe8<6PvN>$>hdHkF7B?@uSIa@lr1wk0p^ z*)FAIW~b9>&1qb3%TyBpn2OAkn6AbPA_8ue%sw7>k*sMNCPHFQrmE940xPmNvo$q( z^>qHxhnbnTA&SUxI+aqI5NoX^Wl(FlePc>4+wHb#+LfJsb9;L{9?|jhr_X?Joab>I zFE%@7o~DTiV%M5UDMhr3DiK#Bt15;nvP@&Hx#^pWv&;MY`|H=Yj?sHR41*(ocsL?M z-}fT6K6{?s`g)%1&TFj?hodtjKtOP@t+fuD-Uqke?_a%o_4Mk=^XJcNE;;4c56Pwv z-~GsYPk;}vZpu>5c74m#gqWAq`u6FQCn3g%}9<0(LM zJlwTy+r{QsmXh+&4ImU$J3sOsz2 zubZYpwEg{qBZ@vqF4RMEPj;8X!#sa-x7+&Z^H2Wb#}7Yy{lV9NvUzbfzCP==eS4vn 
z&d5zdhy0+1rzM#tOjSz`Aq-uY5bNPykAJDT`CEe2R3bmOzR4*|W>5v)$0P(fN5EMa1p;&#$j1NL0$rp^%!+`N{c38YeSDz;PK< znn8_-f{&+Z0s!y*W11*HRjDeUAMQk?Z`vV-Ap}Q|b6&<7iHwk<*IGDm9jln$U%&2S z*lsqPVHccN1u`IVXPfPLJ{%4Q@4cu_IY~Am$T|Dq*63_v-tBhl*UmXpC4j!^08^>D z_P+$!_y7QF4vlvXz*gZk1A(Yi01$^nU}WG4MQjZe6j4NUZ-$_1k0y(&bg+EkKTidYks>~mLExNk)q!FQff-+aW|>vy^o=vrYWU_ zXpRUBKrJ{AYNZq~bIzF=qjN|i5~FWJmzF~*h1hlN&;U>KF>czG7cXK<$|hNXny!as zTHXJoXwKQHv~5pBpM3IZtu;m;TtEY>P*Q!^-xYQ)32z3<{XCtH?Ke-)e^;QknNI$6 zJ0w_z~}*-*Q8MA8Qd&2w`O&tWP!K-*sBl`PqO+*iN!zS(397v=urGG{~xG0&)d7x>s>c|<(;3+ zPCoziH!kww-}*QIwGY4i(I5Zge;kqj=pX%`U7SB@n(j~hi9h-Kf8if__Rcr&j(6Ys z^}oeV8DY++i}T&t?#r*QKMk#us>?L(AIA14zy9tAUpgI5-uvBdS8JVgDO$*2we!YD zr(zW;j^cU=htqK!Pn+HF$NtzK`?vqifBT0&`0RrZ-usKc^q=0`9e(fc{)LCb%VFrh z|J`pt9AAI(OF#VTM?duo|L8ybpZ(=u`gi`_KRM2)P3oRLeL9zXefyeQ>e|qDF-!HT zjF58)eVCSG;{$Ohr8J?fwFVAx)0h}BuPifIo43^~%&h?P<9&9{F|R2pb@h-z;NTy7 zCsify41tN(j&3!#MFRwI&NTp2nHb5y>XOK@cNAk}@0Mj*%~;SQdnzffKhU8AVj}YF zLkOvkIcJgLy$hZIxo7x%B^{k66on=u3zQ4ZROKs1FD$1&7ZFgL9Rq@Ut z;&!`vxV`@yf8(#C(Rt`cghad!gYk`7go=PD05oBB$1_zm zgf$Hqh)BID3Ye&pcVY&$5?D$F4YCzxU}oonDIy|xbRo);RSGlpn})n2hYCs-CW~4U zc|4R+bn2hFv$i9HZ=t4RvauYQlhcGSD7FHs6%h$A=d`4(jJakJu_bM{TXuxVz0*Woy0%e%|JWmga?KGc%<-vVWKmS|l9^Tni;y~np>NHL%r6xEt zGu0{r9DpdLa(jKV*)~k(cJJC!mgND=oRP_?^@5F?c&p3JsoWz>1(DbjGBFW&7oy>* z0*0mo)Fg^zOG)E60^^crF?%ZCH3opy%;oJL*u6g%r+Yj(s9&Tj;>s@Vomzj9B3aR$u zTktc=;^pq4SWQfK6Sw=S#CTT}WHBGVv~4#soo(LTh2cMVVW+0grpGcx!bVk3uS0tM z-UWQ{Y}14Y&^hM}RFzuai!_JBVNQ!{W36=_r*(A##MM|}I?cP?4grVIuy-k!r1jx& zcy)i9O0Ejd1=_=SI9^`t-g)PpbhYgm_!R7f5k>^yF zl6~6^Z3m(y<$0V8VbevldU0{V=yz>;OLTJQU=F)3x9BDu|G zbFuyMrthxrUN6f+L}{A$Wq);b_4N6(+uPeZr^|~AM(W$9&gsK<--oJPwcqwDo-&T} z-NVE6?QJ*oS5Kdu^782qe@F)Jz4LC{w%yRTLz7E!9*H0{ei(+}l9@&4m)I;)4B@1a37BVY z+qQ4oO~0*{4yR?B_kT$$+z;WXuk!>rYA?(W$^;4r7jH-4UHGb^PGU1w%p(?HWq z%TlW_Q`fbt1WZj;)K>?9&UwcU6pW4E*xlMArS!3w2#GwoQi}H;(2mD>jRB}40u>bB znEtW%P7$)y?e6UU{(f1OmDvCQD~r6;l5^Ig>kgt-&>%Juk~tzt4*Pub;w$&c#GBSojSF@ 
zwD$^gtu?|gyy+XFb1t*S7^i7L#L%{NADgDxPje}y>zeg;Aca&i2H&oS98=$RF8JDo zALqFW78&njj6gu-fT5Iv3MwMPcyAhCYc#!AywrOQb5#ZXo|91>fGsKBlS>eaFaQ`< zK($2HT5?q9BAhaIUA3FOcTCBJ06CYdtE(qZp44vaJkS04MP99&rjZf>fXI@$F()9rySwYc)}dYb=E=KX zYJ-^~cU^~wQHrQeoK9uFTaM!`!^;;3h@v`VQ;OV8+k04Ro_2({MK%_ll{mpzI}dS zWw}1RSoy1T_0EUy|NGDSf9w76*B{4U?_4uUd%L9Flp*I~)mR^(9Hu2w$}1m&cb&B; zra4~|S}B~;AuTWC@mFJ-zqWb1aewa7Hf0@d_)Ox`Xn40dpE0*SJbCzF+#mB2cjIkJ zi!r9#tRJ=O)%L1uTVUCbr-P0bP$5^1E@MoUz*UA)igDIkcYosQnaWln*xf?SrDy5Xb}KGB64KZS!-=7Srx4ds(Lz2oJ-UA^X=A?6;8SH zz6rNe+Lzxmsb2S6){nur1sn0tpGmx@tDxqspsVE_ULdOMjq%1iD|0R>GqW&D)FmzxonKmKbScN7^Zj9U^}~Am zVJ_b4b{^%QeDjw9aR}a&@jw0ffAEL@@PF@%Uwr?+`I*1==l}eF@q2#k5B+cd5C7-) zzk2@X{^S4E=4}0!{>#67_29|R{@maF+K>Fu^~+~%W54=^AOGcFHRH739}kz853g_T zKKb~w?dAG(Iw9hgOa*Ay$G^3fL9Df#_1aouW1AtP>G)=}BY=X7637)X6#!%xp)+|Rj6Pggh zY8Y}htP6n&U6bY|ORflbP^cv6qKSywHmG9Ci-??UHdp?#ZLGJ>TYEa$B1F#j!Q~ij z)2>im(~Zk)5u2`!X)epGg`2kRn^rgCj2*gu)pq;+J{SWT5-Oqv(NYovBRONx7$=|# zY>a7rP}ONZS#L$aIweyj^b3&)r}_w}EdZ!)azx(ylv0({2Y{~ghRBk^Y7K`32#Qnz zv0%{{B8Aq6rfKGRMyyyc@4Y93ipVOZq*&OZ5`uAdEQvuI=Y(NN$*4$4&R82ff0Ou!j?5_72{&Z#B)KBiQnnV5dqoQvN!vDv z2nlAESTYF_SP>Y`2PC@N?^B9x+Y%WDDN+QrZQG{tM9?%|M0`VSGc0qNPBL7*b#wXS zcVpkY`~Y%v{Ei!=G-k2Izy&?bUH*EfwZ#X<6E~ z6#y=UnNcJ&t16kMlu}K?01yRTu-?B>3xmX&VnL9GAZDyS6~hn7 z^~+D1Fytk+ooO3Ad;D%U^v=0?o_B}+IF6vpx$7YGwZD*Kd~kJvO5@=uOFE*oZ9A+t zH+OgQJol^NIG*nIyNUChq89$}vrqaa^j#N1XxnywI?gFpr9Lu%OxBpLZRcZZ+SXb- z&U1=$isGDWyHH{j;q`hw48#6-0Dz`xq$Dm{`^Y!DyUWW5Xw7syUS3`Pl&E^LEP zN?E3pF($`?mP--+Q|mqsNy;*csoiR>$Ks9VY;&Ow9^_w?3<=R1Dmt0h{TjablMu z+lFRYmT{cwc%!IPhpOG(9W!5DUA4wRW*>ZY0#{W>jw}F?_rA&S>9dzpOlN0j{V+^- zhr?l)mig@LOjQABnIb9~0)y0shGR}C0g$)O8e@o@bEY50c?oStM9y2IHpVzjbJH}T z>3s_ z5diGEK6vk(o8#DZ-TUvqznrF<=Pzpo$11aRO=nNbA;ySl%-2Wu@4B-edrZzB_8iMy zoOa9pfW@g98W<3m*&+>H+qVr_V+@|1Z_I6%Bo(K|6U9g;(@mkbgFSN-xx-vkyTP*S zFVTet%2|I!puxC9&a3tM^87+k(3*kmI4s9B&YXAiX&*;1a7xp~#aSs*N{MqC$FUZ- zM1&TzFtC(TRJnQp7-PQrEK=&U))-d`E0^H06e&?kV$Ra287^1bl8>Rx{U0nB@8u+{OFBCv z*!W@X!o}(~C6hxlK7O=L(49`F`7kZAKhT`dM| 
z7AnwCkc~~jk+Fsq(TdLN!KfUT&Et*sMgPDrzxZosx4-(`i$Aej{Sim!KE*8T)+h*0N*E7^xGYvBRLCdAbL?nQncBfWw3@v{Ou3wZ#y2S4@h{O z&5+!i0h00cm4a4msj4baLMBE_4j7RYQHw^UMEZj@-TFK}kAIaB3>7J?Y{>S(dn2c5 zndjLA2b4`-&^t&IzI>X>>9dPJu>Hxm{j&kACq3eqxcadl*&=n%o;^KIyBDvX zrj*jWY*yY7k0Z{&e@x0zds(wyWuPheY>RS34t;hXhH|7%sO;EA$k{LjMHIc zgMf&JjWP5Zm{3Ydv3T#Da{!QYCL$tdkOaUOhiEE#%Uats-dg+W=DKMbV@y?cYMie| zE-}V=p4+yq==<8*$T?TVeDA%nzU!KKfhq0)pwh0PmV(w2fC?ZY8DB~P5kPc?1YwTz ztLqnFD}~ypc`1nj;B2!Uh5;SHJi`I|#@SU!gy?+BFs2biIhVfg&Ndrl8Fxpcu-&fZ zbU2;%Riw0JvB81CuIr|0dIQa8@EWvL}x&KVFZx8M!#LsglD87lukl|yK%48m%)8kbpBiLp{Lgy(%53^2_H$fJ@JmXkX$~QvF#-w*E}$x=l!$EO1DR6F zf`Tu276Ywu-Up=Ul^7ABqADO4lV?`Pg3Lk&7Rg_Q=m5cx>GTy=c;zu+{aE}zGoK$PN4T1oK1{{EvJf(Jc%<{wL?{(d@ z@=h|-lGK}`^KJRe=5A%%Gx4V~Uz1ha0>Ej1^ZYcjbLV~UofStHowLpuheh0WPD@U? zU`*q%y;-fEJWXf+@TSSS71SS+fBHjDa2s4}&)d+XxW7F-O91ND-Ps!QOCoYM5D~N1 zUWTf+#nyhYpv26O%CK$EpKR8fb1e)pgG5kt-p)Bjkf!gZIjTbDq9Sb*thLKDZnxVp z&huekA4zjgZP#!tAKr`$b`Q1#6VBtwdYhr_#^c?Kn|>HBFE1~yE@Mh5F9P`L?v{;X z=EB9Ftzym}e)yZGX=+SgMt%ptUtK)cD%4AHeR=_ExoFf3NR;#XS;yfc_JM@H9N*Tw| zcR^H@um}g=RGA$la=}*wdZ%n~wL2aVG38uRu9i!sXmWS_q}(fcE*h{&bzMbhHia2?X@$Eov<#;VhyV5cpSOl@pydyOYb&8 z+t$x+k?AUaT+qef`$+&wl)ee|Qaja1JaTwFEL;7)ZShCfEqE zl2)yCSk)#~Ezuhf-mYnKj8*$Fmr`LFXb1$I3o*{Q6z^Q)ya7|R+RcBRVHzb9V?;2{llPtot-T*zIA^L=W?c^zfKtV* z;Pr{FN(l9I7-L+pZ}h~OIXG8`5cZUFONcg*VbMcgjM8}+;Jm9B&b-WIt*TDb6xwEg zJXmY{wsXz}=bdvT9K- zss^bpbxQ_->XIfR&iRcR^Wk)yr^$QYbzK{puI*YMn0b~|m#ZVH6=)-3(KN~aWobUFx z$8ogQvX;7?4~ErQcR|J@qDOwVd-ZHSeRgx&5F%nQWUU9nahjf9zr-7a!p7Qm-4#%@ zW-e(?ap(qB7G_lm&R0%Sg}>GT*xk+T*|2{1op)5VqPo3eVP%0b(l0$i^Ir)lg5GhfD5{pL;lX}6V z)Pyc3=`6K@K=gEm+p9N>G_j$hAGuCSFK`myxf&5MhHpdTSt0$s0aRYAK-H1gE@;)) zoI-$~0P>3qf#(hCJ4w$Ieqv$H5=y`nUPCj|`+5!Bxo4+~{=(&L%#*gQk~;b==*8v= zZOHq!H|)O*MbbzTJ5D-`+T&piD~ks3E?r@}BOfP$_kHUey8!h)uZ-ek-@yMMaipQ9{*3uEFTtzK^oL$bF7-pPiA@95vVyB~}Vt zmgT9dB9x<9c1{vs<+7GV97Ws;@<+rovo(`eElg#B0WT1_!{TMV3(7G!2LpBRTCD0& z6$;}UCboP{#uD6Z1vaVbMJ@^}DlL#Gqb8Ls^U-asS=hpx&zC<(P}N`PnRysj_c 
zN#lZ3+`sK~l@f>7g@1@tyU7nBYJHR>%bt`q;{O+INLY>AnW?XPb1N8=7fIKfGQFhl zN6h8rT<6uH@g|*~)hFi|;>3?SD#E~lLChKd^--Wb2yByn&N>)?0}g&WQfd}*FYXzt zpZ)bWdh9jzG)5Y;UaFrzffI$S1W0rHSzO@Mtkj>U8{>vfAT|D9rzD(-gpf2`15Hw5 zr8==nN?xBCqa@vRV=%A=Ps zw(*J)zyoVuf(-t*@%;zy(c<0Vu9|r78uID*^J8nRYml%irmLUd&86A1iq-yg|KBHp z(39L)oiS)`0BgD6u#GAv>?J_okRA<6$ARksao+oCiTZ-3@8$w9fXBz3m0=23ZnLE1nSDv)SPt8x^v1k*=Z#1AsZ zjUnXbC?~a=O|M|iUQ)?XT*^P>YV~Sx$gd%~rM|9AA%JbJ;bx#xOUdjj%Pa{KRk=6m zEbGDj>%YD~0%AMcF!i3^6$@KoWeY(Z;9b@z45%`8gfgCB?TkqBl)lcB9=#Xoy}xVp zlv3PgDM3xc#-hwwRE}~m13Qg?1XAZ(*c)@(vmzP}u93{4z!&xz5(}9-SxPMBq`OBT z1;S>aG?yx$Dg9z&plSGtCkhlnOrPf0d$_w6_Hei-zApMma}-y=DN2pJdbN!vIyx#~ z!B9tyFRPTAZh)cj*n^CrBW8N)?*p*hT;u>1F6Ux*@75P6jpR1JnlSs zYc|OR%amykNVLikO89F%!^eX4z6si8vYUzvO^Vj*P^u&r%3*eYUYY1?n@^H0Z>J3+ zmXGiye`P8eXdbj3Y&oJ-j);!k9*$iYg-NgvtW$di&{uPQOaISt2Cv|1#4NCQ%fE^5 zdu}BaADiaCzkdfVj^m!!OZWcx&A4`R*lRb^1a;sqCKcL(0#hJB_}Opd9J=XCma3W) z4ocn%11zYQYEr~7!+=qa@#nFD{VzJYxf-9;$to+_!07xH&G_vnB)JeugT;#fjUzC0)*|jt5zOWkM39{iFUzuX^nd+)5^8F> zRIDX3*xRLy9=hl4ihuMvCO7`Y6v6S%*wQ#hkb(Ob`~o&gnG;Rn#LsKl%gsFk$$-0E z%aVGG2-E=Q*qkcRx#Ugh8QB!NKAT6F*oXd^G$R_O@^_lq^Tvc{eIk<4r=h2|!z+Gw zxKt1zN;GtG08qg9_V)Dm=XVIT#r@BIl~!_Wh2Cs?_Dp-^=6$pB6>xINEV|r4c^UNdUW^k zk;Ux(JA0Y76=1|3t0&Mo&wHXe)}!_TG>0=?Q=_ojfj+9;!sZH1mEI!BAdGC?Tn#A7 z3(#*ztnomCr;@&V3Kr%Fx#-qz({u~;evPvs{sf2_Ehw{^IBR`i>hR>6?CNv#|@i{+DB}-hV`EkosI9x zeGa+nb)r@k`hpwsQP53*2JNxPHB_nJdN zR&B+3a&Co`Yy17Wo5E()cVZ&9>ytCK&O^xRjIR&|C*&o`L*&bFAxsF`Dr47~1u98< zD8Z6N1|S18Auv~f@QCb=Qxl;uMDdsZmaeMr476jAkhW%ZcX549nS}B1$jp>uZiL2? zpuzc5eV*U0o{`(Wl~hMoN5&f_xTTwg74I7BGx!0?-Bc|B?QltuM;omA_I+x^HYOf2 zmZ`r8^5>QS_TiH)AbFppx|7x{&9#np(waY^L<$fpt2(CLIxg+PcX$1MK|F!a zX;$Pc+}$0o!jtDj*zn}~K4|}&Xs63jvO}HGz$N9OKVR;Iv`k4qvbG(3magOvhlI|!5r0u;cg%(Z8n(CWlX3K+O$L{> zI%)AUpT!4yMc;kfJqs^W%9qs1h+Z)g_$LiT*ccRJse29>eDquH>|CT{jcz*aW-2e} z0m7~S)QfEu;i)9WGwo@8Fx?WP;V1B98F)I`VqnVo1PY$lN4g}VH61RQ%on+Jn1!4R zqU`HabK?E_ctTCx%ArNw6`y#R`O(K>{J>o}b-u;j(6Y=NH$!rQOX!UoYfCr3@_ZQ! 
zs^Sv0Qpr>`>pTQA{~LuJ0^Wt-Fq5CN;q1x%9K4r=d>m?guJ-)-JkjafbIwkWqzieV zphF8N8qxN5XUWw0!}%E(4Jk&PNctaMN^V`A;JK302m2REa<)oLI}R+3#XON`mLXkY zJ;y^*Yjq5v6+a@_CxLiJ8Q(auDATP?eK$i-q_G&*(r@VX`~fM2OR$s=Y(~o(J`^OC z^gWM+XZI0X&WWw+P*cm?n(ex#9=1@B#%0@1U00lr)>h!!r`)RZ8sgHqwk_<$7LOXv zQ-Nn8h70>Ne_)+5exVQBYcVr3F*TG`2G*{U!?vwunG%gKB!$9DeS)sHGNJMv^pAOuw@^{PK@_RU`gl6MfK5!YRW7Gq+WKv z2p>+CVGcHYOODC-h8;8;^IyvAZx+Dt=e|ICoC=+Yx11NIjNf}XFPMpHxVt(y$l$2H z+Mog%tyDDmW=)bG1-`OWu0cX43zp~~^GMngG0RxC zmeCr?c9B=?PZHm*$M=S401`KXu6@H2Nb$xwz74*yX8C- z(uIE)Yx~$;H}K5e3NnXD#j3}Epp5APtH}d=e=p`A0v>1xOYFaAl&|)-7V}4caoGR$ zw${pkYWe75AWWGfk>GXyNmF1)n*=N6|NP?Gfwe{DvjZ=RKX!Pd=Hr+^)S z>ih=3bLhj-z_Z&^{qr4wA){HFrkZ^?zI_+A>-M-K?L_3d{&J! zTzGX=x#*~q?aRU`I(qx-JUM9z*WZAg;b&3(^tZ{c)~Y7UA-gnNfL$!_d1r6$cE@|& zHQz1tUazBIvi9ZLzzyOVnMU7ty%~fIX$k$iGKuVeyb_6fJeXG#7b~owzx~n}a*$+p zR}Q>8Hv_#Dmp{^u=WiPwNs%N=wK&$jR>}dT0QF_2OOLzodR2{$b1-M|t!cGF?QlhW z+XwBy{_7&I{;n59b!}1YJpcawZNZ{yHBOmr2UJXwC_f^4}E>h6;ZgV6xv>C>L zs>+4pz&FP=KJ>a7xPO-iWgW3`}#n@8OfmOyl zQj5%6755XrGtG=4p+;BLFK$`T*$}vdGrb*~pUDHhjJ+Q7WKGu?cEXWpa_7?XgZq`U zB+31J-pcysCZ7v35*_4J?GP!2GMf1bCjf!cW>#;I1HI^#Z!3SD8-SIy{JxEcAKIc{ zuA3q~0ZJ$NR;MYLu3@u>`d|HPC%^+M^je!#xjlp2Ny6U&HCq%VoC`)xNQA~|Js7X` z)|Oi$C#S6}vydQUYsO2;+ZwMbT?0+laxVUD`xBofmKsF&W)S zSaXWtRF~07=-#hevlbKK->wP}ty@$+zDsH+2YGSm+F-C={~hsww;W>hUzEs)rbf76%jsBv z*T>EuKQG*W9=Q%gSI)1v-|+IeH6xV<8O`FnS93X7SC2WIjq|-|X2$fcn!K(U&e&t= z=KR+xBh?2x5d_P2&B~LaECx>FhJ-}gpt%MIz1nO(3_>dJ#7W{(!ZJ-$v7n;=eBpQn zKFJ=!MQD68N}~G}E7vTt5Wf z&As~^Vx4?&@7ro>R^KSP17T}S71@2*I7`Ql;*@SJ^g(a zO4ckMw~tx&z^FoOX#TScJ*V?MdN>nC1axiEog$}*7S_r$z=d_P$JA+px#KRFyz4Jq zm_IAaj%l6|+yq2obhz|2D+;o;lEW%I;2RqoqX7rq>=Ez_Ptn3pw~Ln>G;W)56uEW< zF76%nd8)7ViFb?>$v_JYM<52J=2T(l`gRHmyLsLK8q4INu0Wh=AtZh&7lww?1Zb95 z1v54N?&^$KjsSympj!+LK`$0qR)blNb+zF~XC&?7o(;iN?`Hqj(rZBigF1SxQ)$_W zyR7LFDvai+dZ5ZkCB+8sHno@~Q>BprFaS-8@ySfhh6`e=aLq+CSV0Sb9}QBF=T z*E6_a?(Fxw2Zc#V^a03?w&X*X{T410&K#|`yx35!8pHoo=JlNxbr_!>R+Bpe-QW?d 
zEP_Wa?&YK-u)tg|*O2Fbr+a?lPk$b-`)}Zj($=oJDU>qkCvDUOWlT#j%fF6r%#oMzw7O2i6pT=^n+#S37}BE+B{JfOK9;=0a2i5liS8DaO$X)Wvus4B-36W^Utxit3UIKDl~> zKS$4VnUYqBfDd$Okn;TEiS0KDGq&eD%NHtU+po616Jk5r1MKOl%$N*ndWi+DDn|%w z4Ms`}$Z~hY70RN#Mj3@YWoRNi2r6lYRkINPI69l-RRO6_CJJDf#K#YnFguaONg|+} zA_&YukgtiJ$d_6RmwF~U9*VC5EB$%`hQ5A_H^MwYZc31;6$2a)q}*KN$MLHSo?Y*z zC0}Xja4mpG=U2OU_;9#Rsl7OR=kWn$(6L?k)qhN@xHM8zTo9!-bZtP2D)e8}VG0RL z=EoDg7FLycV-phvh=(Z)mUNw44`-Vl0iEdE62HyBVFTFi6YpnK%^o`Td!N5gL!&nI zciZb--;CJFS98aMgEsG59==BG|7|u3VJZhW#xhHGMqt2mmTP06|KaA(h>bz|q%beV z$!bqAZpzrS?Zxq4k`Q~f^{#OryMKmXQSXs<1XJofn&Y_l7syQNdnmn+-NE8~*h5O# zKj&Uo`lmm8XL{@tOSJw#yg~uB%2O?2MgOtJ}0YT~Tb*>%xxDbT}XIaf~qJ1(WS@r9w z0QO7!g;mBRQugeKX-qfm_&dI|dga5ksr{Jny4>9>pu&S45+vUT|5)_njfv=A3>+sdFj?jjIhq zvQd7qqM$eGEXpL62(@>@F)n|Va}h`Lr-IYOv9=0Ynj;qFYqc&=`Ia(!Y$K#{KFVD2 zQ?bJt4SFeBe6c+UG{ngLrT_6AzW!6zbJ(+Y;N5`H)z(rYlK$x@VVYR3NprX9nWK`r zy=2^|5bJqB#I?w%TOQgvj(@@e+WF@cYs-3mOw5QnVU$uu(Bz}Y!vz4%othoURkxd) zpyRwV3N_py#BD}-FRPHuV#9pM5GDAu?s^AE*u+C81yAk=%y43ZJOzk~i-%#VL#}JY zYHFJ+G0#jPSGC@c1hJmq56%e*1liIi$NC>*a!E#aP<&qR6Hbku-u)C*BcESSuKaHM zJoIiL(Cd0Zit_G6um3!zcJk!;h~xP-47qjx(XH>XeBg%SsZsXgGU;eCDNbd!;%u{F zj$a5dvZHP^-o6>n725Lr&Cv5p9)MfP(NyQ^Jh`3habkx=6C%8QeXmm7f~^$h48iZj zA6}a&LLLtrpP?Gp^Wv8;*DAu0g^ioEMVlKNSC37sr~aa$M@CzL=QmhHkE;S<3bbn& zDCi(BV!5}xeLPE8^ zGQg^2!?y(fjfdW1OF)K};nm=H?{~k!iRxa#wG!agO#L!`Fo;FZS}^pR}_d`aZk`qng*ZWPX2KkrYg=95&22 zzLX~xdNVDQ#A4V6_k;i?Cw6ugAf{rfOC`$gN~kEomyJfB6+j_U%7Nk#8F)^uOYe)8 zxXGVPU*rmn)yCy=dbcPiED~yaJD#vH!k7Y*`jztEt&z}(IBgkky_Bq2@tm6+J66i!5PqN>9PLLKl0WN zZ%D_ut$pEgNZ20JG`sUE?WumA;L-b3YM1!i2+@Y{SkwWtsworSmjdx%!Kb_ZsShb@ zMH&XK_%lddvx3k+VUGMrYB7tTy>}n4@|y4VT%Imr7PSVAbpO% zeg9s}^oSe5rgEG=X5qD*Gn;?9_HaoslDlBsc`yj*I4RO5HRjS~ID4G=KeO!^I2!N7 z?*A|@KMg+Tw?)(Eo=shP-F>00YW*WAEw=6TWrBC>ioI*aji{jHuC_`sIrnJkkGf@d z(u^-hb?wEpy%dmne^FH)LlP}iJJOx2rkUZBz&}PJYV&bwpDS!sCtK)ZSoko#M$w}0|6!qqD0p>`gpJ9~>5HX>h{U_FlxsSe4I&11wH z#Cv3)-8Z+#eK%;1y%bNK0QTSZ$xeD>6CtbGDXxq{tze&!$J3yK$q&)A^o=4%`ST8_ 
z4k1mihc-^?!GTEeyFaDCcM-s6a5cnlPAH@pw=WG?1CETWl#iD_zrz3GbU~c9i#dTD zxRzBlz}72vUG6ZiKAdU$($&{jm{gXv8md!%QQsbKn_r>V=qkbM7qTp%qZDG032sYK zf+#Em`I!w5kLm+=n`yo7uD7?fwgUFdShGWZ?MW9Z^6I|#o9p5|Z1G#;89Z*s-|#AK z*Rg%IVa72s(Xjt7oJ8vW*IsMu^y~fgScyN8os0h{33~uraRRh`Nq89eEBx3{bA$;3 z|L~5e+n^_y_W*nvcq#Feh$Xu^DJT*jCAozlNdX)G}>k4H1TdWERhN z2b>%ZfAnOsG#jKO7XvI)C;N)izYC^foH+Jb>MntcjmO;L%Zh8NNp)P-I&fZ~>N1wX zUZUM!ay-tqB0d91InoO?${?gpnwk%U_aH_IV{^tSKXNVWVdXX)B}=-k zRX9W^CuQUTXaO>)lXO&~-C9!2#fkp{7o=Tgp*uF>^^d$uljWc-?BSJ_(HbKKR+M9t zih;XdH?>+`e+kSVuk?2A-iw)h2=pUa)lB5kO`!zVknODl($S_I-$8|^P+(!h60%W+q2QNntOSaOf)py!JLd!>pgb- zA<*Mfy{S#D!;B4f&*aH!SfVkLCR5a}N{OW6((7PX5nuu56hJ12NEkz1~20;(F zLAKSDkQ{v-qn4me|I5uq>^6t|jWRV=>6<;<*j?G zpZwb8e^^?I_dSI*AN<|DUTP#Cdg-}haYo^pkBH^sa6TRio8}v zRV?_D<}87y0shwDQaj?2#I8C*uH!$&8p?MP2JB$yVTz58?FRo+|hs;j_aE<8Md zO|{mo;jQgyHvms&f$5|XgVqazwA#N|Xn?{6U~hnyCL&M_b?)dcroNpe0Q%MlkIt0l zRausC)5a{1J!JHyoC49D|2y#V@-nTE;Bx0HrsT@>gP6I^6?3>AB|B7*y+1t}76$**S{YZA-qj ztDX5@o2Lht+e^w^zD&7hxM`7DvcMQ`%7{AUkI4c8FPi0Xpx9D860`fdhJ0w+G&Tl3 zm|yCpsnY1Yp9oKql~uP*j@)(YKI0L5d(+k)y6qF>xDfVSJ$E&EzbkHnu@1{Nb7QnoZlRd^Kl5GXuoQ2^6R!26lTTilzTmBu}2|6(O|;n!5u4Qg;Vi(u($QxHNZ2|n#g6Y;4mOk-hu1V%6he4 z=rGshNEt8Wlu9+>A*AmVr^1nkM{F>a$z!LiK=uNS$kBzTaDUN}%nDqx3vi%3D1}i% zpUw^D+sAe_bn+`07oZ=#@Ffrdn#0(;V7oD!rQyXxcdw4T3SdDZlbz?2kdZXy;L*`) zkM%~D3)VEqt%j){5aY5&KB%b)={iTt6+&^E_nz~QLYqS4^fyxU7>b=$8iE3~W&bL1 z^>Uq3GS?>CJE?YKI#X{rk>-cdU?}2pTd#KdC4I<`o*W}R?lh7k+_SKqFy2-W{6t^WV z*B0WZu<;M5IrvwRmi2frbo&g2rf>;R-bNtH&`Mpt%X`mEipp(g}l!$HL zcO-_;%t~(VXN}QM0VSq4d8LY~cwh~|vb2)`^E4(etPe%)ZLLD&`@_s8ewnU6uASj5 zwUPT;!Aq1qVrD`~LN792yTrMAtjVmtX%oFPlOAe#WBL z_2E;=7mAieR6I1)G+hZcdGJjZvpdD)AW~OC{>mA7%_GmSJ9yQpeIVzkJ#)j0xhj*l zcv^u}{SOBO&oke7|Fp}QFK*tUpF8(huxzhBdfK(*cpP7Eg@58O9&-KK-)C?fb%qmW zZ9H(v0DmbkcgB3e`TaY3L$!kkEldqI8-*9KyV;3%%(XJ=tanA(dRg|Hwe~lZXcoJ~ z-pUv&cGh4QvndOe@Ksc{r<4!s8#K^pcu8?25$H~I=SK7?j365P25ox*vjkJ0tljt) z?b(@hSFG4>ZkQ^h`2%W}U=WH8G5VX@YuhzN>#_zl8FfM(4HR9Xzh&_ZG4C&563HgS 
zJ6rsie^a1oJ~>05zx$#UnvMM1_RMs17x!;*F5pl_Z=&>kZuhU#K-SZ2##F!)ep+0X z30hu!?&cR`e!S9#?&0X@2xm%DJi9|`168|dOT{KD8a3D_6<-o@q>BDv@^MJ;lW(rC z^U1lvmY6DpMp~t5D1Y0Sf*F3L@YO%`{=9W$5&+#NR|^!{E;9d)Vj%sVdCRNDbWuUi zx{8IcU37;Zg|anl;2+7=Ud(*B)yKv?+VQ4l>#|XpbO?Mdh$n|Pfka{%Y{%$Xw?k*2q{f^n(R^tZ#)dEIh_cIo@t=lCx=lg6P*Pl0y zqM-=M&V49Ay8O3BA98&>RNm>j=Rzfif;WWn>trKzk-%DkC=SRqMFpg0TaCcI=;5k6 zT|on4fbBWA0&}okUK10O^8PjCwMM`X`+BisHSG4W7L0JgqFx_xi%FwA5E83rWsD!k z6)hnX_gJf`xZELR_QGM=Cr_xH*Q1|etdsxx_ViTsr-`w%z)>soLethewN6a+e*OSW zqG#k!a4BixA$Mxd)GPr2uVY{!)!%1EG1*mb;^dgLtWTNz;bXF-sfjY=M<hO#!l-Ri2g^mb{%`!E?W=ct-uXM>FaZ^M3AIeWA14H#GqwKn zZ{OMr=%gxyY=5M9&Jp#ON@gm-=d4k32mkuG%W9%-Xj}P>L~Z z)Ktu>(3$TSUH|4lOP%ZWUpqyCkK6Fw$>oNFpW%N=y4Zieai8PS^<7gMYqK*2?56Gh z#H*cmpl!6XQij^uV1+U#jLrt7ExkjkR-W-^o6~5n`MheW3%Rb^!esb(&~rMsKPS5R zq5ol(aJ}*LpUy9(uQYCEg=V7hAHx0}2yFEUUYv|b2dH!29K90zAY#(y*0mAi#{F|c zP`I**HsW2AFL`B_v6Jo^4q_cRtR1xaHnHo;^Qi%g-jxo{-@7Z!JP(6^{;WVZ&RWbP z%KY|+L0^v8*sA@Si=1g}nzJ5;XatsEEJAjU z)Id;DvJL1i*?D8D&QvdwwZr#yL3#z|>|=Y~TcW}1eSOs36;7wVjr#%Jy{oaC8|$Zu zi#@)6UVWs@K+8EgTSIC-3Is1n!6XkNq2zrE9u+g~`9S0%eE zo;xLV@NPkvCOj0mwudyGcOO@Thz}%;Dv&$`C$7aU9~~gJ3&d#$?#K`g-}8@|hH98= zX5K}TyR@q@E(l*?kb*PA#)TgTOnAD{d0&@*czK2@bc(z?*gv{Si#^!i&7BE*`~{Sq z{d9x$6Dpl%l0j77%lv2zRXiZVVeLU_ErS-&*2l(|AYO6(*7U6ne=9*WkdE;mKl^`4 z;PA|T$)k^`5J8!$P`kh|g(whZpB+i?De^iXm~H@PXA%>nyvf|?KZEgW5!vNocY3$) ze9iX530K9e?RS=oz8}KxwyV^KU2d$6rAssrS%%oVaR^&Q1P9H|3@62P=6!hjVBGarsk@gbozg^VGQBY9X1iwq>zks!A`yn27-I=@lORD%?!Yras4XRi&f8MIo}-JT z#d!*mahne*smU(+7Z-F{`vI=OYrH$8YPiPCvWwGK>qg57c}U;?VG+#})(F|@#e@n~ zUnb;@vP9Zreks{a;`DD%$@5-+<=5k{cag1$t`>@J@x17bCXBTx^lDIKdz|%M#he$--Su$m ztKF!XKVSgowV!QP1z6SS=CS{DvPJVFV0_W5#38?*0L010qp2YQ zZc44YhWh05rD-H+OGpg)%V?XoxMDu;Th=onpy;H8DXr{3bqjJGc*n3T*70k}AGx{S z?#`W_!+mVqUBkEiS-4WI6ZLx3mHcs{^PH*E=6rvf{Hby~IsYq%>~0j}(Q_^s_pVr> zj$&sc&#R|hPviD2df}kjZ7i+P!`*@YJCZdqolMhFq;j%F`bK%|+bsM1-dWG~#`IMj zw(XA!LQ1j)_6Xofen{hQy)2qN7c<@cFipxpb%PoEDDAZxay}y&N=F ziJz2C$r|K4QsEaCyl@Hxt`M36_>B}8mNDEXK7_pLd$_m}5Wqa0<4JylL7DJfzbg*C 
zcMc;)pn^K~uuei-J^SC&%R57{#-|F9xylk1=AwfqvB*Y@=b&fSze8mHvpIHJE0S&c zn>)+VR#x2BA_q1jX@_G{Lwcqm(>Vt#ktc_PPC1Ox*=`g& z_Z;jkpX!u(((iRI`a8wK!iM(75db)wRvb%5?nctY zzb7oxARl)Yd618LQE`Z)MT3FWt@Q?eX=pBXVNr!c`^rs)LC)T7+-JZ|FN|0|Isvx) zXH7=+v90~$Qt62C&**ie>1#fSkvZcSNf*#8vb+MyRJ!*CfI%? zsh|>pKMP|Y?tvsCd1Tvhs$5v)|D)0Z6Jp`OH+C8Gtp&;I4+R( z+S%Eu(*j^y+bYg1^5uxIh796h)NFh1e(SECiiTn-hwY&FSY=3;qOh@jd49XT#{b&WVF14lmUU%yzGoc(8eLEvF;z8_?n>JKHPk zLN9BT4G}OZFm{i}#1`1Rx8UQ@7@~ZC7(C@$bcGyvWKZcf@F&)B5Et?KrBDRqum9MXW($peOR2LWK~?rZ%suFw5nlRd9sN9 zjXF)mOMhxBz#EUeKjYOC?Z4jCuY(w8-&dtXb;n%dTO~`PA=X=1A+li9s{9RR!$bT; z%&Hbrc8$?}Xgt6w=x_k+Me#6yWb6JR>K)AYDCM;j&FS*tng5hfkP{6OnJrYWvq$-w z5raNH{-}usp?^+u$%qs)q-p6by?PbI)u88?g_#x*77%b7OZ$MKQ&YplMI?j(dE{5< zx3F+u`1YWNI~Z2jbadSDyas)vEX0Y{MwzVZD(V58G8#IO@?xu?EX`lWmdPgbfI~N^ zJ6W*_?k@+AgiO&YsWL6Y&crrvFH26k8)(&Z=WcBmy)UhaG#G;xq+q7d!p_>AgIYTu zvE*Im>EQ07q7Pv@iO@nbVixrS_SMCor@kUiz0OV^{)Qi597c+%LHYu3X+co0`w?d>oRO$80$D+{&JEGv<~4vSjkn>u zSlF}gpY&Pm{`mad(|+c=PuRZ+$X*!toQTu4-HfVPC9`XgXlUSsCv-Bf&$R&VN3;Uk z-th&*8a4-YGbRKQe)l1on=0>Kr5#<~9Us?ac?uWOYW^`~MFqk2v0*|VV~FNx#BVCH zEHi@bc_`iy4sr_W^2tETVL?ZI@KyekOf4t{VY`fqJ>eH&F@2{fN3OToxQZ@3{>WU1 zUtjel-Z!1ZUQCmZzW!l@%rSU12V9qVOtRnYw@bS8ucP!A zqFb?QmTN{Z7c=~5VXe{(`UA#0+k3Fe_tnIa8@-mHV5akp_pg z+43yzH`?3QEpHKW2!yySS900!#E8DSU0u&=&? 
zg>DyO>ErvoC%R!4En}Dsof3=jqCFl$lT8`>;_~vLZmbojENisWDQ#`wTs%LlR1fgI=I@TzQln_eCmv($c;Qo~FVDNivLBLlT< zgYL_iHpxlN-gieJ2{w?XY5nT(S5_rRHS*p(swIbY(Xy-EwyW!!}L{=vik-uqxxUcDi_A+0LCO> z(?lpW-kcQ~4$Al9|NXT1W^iEd;J6D7`PQ)4p!A!~PydP|ioZB%Ps*X9g7)eK4Flmn zzn#I*?}hS@UJ8AnCkO_-!APleYo-}%^@Z%bQAa@w3e3-iMR3%nX2$Pab^+F|eRYBab1p&bakRivPxa1#%h zx?OSYtwl|BBt$8U~&F8|M|goyetJ_5f3*sG0=i_+$!Q6p9&xhhzS#KWG}h4do){RBEe;gLha zN#9uW&ogIHshniA4M^K+o&ikVE-$y&nx8z12KvwR=1u)1rlfXl1Pbax$4OwHEgYER zOLr!4!7I=D1EKAT?fgf!_C3Q?I#QU4-l9;|i#F@;w;xEr+&~fl!-DLJMSWbvcd$9T z1C+eX`MaT0;l-ktzd?>eeyt|XK^VAmu%`db-N5^74rwQeMQpkn%&<&n@N=_8mjbL+B;UY6S@Q{ ztQG@`h}w^+B|j=L+8MOFbD6Vdk}eH{!I=7|5zaE&GWxPInx<0jCR^GB;gnX1=yNSK zt>lC;-)dkjzuSuW9P;1Ch~{Q$8d`e7vEF|z!>$v`CH}~&kkM3Sbomqgvlklk7l&@0 zTv@BB<<3S$2-;8H6g+;VRO9{3OfrrnxnGz94l8MM)IHcJUs#ngJ@akjn_YzG|5>J( zSQA?zjODSa4_wZJU+)}8$%S>)ZP6Df@osC=$dkK5>?B_v^P#T(F?8PP2z#2mY7L8i zI#>_pi=iL-OK98^ZQ-!IjwXDXC?Nl5%zkeC?yy5VZT;$eikjhj#)`sQv|6S&QNH!C zn)LXpH(!R+NfUF@os(kZr}uBSe0Tjy@!sd*ORj95#)i?``9HTlkGWqHsD2sv{rPr# zmxGogR4wrD{aS@jIyr62WrXU?U(1jihFhQW`t#qvYlx;oD`Lt>naZ{$+ka1WF_gS} z142Uu%Yt?dB!-B+T&BQ~>hnJBIf8sr1mdhQK)Zp5gwvA0j2vaInxzD+=%ZaLyU@4Z zwH{QLu#gAQZUC`>!dbwmR8hE~n$;|$Q6?+-ra+s1PdWUBlHREfrY!@fLQ3gqI^u$u z9=gJAt*iRq(}7ed=4!k&9c6H6BlUp2mT0FewLZqdmya3L-R%0_=D$yV9;%bSoc;3u!-lOi(O=&e=%vvO_}Fs1$`fR_GeItPtYs7A;1vm+ z@Tzd+DuY@{nf`6ItBB8diTq0>_OfKJ6qSnQ=UrG`16nIQ>HJnKUvav+_6Z@FzLPV? zsno^q!p_t&EAp#fnu?PULkPJv)0LS2-$BaOEdY>dNGaRrwKg3qdwRAV!$$l<2@tZ5 z6Bf5-hl~#0RxpAMbAWl5%6rbJlJ=zltNIgQ0ciP(m~cgNV#ojC?+5`$V~5$pFvWL6 zoXGg4Ov`a5UzfFL^%H&BH#lqCDSjBgPhIJys;Ey*)&h}AIjieH88n)lWwx{MPO_&b`5y-@@1nF6_K!64z ze~b?Qr4q~tA@H<`vuvmX&qPtzF*!QQ(Aci=bVw%3Fn__r0cO$Mkb(j-amngUjKiJ7ZBmnqqb+A6VJ0U5y|he5g9n~*QV4^N69dj zmw}i35i0*R_&={!*Tn&LP=`D#Xi7IM20!7|mxytiU1zw^B0D1^K#6jq(a`aN`)p!k zF5LmS1*70#w+zFV%`!f^l8PmmC8*a{u^SdY{H=z zG&&^yV%M6BgiFB_lPt-6(FUqN8il@{c31;6(tPOM<0Uh^LwrxxYE1tpZ1{hawvR! 
z96(+_)(@QhJ1doQ*^L66@%Rmv#&zv)@D&exbfbx~X}`k7Rz&*)Jm#k_lG4P*By<5N zNvoRp?E(FB!myQ7h4>@?W17XeCgIu2ckVuQ2&Lh2bRBk{iTly4zSBvmlGW6X`zk#_ z{7?Rn_P~x!TKu?q(GxW}H|H(i+6Dr~GAJ_JKjH9q0#h7e?~mEoZmCZf5O~6`z!Uqv;W%(Kt0^SLM_fTS^(@;zvCNyiO%-$W*dqrn4E;wNDFCruDmdwb*k z`4cv5+^Cb`SX_K*ySdK)a%kOL8zjx=>w7}I@?j47xmhU1q>?h6OZLF?Ei{zb!iUKm z9SvHv0-@Z!1xftnM>zL675XKL?k%AotxrF>CQmMPn#|*W@!_YGK--3XaeoO9e1ShF z_aRh(KKG;~Eu_c(KY;CevX-iq%7uY|uwg^9(6zG&GiIsze4MASOJs>W0f0`{k6{A| z^dar00zNQn*Mh-v!BxoOn#DeMT52-T-ee9}86qRn$v~JA+S5nPX5#^hID`hsJOd(6 z_)_35l+c=;5lwcm1cu5(gU)9?m!tWe8WG7rX@z|uYUjjeC_|>xz9eXe+D)o7@e7J> zF7t0wdtTRunMueF%ER>-Z4DQkwk9sC_Moppw2i1Uvl3>7AEFx>o zq&roDY9I2ahp}Q*ctS{N$3~$a3rI!V+K?iIPE1~2?aGN4b;rHfC(h^I@^`D}#Hl(8 z&(eWvu&c-Sj%jL+Ls*pa$j)a~&@Gyo(`^(j6^v^C@XmiC0IA2+I5MhPzn?xe^F6$u z8LipcuA$L3`%j-n?SxY$sj&ZcR9qkqv;-}5?b&^=kkoD7N1|9vSIi&xA&vg)O|a}EJIO~y(Bz+teEur-{C_;1cQl*-ANHe0RcS@3S*x^S$0$)niCrsp z>|J8-QKQt}YKu`{n^1ex)?Pu?E~-Wlqne^>J@@Z9=Xw6keNN7~b51^=_qeX>^B>(|pqn)sL#E-W{ySW0_T>DK4ksva4A;wz43B=8EJKVLg9>IZn3;|%jY zRA3-83s^MpYRu>t`MF!o*44E*JccRIYUz43IZ6;j1Q-*b&_T`|O3Ve3Vtzfc%?-H$ z$aOe9e|SnhDtD~PnG&X)5sMvWQ78YL>M{6i^#Pbj-<6%oY55^&(4&UwUsbMdrQoaa z{oE(-ipI6ozo&=0Nmnu&@XKUAhPd-Syu;6tTCwyactjn@txc6DlXs^>lfJ{;J#HeW zLkZ9fFsP5tE&aIkmfl2{-V@;Wgm5cjle*c;9}vwU!)u5E{k|kOocP^_^joGzoF%A=T)Y(kU_S{v9-Ppvi~O)#bElsV|B-goamiL0b{Tih7&qm;Fc;G%jG3 z?S4T8+pUHG-#@#TsKyu6YIU6xPBDZAbdn6oEf`vJZfTr-%-b#L>6t4@jLK`hBHJYB z6T!0`d40NZZp=LX9powC!-b8Z!5)Hl{`MG%VXX#{`7}EAErXKu*1RwW2Yv<-?J&Kt z^VK{-(YVHEPX!?jaTLWsM-M~s=MT0&;-MAf=DQUUE*yc-kj~dFiF@pY4RAn?74akG zf0cQRHSj};wS6p=`lGZ{X}L4{a|IG2P`CYJ7mga#+&*uDw>{O&lFe|K6D&hi`h)nK zQ4WRWt*t4Q&)YP8k^-lH{%Y;7UP10p?G}je)I_pJ$SwL`IC+MX^w>TxNO9g<{poje z;Cl0Ce&He$FT&}!u8YNp!lsX>E2J*9lA4k|p1B&Y;V&2z>#IPY-gze* zJmK4v(~-m6;;oJ;VhAs(NtutfxcG81S^NE5m1(ZL&UFgS+H&e$d6TonZE+cswS52g z(WB5!S(WOshZ&NE!rnvplLpz233}P9$@U!c?yK^e7e~MEaW0L=KA$lHTWQ;^FNwH6 z1a;lL)nCDBoI=S{M5@h=37*R>?kS+uZZ5q1Ux=C_M_fsm_VCA-N+e)Q{i&n6sXYM7 zv8@)zDMwVw%lKc_Y0t6k$Fw`z`r4NF__V9#9x{Maj7UY9mTNwTSts!Hzv4<7W*YiQ 
zxin~8N>-zzl9bV8#@1`&x~#n1ncpO}t?b<;aImTFi~+kg)q_2HWutLNpWQBh#ZA^# zykbm)q3u)cqga4ydS{b`k}(2a0{XD)diLCg(yOPVgCbcY-Lr9dElIi8)xcr-O|Er< zqdCYjv=x=N{ic~H75$&J*dUWlZCzcwT7uVnt6T3?tv-u<@7@yn>xw@=Z|5$siCT%? z?rvQ(k0#UI?P4CE)UY4{3amZxjFS zOu3eut834nQlY0KG4?LqXCVtU<&r>v%#dv*^HSfg|J8)WgSUWL?B$v>V=UKWY!jg$9A~p>ZRVRe zX_kDAA2yVwi)hRcvV_oHK(PJ@%y7TtuCI%s%WLcLu?BW)YfQ{W58K|)b~h7-K2Q`n z5xiun`?2*amM-N_k`uu35Ts?P%QmML6E!7&vS|-!+EpR%24VAJt4p;_E1z-g65coab|RT=&|cii zfWDih%gb?=y%?rU{4D@P35yb+x(NktZ`0IF|4w_xUvXHc(O?lSAw5jbT_vY z^Ow(znBWp$OvL+PpR3RecLJ?lj||X`{_&qR2Hz5inal0!mz-F=$ksg&ns=#a(Mlxj z<-~NOcXaQAwxuRW>1Fg zusjmrsN$zfrLx5@sX_7!3nerjlf!TTp+yaf4Yc9oEO+9Rkogk~RQ75)zyY2#p62QE z>SXm>qRM0%<{a2DivBy}sR`YaPiWyBD#;y1se@PbaUhovo)lj(^ z*B)}v8|RkfbWp~~`I4FhuB~1{@~)+EzSyQ0mNO)ebY-9HQjUQ|8l!qPlJ0WO_aa8h5Kfr#Z`Gz zS+eY6oiJt?gNx|s`8o>|lhqC1#A1GTH44zd?al`R#1FVg-URwQqLOqY*~)Y4bW&K9_E<=>%I0bP>jVe{xoyRO_6kO)7{fD74L%UeB!;*Xy|9)4 zE_b8Z*aiuGG|X*fFRYMH^g%VqYo#gLg2bM>@9s?))0-5@!D73Zxz;Do|W=n zn7pOmpvNhqLNToJIY^5l3YR+xW!nDHkxO_ZZTQe01xz7nNBn?CgPMfPiduz==D3Qu z3SN(~$m!Ngi4c6!C`b;Tpx0bPLHwTP8Ch!BW)&SshgMC$rR9aR4R9wTXBpVTOi>uo zCT$OWH~zTdaGD1%YfA&bl+U>Z0_?XlKW74b%LEP^a#e8lNSumVX=Jg)Bnz`L8I?x@ zbg)LA(zFC?S80m|mU7?r(fXV;IbI&CMni+X!_@MHUNVK{8;%db+s1L3rfh}p=hjwN z%`GfMb8eB6=Mf*hk1Rbghpx}SQAgfYwWYZ zhJI6NgExP_%AQ-v3yHuYz#mI!^e9x~M8l0xR5E0xm-KS>*S-2Tix$U!-#qW(cc^!} zp0@Uz%ObMRS5A9B>-7kzwfWT%5p4lgcnAG!-R-w$v7Vu`dsqHHg1hT18lFdn(lXWbY{2Fw|W-WYI+;^4;*e5+#jS1K+P~{Vpcr?`zf- zUM6N-bJSd1oX^A?hW`4rGvxWy?B(@oa_H&$R`kt!=&xQ2pVQ)A-k|Yd4x7D(O*EAa zgj-2u(A3;wVyx1Ifx)M7FZtrRzz*5d(z)E&1W~<{0#uhzo~CrdYr!(;F#Qkud^HX~}b>X4#+GXM@sRUQBxGZ-V~302D}hM79p&%Fje@9ViY=dI4dWo+2P|2{*?!ZXp0obK3W3SK`Wo8<^1Ipz+r zhci`pbqKd%-?tW%Qf~8<3Fs`s^y8ohZ~K-;b?e1Up~V{lTg}-``qryIGx*>Ct+3r3*xYnqjIA-&g%WpC`QdXIR4l+HRKHVh^H<=e#eT{Wy5{UysJ`WJzH0!eJnpkzi zii9UOQm%(@*2m`AuFk&2oAu1@^-ai{B!;@S^xl(BKRmmbDRu5R4t&grPKfbiEa0gE zeM4q{f;lY4sZ<@O4j_n%=H?6)Yx*g_a^6l^%KjnylztcZvxR=(*4BxoO;w4x%kwQx zku>j7;i{rL@h1!UGZ`k*)hnUtQ$>9eML9VV13d?cVr+$-<;y3hyian4{T|`^(-H&O 
zE0O7PE^hH=DQMean@Va+^V#;Vd$Xqz@mUtq`PU;3+v4rU=~Od80B8Y5 z19|1wzak#hq0YBYyfyE>v%FQu@D+M@>YM~`ZrBF`&9%=<)b(6$qwx7L31e@({Ekxo z7qirVHJ5cXYjI?`O;5(AHoDRwq<9K^E_pAQO`eCF6lcSGHZ;>W zaUgW{BO1u8rp7VgI+qPgX>W^}nw!`@F~S&pc`dbuT%Kz`2@IAl)P76riG38HL^7?< zb}zaQK>gmH*J|edk5NkWuj|<^E-UY#b*(G#_ogb+ddGSE4*A8&o^*i|2JkjPw$n$i zywqX{hZ`0lfwIbMBQQf+NEk47qs##bAaqc$cO zgVcu3H@^=g>XM`T>>GP}dREO`bxq+^K4t0QpGsp?T&SJsT!>nz@@7_l0Jasp_5Eic zP#8!R{!&k$-ZXx}EOP)jU)DS!u^4eljrZc_U}m4&L_`fen4vdceAi-|WPzBOu&|tV zsyn%(ojy}JjPMPyx>ovCq??QV#)6Se4HVw=>gk#VPt0p2cgWU#nYQ9DHM3tIvsX+I zqrDQEzI~S(gOnoqAlaX5=J84wZ;|C~VvmA5fery~Ny)fjXf{-&@T)2?cd!=kG|KUA z!hU84L6FW$ukjkp4#>#_P(tPDWrQ8{@mwi)i@lWSylEyPk>FyGg-C>6Z|RJ7g5`rg zm!>1)pplF@RByxeY%q56(+M#Za7&Chux3w~WwQABRp~j*f!%riVe=e8@)^5B{KITr z)4F-!sGk37nsIQ@D%#9{SLHY}=M{dj*^u0=54{|b8pA+c#RKA_DYE;@Tgv+lt>`6y zx0+F6I?#w!>CAM(TnV?epcQzab5(K+Ve&kQ=$y6BqKXup?`fc%tLS zmDP#I<9DU8kEx-+jW%S#(dV?Ix)&_Kjq5S->H7qq&&nbMgi7WAd%B$Y->GPt4jsk<)S4)3Ib;mak56Unb@Qn?rw+9pc(O{WP?)7vAG?#dnmn_HM zU2CsTAnbnF{OdICXf7zRyb{=`;BrxCmy`8htpOiC-%bHObj)AXSv|(Oa&`40#&xdq zBv8ZF3DfscP5Y3|mGbz?KNqmKx^**WwI4}2UB+|1A65$aQyJ+;xCaJ1<;=Ja-$$gPZF`&DYa1Nk6?$9yiBxXlj1$Bn1 zG}lTnbH;gbeyWZii?*xbt(YoEOT0d#Utflazrk-L%(q`0f46@>@rg?Apz82Krc_Uz zL+rDZL(Q4_hqx~rql4MLgr>gP?Q~8AA`@6LJZsKCsJ{{&ulNK{!o-AEgc+eQ#}mxy zUS&g$Y(#PbOXHc}I4el$DjuC9jrH}N$nSk)*g)>7CXFDUc*5u!^h!Q@4W_1Lnt~!@ zP$O4Ul?L$U>tmR1^|s!J-Lw?)w~Sm_>0f15hZvSO=*htWEXn|?@!{@-|gRI86TYa;?1YI2@EJaLg z$-j6J0bPxB!4^KkAKcCDFL^sM$9Y<;1uj2vH)Ly9sUa6bp0b#E>nH-v{h#;=n3SOx zKkClv#SabS!;LO0!;(`gce79iHa$AMazfiO5I9oRyqyq77E7)7Va_y;(|RWeB*ijh z)trjV_rkhoAYP{IUVHodoJHkagaDVynO_iFP0Y7qKimcS>l>eTV^aG)OeKQWDzmth zvskg1y35CfVRddRQCU`CEb8{rg#JQv;*3-CEs*`{kc|iyHt(~tZY9LoyqTAmXVuJE z3?y_4+A@Hw6F@1M#&E{^mtWLgUQZ!!D7eL%Jt)Ax7LU6P~FEwt%a_K`~h!8(BtR%JQw6KC7Cp;q)4 zFay-OfGbMHUZVX9@*OWKT9z#`2axs(u%RbC4^n+(n<8dxt)WkKbmX5neQ8C!%OQot zOAWycEuYBSvRi|i$P|mhDH=~QL3qHg0~N%5-=rwzn%zs(rv~BCeBnqDj&MEeW->L6 z_E(?yjygW-+={@6ri zWDV=Zn3;Zh;eq+J&`cNF9pA#{I?ftc<(e*H(Q|Hr=OgZOD97~%2VE&J8dSR0jEBjE 
zRq>X_f#ESQyv>*eBU>Mjww@vdM+6cCdPyn=l!QjX7DspsP+DiGUVBuq;UKo+v5v#KHCD{lmD{+0L1`fe|i;*!4<_AaokeNukDKTQcxd<4&( zf1F76#ATR8?-LN78LoW_t&6^W=k92YSNHFaHGC9rp6grxvwu#QDC;ZWE{}h z1ce0q9B*%LD+~4R0%mN1vESZ7^=58A1EjJBK0f{Yv~vvmj`|C5qZBWFxcN6*z3b1C zTprSB?nV|F80@jgAg-{3CUBSXwgLLL{O$Hk`8l!VAG_XrhSk1*oYHE~Y|xa)1HSa- z=aK>il;ACHb92-GKdvNE-#(Pd-Fk|?`_VmYK zx=Kt02ws_J&)`@qso4d65uBO6xhv#bBrVS_1>6KU%i*TYMiit z0Q{KlQ}kJY2``Y|Ml34ahmxl%dS<%)2O8!4sSE}zuaSdu?5@E);GD9`=~ZS+q&_Ob zh#3oI^aX(^5bLS^q~rZ_{&23a?A+qsxT2=n`we;(2=b{1FdG*wcFE-UbfpIf?ccQk zK6GWYEd+`EPO2R=O{M#ys7+83XBx3!1V~umChBeZseVoq%En|~5qD35Lq9$RW#a;j zGp%+IYO4!=dZn2G)ul)jE_XQ_UcH%336(ZJ(##;LWgoe+tL@p-0}w?>dG)ENnV#nlJ@b7O^^)2&p|T(8l;d@D zJ9OPzZ9{}l9-YOoarpa72}@1hQ2U%yLgT*3{8MM4_M6L*yA?kx?mEre#?8(Tnal>p z5g?k3{q=TY0dW(lx}`DXDrR-_6HrmCD9634PvhKzP%PXwkptdGSrrf)wLp!^m|1Vb zlYu1jHKS*A;BzbgJdKX#nDwdVD(@deIT2$Z%DUn;(d~o9)6I$Lqu2pWV=(ncct7+96p|gsm|QY+rtNq)^Z_NZ~$xx|UA7NL{>t z_5N;e?k{}aIm_h5s~3_frqWN88EqkqL2ThZzYK!Ihuv0uZqoLImTh5m&wDy0R`&kf zw(ViPA}xQ=9r)s0Q07Yijt7GUP9OAJ#_rWFeXD1v#m&}9i1e?X)p?;J+JvfG6mM^l zvy*#rFmFIR&F;*;66%cPl71d~mOJLQ32=eV_So_^3LEMMgO;IN@R(iS1OQ?A~DoBh^M{6zqoXlL8~Y~lcBSl`qU=y|oboTcc5b3xjZ?g0lM zqPsg-0@UivpZDWHOmQP8n_88~6Kh-*BNvO;@Lyms8_InfDvz*p^II8g-t|Mb_a`Q@;l5q$24l(%>6YF_Su`oIe8EoXg z$1Z9*@G{!vkRmM9NLh~S9x}ZP?xIS6Kf5H#l(Nl*v1Qd|tT4~!nZ-oYrH8+5*fl8lUH?tZ z7F0qDz7qwCjAOSJJYO*<2dl>3!38*_5?3*0>6JE_Jpf7KIm(sf=! zA6p&6Tn~SEAuQWh47=P%PXl?#&iAQa-0zIslaw*l^o+7XwLTuR7Lxj68DdS@u(JP` z%2mEgmPNQr^6EhS+;i{R-uB6h=b?~N5o>kkK~vSwy!J0&Pg3h?YlqpVb8t-iStE%2 z^$xDo=_k7CZjf0Yt-%I!;p1?UxR|JOp#l4rgR2c$_IFh-XLHwwIyOUOYp1nK61*JX zNM}$DMvlU`*_uO=k1mBSf?VLSK*pz3FQ$)Qvt92jGW=)j^r^-~wVU|-di|)UiQeL? 
zU)1lF$-Nhw9^&7Bu=jTS`|ap=TypcnmGOr9etDWrZlRlu{iylz$o3xI&0OuRuy>4q zcX!3-qz(UqvMizAt81aFUxp!DhKEN-5qL?vm%RGsE8i*m+&>`HZl!13^%JrPz1)2o z&FdJuH)h|BC5WV`8>M^HbiDFfJL|KT)WUh8MR&?&FITr0Px1`cs?vd$yS($Rhz#zH z6dR}r-$^2(HMZ`(XGUE)Bi*dBdSb7OGRx#{d^OXqLiBFM{&_=C@XfDZ?tfNK@-TyPXtfU`Ao?XZ1%}4jGTX)8DzZc~o zVfxSH$n@9u^KP>T0`}?$%r+bWI{dNX8kBzB= ztxmD>5bkojZu9x74M}-A97}KF2;hc*EHXoTNzDK~pX{F~m^y~Hq$EZ|_PF+B>hQ5Y z^Km}HwfpH&@R|=WZbPIr>2VMk#3Cu!tJ)d3xE_K>O1l#XVl6`dTy?)nW{DMY3^n

s0dVmWOM$3di(dr`!)Z5HBHw6;Ju%w_^q#ho4_Et((u%)j#wzaE23oVCsw{$RNb{Kc<)?rYcMuRMNSN+vF#Z2=UB zpy>bq7yn&YhHa6AkO90B_+y{BbjJJ_zy57Dzq{5$^xJG{o)#&nT(WxikM4c&@3$24 z029!k`0}S4G62wX`sz2nb?By{@JMQDxGMO@$=P>*@e8alGbkp5B#L6ccgy4N{_a2Z z4}Ru98N$@8TQy=Z>BDEw4`Y~e2^@{lQo{Ft-}ij(__e<8Z*FcxWIv2go;_v4JMK&fA8ss)|X|qxW2w^R!2`>JU>1;I&NEs>U!vNh@4C9y|s3VG3Eq{e*8!N*y-u% ztLy8Wvk+>n>$Z94op-+Z&WBf5S4!c@@zIyQ@I}hmDdJoVxjEBp?#$*{BF6Y*Kk={s zzyIpL%cCIURECq&3$4JQJ`5YJwW~ZLj^h|oET!bc)-;GHciU0^cv=5=`y5#=PETs* zbBc&kN-4r6(>=9ybjgsZAz3DY#ioP>~2&n}!&IN)6$oFy|eDngW$k|sCPMq-R} z7&50!L=zVR)TErc0}?VP5y^%)XG_{D%goFYa~`IU!qm2H+qOzm9EX4HU-?Ua8x6sztjEr{w#`qwz|cQ3TW1D`0$Sm ze+6Nm7Wa5K!}O|6!$wKgJCU{yyeDf_?#t*}``zDhx3;G>zOlsg6eG)=@jV+=~EQWEDpAOE!Y zfyo~2tNU^J5ZhDdoafwcU%mO_Ka9f*1DzELg9dD{rU--WlTt>JAw(!S#WBaBaJsHO zqn4k(J^k#qeOPHQa1EGhage4*x z$R*EunF1^XB7}$ngjntlc%HSCQe?DJ3QIIfJEgTC7G^H9TwsgtW3>Syg1UnTGUt!r z0mS)C5s8Qb9KuupeN}0NJOxV0Dndk3z#`TdM2X|X%!ELkPz0F(08z=TazN0(O?rz( z3w;m^LT-wM!J(@AukU?>*&w+%B(guaid7N~YXT<5hQ9E|-Ss z&4&*~8x_jNN4q4qWB()Z)Ti?dS7;c)Pck6bo~ool?ZrYdQ*JVHdPZ3*$w{RazM z?Y7%rdh)?EjH}fur9i+|o0HSy)oL~7X@6@Jjn;?8U23E$+yFaN!!UMNhxpCC{!YFe z)}OD=-n71HDjQOKRhrMm=LyH>58nKNZ~aHl?|<>=gTM2+bNWLM@BQ_k{L2~J>wT*0 z+M>J~uG_Yq!Aoncw^nQ2_dR1_=DMz9j5){3xp@`XP#tr!-Z4OoQAz=prmD%@tyk_I z$~n%Wz}xaAFjcj0+ja((*QQA&ccGvChzJo1*S-qVWURfoxaj+Szu(W#pqz6- z8prWywL-)h!w?3Xkx>9dH4K9>rn2sEI3TK{D!;J#{r3mD4PfJQKE6Ep%D?%)`HTPC z|JzRkA$ynF+A3c~DI5l!RTp#uUbdq#O;DckK!w_BC{s1@ZH7PG_J78I>$?Boo7-{M zJ!yCm+Ryg5L`%IneV#7?>bl(SS7(jvWUV*d^L1B*Bvh{Dvkc+W-Cz53|I3uhb&;rC zMYU!&wJ}P~9S#7*7@c#A#UgT=5i-d#FC6D>loC`*rIK^b07!rqjdO0CVp}h~_nJtP z_O_bgc4g-|%f z7;d&Vh~g`|-|rF3`T4m{=jU&K>g`|om0z*iv~3$g*uH!bjndxK25YfK*NdZd zr0`_3g~FGYm)kgP(|CG%ddz0Ux=#md?fj)4rg6L7647f99}YuIDdn8Uaa=8y%jI&v z-=>s~+vUmnXl{24C+BAUALpDAF@&U)vc>~|2=;xy3o*qAqO00`>GSFRcYpTf)zg#= zuq>8M+t%Ya9(G$qY1=xbgrcWuQcA7Y>lovHzt`3%rARd?g_Os(ZJM?kh9Si0eJ#VT z+jl1?C+qckroD#{cH4uo&C$tmLE3c(W6W}~hQfWkxPd8fYwrv@$Qp!5b z%;PxDOYpuwl<`(;>wRU7k1?hkuCK41vky)l8e>8TB4ys86Oq=s;N6zLlrl@NX44_9 
z@azlv<-5PoO<`GA#dr;nQXr#qW-g+&p21+2VvI^tn&&S75WeH4^nKsw=$u1TMC9wn zIX5Swg>FCV74A+=DKiY90B1mjh_t3@n&we-%H^XcMfq4|3jmfVmh8P(T9;B%$%s-_ zmDYL~hEfV648zd({ftgktHqpMHw=T;dIkvq;AZN!`|HiFM@0`FyvEZ1=|A?be)VgA z|DDI*62`V|iS&5rymsd&r?qulV1^s5HVSo0nFPG|T9K*zraxTmu6KuhjvlbVzS2E zyF6>XOVfDW1!K&~$;shx`0&XSFlcI@OK}7lOYNI4KKRVr>xXd|KG-~Ye0Ykg-tBgW z!$DhLRaM`QF~+uC#-R}AlFFa^(?2my)8G1;pCk=PZo^Ft&+4i__)py){rPU0vcw~U zuC_jbVx4tY2vl(-9{{j`gWs~F|JdZ`1*21_hzbL<5FnBjWlXu=*T-M@wVym0{`0NA zK&4%`nbr@#arVEw8>VK|-O!T{VeMm7L| zEFi*)7=EALCK5#vajY_OR7vAdfh(HT@3QMZD#DTtkV0bSjL4`9Qq#Yf+$m_Dy!T~{ ziG`@Ki`$zIKmDmMDc1ksTi?75X%HY$p0tg+@`lMC`f<5xowd)dZuVU_0l#r^?u{|V z<&>Bz9Mag zY5d|_U%c>(uYcRV+H5yhH@`If^6Bx({qyrh<*l;-P@o7&$_W%CE~phvVuBfP_~v^b zTpqWp<91+U@+d|3Ly)YR%2!myn7sBAMoo#C6;UD33Z1nf1mU=@mWS;QlC%q-kWvmH z2|>7I5i*JpWe7t^QClUX)0B)csbm%b;w<8{66Bn7L1yM6%rIN7h#(daU=HyvYA>Y( zGr60JQc5Xd0ue4CIZ5Hdft6BOAmkuYhzJZgZ8Gsb9Nf}RQ(+NNhAN#gdUR2WDA7*2 z#$slJ)M(AQIdS~@4}9vZ&VN7u==IU3HSTLz(bBnv7JcbP-Nso7+yhu-4*Bb1B$=Px-6UZhh1K z&|~-6m+{kU(|^AC)iLw7Ip@$X;y$w-&}zwPm@FD0CJjZRQK}+`Ng(kJQyueun8ebF zFDlD~CMz%w^0Ze*b4ikNN;Hf&hLoU23ZpB+k*hxe`fu)1FwvNW3NUI^$^)`eO3&x0 zX^1BvAn&bNL;c)bs z?Ar;h@zv;G{NiW&Uz+r!f?%>)5nCsmRES#Xs8`V~PY%DnDCvW%uO^&7iN%P%M^hrBPv5w5)BTj07c(vR{@C19k&I6XE`e= z5=)|qEs-M7C`<}OASaodW}0DkS;8nnSs)1|#LScdg%KemlLk^V>mMK)LZZZAw6X#` z^s!{;ob#TUb?Q*Gb&j+yF%gSq(S&(^PR&N0z%Vl)e*2qwhN)D_03=8b#HT~R37sbS zgZ123)&|~sGbD&PVyQGi5=8o4_8k0YRihehQQ43bh?2F{2RV4?tVvjyv5+d7xQG%2 zpg{kO`X6Qx!U76FBPD`>VxiQi4~Pmxi6Anf07zlZMkhhdqLYwF(Ig5IK?%P}*O>o+ zB2-ENQbhoe0B9Z~i3>|v@byj}u*;D-vOdNMp=6NTX}tR8S1U)0cAXd`)2eA9hh@7O z%=G?#kBU$$uZ#xdSXSB^U?lNQP~~atw6V8wvH&;JpnSdC?8gvaJGp#tbTUl6C4Bkv zdfluTsmpyo9gdDxuB9R}QVxxHk4O4wI>hbO^@G<>$S;kqBTp}eAjR+Bd1oA7ZnN)$ zj>F5F{)*Ltbh}n@3I{e1)6HZnSGTPsT32&W@N(kp;DU|xhxBxFOt1fty?2BaVBsi^ zEm=EV>DOb&&E~@w^Z+0K(z~DiSH3U&qgtC?wu@DH;M!60)vIo~I3cAr{XvL5YV@}2 zCrK1b5D`*LWTy}Xpsw4^e!roqZQDK!C1qm_C}t_l6Hj3p$EB;BGKR=jcE}M4eRjLu zj)=6jjcZTa<2=Qb!w?bxw0;EuF-Gm|G)<;jtldH><-k^}${9aR6Do3*4gJ{qs+1CQ 
zuBs~LtOO8|0NH>ki6Pr!V##NVcSZ!oeG*9lFY(6Q&L$ zh){%4*Py|XIj$G($i*0`@2A6#?Rq2q8}j_~c)QuZ_u}yE@M60N&Ch-IFIr!@dZDZ% zqH_>KqG^4M8US+6wOzzEBI3&Xs=@-RC(Zxx{qB!9)AOVLTWV~GXsmN9F{v{4FgJ{Hl#R99#aMuWGRWzH_w?mk^G@ualS})8r1R_vI7bRc)&NsgE!Fy9(L)#>)APRI*Lhpi`(AnT3t@Vq}`8{r1p72bmpP2abdyXGH1qU{K;T9lCuV25n8<*fEU# zz^n5&!#IBPJKuT!^6K-S|KeAFi;vatV^WOV$>~o2VP0o3@+mOQ5dYyCV`=OL_Qu!B$jz}48Uu`y5w>P!7rzc0D z!22&=J$&%UyE^B5==O#vrpRR?(p(_qkaQ`fgfP~%Ujjm7LjmWkh?0o+j?Q;C`((I; zk*$u0j!H3FhrN<_rgZo1A@-aBWPM{?T(MnkIx?-4*h=GK>{N3*0DiNK!k|mw3%h%{cy03wAKI-076vC zR*}Wk4I`tXoTGI*fkw@WSR`|bN{SZ2h`QtqGG)quIcMgKQiRJ8cuE;r5b+36Ba28~ z*OL@xnf--kpaOu0VQ|`5YwOzI{gVRgbV{kRx|C8kjq9igkW$`z_{cGJ z7>50{ubZZy`g!NxwsoF@QM!ogr+zzhBZrWK^)`e+MB8ECjiX<-fN?kM*3F`6&*D^^ zZZuM3YGc z=UfsK#*!t3X1RbYG3S({Q^sk9n7oH=zhQ-Ki8l0d&LVJu(q48eG&hImq$PH!ha8s$sAsi z8d7bN1z(Pkls7i^1joJMHN_e%2)lh)~lPB-+BG|uXgPhpI?4|UOe>U zf%3?P`?P86C88ip1g%7}G6(}@YbXGj2*Fqra3Yb>ye5ENe~v0 zMNhw9=bhiKEOILzefw@Zo_uxL4QY!&XF9(FbP$V z3#SA+^t}!I_E*0A@X@26{)a!e(et)>dDjR``zyTy^GfStEbN`5M^Gs3$IPK-X|0WQi`8DzVfAHH@uG1qn7RQ1$%SI=K) zXV=F^DNL7_m!kCU7k}xkx88d3;>D{MFW-Fd+We8u?anOMjWIHVh%hmO#3>xD*Y__@ zKm72+&6pO>)vkW{;I)!+H;k)AI|tj6(KA)Nl#-@V5N0|FGcW1}a^jTt`@Pi$QL!jQ z)S71#A*Gaa4i%-6yRNIOBc;e_E=2%JDGY^k0j)_05kc^uFv=00neunSVGsadF4icx z6XD+d;7ofN#t=eCbDW=t+I5{p? 
z5q&7f1ckKIjn@>K(PRg_%=^982dvKfZ@)hL$`Yf4dpX={e6j5R+C%;aTDQhhKiaBU zzqffKP3ikrADq^Q+hPja41@C~#t4{%P!JPvA*EV>f!jW0#~!M2S5EdJE3xQ$2}ptn zsuYPa71koc49aLh$~g`p3ST?tAfQOG#)yal5)u5!MqepaWp%(ZVUPk!OuKrKA^~#iinf8eO0A`ZO#qQ+;B&jLXlf^@Ey|?6&7G{z1F>`2?Yg z>j|zV=O8b9zc(!mc5&zcRYxT2G>^DIc@)f6L7TXUD3$K#SQP zjF5;34UtyJB891-QRsmbTGl9XCz1hRlDl8oI*kH2{|zwRd7k_?G+78hCJDKi3slESlzL=jFY&DX6hQE73;S^!bRq)I8AnSQ_av!B%Oiz0_qlwlF> zxhN2aDw2A^;WbqG%0(luOB?5shXdoWFMr07Z~N6+k6DD$k^I zP9zzK7a1!EYTrNe&fD*xdAWr%r-K}5Q) zt9`Xtt(i+s;bd`rD!x-~a{GuJ~Fvc;(hZkoTtHmL2;n)tjTO2QThXV>7A0JbZ zh4OI<+q8>eXxoLxx*Nh6`d)BDr>A*uIE)$k{SJm#w|d$8hl^%?Qr>%ob(Ig04jFku z+#cXMJY6~MgS$lSoqO`^g)!>#^3tVr2;F((>Gd~%>56Z31?Ge>Bd1*n3pP#_887QG60A{%oq`=G;rZ5JPy36BT@=DCiT$qh9ED}>P zIk$_&?8KdOCIGFqF&apTh}OnAhe|OBFuTebl$-4)Wl>5wV=bvl8HcK_8@oO#!ox5) z=Tb^~u?itHWE$UKinEc3DoO!qtz%9l#?@+NoW0#{>bhoTh3M}5sgL^nqORB7NUn?Z9fu_S=V0U#ADw9~py<$F+mpCmhPtj3kXckWj^pFUkH>K| zK*lPOA_&4r##T*xa&jhu%p78zImBG1aT^tx| zisz!HSf((wt9pIhbh{8jICQ&y*eWGPAp-&sp-!bJrIyR3Qf3OFl+rW{A}V>ZR?oWF zVdzRJbzNI)@1I|klDckx=yoB5Y1~iKG>+rbtLNt@r+JL8uCJTA?z(P$bTsQdXIq|9 z3V~wgt{Y~gVI0SqxJ^VkrP*9-_ShjJVVR>Tj*pLzkB`6hoo{z|h(lN{mTgtnbuE(m zuHz7}sFG(%E|b->k4?_`&dY^x7KS2XUDxA&YMMr}4C6$Aq)_Lga#7CM)!G?*y?Hf; zph0TXm9qP*SNr|`;@+i*y!YOBlvUq;|DEgWYghW$chAm`P7?C3Z8pByym9$pl=k8I zcP|zT0C;rYmlA*R7yn)^VI2EUz4ev4uGdHQ`Lid^=GEdvlkc{JJ6}?5a$(UbMLz8I zqFdN+&yOxxp1s?{nr565PFr znlF#pjnh?C)iermJ^+QHypJ*NcDq4oB0`b%dg+|AXog{!ub;V-6cOeO*zS@!y|@6bGr&boma4+t>=Kvh*U3CvfTNh?M$ zgt6qqIBd6jw3-YlrAp%6Zl{zQhCvLjRx4)6oH@(bPg2TivE1&i`$Jb*XT3Iyg&`AS zM1TPYMD*Tkt7A;DM3}S3M0@WMC5*{=uffEzFCh)nxLPcg(z8UPKu#ejYqD_P_e4}X z2i~Dpecz`c1t!r}kfu<&!(>1yP#P^rIy*UPjH4pXx!uq`fAw;R;e36(JM7Dz6PMGo z~vjQV<2|`zcVlXm9bqc1-K|w+G?wEZ8d<@$q*@Sm;PcR%!h=yq|Y%SIJ}bc|!acywWDbYTMLx}q~z756q?P;2|g-#Gq_ ze@|Zf!gTiC`yxl>$<4)&m-a-eV=aMTPid>iOvVzl@CVEg<&*kvU7x@MpVTMyd!Up6 z0iu9lnfH+bTykM*bQ**fHIGwwfW;X($0_reulL(<23g) zr|ZR?$zTW}g!O9q-h1y&+SGL|g+(Nm=qk%Gg?@PB!NUg^mr%Im{La_Ed3JeGf4+?| 
z0*doGmvDM{-maE?-z%~)$LBAf+?HOux*x_5K1f>G?>YyRb{Iy1xH!GMz3GGUFS=wg zZMV1mVc%Bma=EmXU$#w*QwWpQ=JTKZ+;4p28~4sHK6wBAl+y9~MD9#z?m~u@QX-6q z%GtJUwa4Xx^0a^X;?-@+dv7pGNa@XokGQ}r?q+6M>5t ztci><%y;hXA5p~t07S%AHl-v<0ANl&0Dw{oWY9=y%EnkOk`u=!8%Os@bql>mD_G}P|2h;O7KP>9jy`Za5zlUFxzXA zR?6ts$FkDT;`w8EYio_S~vEBKuig&h5k|a`PZgjXCtNPgiR(zl$<`O(&sdt zbySo8|NaL8A|L|N4FWQxJCx4RgV7<~-5t^$Lt1ilNDQPA1Ox%4V*+nl8DoTW{r35t z^W8u9f7{u)@0~qg*Ymm_*H>;g8|Az5FtdM)NGGs5thP$$juqG(EvF;w@ZOV{OeYXV zm5oeviEYw;Ez4Jw(xstCsltiPu1=Jnh$9S;+lGqxx+G+TI&4Ik%=>+1-|PnRBpDFc2tHLkeSxP?+ot-w^d-0boIDBqD$&Erqh*C zHV1mYtvk9lMtrj1gTC6Sq#g^=v- ztMz>Zc%T!M`i%br<*$4rS@H}D5WBM`NCGEvC{djhkM&(abnNpFK-s}09~KTJ_Iy^^ z1S02$=mdT&ep>bsPZl6+bS#f;^4pIE>Vsz4GaO&N2B=7}|6`34c1{!+70iMfcCMX_ zd++u!3opJLS#aJ>iMT%Of=j1I;lDw_SsCSBikj!eelDMVd+z9rHqv6reFz`G!JIO)cA<0z&Ka^%p%KzkX|1AJ@Oh{g#bCO%-Q9Wp3{V z{6lUO2>&Wub>1(IoXb=OJg6_MT`j1wgw3Iwt9wNk$Uo=>%lw<}Lh&~s4caYT9!sSk zw(d_xju^x{wzR!_%rtKg8%E-)INi1aQIKM7OLgsj-b4>1Ux{$;4{PCCxqXF_3=$E>lU+WsC9zh8u>U7W-KOKNiJA1$@l?B zyvO>diy*5v{qt;`31)C;Wkn_u*%aoWpx9_c(Uxf9U~afd0^()HF%nn61VoMBtL2}K zTwnKwPf7f|;$b0>m5d0ZcuFz)I`oFYFM?D^T!^t*y3z4SF%Q>B7J#LLgB9DwzwJ8w z6aZk&@&vPfd_Boq@$B^m+&p5lifuXk_I#Kx578_~Dw3*H_0>7vlRCSep**qH7syQY zU2sI_HqHtgu${9&d^EWt@?j@Wo%eoOGkZF*u+D_rAWR0o?cdGN0hOPreeIHt3P6`>?H&|@!Fxf^NtI1*epa~tTm<9 zcs40;X)UY%5luZDrPXUN*$0wDmXQ>eG?QzoERi9NhBQ5VmR^6abQ-oF%jbGBoUx-^ z^K~-5%0UXCH#uNXqV`_klm290p;0zW4Le2y`={*tj6^4zm2`%4U9$1Bs(W35O{@HV z?xlm83Vs=pQgIQJWs#FjRhIpSZy^p2_?w5HLrzXV7`|^XO;-p;kK0)C{CmDOfa$Ys zHxmBYY;hDYTFPdjRO zz3c4f>v!*+v`xQ${BO4{{sogSI|uHdafLkZ-12iP*Ie~i;u`S+UYk&#Neo?1%Q>&x z%fhtTpdRCEX7kFI=U-ifAd93->et*2LB`EXe&$qL8q4QldXTXn5Vib#7?foUdMZvl z^hH{76WmewjuZyXH>Tn7-cN1YK*xs{R5|roY`_lvZ-?Et&BMASy_J8K^}zUjc-i5N z97B1aOS+5c>Bc6fyRIe0Mkm$y6MyvHl<3x6 zgPZFPY&uTg>X7p?RXdXCa_wqSY1yVYAw)T7#=Pjq*4%TuCbjOz#>OP}>yd`K;0;vI z=6}7~z{VvP#Nvbw(`y|7plA32zGOkj!UhEBTo)wAM7^nKc@~pjOlb z&=szy_}k>FvRK4clbkk0H0&G{-n=-RH4SbWQ$%XZkbb8F(298MH(22i$xFw%*dBai 
zmsBtmG{crN2V>hl=hwo{6B(nvWjX4>ie!=wl~s&aNauzuKv}n`i#l-uwt^W6!er0J zV)p88Yzh|)2kC=HRWi6@`b^h~pa18}F zHztI6$d7&0Q}r0{0Y|T!hT9Q(c&PtypIC6#`^2D4rJv_kU(zSlclQ_38DSk1T+SG= zz5ZiBTC^%}TfsdPw4}0OTDHIF$jkM@m=HP9;%NC(CH7JCu2K6SJ6ij{D9q*Noh}=H z^ZwCQzmQk7kF}fbxA}!-3HBC67*t?t;pkVy)fWj#$wNtK5dj?x0E6yu^PirO~~L*sd-3PP-v)s`^O?MwSpGm04I!% zS5Iq{8nu?Ubz<#jF4ojE>LtAD+_*IMM&V$g-Vm})V;isx%SubrhRe8$(z#7x+!+hW zxr&}%{?0{>MXmm%{=ogvUJLG}5?Z~x=@9LXc!mTP#r-8eJyOhR)tO;TZtV9bdV#jq zByw?&_8}));`p`ly8NJ(j_TL1wM(SoCO^afF^vU_xGFbie%;j7^uR?r*SI*T%QWx+mUDhmjn8wW>ggi zS*lnkIf=kAsWUa-OMeC>NyI|g2v}*^6xkX{q$h^RhQ)h* zbN7;xV=L&MvtNFgDGs*S;4#g^V)Z7S93j%+(AK)?JGpMDW~Bb8mwD&QKG%aY!9WoT zV5U!}c7>mMTToMiPN5EUl(W9yWX2CZhWqg+(yW^mcS5O;r7(kB++(3nY-+rT`;D|z zva&cJ7D4vy#`8tdcjVt$f;1ObCE(==lQHn3E@&3jSk4avieh|1EAN$07Ney?W(}Vj@MWU)O{&ulRX$IyNnE`MCgyqawx|v3T z6^@Nj$6`JlsuVbrlh71Sd2E;*b$G7QL8b(gJXo99-FV>*R zE6ggVVPaxt=PiOSDr4bC;_2fz)~6j*>ADwHePd6@qG3@30kgv0{OHv?rv0{M<-dJt zpiBE(&$qE*X8u8UtlFdfe>NqaW6`C{Bz7ASPDS6wk|C>zFEb#XnhuK5FX{BRKn>E5 z3%ZgibO-(+;)I!ucmGLQUKwUf^?kV+lhS`3l=ATBzVp#f;z957(>)6EaBLgck<{bR z@lL~P0NpSPL6jN{oa}tCTlg&{QHw}cy8QQZV4b(QBjWbL>&3bELaPAAzM9eJg2huE zsmFLYQ^A*eg%UOkb3{LOB|q;v=z5&|Ftz!MDNh2{YlySTmKvskXpxbybni5^nY3=v zS;Uk;>EYzj5YU+_;>cvZc7_T15M<(zI(1|(7UbEb$T>I_*lF4yJSCj(;NdLrp;EH# zGYg|HYO`p*?Kd=ejGrm`Qc^bH~%`={tl*W_5XonqsVFnf@nt$|9D#k05D zc@vpF&)-{5GdI|_$}1~Ml)R+UDor2+>{ngXUsU=_$B=j%fTmo+rg2F%FDDW#D;gQ> zmrR<-1(CpN&VZTY0LeB9uEEmoA5RO~J|yl%lxrA6UQttgh<7g&prtc|pKaVCG4rsP zOu*xa_N=E=PMnVs{fh9{el(7Fi~!*i%Dslh@ww@c0Sn$7a9Bi!S?^_}4o>8k44kyo z1=%bR&8YOQEBBQ8n1t{=O>>SMnLRPRUgEuQ9C>Pa#lqIq_R|jchgH5iGciW3kIp^U zegm#aA(-5gFp#4Iq*|I`RR7YHfzF2UiA*x`UaZ=(o59dQrNpAjFHAbNxH+2xR;o(f zfher^YE3?$adu0Y5a5#a+gHGDIz40cvmX$2fc-M5Prw)eT>z*3b%bOfDGm z+}he6P*=C7xzYc4nqz5B{>lBKQ<|Gw?wyjXvzo_nUXbe|Px;xU4!;dgL(?q>11;Npme01Q5_6A4`^=grDO?Q63 zd42O2olTxG-4kY;$wWU;?WskX5+%}%y1E~!fM2uiN+9zC-y@kKmBjTxikbs1gL8rD<9Tg?bp`<;0utdeZC*1kY!V)kSgJ=E1}R*x5m66?MD ze!{tFV}$T?KnhSBk^1r^)uXPi&eYHtl>#Sx;tV&LYL?Kjp-dCNE@MvcToWf%!%)}X 
zH#XJ>YnA=!)*=YB#8e$3lAauH{dN}8 z)H7eY4a@c6Wolf5FD^R6=G~Wm4&dUGkdj0(b1mqHrxCo=pA*I+1N!~BHDCJ48V-XB zu0qyB(i7??(}aqu-ZyH-+Dw`t!v;#cdee)e_i&uH*Fv`v$Z2@M_7dU(KMjxH)@aSm zU-q55)1HZ^F}=`(K-hWL#J_n>WTghwN<|u+JGZQ%J#Khjk48?^O`#}d*8~6EyiIc> z4w>X1c?evqq1PQ`Ms+@A*Ae3x6UHQ1@{+{&Z=81M6{-|u$aELxC-SUTFT2Kw?&n<^ z3B7sEM9#h8!562vnbQB(?(#T?Dmh1L@*LhOXPUJ02yGC>6Y=NB=aU+-Oc86m3#in` zZpvYd!|F@$l!*c2xV?MHAGIS-K*P50m^wT0?U$jZxwR8g8Au^-EoW!O6+a=xN*~8U zQ~_rMjIuCu>P3E|Fy;6GT$Bt*@I?+>IiK*As=!9&C;qNdD5qJIAERAcnkw~`f${)M zWc-=|FGL8RFr^k|;-j z^QZf@A0Cg@h&HCNAKGIc4~JVLkK5O$@nX$k5uP2R4HIRWxANAby3d4YT2l3SBw8qA z=Ehc!(x2;-%FMhB1wTS>a2T&DrE50`N3d0WCb>!{CoRYZv+Tb%(^)oBq3W#?M!;*K z`!^u8f4@at;D0&<_*65MfG(uurz)~&<**>{y`mm*DY0!?mdV~Y6l(8!&LJ~!ro%_? ze6J$Dd9BCNJGv;Qntb@?kZqJRuJt<`aF5K}yHTos6>&b}kC9wVE3~!JqGAav403AC zAYsQ_#)u9oDymUQLi(}4n0HIJ=)sf%Sw>tPAM81|tr(#xIH9v0esFQ{^plk_rd~6i z^o_pYj>-4dg5-?rJ@oPs?G~oPOyO6X{yq0Yh?1~>t1g%-`LvK!J!-5*4(s4R^e3f- z^QRTROw=RWG(x5i1xJ~^f$XEyHW<_B3jKcgfGd?lIjCTk+2$A2102FC zp7shLTkoTgNCnn~d; zzs4LC>{eP}E zYYieO*__zRn?26aMlyazLYNyj5_~r8Y%xqb>B=~Zq6lAz@%6my9vNOV(OqnN% zL_s3AY&G~>L4xd0jfv`7h4aY;>VFO3dxj3GWWE)v4n0w}Xi^Wsk|Zv#;KcS!>SB98 zZF~)Uu8vmgHD!BBBBconwA^=-YnI?*yg(uN_#R-WmE4SuShZOeJ5f~w0?P06Y^cbd zp!Ga7>%d=4k=IXN4izuROj4Q3Tg66xk4y;QuxH*<19sb+4 zWagJc5yb&AKUH7jPtHhlz84__uqb^rQ^X<9#GKu;^x~w}J}@&U6>JYg0w_w!F#TQq zFLy&RgCTmgfSzkwni7vz^@V+Y0tq*jwf2bdEJ&cxsbPC3|20b3(K=Ei0iS?u$B0AQ zATp8AljXf4jy|s1KCo>3{pZhkq%j&zPb&lsiBvnEx$+<=OHA}BuXJk7n0Nnc4_M_k zKim6|Y-dxJFvIIZ`gQ0%KDxNrrIAl-S`2~G9FQUUaKTHUqUOBziOcEWHm4)C_Os`c zo3^oB(gm%!qdkb6KL+}Dws#fSB?yuU-`E&(DN2kJ5 z?dr9o;fl5pi}4qXaGAi<=?w%rVsAWmw&G{$lc;e?#!<)GXjb)Uz1;42ORn(l@SyVu z5555~im%$7-PzCEi@I7f!jtf~_DgK1!IuL?qZV)I z^^T3YZvZ4X+1NsuUZ(^W92TrNE9$<>Ir3;bJ#MTrf+w2vZKPfHT*1ZgX@i%P3b9J$ z)Hq_PnJKzBtnV9}>PJJGNo|Ti;{mOZ_oOWHFnrZidE6N_S9VH9LA|8I@7UiPc86BT zU#j9n3Sn`I$N+I<_cUPRnPW!dG3-gD)=yGlYQ z8Ay#`@gy(qGkltmt1A9u!Q93q zpz+~fup%E)w8o^9{kM2>y+j}k<$vq_m_&OPzaGp6xSi_X7$8tJofQ$_danGz2Uqtq 
zxknrJ=4)h&l!(Bwpj5aZY~ZvJ`M8#gT3CQIcn0+sc#8tjJ;i?Zec~^OB(!k+;U-N6 zm`z=HXVCcpq{nEg*9Hb<)loF51tF-tQx!nK$&9#U? z)Sv$)BUL#v?V%+Wi$~Nauv{QQsB+&^-CK$K_LUtR`sQrI7yo(HiPG+dnL{W@$EX$X zLKHDqy_1mvoL-DGp0orY(SukRjrXQz3fG#2{CPCk_o@-`Wbq$bNKIi zj%s?MQkk*yo?QY1Tdb2*0#=V~<6xg4|7ELuH1+?abS)wB+QExu0THy(d>Q76x z%8L^X-z`Zv0Icz538Jd_hg3e*oQa0o6Jl+V%Y%e??Q63(m zFlgJ^90uPM9^P-kJL=^-&6E41yD1&1q_%6@g%_!?K}y$vFyyo7@9qn_`FDN2GP1HV z@-)4KdE$Dd7LEtbxBhc^?`@v+8SK2!6h9o;E{zvCpxA?~){U=N8y8ZB zF5=y(mF=aS`w}95nGbNyURESHRy&MbA3l|`2|sI@Dm`s@@}+PPvI3XH(7VXv1 z?}Fq1bO1aE2RrN`7V$@m4g(A_E!3pjk% z7+iG$5U`k=n@iYWc@h9CG7MNVd^yWj*)7;{#8JS;1o`RJU4|? z1q3ID1Pn|rENSj({x~u+(TyM@JS)%2m~>yBr}QytuDeV)VcL0O@u!Wm$F;RiJiW3O^U?r}2>-2=(;xDpQTv zT%Vb8W3ryk+hB-~mHJEEZ1Bc%!EO35j3g(gKVWKj6uenS-Y5$Tugsv4Pbq7jiEp0#tbN)#k29 z-!glP>Hid&TbK4$pGY8xP=7{F`n?4(sB)o*0~o?=jxTG!nPn}mE)bJvaNy}GV?ZGZ z$F82C*Z*ZDd_s?^j6J;P_33gv{jtpWGOAegRZl@&5;juNc%O{N(K9DWj+9=P;z4MtoPn@1s;2AyAA55S^$(x>ri(=u!Y)rutvD%yh@MTxQP~)Pj50Ew*Fwtd zXIpegwDK#RKVl|s{^#+Awb%AH3YO^}rW=d(f}-l8M)GXDpE=2b^avaTR)cVqyvhwf zKC_eQd&tu6>j@1A&K0F$F@GQE>mfP5tKwwV zv1d&N)MV*wPHI1~$HR$DiD8x(k4VM>WH{6ebDHDG5l{hsvBk;}!6~0rx)lt<7tm%WBzeE~ZMn0z2au6uaS{IA4+} z6-=18>F9H{uCUv1@}%E=mI%mzg#5lbz>$V#2dq`gpXFm7?GdBH7dJOFk!1D!blTe; z{l`IhH~Z4|3yf>+>GLOj=y$O!DxHpXb!mj(|N z@g{Ia0qQ^E+Ajk^P}it8=Z&j&)}2T_YrRfDv& zkF7SOnBQAt^G47GjuEA%IgY!BJHZD?a6wgvTMoDs-)e8A9{dGLZ=bpHCRleV!2NKt zO-~ds`R`|b1Bs2I*_&n4oJylIKUavg0xm9D zcV!Bv+h@o8f1K422PKsDz}2#h_*ukm=&es#vL5kRGGw>6Glvt7CjNAAK?Uo9XCo zO%k6r^$Z*@Io)_BAN@;@4iDR0GHc3=@>VpB0V{$?OuaMJOD$_ z!f!v6)P1exPva|*HWfwXedR1H7mmZ2#qQPiMoL<8&d$%z;XW8a)1;#f)vro3COlG_ z<5n=*V~*Rf{8GSaS4ms>Z7s+v;t$zVyy7>kURk2Z9tx`gh)^E z%Sqs@8;iy#{@b2-~wRfYpap=gx%>nDNTo z9;aGS$0>A*E}&4To2wm6>hmJcPAtwat0(;O%HGCC+893e&`@&Ic6R$?JNq@?rwrZr zzkAh>Un3sQWG;o2krfL!=VDA@dp^xJx(L+fNL%iFxA5@~w@cI|lW9HSes_}^@*_qz z-_LbOR%$chY46y^RQ5z%pHi?&hoaz5FPTPxy^mje5L0dtNMm!Fmf??c-38&=@15&G z`|yx4i!MYfRwPL@i9wU~o;Yr-Ex7^2t#`>J-O75eN*{u1D)~K1-E%|&F0f{{Q3JBO 
z7SFA6+Qo&h#$y56%Z4}KH*9Bt_9n9gU)WH`VN$gM+UuBhh}n;-F5B%Qm5}lBj0Vxk zB6-DcSvDABKY7A?dKHiJR&(kK;1<7cmnsB;VIQ9J6LofW3Kg(_Pre{_I&1q~sX9H* z#nzagj|E@g2IY}@ZJaBT3uPNj8vrtV>WTt#UIR#<%?+|+an-{+J|)D>WicIs46MQuM{2Q9~p3gmmc-uYjkVEH7U z^~jDzZ@JVOip!pFb^{w@)Li>R?#@W%S8|(5yYT>rAFqtI_Uv7ZB`h_ENfgq$+J7j? z5D}H9*#B>&)|~V1!l~%yCmtq|54=9z^=|p(L0CT$t+;QVBmsl&nc>*;4h~_cM1%ck zG zEx~<6V+JQ#zaRo8SEy}x(zloA`;l=|DB>J@9d1=3&KkH~8Y}j9R(X6NK?rB1#!-Gx z>f_<(yY1^gaNO}(enC9mrkDm_vc?aCXnq9uj%|vZth`)QmL86V2Gzg~dVaU-ow4O7 z#@)iE3GeRmTm)T&cbO!^aGsxjYLiJl;gPL4&WGD(UNwu6#m3k1UNdEY@n%Sat_|@e0RUM>6Z&ik(=eS0}LjY@X zTbMbQG3e`c%17{*NPNQTA{w@wlmund`Pr-BB_{j&m9FYL4AN9tDNZL@b=odXM-SAd z$R?*RTy2gu`6pVGjn#~t6sVJ6)Cz^tbjiw%$TvXc$mYdikp$!zteVIu*|2y=a_|z0 zr-1FiSb|Dx#jrFd^3>$H1myFZ+E>XA$!$)Ty{hzn=JlXgECk-;R&^=n^Qkj8>HRGR zB_^GJEZ4vhf0A12S@*BLX@#|?ex)Yxo58-pgnS9`;TMcnWh`7P{tV=PCOk5|YmaN1 zxASkgP<>nXr8k2yq=!?K>R`M?Ba)<#@}&*5MHf#*GHE}N*)S#-_zKAJy#wkv<0T1} zvnc6c9C*sl@F1J6d7CnH-i^?_4*-A>H;?a@m81UQUBy zzG2sI9XVVI+Qn)8{Nxs0x{q&1I9ZsDM&8bpC55!mu1#DQX@~n3ULgP0`v*H9Hxsu&aNWqrUfI{w)5)F2kT=MhIb_TH3l^#v8lErnc^xs zHZ1cLw9q0DS54pf`kjb2*6YGV?%eyD#snxChyPf3}MU9Jd_mQ>=v1JsN+)GG!+jRF+QI& zO2D3ILn1+l+kGn8_i!N05nRREsF^YovcXvL?Jowd?Ls&xF5{a=h+qA-ty&!zry--s z?d?j1!~CBj9_hTJ7Cyb`N^^p1ROJli_ScZPU|p{+Gt53gAc3q33qZ~c_+F%s!#^f6 zbqo-4`7LO#%aN19Pf0_nKTR2fpDF9eAsop+?H~4?x$QMm=%JKio-7#+P9q7A5L6#T z5IsX(0(sKX!gpiW2YF=WmG*nsYOap5CIMl=o$E|^-s>)ir;$9v_mtO{5E}3?yDY86-Ng2{rZtwe%#M>CGP^tiD z&^k;Dpjg?yhQ6GGD*Qp~yn(3$0j^NYalxAYOkmGQp9ciob_KHU(pUGc zpg*dMArl?ubWwMqLBVK3YtFfOH>98iw;3;RY#z!H4_oLK%vo~gBDsI;H@HbWSGQQCy&94}&#S(FAx%My5wEhj9W8LL2yfv z@x|?ZINXF9Y(>2-jtP^@Bl`&@E_)Pm!KZ%aOH&R5p#o{!!kWYUvEo)Km^e_yi!hbS z!eF>rQ%@5DeF-CPXjtel{!-+F4~)?Vy{;_St;^cg2$z2j$H+GNMzYYXd~bd6SIJ<4 z$RZ&>Q-L$eqvcCh3>2^$qvb(ofb1@)1~PF;jHKy)nBbUKQ{a%qHt!jYY-{e zW80zO`5}RM?r37*{%L-j>CWqwKbP7B_T1Tom0kKa0?yWqq>xbBsu_+S_}jv10!D85 z0^g{|YDofZJUTmz&ORiEjMe+?_KDrt7jBa$#QGR7H46NiPX$keM1%&P9(9%wT21es zWWPq-Yq|7TH2X|9*lsZRcivVx9a;V!hzRnSe#;cMd2@l6gRQwO&K0}f)sNnP&xU^Y 
z`OsgAtrkg}c+>O(XdgYbjH;H?0S?;reOY0=$uLr73EnLd#e<|^rjbTgPM8tQ&<)j# z8uQ8<&9Wvxy0LI^b0e%&&m;@N$9z_Ic#&KIxU>?x(q>cYZ;qhQFKlo@)zMg=@~gvG zbM!{9WH=|1kK4^e`Kf4Bl4uq=wmG>MC@wtyI8wP0n(xguNK=c?$&cd+t`gqm$Va|& zZzcvcYHQ~zmD81K-$zcWF}Q!0(oj-V7#3vg+4T{-K0hc!4_`d`35QdqW7@O6@z%;WaXv)G> z>1KtMg&5;OuWi{bukD1N!EVQ1e5}5`ueWW-bIX~NwQaA&)CHVdub^K@?5m_R@)j6? zv;^RgMl~!Db+sL$^!V?I$@68k$J1*e2j>b*Y05A^lyw@?cpm+^<{)}d!qm-L3j~}z zY-g!piOt@&ak!hA(OvSRLT-PvTlZaBge=0K7Sa(XN4HE@d=Vpz^_ZwXILSgf2$G1Mzp{_mh7gr$0|$&@F}(BZ|W^+WCSgWF4{21@lOzCr$J*eo$c zLcoRj`fm5?zg-*)+^ZPFiJG7JliyRFKF5O%cOs)kFrSp21oBvZNwl?U^$#1ynpp#| zduVLJ?Y;E=@8@B}`T2d9-0V5FDOjc8azV*4eR)MnB~Hu^%BTauL&7EU@=>4b+Fh)^ zJ%7qVaLPJLSSy3z)z5dkZ?j(kSjYf%#snB!jha&7Fr4!z`B$;wYy>c+rw=);2Y;!<3 z|5v!=@;K4JIrfrO*aS#XHSUP%a+;+>_}wCN`4*E!)y|LF`|@{q*jM4P*;7U*^FDO4 zu&mI7cUE4hS%N!OTSnjCRAseiFP0v};q$qXqb!pb9xNF7{;6I5jS9X=PChd2E#vl-F6x zT!JNP3oWH4#7saVz4^zk?OU*_{ZgCyZS z+m|o#sT0UKMqbz2h7}la2OnS}K1+DcB7SC-Dku` zheuV&MgY%ihuz(gD~)k6OF|hJPh*-opUAo-W2h&Q4L(8T+b}ILmL6I1!SeYT!Q&R2}rhO=Vb-)b-5I zv@~(}^TZ@LnC;qcRQ4Y~{PcaSzCXW6y=_J{1uwRDM?9QfC)=6x7)z2N3T#XF47C*_c?exx&C{{0q)hwIrOJV`V|&(_Xqt&j0r7W za$%`jps1#T zB5>5HAj9s@t^PtT@}GM?upN0$BG zN!{*JBc&qJ;_t$dcS1Suf*%QIe`_#bDm+h&XaS_ zE24`ITK6bF;xHtoK3_Ye>C`D(mf01P$(W=OnfGgiV@LL*U)=le8lI0|w+qQ5%k_rx zCdpn;+>seIm7vh& zJYCx7Ovb2f3UG`o$;W*}mDLjS3#$YR_EH_x+9WFhzdX)wUineY;BXQ=BZ(c!A}hC| z&go2NeAoTIS2*0!WZnoQIJyKkB#_v^B2(lbx+PcdF`;AKen+^++V9PzPLJ4CN z+X&saC_It@PKr{WaGN9UMOJ6pU;u4;Bg5jaSNzQlAvtgZ<0rChJ;{r~li9PuTfGdU zROY$f<23ly2Ii)-Eq@KCtQR^ed0tVYEf_BhG1chqO*Wv|_j#;b1=c#M`q6uJ>A<>s z=N9RvpLFU}^*4IW@8&ybebyN-YKcd`mu-L%`Vjb9ucR-+E6+^tlg0HyteMPa*+vN; zUq8C1|8nf!VhS_EL&u~*FFU%H9EP-gqjIJb+Z!gRV zX08i}=nSd$>gwPftS}4S&f7JNein&9n7W+*y??4X=Lx*M|93V1gSx^;E7E%CAz0ht zXvrd6^Ht8bF%^}|qqQG4vU1P$p$9!cB}^E>nAdR81kjn)( zne*n(38Oj+t@8FX#61p&eyXAErnMiI9Uv~dVGs#)U&-8mVde`{TX%QdXKSpOd<=Cp zH0$!=-#_014nV<4BQ$HV8`|nji|(s`!}c5?1T9qOup|20$02-_yr$AI=;u_`SGj{(=msTA86-4MZOhqpGPcwmr#a%8%B-27V`6EvlA zHPpVM^14!k#dkZyM_DQ5_;96-S?QHiEC>jL`=pV6k`C!G;uv 
zj7kLfg|wkCC~fD)00a-#G;yDzaQ)95WiS^^UjmV?pwI7no8he&G-<82mSQDS1=O-I z?eSGYD&dQ9w%-$#T-Fxh%&FVZE>1xShwXMBEi8j3`#7%zU`|sB zR1p8S@IuknXIDDCYo~%^akGmqKGfHNAoD&3?GoO)I;9iegQ%(M9Fn{5-BmLXsQ}1N8T~8vay4RiNfIyjffNV1LN#=Jm;aYtS+g6UpGxtcD z+6{&CXK#=H2hDj(!2Sk+V~Zurj!&AGX=!g98N9ust+l!;UU~T2THINis~VA2=mpMs z%84^1K;ux{swWfk_#z~N(iKrq{8Zj1WuS1)IUwXF#C^R`tgwC(3O(Rv0lMWfKz5lE zT3PUGm=pec9vDRx2#9uI42%h8;qsJ~Qs ztd4wgL?NvZN^#u%CHJ7l^A%W2* zI*#n|nl8$OS5%-k3~|%cFYf`a(b%S3NzU&Mv$XP3e8}2OSD!SBkY-f*JN7i4g=-|# zU4$)Fmy!$U>_4?OD~o1-@x&sZ*Q__J~CR(cQ0=3HI;28P5Ww;2EK0@N_BP-^+ZwR1o@j0 zvD};DptGH}H6~Qtd7;WdXb+L+RG3o=mj2h*jptKSlKW=4bA@?k>s|xJnd?xYNY{49 zKS`c_@A$OWMjUo8iLVOkyFdaqxL@&>$9fHo9Q73|i-qj3Yq}&+9vTy8&Z%p9DlCRu zNy>s_MY5cUG0cL@zz?cKjR}sXY=Y!L42IKPxLSi*^|XRX_=;?CIusn9Y)`+fYW7Zd zpD_?~ks22&stWR}PXIEW+bLiJf{ug0YtN4qHqqRV47+tsq?R`*au+svtsz`GT&_}T z#TX*a%mgEKYaGucDEoq>F1dy_PBGrmi;sD235w>{j45c#TD4%P84I@Jc!n1N8#{g} zl7?Goqu`bhyT(xN8}k;mAKs!c3or_&734&a`P?wy*luJJl&Q+xZaud9wZ1F@FEW!z zvHZ#vZp{#f>M&}>Uu}FKgqO|=Tl`YbJFGqK|IRXc54*2j zFSgpH?U8v%vt>I&vjXo>ACihpxI9Y zPKCP~+7}L*Tl@7#uR=U$-iEe>zESHsT%rv!E#MeWS}y25l>*=DXj~MV>jOR6do7nP z!`d2d);hR+zgv|@q&|jNIzD2Ez4z71@oqi0r*s|>s5TE78BDRK&!el)x!T)^W%>mo zF{dzikMvH@{&4u>UVwJ)z^~^K)rjW@o|$(|szCc4t!kThdr8mI)6&X$F~U$4(zGj2b+_|=(^rc-0W4yN zN8=whC){spN75+51vY6@I`VMBu`{-p<@ke`4v{HgBJE%<{yI0p@@;u35D0(Rduk_6QZ9sgUF%8F6a&j!DB z7_n|;1y{yEKUXsT3O#s(Q!(w@-eIE{W2o*QTV*#J-sH z)m6{#w}#6?KeGUcv51}xCWZAx86Uek*U+JXZC93{jqTU}14Ti)zB#6}Zo|R+AP%`H z=gQ0{B=gc0(nnjhZ6a%RFKn#IZ1ycshSEw@(dtrHUD@i;Gy$_R{&JRzGtFA9DAHVtN;}1VvueJxv zxzVMN1_`Dzh?9M15F}+%0x@w2BEr#IjXCF{a<_tpn8uff zH07L7p+e$#%Tx^^jB*nqJyKP2&WM0SdB_%B=z`Mf-pO(2hbK=zy7%C|Hbv`mjG-{r zm2SJ;4{cw$N*Agf+Uv{buB;2LtLdzq*eSgL8U-EPNWW!tu8 zS*FaHy9%1VA53A;sbL6{!=p)6=g{7S7h$VU4j(w9wH~a6%d1ZFY@N@HpMCq;xoiDr zP7XFUv~4#p9rmk1Ph+zatUJ$;FNxjNv+!#l{BDZjaCuM|>-%8KBE*zZu#?i52>~DJ zrY=us&2$(73ax#&it%PMT;1#j?^%M;Vx6j|vvGgneJ@&`Jo~7yli&J{Uu_bXiGEBZJdxy zUA5hHiwDP6%NwUh#waiDGmjtmejtM4er1f(RA|d6pzK6zt&|$KDLLo;es7SDo1+&m 
zUKnH2AkFEl)5GQEWr0PzTg_$*Yf7z&A(2L7Y|a@Zaz?GSL5wk4=HBwqz*bf6&xb8c-8;QQsAH7fZk3ZdJvw!w?;jqQAp8CPtvinq<--p@tVEq0oQR0A5Mvgm zAttSLilHj?5H{!xBWFsYF`?{s`?eqIx`yO^=xtFk*C_v>v-LQ+@r+-X~{_S=|Z7rWSHO-g~H-0cUDTux+{CsjczXXQ*P zO!s1Sx)`>b`p_5CH}v;_hB&BBw}%UsF2ug6+!5<-mv`0`Exhsl7^CLt(VO%4v;`wtJ_|84umU%GDI z_O2|!e)-;{u9mJW{2zYH!jcg#HnEF6lAAZp$?}d!8&bDAyCQ4TBp)=6F<Q|Jv44}AIv>9y&IZL(8YUTR+zv+RSC3Xj1^~9m@F!2;wXM)={7dnAJ@6W@ z@fQ^Yu<){Mzj3TnuR&NV$F-_nZg<-dZu&sV3M*xt@4a(wHeEK;NzS<&dacd*+2zm< zMPW})PKNDT6^4iq$vHO+0|Ogn_xpW}v8<}=>+Ah)cYJuHwf5dmr_=3rJ37*Cx7(YW zo2sfFKYm=I>)RHX$(W|93+p6v-}kz3BXt4*635AM>YQ7z*C!`O%Y(U6=)HgO;$=N; zX7l-Kwf395P}RJlHj_TC;>GsRHgUhyI&RmSNB18bJXk(^^5l2^$#4AJ&;QbA9zXi{ z#m9QmEM{{cJ#>3#?6fFuUc4mWQ3YwVbIxqHYikP;NdiidapkC!*9xt*;=TXhi$SN3dfV2Bn5ja?8-T=WhAa*3Q?{i*Ury}qy=(WydVkU6Sypw24V6z$n#Hz# zvWJqCebR5-hK~;qKD@a(nw&l?Xi=Hi`_(h5(5^S1thXsY`qDdf_-tkBbM&*yD~)Uw zby>CEQdAjGGG@e_wfe$~-~YI|_uV^Rn>C-Q{j=$2`vf07Uz~jP>L2CYSIQZ%Y1fsM zRHd(PwvUSAof_H>c$a#Ys3_f@l4%Fz;QPSoV0n0RbJNGLK7S!1)21?Ib?@H2>)od9 zJ56d@)rX?5HrHJj0?Wz|0-1z2ey^44Q(mWG^jjE@+2~=t=g|A!D6KU1(eHLUL@dkl z$$CfSO!hC&IPKIM)9Kv-&~s7OFq(&anq~z5Y9j*@9wTAKFD;N!^62FZ2q<770b*v# zDkF+vLJ?1i0U;OepEo!7uRNsqj1I%$&wsc0u@7ZC56|l5cP8yeY5nBI(=*S-^Os7= zgbUtp*7qJhl8~GJ)KM*z4^H9d(yhBD<9HgS+S5-)>=zO7sYMb>M#sG4#$NkoQj5i_2dV``FbL{4c;AV9R< z`?4&xQn$&d0z|;f@QSMP$^(wk|7SE^UcQ zm1UWQfpFYpj7Kh{psWhXckPfFX466lm9bMjd+x8l`SEv9L1C3sFbqRxn%a`>_9wph zV?X~h|L)23@Z10Fx4!$`fA-Plna=?U`}4eVSFL&${Pw6?l+%Mpr;nVq8lfzTuG?>R z+i6iOr;}Gk^E!k8oO0@iVYA(C`%Vc-h<(+R%I4$cX}2F9-G5Zdr0ch8avWmI5-gg< zY}xMG=3wS3_x{Jf_w@P8`FwtI^e_xjD4y1fz3fxW?|$L4B6;8LD%8FkNZaLdS=Tj= zF;2)+H$ONyLM@fDfOb$>YkPd>v@V<%Cm|^WrBq!yYi-jsAq+X^5>3w8XseYCaS)N| zbXwOnDqbuW!w^OT`pJAY48z&knNc7NF~+XFQA$-+WlT|36{6a1*A7wMfS-vbIuY% zQYr%CxX&2-g%RTix`<3Bld>$~m27>)sLL5Ui}8DN&R#g@tWm$B0dczZ?gxHVSB!Q; zStN@LT{jHF>FH^VF(e;j)YdExma^RpF>E%gkSD+lASpzx_0#zD_~`WBy+;op-@iD& zIXXV5o2K!NAKJxY!JM8xed?-mIh&7qwIL<|=!bUS^+j2x972wWIP7kL9UMIRC;V+>*`V^fUK4*f6;LncHiO9vq9^(w|#7^jp0!?nUf 
znW-y{F>T+oGY2P!m+Q`{;KF{0lQ@(_p;lcgs`vTFb-ao8;Mw%&OaJk~?w^RnLnwVY zkv*%%cftMA)$9#&ODSfho!jSWB-E`L)gJYy9it^?Kkn zUgIwwP;UD(W~$FW-t6+KNmZsjhO0=@ZZ2yBS#pFzDG&l}P}%77YQ3FI>iK-`z3;ld zD9YaZU--pee*c?)h}ML}IcH-`2w7W=h@~rbyWMKF>ifQ_YXI2oc8mEuhv1ykS_@)I zX*!wYoUgC1@7_5Xq;+QW^vupVZAl?swd-4Z97I!SL_~sO;^vFPAN=6^&8#U3JDpCO z>2!UwJ$reXG8{AqoVL~$1VKA$lx%g=wdc#l?Bw8(Q-1v5(RRQ4-QWI=*?f6;_s%CT zU+mib>EZFDZYE`A5p7xa+1GV_d3oJ+-E=w?z|0;A{g51~vM9Q)3n6^k%#4%SzVA1o z6`cyKG(v);jdIS7!`v97k14SbV2B<-v>wOlFuF#@7z<~{Wdk$!eLu$ip~4~i(Sg@F zgBo+rD0#o{Cw0^IT}p`nP=JILl2QaIi$E5LIoxu8#*pfmQ3MEo_(&jNCCgMz>m_chjCivk<6%-jO^>OhKIiKyW56W)VpKYLVge;IW zSS_J#BNmr*;ITNeWfx^SS>~|oa?;iyQ4&tUhhgv`gcwQuCggVD#Dzg9Y$e9^3J>7P zvMvqeAs8r|4!hj8l{OXd5C&0&;3T6B+U_~~Hu+w6eAp&(P|O4gw?i7DT5Q^tF~+$v z3k`u2b6M3zVNnwR#2AD3f;^ltGlYx>&@qO>F) z!y!|8<=cZ;rfD%;-Aq1nuD@DkDNt|id}(ZtVT|`gMMQ)d7;gn1pYD5zNGT0tcO;TC zM~TeLnK_FB6ads3kwruh@+&ETLQE;at7*#*2UAHuwBiPQ}-I86{ZKaf^e&M6?U5}Wx&14n!Ol}VbRkB$kqPDi@W^0Gi`qD|I55_pU0<@u)BTO^cF~8`W{pNSML89Ha&R!4HrJc=rtjOfO^}MB@I#kl z0Bd86OVvLiz0p^RVOR#!0%ILaoOlK{uHA;1Pt@~>eciG+b&}ak)J+e%v3rCJI zeCj&~pHd3`WguY$MnC`<*eYcalqdpFfSj0wm6JA)v9po^zz7qH#WX8e&G z=-ArD)zL$L{Q2VMx6RY_iQ-PHbj{s*sBN{p*LX}(*1!9m)i*!*!Tq}rKKu6DC&zdG z`d|L*-}&x0x9h90j@Ih+X3IdXXr{}XYMK!6ME0k%fs_VL!tBaIU-;mDf z-~F*K{oQZ9+D4QkCI6nLKa%|LC{A z_Xnnha(?gr^6mki9-dC8E8cMc)geBBK<5 zQXAEh>nn_{~IkoE*Gw1B9stOz#dA67r^&=D# zO1}HO^WyuJL zdA(ls`>v_#7y=6(9v%`=?*~DMIVq#H))^xi$RXLnkTML}r#N@^aB=rwdc5m4H~sc( zx4Id^@${fUQ_p4(-uY^iT)g=HfBc{Q2h2gCO%!SHrlwh8=6`G>2iG)e6H%UZpzZ>=yN%r z4>3_y-#@%-j8RD2?Y8gxgM)*~bh_W~Nm9(&6}E6#6opo17{b6jbuKAX3WYR~&VoQz zD>qcJxbTTi?vLD*jYSsJh?%lh~rCP7oXJ==GI)pTzEg#-JA);XjtJOwWILF;~ zOGHe{mQ_sIhfXqY_Pfy@>eF6qq|S^z)acqj41-c?bg&u2App22DrPX+A>tShfFwj% z77a7AB;OAxP?lv~l|)qrnN(E>0f}zI=a7A9fieK(oF|jXE_eN|FXt6QE=&<)CR79} zhqxYwvM%bnPML)x0H>6I5D<_ENf}FH)Awr6k@~@lNZy)u%bY0hKKj__dk^m+%5>JeJi8##H`m*f<2%nTUz{FHdd+=n>tUN* zWyxsAg?9$DL0wN9B3RC+o6YY14?nEtixlF;*+psGtiHpX`aZO~jj79=vr>qtx^6!< 
zDoW|D>puD9lTjyoczEcX6Qoz8S^>n7S+A7l5PI*^-d9y6B7N^i7K}A6=REjc88ec$ zMi(6bAf=Rmuo&ZrsO$UferNn(jB(B(V%zSNQr21mR-;x|G68C>Nh?qQXu=p^rL>3y z8F~C;Ivg=Gf&9rzV;qYrKtzoI0&@F9ATtqY0%jyYMAV9ySp*pXwT>}L2FXd2A<|<~ zR#jDD@R_qwS|KqA0-&NS?5vv;B1VH4LuPKp`N4qK%0AxfQeOG^^5RnA|P?SKy_-SAQA`prK z1OXHP&H_M)Ke~&K*ZBV@FlQ8Dg#bcAD4>u^qn{o81cl4P%{Nuru#&7wu+zCsB~fNp zN^ktO+waS=oE{#PWt|}JFKS7Nt~k zxK!F&YtPRwZ*Fd^b*JU@t^1F))~0aK#>t!S{PB0br{||p_Ek{|MP{_t

|~-nnyU zDeU{UX`1uSH)R6=<3uKRc}@MYSxn2x{e#1M)6(^aFnKJS4+&U^Nah_@AFSL-NjG$-?@+9AsctGziVh+;?VC_-GHJR;!W7L*E;+B?pu#b>k4Hc ziJne2-5&a&t%)(VLswQ+GoST+FPUv&6G|8Sojdm)7sZPgXBXF3rFO1S+x@0;rJK%& zVPK(y#WJO|-S6wVo|?+dU16Q~UK1^5a}pSa;ZfZP6yNmvygQ((7^=$hpr_NaELBoD z|4AUtpKc1rNF=v2F9Zg_aj8tGiIak;FpCHSFzSpT)`1i(5@-++VT;0<{@ff5Oxh@k z6{|9Ww4Bji)IZZcc>KloA5P;3X@1buDW`tbhPdAx9*QdqAN%KLAAa=7w+QjCe(kRo zuJ$Iq{ovk%hj;(z55L(BeZfWz3~j87au9K7muhlYP6}r>-Au%l_L~SHxGjpz70DtZ@V!bF$9)7=~dOc@bgo zTI-Y|v(s7^u97;B-y6aZQqBU)SaBde*WDrADy23-~S(f_j>KufAqVd`|@A@g}?RLH@~=C%=f$VU;VYe`7s{(Lg4ppsl@aX|=BBlX_NNt*(qK3QLk% zlCUGEpePCe2r1^AnRCiPDTSnCOo~|>_bH)j+_sMYW>r;nU6*Cqb=~-|#`~?(IMU93 zNEFUWsUhAf;r0^`c98-`bU0zpMNB|#{v(x@W< zrIa-W5g9={3jpVr7rSkrQew=?XrY9WwKL-lL4F7jB&HN$=S>&)25lMwwv90g<2I&akJf=Oiu#wHbhRk)F-BrYdHK4{Blvb)xq%s zRhfNocP~#L2X%KFRxfUz=gE!_4^sVRs0vn0IdD8bBx6W21uIj}$V^I-mI$}zrvJ*H zM=^lO$xMZbAlA~NZqojVd;kAl4sCgOAS3Su^M%HjHUnfV4a2NigUt z*lewB!d%-@!e&$NHZ$5)~nU3z*17gHoz2%Y%c1ky)>` z&dmG$9*O1$O90+%Hs1T~cB{2LTpTQy%Ux(!yQ>@DuGjR&L49suV8hrNrL|Jiy2;5O zEsy%(tu_Y-i>FUs9M;V+3>@Os)m2@Wv)QaZIFKBNVHne&Mh27jzN)H+4`(8BeSQ7n z#f!~mvp6^$laxm85=7Kmqf?MT2yIyuV8#}xX&OZAx();h)Cv(&N|__bSh`_5 zbRwdSvDT7SMNuegh=`HL9*LQeWEciis;bKv;uxisha@7X)kvQa5dZ`*%$$Wa5d(u{ zrH#VdG7)ZB;YdVQ+fkj$d}}nXl+s#r$^fG#6$p?8hzOO^$^vrs0s!Nh$9v!Vfdob) zV-e|we*8yr&T)V>hm~kwh~~j7=Op0}2Vb^kxYOV0xZY$vHC3;##FiQ!4&Gx>Da0HT7F4>50Mn{HSsdNFd&-cz z;18N=rd_Qq8hw3zUDx$Et!S3hhllgsZdZ{`fM|4xscP!u=@jKN%lQHoX;MhCZ`&8b zrO|KRf3V)|>|z=`q%`dJ-PLYq3sY9rzHK!FB-SMvR_)DBMDE^Wq^6PlLs3Y7m^D%7UB$7+FvtQz0UR3YcTYM3NMd0Hh>b0P2hYqy=UIg5Z}dynN4X@~0}sqT4m{YiAk)4U$0cPX7zeoF@H2-$EzWz%4 zyQ*1CDzmDO4hn?rMlgGh7MQg1-Rh`Trm{)w_rCwpJ8#;*R;T~G#eVu@7BgsLj4Lf9 ziVcKfws-ROY!mG4C=OmTl$GxK-sW7F)t!@5LBJS&jhmtKgLg_>QVFD$6k~kvz4tU~ zV@zWU%{VQp`O>k^_wPQqy199A_L8EM)>g;IWnG_`i8aPrn^KBCARvk;<1Tgg{VVu7 zzYPbc#)W8O&y{=SME5EFr5ghJe^Z!^=>S%kL87o10#FK2AzREjDV+xbW=yCwA!RfH zwUG#a9_Y=nP_mLKV+whooNR@&x1Vf3`BHvQHO6Pkv3IjO{Y%e&vz(Z&_r_pZR_%WO zt?&L3k-c88PVV0M{s$ji++0tartf=;=FR&Lb4Ej^ZQJ#F4FF-~Nx|}PnflxuoK6U% 
ztv*_x{qpDDuutP}e%v)_vY!wrMPz_Q#fZ1=>M3RhLPF3$03bM$HexYrbQmU*a#`4F zHe|V$S-Ua3y1`lFRHQPY8khw_sREFLi;D}b^;nW2;xG&pV^vkbCjcNK=Sl|2iJ3WD zGnpTVA3$Z_`!@E6Rb{}uTwiZCtG3%aXX?66DP?Bo%D3NmuxO@NmuImbjIs)dge3-_ z)2B98AXzIl{(AI6Nw46C2!zIFXKAcI;nigGRfMembTKf#f+NW~q&QmG=a{UuF?wd! zT31y?fT530J3CvX;8TvyIp^F+qqf!p;n4f-W=BM>EZVlyS{rLe+0((%Vb1xWkK66G zZQHW0y!Xv?a(sNe-EM#Iy$d2(EEey4_OoR*YrEXk%b~3=KD^M{)W(|79~D!2>x;v| zTi>4C`TF|gT-jk+?iDD|%$Sur3evPsju$gt`EIi#q7(4&~%jL4F>d9mx8M?mTZuet6`(!eUVesD9^~72oUe(ee1Rf#)l-8t_ z4h{~8^nTxspOPVj5W;B1V~hzQj2&EE*UmX4>HB`%F;rERlT0RBYrWg;#$H2fji6%4 z1nitEoZ%Ex@F_$A&`2>xZOjm2OxZXKNjc|?QOrD>&HBOT5N$C@Ax^5A$e=(>**aYm zj!5NK4H1Bl5>i%cO=|4dl=njjVKn3!JGk-gI`)<0gG9t`=tBslD?*CKnDJ?jdI$g# z5kCyK?-W7+NQq-gBGRW}lv9ie#v!LsZ_)cfB=7gzwrvF=3U|J{*{oT?uG>q_ld>+W zI*WvutTsw1TR367xVX4k?e5>dJGw$4Q8%64RUdS0RMue2PK^sI%4i#JNiE81?(|3oXAB>Z+`*6_RG6IP{&`>%*zx znti7nJ^Df-5}k>2lZFa{aVcO?L{!3o6|w;^h=Y<|LH@JSjH<$IGrQbt8H7}_(?Kuq zeeXY3|6g|S4GV-f_diIGDJHZuY;WT8)eEVTjrb9ciozho&@JavtuVBE zt!?G%Hy=OR?su#0W*buPVY}Py_noavjw$5O@7u{_(hYr46lZ5=Ii=lhXGp}Coj6IoE%)IZ~cDqk0-MM#9vV8pU$Kdhw^q84nJU=@* zIu^k};?>pF;ruA-~N|o6VS0-*?{ovYy>NJ-L7P&c~DH;_CYP@Dch3l+0)}gc5z=csn~Dt=&b0g=0zp;8XN5D5VyQnX+aq z4Gk#;2*}pj&3?Pv?>BAsF(4voMA327R-`mqwT>EFt4$ULU`dQDn$)M-Z6X@SyC^_N z0>YAkGohj}k62)&amk;QJvfTHLkLC_A}SzIG|DLeAf%Xbav*>pnURzx1uP|5&s?ey zNm#PhL^1NfVpOxcyc53F7k^=B;#9$`2;#ri5aXsRa`Q zWsO7ZjWL-6Gn=|9ieeasoJ1&jpJI&KC<0WPQWD7=B_o0%gP>AmNGfNHo+XL0Rw)I> z{>O6A7g6#y7x(lC5F<>yz<52JEKDI@@ef&vOzBLT2V|6DovX%imQZN%hwJU%6r zXr__>i3mziKtzP)zdU>E-w+hv%A^ng5d?sV89}j(_L{FYHVBCFdOPqMKN=tfLL>;c zmRBsqY&Eq+JM(D4S9jin%Y z-dNl{v_6;9T5~Mxx+sbe2aQ~4DyPjghi`oQJDSNpx~UIb@ye^Jct_509UVdIzU>q6-F zkALcq4uAR0V*9u7x4wAu(b>wpShb&79Ax>fx;0e>p!%>u-Hy z@Vmu5NrxVrWM@g6AQ_TqF`#=7t?F`Ac5ijTX0h?*uC63gK&8qW zQZAIKswyiyXr}$X4*(h2D%~{Ay~D#=YY$+pElAIbYSZnrNHeQNL@Vt>*zNbAX#9{J zY2CW--%_CD{cm*?&EX7UpEZRP>R{ntbWr%y1q6TcWk8I#+vrN=1juBOEP{cYFt-Fl zf@QJ^ut3%XEZh=r>nzdL|LXtIpZ5MrfLam}FaV~)MbtwQ-Ot`_uYPro8*2+zR6CrD 
z_4!VSHxuQQGPCLO`Sa(OmzR@iV~hOtZ+{(?An48YJ{s5zyZ!!(SZ9E?ZRhj(QW*xJn$Rq+J#;Tab81*XXf@S0xAdakPt<8omvU`9um+Z0wrO)AI4h%v@-?vIMP!LyIkd84(BK5-TiR7ggv?RwKR4H2=x zrfsh;u9jEwzqi_KAj)*UNG!t;GJq+{kU0Y$9@R=Ir1awK zgBWBoJvhFza8`cutuJukLI}Ax(|S8SdJ^brueP?n#Az@vP222=n}d7ZZ~u+e-*3)7 zeBw8{y#MyeTVMRryVu*R55M=v&D@xScTP?oeEy3+v0UEI+3dD`(@eX4`^Fn@Twh-! z<#xXnNxi|hKl8BLZU5YB3%C32$HOHVU6rR7=PyGTe&(ls^3Ku2$$Z*Z z{muIN`|o}I*~>p}>f&g5{FQgV5)xcqpCxXqJ12p2J*kvW!}YEyryWW(dfLoHr0cq_ z8;EF>fTWaw^!fU#aJ5r)4A~f?Km%(I!ayN$Vj)yTQ4r}6LXU`ArI>Hku38aMOfvLA zcprRHsHfBE*!?lHBGc5as;ZR2e!m?hFDa$MXzzW_5s^X&T60-CW}Y|oe!t)M0}G$;yv41TqWL4@S- z#VA41;`F-C110HnlL8>LK6$%g<4 z$P#iG!y>HGnv{=GA^H@Gq8QqK7>25;5iy2jNC9ReMsn_al-zdMjtj*?>HTJ7s-m7v zD-lHOhY+GPrr4u;0S}Wsy_x@Pu8s^02XPIxpP?usT}i=Gvr-Bt6=)qnvSOjEB@1K& zi3*a=phaQHC=n1DSx6%kf7V7KkWwKV>?5Gh1b*Gu3ywi_uHEUA*{ssQ&d*#eetEAD3R@(d3+T94DJf)P z%25d@#X75~Mq}ZzO%epcabxfruko*m*Xx1Tc#XfnXoh@K&+_8kc=N}-+s}^5x8cq= z)|W3{Ub@QqmoG`uVlq8w7G>ECK0kcp5F%@0Q@YYsDT^jNIXQWF?{Ky4v@4x+i^ZZg zH9%HEUDu&jqk;Zvwd&dySdNa41mb49DvP?V>(kTI5CRbaaLnBMA*496>@#x`_Fe05 zhGsI^t@me7&m(6NlQ>tdDa%@G8)G~@Jyl9=Halkx0tMe692{H+*{ydU(DGe(g^;?o zuuZ!A;GWi`G?JLYRc+hv_xr_k!jO&8ckY~q%prubt~Q&EPMA_UIXSwyx!G>FF?mEh zJA3)g+qEziMfLd6gQ6%dE-rc>W|L;*r08sRY4D-7SXoWhTC2@44Euc>f+r$Z7(yMB zFA6)v5aX?tsxZbFV@_lpDUcWiQELq#aquAot#x1)PGj*$N`X;B@Jb^ikWwJRKINQ+ z1T}`3M&B0zPzWLjNJPk-ltM&M1WF-{Y%CGUw~_)883U6?ZyX|$%y^qDhjCmrFmuYH z5D|qX=M-ZchM{d+A_9cbaDSxaC_rOfZi8{|GkEsxc&BC4U>B1zq%yQ`?ccwrzo%49 zD*6x&%_{2?mA4rTDQ96s9NjSL+6~)%?$Qu*?xH|Eq_ili)Rf=e-2c`K@G*d@q#|vx z=Q#kdX2y;f57h2JKcA)-Ca$U0{jh5lCR6kQLfR$X1XutChNLZ_m>d(MCQ+P$O35)1 zvNJBFq)5qVLOMjEloKS+zR#!%WS2c^b6X~kTKMt8s#l0Pp%9IOcSP&lM$)86Z=_#Y z_T8EOD>2ojf7#C-ReJPvl9U6yVr%9cVGJK5WJ#Q0yhJG_8ZDy6{~7=Y5d{$}iAGWc z$biU70ELkSZnggaw>|uSQQG9FG8xNACI$v1Lig*zU_Byc6otvN5bph4$7cV6Ex}M|^ zNohpfw(Yd8Nnl!3=J<5pOb9?b4MdBBgCY2<&B~A}tdkriGX$wrIW4EFq2I5E)Mp-s z8>O%7>(kTY6A10I8p;jNCT??DN#KY-HdwFFgzP~-Q ze*Nz9_zU){QlwbrFAOK(_0pwR= zwJTgO^uS(m2BDB7u>quf^Q13k&X_q53dt7}SCmT(G2TYke!3y}S5HO$VN{}Y5=4n2 
zj0o96%!Xr_o_u`$?&JOs3^5z+BA2tunQ6DmRZ*{3JKy=rRiAt7?Yk$(7nkQxpPzNZ zUfW`~@2u5eFmP_S+o%zYNpA7h!6k8!L;$8!~99BB{$kaHTDF7PUNc1+STn#NT@1R$-fcFLID zZU>T40225;2K4yPMS(jV+5Nr3BD}L0W=4by~k5ArweE-phS5I~ywEa#0;B)tX?8|?> zo=&}o>&sy}bG!ZO=H|uIXCHn1YklZJV6)kbe5R_IeEJ?=EDnf(C8ZRUQlAD_XoPCg zbX^xR3#a|ARWzG4v$ky!aj{rvtz(RR-y379s)|{>_kG_pb6J)}X>W;BphTqaTL5mF zN~A?pAhcPn$3`Q@c(qysvegC={l1GaI(B7QD$9M}8>0}>drw5W-EKOW)pd;^l<55O ztURgbi8DLZu{!zVC-V zC{oj^-g+<0k{Jgqh7;{Qx?HJg{ zEP~b)%nUgjGUNyVoYBIv-S2>u)_P!0`RK#yFWr=HS0&dqJG#;0$Q|U^o74j4859~Q zs6MJ>sA!Wi=rJ26Ow|yRrsN8V$@H2Tas&y2%s6Br6!^2c;N`H>Af28E&Xj$1^Mh@1 za`y0V^zXHe|KRAu|3fwTTAY*@)fe*ep-!6|_h@WwNSnAZ#~;DnqW(%%Jv*L$BVPa0 z%k!(cN{G2C3SdNn zrfGI<+xPvh?J96ShOshZj%aW?o%$4;rrGcJ`~5zrbpP(XoO4-~V=Z_)N(L~-OeT}{ zZol4aHrwrTxg5u-!!T^OTc?WB){}bn_>oPVjWKQCAD^CXx7+jc%jtApRYgi^xm@mc zZO*yUJhWZ6>m=*p!I!JE-*l#nZBrgD1@d%0Q34`jv`hU^)r~4iylAH*=NuAb0iP$f znctc8eUGRXi-WOOan8-A3xuQF(XTcX4r%f*-peOU^mhb^FzJtrcopPO1r! zuBys8%aW8*h{7z!m?4H3J>F){>M?#c5ioO1Js}2xl!8{OaE?V3L0Qh)w%vDo<*Y4S z;5>vL5lJayZ32mXFU+JB5DMS+&k7L`No$3)^X)C29C75K-}+`6GuF)_q7>@Ul#_X^ zQ#Fy*1{F!nKF2f$#37C3ad_3GGjkR(#>AXB<aX%KPNmby5c&z6wRBkfvk!+s~V zQfi3%hxt1PMZe8ug8igd3X5JT9wP3Vxbae7kh| zbF+s2b3*`UL72WIAOOVxqn)MzjRI`CeS!>tB=|as>otCqFy1d11py5y0LhXh!y->N zu`T3AaaR?08qr2wwge%B#o@vA%?&e5>!v3qnF*jcXp8{FOu$oD9!%$x*%WhPGMPCF zp*Bnu1)Qs_HtUyHm2D0V)aKcX7DrK#q_UgJKRaf=Z&Gqr& zQP}NvyVazqmSs}_+<$nm+qEA*{m40Y?s~|BRr5RX`n|WWG2$zFNI3uwMoG{-r;fo&UFW9P+_| zKRVo;aRe2z*2$DfTqWu#+cKkzSi2NmlsXpC=s`!8SO%FY>4%#=LN?16%P%^!c;`F+ zf!aMip1(Dd{Y40q{lt(MQ;1PH^$+jftCT&O%;sftyX1Di#SoiJk|KQu^qaUO`62O}eA1>$f+Xw zyEX?qSsktZ*(x6G-8&_2_EhC;LI04a(7*bPA4%X(HU!PZA3%Sm`xhqcIGLWB!A+wr zCUVn;yA8Cb{de1J-)ny1a`ATWhNW?zRvQ008UoatHXA{sTr!d&Dq&5o=>7f@zw!n? 
zRk+>}?{wR?50I&fxhwd(ihcaKi;5MF+KUf0bpe(5VW+pBHsudBUg#>DsUo=!fy zxVgAIzrGn_NLglO9k$))4(?Bkx!`@o-T8LEPKq3VL=hQ=PJpzw00F>SYrWNf_`w-x6|?`X_s-sZcyDS1&;oaL zS&3x8tP;*g?}-a7>#4oM@0 z5H`cUD9ofOfO)f7J$-q0v)yY`dY`+tKRv$l?AeR!>+7bhzVL-FhLpbXoo@}vcf%0& z9TAOdlht}XDsGAB?)Nj7^ z6L*I8#mf)w->X?SVveZ&tU9j4)yoI>-)jB(Ti^PoQu^r+zJ2F(GTfZ~(I5T#(ea(H z{>(3!VzyfEXZ37;|AAus`ZxaY_~0x1)#m=m@q;_Zm9_?5>8dypOWI-4ZF>Xd%(|oL z5h+;bx~{6e-OnaXxnFPEl`XARa6(Pt%=OvX`sU{9{9+h_t=+-$^u^Qno(#Lc_l-aJ z)#o2Aq5PXa`?Vi?@DO$_09$K8a<^}_HO`eOr7n0h_U?oG>s6a!sDy5WDBBH9QR9X6d0D#Kbn4$+6S4Y-Z08q;8c00sww8R;PL6f%D_|V2UjNGG~ zrK&1LF!)X>RU0#(G>9taTv#1Fj@=3XUoHKA$m)6?-t`!mQeW$vd#CUS0 zef035HHA``LNXdPshEO@2H+HZ;>ettBZ^XFP%;3;m_O|oK2^z#((N&N!5TBJ4MtC) z%siUbBFX3*HoixC?};dJW@Zp#hEY9`a|A+E6ugfyj>bMQ#>|{zwAy&@LBo@?7lEXi zP6~zA+8iPpqm9}3T_60Us@vVRuABXSP+DTfoMYnL4SnDDZQI(i&{`$VIpmO{K`6?) zA6kYKqaWW*l`XuFUDsJ-tTrSNLO=wgH3*EYuGZSeFuslop(x9J>U|rFqBuA>=-bYx zToi>NH4H;aoDv&ln`RoL=M=#S@LI$i$U$?Vtl;(*)AKv&EM7^a3R#0U_hZPMc*$RsRUl3^G~E58le88R9G3e;qZ8}TWn)pj*k^Dh_57=%)(m}B2=fJkYQy3+e=Ol>Tz z!dbl`9el6aP#%IN(abqg%Aj0Q)`LEx0Q@HwKx5u8kfQ&BNecpuq0;!nUw?ZTFav8C z1=dVVs4%jqf7AE5e-Y+Wku3d*wt)oX)k9VA+EC~wj7I;F-rL9^&li+`V%+pU-BKwrx{NM5!MJ zWlil$MC23-tvLDFK~YRjn$-Dk-hM^WOI-v+1J;52sq+T%Q9A zl!O99Xs>shYd=KEn_<^f)sKDQ$A;mCw0d~(PJgw(=$}8$+Xb47b~vZ~N9*C#`GsyS zTKyaOt@X+A98d{INQkUOAPXQfQ6&KRRze^Q05}rSS@$3SU}#+W2)&?JiPs?GPCpM0X< z^znCoa`@wIi%+loV%{v9$@5M1^*{b*ee_1oeri+pJ~+G?>W``YqPl$BZU1NHKkw7< zz1>-4Z&vGz$(!GK>%V(8c})Jw3SvSK%BUzCov{Mbn$&63XmXB4-kCzZKYQ!qOaBYl zKl`gU|5560X7fe6FDH%Ki@&+-g9#NKzPvd*TVG=epS$#NNoSNz68!zannSz(2 z#j-YTF_}Dh@!^Ynu{bDB@6TdpA5ym;p6+%#_QwZ@kYhDHJX{3ZwZonf2+=>7lz-#V zlRrA2eZQ>}+9B*J&(+U-fcFmy6$d zvhLhLBN8o`4z_GwhVrf9{b=jR%Cnx;MPXIn_bGbi3TDoB!QcGp&(85lVw%;BQAjMRqNc7YQ(j+R112dEOYFjCyV;x8)d)^i5jm$& z8l?$B3X%nt6EK`Q0SYk+kWxxGML%SYf)adGs6~*OmC-4Mswo?{JU>78Ll6-HHCk4d zx)`wUeAWpq6S+_U!GQc|Cc&aB1`6Vs-iPC5w<|D}KL z<)!)dcm9ro^x*E>ljiO}{+(Y*7k{|kUv2u`SAY5!`o8}s|M*uFM5CoN>RaFV)#Kx4 zGP^&SOg5X<#cthDDsRfpD8Zu3rc_m;hKG{C|;KT17934()rLet!?+sE` 
zAcf?i5CGl}t0f(CpLPnQS(+yBo0@ZtT3{l4Ap+Sm`M4G)o-MMy}r5DII;gT*rE z49XuK&0|g)Qni={(6grK`c2>M>cuhypJKMwwry{W8P;p1No(EpEfEbVTbsIeP!pq+ z(i)k|VHilO(MJf_cl#CqP*hVkg(>Q~?)$!rkKfj1}pje6(T_!p;OCkl!JeClA(4GCeLeRe7W04M^0I2s!Q z7MOq_O9m)NLxz|k5^KvEfP2k~C~AZZWi}BK{n_WDHbyd5bILeXc`(bz(_xF+z;Irv zym7m%-grE_bFlqZeD)%^e}8Y6B~^zpv0y?!!EC6C${X%C(yR{t9Yp})Nl_Lm=U$`X zJpdqp6@>Id4-Yc}C{RC?HSpS3=r#U)@OnM)8n5ve7)6W&sUGW3X1|1+|C?2i*bcxB z(HMPlc(^QF=zXDz&pv+p{K?ssyV_i@j8fI4A;A59Gn>s=fwc}%6;(|{%jL4$#KHG< zUGCdX#J6pKba*nGFO5~h&=IN+KK?*!t<-JO6cK?SGm}tZ$}q&~bSi1{#v5-O92}$& zmk0Av8Cus(*IgFQ)U%m$<>9@f&Gprtw;yjmd8v9hI5_G}0T<`1-DMJR&KV>|$RMfj zl~vW@DGEVOg11jqM1Yfo9FYYP z6_Im}L4YWw6k;f>`&1S%^1nw;)^tAUx^5gHjRrVcYaqni_8a(>P(Udnq8xKk*rKo@ zq!7}l&N;*+A}~58jTv+*iIqq7>2CGx<%{pl)#MlC{p*{S;^kCrrE*CU2pD5r$<~1E>^;BFsyBKptTY3&NrxZkPuTPAgm61Fa>Y^2n6t-a)NW-VkapaL1W307d@NNuU2mvx% zW31AZDG3)o4iZBlT9K-#0#g&xK5~v>S`q{MBne>C_b)Ea52wnj{Yt%Mp_$psk~R>U zmSzSB5mRPXz{-%pQ9xjJEA`@q+Fj7Jki}iL^IfC6(o`99X3B`VV1O(@%u{V&kHTN$ zHU8qlYPw?R$_EyIk4$4U4SimBdro<~-BL5Xb8^yDg@WkT+}J zKhr1APrkejpTzz8NJ4Kok`UW5cm3A4WaPp#ywiX1fqC%vi@$ag>@W8J>@Bl7W7@4g z`myDro4*qSUaWH82dls+^HyCdbFhuWdFb}jJF0@u(xh6PeEp*TXYNa#yK9(TWZAB< zsN=Np-MY@}M5Y6q0~9_J4!)$&)Mel)Zd+YelexNxsB9}()!}k*rL}u9M$-P~*%s-9 zv8Cmrn3TG>TCIAHr-#QcFVDB@n=EOG<%6TQ-hcM}>Ai!WyZ8CwCVl+knHU+upfX?v z31Ye!EBzwF&Dm8iI<}XO4a}t&k|OXH^IQqe%&rIA+WeA!h(KrUEf3gUyUW#+Y_!W7pfFNn&?kY`1#D z{ga#LAB9{O(X|(w#ljrJv966XmL=^sTkZ5@Hf@J)x8M6+w@sV6p3Y{o<9hb)!_O>h z=aF)|UvD>c(dSA`0rnI%TSA?AbFqh2#>D=9TDQd$Rh)w;xU~u6w_(3bTtt z4OUEX6~b3e9)^^+ySA*WF667#T0=I7wJQLisw&P9La@fc=o4u5fBlz#{@LZT!Eb)@ z=RWu2j~_YqdfMnDX8RqKpe&Fjl?5^gBQnX|d-qCLD1tZ)nYk#+ zkXR`~9KF%Qm?X?OhoP4oZB>fkX1^!Y&N&hxqU*~WrcDUJ8I?1~=n+5^qro_oijuZT zG^2t9?Ci7%W`bH-V@gLEoimUfo{dfoPGG^ z&%I&dqG+a1o_uunz1n*};h}Okt%}*{BLLX%_r4E(+=*mSr`fJ;_$F6OXk8A>fr;WHGJ>;-^r=7ran17I6OEVhHkgpee;{& zp4Jrrtgf%7(2{eGWwuBxUeiX@Q*L1-9;zV8h&Q>yDpQB*{_s%#7? 
z=UkR9rDUx&#<;?c-H;$yYfn$_0^D{Y+;h9PXXTU*w(F)5{C7?`;aEg4-lwTd~X zIQTqjWXBx4yLa!B@aUN~rl(X@HCpozefU&#()azSkPaarIuU`f#jq-bJapaAby2W# zMO`@WedmW9qbUk!^$>2qmk%w;3kn@;FmaTJ;;r!Xzd_EsV1KQ9iM*t!q0w6+6!_ak|*19MPL>ziQ#E^5I zOeRFS8wRBmX&rM$1v#f(*OP$K$N-6%v!Xj8vZ`H?gpno29F1t@thL%%04SAl*5wq35P*aU0;HmFx=+UG zzV}Vjl*YDg8xbyKI5{{xzq||~Y^qJSzt*Y*7-#xxD7?3c8L=XT$@5U&ipFj#dDMC_&LI}Q4A0B9tUvfA9Y~+`J*<;oC7C7Yi(85>-Abj zoxBSkViMlPuYdhl5b@>t^T~9cKJ`yOK0d9gYOI0EvWzjTR%<5ILU&zPW;s-2^ch`U z-bj_k%E>xc*7L>wQ9a#L%2bOgH)XrpeDIwge0cuuTfg{~@@UU*rEcYJyCb3)m>KKo zq-}SRQ-OreZQCshsmsz@=h+J*uCFhr(<#TOvm(#)lt4>seMedqnGT#Y?hm>GdpQC=yxU9aZ4%JWYBkAA;3!E5 zfr+^r`rbR|hr`8j@2rom+jeDHnz9PcUtM2IQW;6_{n5cek!91mI-GpDD$4Dyv;Dv! z-Z?tyoAxrL)6-)qb)6N3tgUS&5hPqLmnTQ%;o;%wWP+YHTlegfXY1Yi<@)6t_fA*d z?dQkOeEs{Aue^Kz8=+oI!ukO0;0wtc!HGB_kIxbl(NE)9bBn^|7IOpA#yHqb!l9oW zfB5i!a5w+Iw~(K!V?TsK3?VN@z? z0+P>)DmUH6%g8MIf`S%m0H<7xHg)C*k#nJ6lY^w-s?543pm5erbdmWNfWu_6h{=BK z;TJEimLETUa&-UB?D+7*AAZk!e|&NfV!XV$X4=f}+?j~|7tPht{)FpZ?uRyz+^>{3 zIy02CB_oaHEXfW`+!%>gkeVItj4bj%{h)?*%c-LNS00|;bT$0=M^~%=YFmHdDZk;l z?1!`)k}kKeRi(_2o$aXQ&bm!!!N9c86?;@*g=ktEcORWK?;khs9h9h0U%8mPJDg6B z7AH;9TwPrm%x2U5$zcQRDubY)|>0xlCxOngJvu3fFQ1M1Bu0sFf zDjkYlY2e%)dAXUXtdbH3ABPZ;K(klEV2FVkQl_#PqYXZQPn3|oG$zJZQ(lcX1jnsr;)oyZa=JTmGDl#K-mg^AWA}=2t+$l&6&i3A( z>>ZFpuV!{~+_=t(wDs+}+s*6a)oS%}y}omN%7cTgy|Z_;bHj_}^2!gBLlff7FP&C- zQT+bni>;^y5O0PGfJ0=6k%L2w3R!8&GQd6s$HHrpWSv3ph5W7O-PhiKN%=hPT1yZa z7)dDU;_&V;y*oFVe`Zqe686Jj%1JSs8EtmEogMs01c{No_jO$-;1a|C^gsCX1>kVA zndUf1YHXNk6Tt$2F`AHL^n_gGx`0`sOr=bvGUo={cP*rz`_*IuM48qRKpMlyG1=fz z5_3wyg~(P2#W6q%LI?n9j0_&MHUKd6gH+VEeMVBG#1SNsR+*G?aL!pLDGGt;Qc5fl zWAH|4^vOmme8dzXa!kQ0=wkGUIq?4BLHG2pLlEUzV_OOPaY4EBSGtY-)&4U>Pihh-MMoo(|NKn6G9lA?j26+T1)fV(H@f8 zw1X+DkmAeBr`|Prt%mKgF8BBM4$dzxj*j=f^p!6Ws@<-?y!hVn$>*y4RIB2*f9r32 z@ZmfE-k<-o2m6nf&#(6nPegu?@@hUcXZJp5n=L}1v%5CWlWyJ0q4Os7*u*tOdbLc(6@%osC{{xRVwK9)jx z?+wwo3=lxtC20HfBrEK3;e&Qk(nA43RX9FC7yV^tNS3)VQ(_ujW{-}e>~Rfbhr zbA%8Qk#siny}h~KDiMq^LI_Uj)+dd595lxmN6u4G6yqi%D~miUBXbCr$SW<78Ht&> 
zP^K932{0*9Fte1>Xu+&fO5-ef+)QXCs&a-g1~(WjC*@?IfkQM}`amhMkWy>2+N`C{ zRF=QEc&UsYx^B!uAfhbGAYrESgT2FUXaOLE(DY6zVT@tsrfE_FDJiDJh}L`Sy_7Nn z2jIYI+qH-S0Enaa-g~b~LjqC?06@wR0H%aeaP+c(opQg;)4F1`!#CA$DoFQe7W23a~4%m=uO%dUmU{iugJ=I!=kc3bsFNW3X#mkqM z*H>@7^~F1P?%aR)+Or>A3ne3i3#rgbkQjzRWO`I?3I>sYFbScB7Kpmp^$`6Nou13v=n{UBmqJz;W#lPqKko3LIILR zNKpX9lsH5|1OUN@0Gwx8UDr}l+qT=T4arA8s@m~W9t*m^lHD5pX^R z0D#0PC1zxf0JpaD!!V>2Qi4&b001K;p8y5YNcunqV6fJ1{l&DbFydeb7pzAl%3P9> zn~{}2M$+Bp>k<*r_(c)|00ffBOpLKmc?|A)yF@7%lU7t{tstCErIaC4c~9|tdEIo| zD4fw4oVzv2A5$-BT;?Q&YCOJ^X$*vjF^#^)(FY_(L}EY&sif8#>DB;uwDp%nDFhcn zN=Y$<5I90r)Jn>3*TpVy@+LQl$T>$SOKp<#yUq2bC@DJ-az>b8kgd)7{KU_Xy_)xY zop_nsN=FdvE5AbAU3PD3QIb>^Qb<66nurlw8+OXL$vhQVDi^t|b7?pU$2|$jOffMc zmeLd$WpwG^xO9M5c!gK^Zw}HVHnQqw5BKm2V#syb$=zt!Erb}lULpuUB2q%O+f7vz z#^?}2(=^AYrwmakl_Dfe-2hitSG^sU>s6lTckbNzSA&y%U@ia0{{tp@!~}Y?%ut7`)XF0nRVT!?}ZRbi0yVeumqRhe(~Y6%jKg&e(u3h zQSJwwf3i&E6fUtTyJLU!fngWEn0HvT-+nFeZm1C*Yqx%cRSO7@Ek1k_xj4 z0+A7)h&P_R_S(_%`_;{d&+FIga@uXN0^E>MZ2y_ZKmcOs!Ha|^?ejsYAE__=lgTf? 
zp+EdJFz?UK?!^z9=Bl#0vcJmop5(ZrSf}AID^SL(?c;~?3}wCQx-xD5Q~b?F?teGk z`=j*m0`@gXZCxwWB6H}~fC>^s5xpRt6pHdvkWPq#_J)g0uqZ0O4iHI-wA*gX{EYm| zm#gOVuqu_hySMj2;O~9(0la*?*{<&F?fY)%Y@dSvpdE5~Jg@JyBsMFj7|&)47nm6@ zH&;1?gS<{DJzsvJOH+zTE~bIu#ddi(*Qx^s#Y}y0Qh<$lUg$$zf9{B<>&MFc=-2th zlQ?<2Ejl;S`GYP}Hk8P}qSUJI)<1s#y>%49g-3e}&orGs@E*5h z-zYztqP^(lzWY9O;j#F4-<6NrsJP0as*Ms9LYCNsL|O;IfkIH=I6~rFX#ryJj!{Mh z8<}~Tvf)yG_?xGfKRgi^ZCprI>>(zp68Dvy6?sYQJ3>hUIQ3)bs~{4}f!+1pk3RYE z;n`gPnCRSxHVLV;P#|(6LP`^$niz=ld#4{yH^DiP>0z)grbiDR6_*#+%bP#!KA26+ zmvZ@M4%L(E^W82gKSdQ|^sF_inn82mlss97-VItJV-ztkvA0TYGnHQk_+R|qJO903 z{Dq7qCZTf0)+rzZ>2YtCLwlynI@dXK8K9g@Rh~^Qc7mrY&K2PAR>uGLIoFF z4_PrqT{g`sY2lDYsc8rvAs{47iBl-@BGYO5K;ViZD{3{wPVc7aoj0x3m6#JnssQ#j|ErV+yACL6+l5`h8>M8N?p3a>MsHPk_B zU;rVNRs|_7b=7=}z~X14ucGPT}>7{bH54-O9x zBte;HufO)d4c5Bhc(H%J+V$NwQK}}hu5YeZYb7h^hQ(|T6gxp#F6Wb}mZmPs6`9^U zA9}%J*R{^uu>Frm%sGYFZIpt z*~NPyVX}V5?#GX>IHtQNhs}D)!S7FA&nJKOoxO#Gs#wARBgNGY8SLh+ast&EmZ+Rg_cAw)`12sL;IDQc||*?aF(Lb{a* zB#xsbw=7GJj3}~9m!)9l82xQrZ3u`cNEw}v=>^u>i;Iiqa`))bBW6}YPbQPubfJ`r zkp+5YcFrY4qi-oqLI~j0bZyf#S(epxJ;q%KrTX5+=#>^qDUOjLB69FU^v(|r0E|tp z5CT&2Hb@YR;9M9d`$bV0V*p^VeT>l!T^|?>57ws;Yi+hoU)S|84Bg<`^`;Ll0pUQ= zhoUGPF>)CCCT>GnmRXhwt(x@$f%SShA@TbnJ{t!fZfmq=TM3SskcK)31grH+Zap`b)NeeueR&m zFj#Bra&q_nowjYA8-Nf%BpX1W)|#Xkvlhlza~NMl03QPoPN&mx3lc(-Qaa}#_+)fS zsVE92bbg2_lENcQ5MA#PTv?RUx_gop%{raz0mm3QB?Mqf0LTa+=x0TrVnX=TCyD`) z6Q{tzzkKa4M*0R_JXWhGny<|D@3O4F(O*2D{c3JLudtAjV-jE_>J)uPBGY5rOvKE@ zz|nIIKj)&Q98{YunwMZ@%um zx7OEnot$;fX{{%d$;j_eLTaryn^sDhWje-K6q!=08~OxXSJUmb0TfAy<+_PUkSf>P zR+O_CU9NEdbN5dE!{7MP#ryAk_iz5j=^J@{o&!`$7g3}r$@&h zz4yVz<@MQD@7Sgrh9LnrO@o4Bj4?QoK*@x}lprL(H=B>Vm~k8*eF!0>lmsVheF#2C zVNrMV}`D~oyijskx!A69~n172y5-_T%JfN&CHI$7(K>2 zO9X2z$B2YN-kSd)-;VOfu|5hR5l0W77-Qr#`sYX~$7%TJfIU(yF)=U*A*^)(NeewL z6-K`u$sFOfq$47gWso>JA35}WDBAO;~srnPmx-!+;| zRGB~u)~BRV0K$`VOHj2sYOy=A|`EBw5I zK~dL5fZgQ28E#%)FQeSgq?(sAt+ha*wHCnE+Rz0f)!uX#LU7KNWf3@8Yh&_NRmH#~ zk5VbcjE6@@Po6*f&i8)sa<{!azj*fI>E3*CdU9-}nog%~>n7U)0P;L{&Ozii9=-nT 
z*|U_uC%(A2k_L2E5QwUroE#mmF0ZfKOXGH@XLopV_ix+dZ__2>@VJpD{5KFd(O&md=-qq||I=u@%Bd?ow&d9Q2?3sf)k!Fg;0_{_FS` zUy5HrlPPZVev^a&81r5k01yxn5CBb`LW|v{(dFQ?OvopA@bch4>b?B7`lZH;uU`L? ze<%FC#nD{ToId*aQp6pEeZeX>t!ubEDfZTVnYzL&o7u=t^rcGa(eqR-A$ z-mIUkHI|TACb+wIQ0B$;^6Kr?C4)SfmUG!uR5`|csy5HA-rp?$-0|K0gVPJYL1<6% zFDk5meDUFTo;?<#tRB2k7y#u~ZmmU@3zF#b6vFZX%S_1ZXz`6&v=8U%%eZ_>TRSYD zwa+hxK-o0o$s)b}pY;D#QM&7)UfQEhnubKqtG)JnJ0Z4BUkleX+kl)Ws#Lsw@jgxq zD`4n*SYEGIE2H%NJ9o1z>s=@{OscZFX*fnnaaI*4$7k2;XTdL6*vfuadmyKJyL)K< zMrq#a&$AsYRy_5yvy|7A=cu6XL&<&7-H2hWn4PJc*=Z|8E%Tl!*p0dR(ZS6hiL38q zygoiYxKo{c`|(gwoCT3|S?(K|vaN?q;-shu(l89R?}b(Zu-)y-y6$Y>G{bk^{n4NL zliQ8$fSu8*o*O%O33|Vt?3yk_R3@KHCiB_w^!nnRk3ZN9-P6tW^Q()6DeukpZlJxf z@x$}YH}~&6nod5xwgCriLZB?7WRM`F%#A{ZI6w>?NR2WijO>$0AvJ|i5%P1MY?teM zle`@qw=DrD(2U6ttb5vZv}JnhbWtR#RP^mtOVyb1TNFiARg1-JI-PDd8;Mb6g=>;7 zN?BIz(3xC^=u-3mNgyyJj!6&$as=q36Dde3=aXrL95`jb5M_6KwA<}|{QgH@`^r~< z5C}Q2BmfXGG7(838Hq^7s05TFUzvb%;4;EnlY9^AWs zC#A63ZYR@0%6>a^(r96{vmu4BKbeKz&Z;sHjb-)W{=wPV8=LhIBkV6`i^-q)`2BCc z_r2dd+nZ&tJ%|Y|UpyBv&L=1P=2-ac-rdFd`3n`aaEb%9L*MkBPI9mv5tXTqDS01s zR+Z&&y}a_?RaIq-2{t6)<$5)n?jwr4s07KBpsFgV==$Q^WV)`)X47aRr*%yzLkQ9D zln?}AI<1SMKorcp+cwMVmqa9l(mER(3ILc)CfiNR5_&uM#M7>K&h`DElu^=@wQE zFiNSRZ(_0t$vJ1F3NfV+5z+SD*d=MLrOLI|&e^uz<$0cEGXPvJmwne96Xba=5(=%9 z5RKi9$p_2zs_D9Ru${Lq20%=J31CPedJh1IXq4U#-AB)!+&MgqF*1UbGR7DZ1Avrr zz1smKl7bQg00>GcCW7bBUlLJORYY=J?A3KmM7Hl^jAdRbrGgJGrVwM-byBeqB5-P& zrtQ13uJW?zoW+#Z>-DzTkrXi{QgS@)c~NzPW5z7YoO8kXkdi|1DR>(LC1sML90*cM z*12sMB%u3VIlX`Z*MjY>%qX|1z7->p{{o28NF-rmvuqdPBl=WXBk6qQmw1Q$Is zw{2TZ>e0kZ2|1pkQB$r+0e~?^AUSX$H~^rrN0n+A`le|@a3d+rdT)z^iaR6wvYs>H zi&f{!d=Dx4$SDB=(Jfma5m6AtpUr;YpU#F%0su%LKyWv4bM@B8{~N#Sp1t`$d*`kH zc~d`pH{-f6s4KvjB^k{K!xmkXA{m%B|_x{q^ zm;co9z5ngs{V(Eu5J();w%r^Y92t~PU%u#ue%ov?;;wDB+eQdsls-K^_SOw;cW|&5 zLIS?7>#3A-v$;VbV~oK?LRzo3MOArD?a*J{+@!=`eCu`Uqfha@j~{>d{`+~J-@kjO z%5$Yup67zK2r0xK7zj{G3C?Fi1OS`DV4aTv1$l@$fYv%7hZs{zq@*!YXNrA{&RUzo zV6E#rAz;pBo(UnGb19|3$@)P`DU=+XwZkpzA3t3mD5bJ2TemysT(FJ^B8=?*kr%J{ 
zQ%1)4^JB2p=pMw(C`gbPY4XsL;i*gRES|6Tom#q!;l+X6~k!*8i zWmt7lZDF!DM>qN|%+CVwuHOoYRhJcM0MSG}168tot~w@(oI-NUCSN(~ts#T0KtE=txiJB5@+4~S(gFvM;td!%a7gh&E9IYf60U*Io zFC72?(Th47Td0YoW?#cy zy1;9fa2~pPl9y)Y1P@U}NQ>_3-&d<&Oz(WW+xzSKo67`2DO1d+9A;?{eebSb_IGk8 z>Z#xpcV6TZ)S`?<(;1Te&^0Gnua)o4?n(17wL`7?3ZTZ?H|O~el+9}ct@`?FUseJ{ zp{7+4+@K;F5iGlF&wrzN{lklC9oKDswA#wa`te@5KCEWj&F;<8{_^KvziC!aFW1Mj zgBcblB_~ndG*7b4_l|Ty+6+>*gSy(>u&5MqQh z+i^X}g~xK{pNZJQaFd}m#4VM9`CG?xZ7?HQ zmz538#crkN8ipv9(!!Kgo{Fi|DsTeNEif^ZQpZ}a!|=h)`Re@X=kL9_YvX*eZ$rFU zUCO-L6!LO;F_rO4<>bu=4^9scmfOws&8q3Ump3<9m{|!@>Im6)e)!(6KX}vo=Kt|> z*Mxl)dZGjvljs4mAf*EcLS!st1ji8}kdl!=1y3lfRiWF7vvTMxC7dqyUq%dC>CNTd zY_Dya!G)1b4Uv&)a6JH!fSCOG#S5+V@zH5fWYKj)zsn5^>f5#%nl@9KV;bjxoB|O5 zB1Z-wsifl=QVQO)b1VpA1c<;aCGJfp>E1nSt#hv3HT!#u7`-Ir`~W0Xp&1C667VqC zggr@_X|Z1IB%xB$4}A=da=D9+GWqms_50uZVUSrdo&V(Y%dDKO3TbGVGqMFEVY8_74vBPK{9^x|~o-sX4l~U#-@c>vLmNt|y#Y?*LNNDxXZI ziT%m(9ivO7aw+973?aBu2s)6f)yfUHH@}nRI)pGXMX3R(wr_*UwRb^SWZJTAq$?+*On@lUE zVoHm}!rI=sVLq8#Yj?YybFL_gq9_2MX_^$FsLHX^Oeu}wYR)-eNd!_#5|C1YghB~L zf;lm#(Z~}KDdM?4R5n}{GlA_dA-*y~hj1iGCC8H8Cc_Gky-}U|ATnNz)PHLmIi9S@5sdKI$ zTx9P0AzDX7yRJpVJkMiHA*7IRz?rP4aaG_U45 z!oGy1AFem+-n)4<*)J!RF3!(iTy@t{D@2GKZ_kF5Wu8l|wN&FfG9qE(G4(FXO>lm< z*-9yMZ8)&C)+miqazbVhLMUa3C`O-UnbJ)Qbr**Fhq!HrG)xyPDCLYh6flbHA*C>q z1AcbR5h6weOet{yCLt9GjGUOgYva*Ot%qF}mh`tQ6u|I2NfMvp=V!^|#5 zsU#@oNVl6v2B6VoXyo(#%%0mT{M(9G>w#Bzg`Z3mUT#17_`ey*fYn7D76<#M?pf`p7Qy5o0$zUTb@0CqSBcE4Wtbv>Vt2X;bN^ai53kdT=fGqA>?5KxYl zk(JjUa7wZES2QsLgsSz*^LAs9TgPCZs=)ZP&!m zJAX;5FV2m|@Ox&}yk}sTVUj^utBNLIo^iR+ntdo%Rp4AA_F^eV4TUI_04vKUtDGPn zcyVE{&aWO1HbteMNQXTE=KD{A)q@_c7B8q#66CL|!RQkc#8Q!VqhIghK|!L~P#Cp? 
zX|*oGIySfqCUZwTDNbGCdjNEZU0M}6h?VI1)D`Pm#ME0@gnXrCA(2iRv5!6+lHTuh zc4%6QGk0^pHer37^i77nG*nOX`c$fNG}1_09+i-}p3qDVe%4Ez&nqK=pK0Fz3W8wL zILSpHsEqwgL@B_EOzwYCLG||lX$6Km=wME-!k;4f&mV zC7nP>3qU?<@pI`wKEFqk7XIdNV=J!7IeR4D1NdT;Y_dw#PF$eoUbzjVrwd+O>$n zFT}5|zwFE7c#TjM{BOs&3o_@bPD^Z;PN{0Hoc)fYq&IarGGtQk;1v$oa_j%mwJiWvdh$uSS>SB@GNF{A8rgtUQyn9t~ARCW&}TaZ<#m8BeMq8pB^=DNgp!5_ZvbMJd8tiE^U^~gCO48N(g(q)= zfx9;32_fn-q9Vr!b3oTkyVv-*OO=76QWxPwMk46~%zE*?y%_ zH7}D+(kk#M2Fa&?o>;j)x@ntGLTr(qmzUG~Mzq*1gwr4>KoA6QU3fe7sO;c2J<+43 z@V(bp!%CqFry>Y@0q3>RB8aLkVo`9JGK$ucS3eS<9kPp~IboLD$n>Pn(15o+X>&Gg zmPjVicWOFJ-e%)-B3_50uWjdE`I|8#za03auc(qK(V!W=iiG^aS7i;hUa|ye!BN4) zDZ1;Ae9!*v#haO3g_8WwuJ)UVSKGqp-A_H|O`76vA;jGW)eKqwB5lpLJu$AxI5;`5&RO%$c^XUqn4f~%1mj6}EA^e%2C{PSp7q|~n=v8cq;LO6 zAKNuo8L7;&SRFV#s=V)p)(>Y%T(ybsNb#%Tqoep)4{_RIYa$iN-&P@)hP}Cdu5@O$ zr{}M6v+rBf!y_Rd#|M@|2B{ZX3)4Ls3watCji&AiYu#04JG^sdasNR7{JGcF+dz-6 z{L$p1ytfKc`4aVhe$V5$aW-Vb6ZXsa=^9R8fW_M=r2so)JU}g-cLW>30Zi(yIwxP- zvW!trQ%Ql1f41`7)55Xj1T2FXi;8IW(5_sr@F9<#6^0!!MM-?wC@oDHu*M%2^Rsex zi+qeM#1^a@q5d`Bq&At7UQ75ixGOM##th0ySQLEqVs|?77Y_T(X9TwM%cKa(=eJS{ zJKo&|a>)HwVwZo{d9f=ezh+)Tf$Sc{LMHn|{b@dbM%|F+Gs@+C#p^6B)~Z3GNwJJL z?mGr7qCM4vma(88P|FHpz&PxeTXg9WJw%WCC0B-R|r~k6O zbxEvl&^eX1mYMR4S49ws1~R9_wb?X<@02EQ?jje{^68WxO893n>_At0Ei-C#qZ!`< zBO7VhaT=$x5=aRW#*mmM9ae~qt;@vW`PMsUJpVYq)GtyPV2sH1s-g<|RusN3o|zY) zB?mNoo?)`k?~F3@_*>K2ii^kviacLp||Xy9XQ=aMDH+kjIlM5=2CN;2b%d&cP1Za+ABO(Ci%Oz zPM*dsj}^=eN)k!(z5+GSkTw%DdL4aWr8vMVjuKU=;e=@Z>TbwyqjHx&J4osZqUy@8v>ipm4%cH9;K4nh0;w9R)^+pL4SN&JWiSjr$QT>Yt9xcWgCuy0B7H%gf_W;?g26X^jL zInrpSOcjil3hhnVR9VX?rU~DZ_XA44D>h9_VT6N5!ffgNo<$=|y|qm?m*#he+n2b# zJqU8@;k<7(O!XNeFXhB>cXzdM#?idVjjw3@;SeamVpon4eV-6XbOxMAkv+2k66ssE z=|rCo2*+9dn&F$4yKj@z7rACX#ZfCG9S(yT)=7oOY17f)r zypP12Ae8yTv~V8|N$`j={Nu6# z`gHlvu39Z8kf?t`t3XeUSUINa^)bG$f{vY+T`u_k^Ni9zmRBiR1}A5$ zc#1<*)No>`Rc*!$tKuuJ(c7hvDSfcyW$F_RBkMowqy9eA^Jcs`Lqf|^Qua^5&_}mh z4EKR+KKb^r8%=?62>5xlsA>@{ML481AB+AF!%U`T98Z4@(UB(qDwb8YNE9UK%FbeH 
zZG=LoCs+Vk5t85Whc7mFSqu)>dG%TwZ4WsN2mk7c;|Pr*c%&$oRIj)Ew^5Y5hS9By z59FU7kUd;=JWJ6H?gOi!qF^+F$$BqXQ4fUzLewZ25Q2JJ(&M~UAGt_=?*+iR7yD*+ ztNu5Mb%C%qkj5;#!f%4c9ZO7r62D%Z-;Y_C^1@Hn~2O-!6#ZgeKoeh=fgm#J?%ufguvGy19GRBr|Y2UVqTZ zfc~I<>>l?c?0`L|_4B-6?}a)sr{s6-fM(*E>g3g{v8xNVIsCzt*`=<{cD&xs=;^Y> z*qU9AVoSQy)hXq|Ty8f`#Z;-*{O0hP(^sdrPntHG?tvK(AB`1e75yhXg(ytE;ekI+ zhxnr0IT!rDUF^O#7GRz-NIlw`la-NTpWXU}#r>;^mqZRJB-*(Idb|tZIa6|K3)E>D z+`~ORikvw0ocG1sxB3DDj=mLj>XO3RoukavU~;_wm|vJH7bi7v2IbHTvw+; ztf;bTUznY#fd(UNYfc`)SF55REgh^h896|3+BgfCQ)~=$2^3w+%U<(gDRY`K#NU?j zd;&xmIOI;70sUi#mEkjS%A2OPO$X-ZOQS~L;1`oxB*n(f97l^aMoWu7QSuE6)lA%l zY#_=No*QTex(+8VW_sMEmWXgwBU4hQ90(3} z4f|_4_ZYcpibze}ObJ@`r5ZcQ3bp_miCJyOFop^EuG5XnQbcQRUvbAC4iyhWwUBMR{Od3nW}d_V?N1 zzX50Hbjjdn*%DjRDW1jBQZt1)zmtkk6rf0Vp33Z^vdT{nLdgLh0C( z$6bw|7fo=dzUTssN8FV3py;$$>rI2}(7SMZLDnTTp}I*iLxi=a+>0rgG!tiH=o&O> zxO%kt{geREV0*8y*ZL8?+AwMyk9MheBwm71H!L{X1t45E9$@wAID(EBUtMi4ewwM3 zlzadSyW5Nv>-vlxtkwjJGXDpp#?e+Os5K-g2#W@D7bT+;b*ziC#zlKpTHXD% zje@=0`l6Xyas83=T*35(%wUk}{i5;jbZ;_LQFeN~*ugzN?5$X5V}))X^qnHhSgkQN z;DNp!r4upjP|@oVN`_IRffZRHyL9#L76Ii+5F}kIhvyZN)xx-Hvg=OKW@FyJ+0KH3 z4x^%jnK{|AOly7p-ltE`P_k>#j&3E4jo51vAPrrVdJv5spF9|rE@I-0fSSt%s#2=x z#(tg%6P^RZ_Ww>(0=)u(Wg*u``e4s-oA)c7s`LA?lf0Weg z_@g6thQsmXO@gOSsuIC2v3FcV27&t)L_LV9X9C1F`_asYG*^u~_VWKb7_bVbe`AOY)-qlU4iES<$BTmfA@LpZ(7QdjLwXtgH;+ z*wH7F{PhJhif0yb_wie0=Epz({TcOWD&oq}+L-9x^NoC7Ft)p7Cu*dYl&Hh>ckZO% z$fdR0fTMnHYxigQq`?^<74kD~Y%Bm;=`7^9Z_NZWRIU|Q3-AF#H$+!KD1{pBMO|r4 ztymEj`Dsv!B_-26XHCU*rrD!l#$?8;5Em}J9Id!3o$8x4#Ty&IfVx#NbZ|p{G#g1mn(^Aa+uFs*evnub?S~Hn|0UpWZoEsLNHn>#aiVj5`2eU zOaW-X&sX;iNgxCM+TGpwGbbyY+j3O+vQknjnyJ!2%ayL?#a#E5=;aJa;W(d?*P^{^ zakRzp*E&Dd~KkJhkeMpOv zmjjvSf&X2&XOwB@n9v}swn%+j&GOSkeYot&xqpqGT`rw?Z9huDpWr;_-{$H99(J{avSj-{;f~WYU%JT{W$; zBzv7Mh!UnwW*@;K|5W`tbD5blci8FOsGX?kGppQl_GMkEKizcLDN4Ji-SnYx*2vsi zCvAynJ0f9p8RzSHvAHK{;PmLbxz};S!o|+hxs?@zEzQ=Keg~uj^9${=UUOlWO6Q!` zrk1QXN7q`m%rA!y_W$@?X~eIXPG?e0hT|kHTvKMJPGi7BhRSS?9hf04Np8>JG6`(-L=3<4CwH<-r>jw6g+;Rg4d`TAjgGTzb 
z!={xdD+3%`MB<}33akPC0o!IW9+6;e^ZOG1m>pTeJmq0&$xp6tq~bldmGOO(5TzhU zmz~j{pJ4oLn7_Jy_$$8}>F!)vDFaF2w>#1B{I7R$4Zz1n>BGO$)|GD*+Z_%qov(^n zJ)}1Z1FNK<(d%)%HkH|uNZeRA*VP5z$kvuNNvqp4 z=cb}b#P!=sTc*E0L!l`5$$%ZB=&j9^o#=i;^3HgkwH5=irXYH@w9b>AtH zU@7FfFf8akIFXeR)!re!2+nspkMK`p7JJ7RMxmAgd93z{ziYiC$#LGPu7@>o19JG14?ruiN$)=U-R*29a=5l^c=sH%|*As%eD#Wg?H1+<~N4IX8bJD zB41AvPust1aSj+KS!q^63M79@%U5XdQTed~~&+tNb`sa!NKbGY>Gwq-r=+&_8;TF|;eH zOblSG7j(& zHx(qfBeYflLeC)feAiE-80PAH{IHUl%qliE1WZrvM!BlSnNLTq{5B^&ih}%D0rZJn zR5I!04=p__ImnPeuWLs??dt*JREMa*;K(3skR{DM&hgTgQE~Jt(Dif=$V;Yf8s&~k8Zcz7ZTSl^FgPmS z6s%^_((2DJoqoSG=BsYgtv9~;suojM9v6-vOeww3gyYBZ!er+-22r-vs4z)aF z+3W1Kz|F|bSL%vg>(lZeZN6{9JDtr<)-(PJb!!|f*!yTiK@aA{7S=LzTjW%iL4!1V zrmz+jdhMG}wS}C?qyPOGZh*mVRfhH+C*Spmzz-CO`dQv72~xIS z!5U$Atw)X)2zGMb% zr|128V3}udY_vGxvv}L=&l>b+NlzL{>$2-|LS=IEmaFFrre_JH(^k?AbT-c477>hQ zNIOK=77pPK+;u0D%GD6F(0Tu>mZ-{rlUhqrQ>bMlz&a0oaBjbH@7~=jrIH8FDqA(%1LahzXy?~%Ev8+vl78s6NJ#`B zt~tna=M}QHrJ1%+zv!F-$ZVRs7O3{_>K-BQkb(BcJ@9W#gt35LBz!W*@$6Bvr1R6G5V&?H37jexK7f72oN z!#;(5MBj2Msr48}zzB`2OugQpOKT)(IKv?fUp<9*bp9LEDn1YTpuSeSKYCN3;lHn| zXGZVBy!DyF$65I}DEVK5F`y_iGAU8=uZ7A%pA=-xT)Jep-1Bh#t>VCzS|M=zUpc51RsEmhw{SSRk;$0t z%j(|DpFgit><}vlD=Sjq{+eg{k9>xJ&a2GGMs;)|naaY^>-9&k7x&x8IqbI*B+IOG zgd2q}UhFt5=+hiRh*=Ro6v0nqHuObP*&8_)RDN*4; z#Mgl{k0EaYO7|vjzg#Z*nvJl*`?6H;L%!lc81D6OKUQUN|b1kX`(YGdG((u6V*--wO!J_?|^V zku8$BBfJ4$l?|sJiM(_u`VY~?l&`cq3H*WNMhJ~Llj>1TgN`-yYNk`xM!ZE?Q%R=y zi2uElF^ z{jZ^vy1Gn*xjlODRqsmdf2zg~vcPF;C8F9{!m2h=xD^p6TGZ7Zi?Y$rA>M{JZfSVl zy9HhF-Tij-?rYN;VUlBKbS7KoLG#gIVJtj|a+B3BHV8UH$wwLfrN$T6(sr@ATEF0Z zcusTEG?_K=g(?_t4u76EsHz>L`$_3>lodJUv$U=~8-F`V&yichRWH)ZIW+&yKCwIu zGj%szYq4E*70Vtuj1#D7?;thr;rf9pk%?IjzBM_zoRH3iQ&NTp^K&!MQ^Sfry}yA% zyu%c%ajSc4JimC@M#EV|$=~f%GhwJ?ap&sJvf)x}x}kaANVW}!m5kt+RXp$?hF8s+4n zgUU?}^Dmq8tTtwEzXby1oTaB^DTMF%%K9N^`~kI>z#E)^g-8S!&q4OJpp*IX94s2`a^YD0>x7AuC#;}zOkiNPHK`DbMks?ILAevt%a&xI zxZjpR_&NXWD@V+YrK=Ld)X~E8c*Qo;6Ta)*xT9Oh7|E>`!~}UMyWR;`O3b#$0cjU+lZ)Kih&6KjPEh;#?(I?uY#X 
zF1~f1TXT2gJbSF;-GqAh!=Tqaef<7AybA^fv~QY1TP~ml39eHT`uamGHyCtQ!wHdj zq(pi-Ut6FAK~Vo&V^Sq-FrZ>I>E2@M!i-DTeC6qsp2?pzd0l?u{*?Y*z&&k)W(}Hu z=@Q)~K=tw?IN%M1l(g@pVtoG%@quf+FJ{3Xwp_iPfXjef(zpk~sm-R4GTogsnpRpZ zOg-fqD$`@CSEPU7Y_GqP~1xd4HEmLHyqoawN&&J*wSI`bpz@~PF=8-DOrs{nE< zPC+-XVz*Y|^y!}|smy*?;}w&SjH1PAY1`(Le?NjQ)17ZzHAKJe7voK|Lb{R-acbZ_ zs!0lx2eKMxMb{H&xU^p-6~kGLN$||Fgi}+{kSHe^4y! zY)l@P$a0xv+WQzKt$Z zS?0mY&;d{?;LG)UQ*pPk&hx^#<}fGg)7 z0#oi$X23NArHWA8*gtG6|AxhKWba5$xddKJI2PzRCv^vqOw$b(jyEKM^354A8JhN( z(v1G^rWxmI57dsv$R;9frjN3Z+RKU%zZx_zvz7%}dg~JSzN8<^j27FQuFlYmb60qL zCsPQfJUk;pD>i}wgP(w5$4t|z4r(F8KdX83>zv4R?{^Q9Q|kh9;9nt02WeKQ$wr83 zB>Q)*z#mL-$cM3$ku2sO-AgCK9c`Tm#T!b@?*^d|HSVY$M^PZz3ie`C2>Q%TpU6~A zM;7!1BcRLK-S>pXn6UG$CxwE(ch>+lGy52oj%MITqDFlMtz!j}zdMCP%JLJ?&Y8SD z{6#i4`7h(SK*s_yQm2d0$y;v%50~me!^0;u)ZfdC$=}uES)FK-W-3{bZ@!e4G9jm5 z_f^w^{J^yB?UtN|H=L$!wtqA7@H{G&%Pc<={D$Z02=kRP>2DZvu+)T8#s+nW=W$2f zd1@p06!hvxVqO@T%W~nRU3%Gn(xAn(n>Uqm|I?@W5C)>{qh^e6W$#N^xAnL64!;Ui zMyi29^^6hPw$$6tO`Vqm@wo@jPbBYP;ng6@MGO-oj3PJ*Y_}HCLgW+D7`@fu+~SF1 z5_jT`@qW~icJFtC^2)uX_`?3lpj<2aU$j(-P5 zLjgZu$mUUw&sA&Qr~CK)xAqrBm)V1XqDrM|1&18XbNi)Gf@ILW@%<8i>mahA1ZgtM z*mi2TeT#!_uQp4S|5e$<(h@1q*VTix?80{iy*Q;J`4MWn_nWG&l5hLtT2BuJYMu-F z2(t+;+%Dm8_iYPW6m$qfcgEanDR*7kR>U)ALT616aH(`t`@KL4;YvCH(2oBKX=g^Ge6#83-k`y zF`U(B&l226hF{e9v+ipJl{8Bk#1yRfzO;^432ZQl2`#{!;B)2Z-f2lmn~NFQCe{Av zv)GxBSz}>YFz^l*J0R+49aYTDmE#pV;6Ap#z*lh|0M53W3OxbGZ_Q z!|Q(7_gTTHzQax_S+~yiL13`o)j@R|=;t&FU3-PEnU{tsgI^cENfUUEX4EYfb&VGN z0C+Pt1O^n!kk3(*he|4H-}qfgEp&NB={Fg<@Z5fPHp!>j-fz&5u}Dt&zi+AK4*PgV z(~2YCy%vEeacUy^I>g?|n)5K#Wzw#HcfW(OUUDuM8?%>v?ywN(w^Dl2&+R_f(NDLS5ZTlYR1r#)S#Wp|C>_En#%EK!LubK2*^XIX5=5gYA6CXc zJvtn5K0p-VKhk=yS75~kqMhGT8zS^m9*MimmX%HHb{|+%G9%eXj!KQKReA&@ojl*+ zx+s%;JlK>rp0j13*Dp%Nyn9gl3hNzO(H~s_%@Goyn7S2@42sb9^Qk(XRUSPt(hr?O zYW}A-bG*28MhftIhsz=f_P{nzP8$C7pATPl522JrUk>J0RX)l~`QH)=vU!Koz*RVx zPMy5HaR%r%*3BR)VtkDC^%rg$JE&AwmWrNv8pl&Lj7f}KV#ijW_X1Xh(#LD`$ z7J|RShYz<(OOaj50%4r+aM9ufB>TxKxwFwDu5|9Au&6QZ!g6sp+AUn;#o?uj<+04J 
zVl6eaWSI-Geh6BQ@o4IS$)n75&YjT5!y}>P|#71 zpRVaA~~pJ5l(^60 zFqs8e#kU*xxejd}y@j4;yM;o?b!cJX?H->rt>EEO%1K|olv-7?;2M^)>%Z`%eq<3_ zdv((W=OeW1xb0kPM%O8F%$Zo4|8>%4Wq;nRrbk+)<JwVfD>uS~c>k<2PZnedl8tOzPDN)MV(0PxNxp)xO3LN!d(u-dKNe5rDHU zb|N?MoZT@*$*`6qd4^H(48@gHg?B{}@Q&O06+ z9)eyy%;1Q~7TOnZgy;#M3ASsEd@in;c zuSb&&W;C*un4{?H;pfpf=eS{y0s$YLO1LjvTFqPZKF3<6Vb0m0$yE1F-h^w5aisx& z__{2?WUP**jNnoKpqky%Sd(R}I!6!S_??EjTmkQY0MyDmheAm=xuv#|pZaTGwaE?Y zJDdQzuXttMlNzLw(g19c#(eT?g=+5DUX)ufc5I7{|CN}9!K^7ayv{PrJ58kg!{T$x!Si^>gTL>)+W?pkS@W0tIWAtJ4+=e zaf;q6zyF(NEyAkW*>D#p-sLUQ>_8Q22ik1vp)p(H@04QcDZ`k^f<6X8)4j8nW!K7U zm+$dFuC1jt^oiMEV0MC9W?Qor5F|?eUK}F|i%FyOFkQ>y!df;@O#eF=0)YmVC~1|v zG@6Y)@K7#T57ZyraV^)81)XR;e^z>b4L4(7O9Qhqis)Mh(Pzh)PN^f3(fTTguulZ6 zf%->>vw=+KPNT#D`1c1{dFl7NVYIZ*)H%_8v5EAF)T}&k4R|m&HDy$;+=Qh|&tN;I z9n76Z&k$#zVsVbWh^F2C$NNwJNBe{G2juZHb{GLLh%>G8x$7OloQ7#{+_zkP; zA>7iUAr!8hQnTu3n}%E06N28o{wwsa`_hj31@)H%cw}iG6uECk69nNrPO}gK7OTp7 zf5Szur$r808Lggnd|X#N_>`tyjQa9UbNM_7V{K$#F(T|jSz-b%Ezm>00Z)?&LuW9 zyT;!#$E&8@RC7|`xzE)me#O9$1w_>LT&mF!#BlmBPkqYe{cgTq%C=c(NV*eL$>5td zBm-6}%aa>)+I!wAw$GR=DsD*$`s@tU9tbhQThYsZ{saa}%L;DK6BIAaYvo?+E3!le zLfYUzjQ^hSz8yAIWhnZd@~?i7r9hvTa#~B}8`qW=6jlMQ3{$;v;s%x}xvFq>FeDM-?OEPBR*Pqpvr7{O?@}EEy zU-7<|_w=TP{JS@2ei?cv8l4Mmc*PT{jZBu47F(S7oM4-Ac3|j4w};PK;qe>yOV3p8 zBRv@@|6dYM#!9@uec!k^@WE|mKz{t$3B}C>uhuwb94Q3+3opYNsU|IV`d8GZbY(r$ z)+WyE*x+U=T!wQMXBHNKhUJI(72dtNwe@Se(CH+Ee7a`OUv)~W{Fy=zA*BuKZz4XT z>7oEpXlmluK{S8(=c1n)6j4#$W+}!FCWF0yGHN}hYEvasyTULq8P4)(G+0zB)7nTk zg^Ocenoog|h5YN0T7IAFhc9d%L}x-vyr{iXSbKgS8s)#Bfe3psfA(dbR-)@6Hn|_C z&Rd3bRB`cKgl{+Jluxp)LMo=*=4Q7(Wx(@QAAO%E=XX-WV7}x-g35KC8_~E-4tsK9 zswak$IVrMK5`rJvh@oeFG%%D8Cv69%WQ-xB_|*Nj^Wkpw)t#;V&uQnL1euWJTX%Xh ziO(dyPiJcKMTX-17icAtq0RBBqp2$THVhW?N2dE^QH&2@JsqMkY2+%(h1!hGJ7ZsP zz=H!I_^9$yE1^I^2p-hiv%M~ebVo^T+`)%XNRO=%Gh;7P)(vF)=)Oi_g^5q(21NUm5x?CRjDjpwloG%Oe|BI`-_>udeYb<`= ze`kkk^rS%D^vy=SbA4lFYy&P(M7R_>bJ!B1_xH4V&e}*K{h99liUSYagFU~i<1{Wq zha!agBdQJSY(F$U)(>eMkm+ 
zFPC&(hYGCfPm}1x$D9nKBD7(pl@5CK*GfIJ>qD_<_5Jy4R-jkl1Z9lSF_R^2tiMZd zTs8y=#(w0cUc{~d!16T{#hOVKn6hTTRQb;+qY48|TBakeY124y&6}JGMrs^T9 z?O-mVf0Bl+jX&(Smuj0nPd9fH%GY3pzXb&gL_OTe7a(nogt}zAZ3f@9RQ`mGGF*uc z8=F;*XE%J4)<-L?TAwaZ><q-sQBb;Q1*Udb<6V?VT$b zT~5EbpUr&DPpfIduhzOg{atKO=q>J$gpd}S8;W=HR<1O%J5t79(_%pg27aJH z@#P`-9%NZ1>9)`8%rkJuV}`$b`ETTn83sEJQho6pCd9xmbnB1Q&;NyF0BrNi6{_(BhAaNMYy6}Br^L8?dvj&?S_YkC7OsyeA@Tqytdan zE;!ou4)ip*{(J2zU@1|&& zZJWj!mVo}xTOvaj0N3ep_iO~73l$a?u_E|D{2;DGt2_;2*4trk4nJB9ckTP(o0Bw& zm~gs8L=W@{fZV8X@3N}UQs8b*26y2n$qVRm=|3bq=7e?fKL`r+(HAkkt_~~04)*== z(BbFiO;x@9!`3(qFCwEBSS|;#_)F6bje%<;z6^dIq@CxX#KMca!1KN9Pf$S^@?R%V zB>7|h@ZoCcvdt930fOfidl0@(8-ch6oc_c=Es@&GsfuTEMNCLe1gQP9fD~2KjU*M0 zh}Zlva@~f1rRlw0inw7SJr8#o>2*=vwDV74I`-MY^Y6<#oGAZ6^*gkftJj7e&noKO zpL*kuqZG~ixGJ3^8`7uFB)K)W^^;e*WPde-NSUds_FM4Qf4F?s2L7gR;Aa3pm zaBRVJvUxuM**8~kHi!t(fq+Cnmu*t<7u^5ZW|jupG#uL-ZC~A>CrfY#sld6Tq43w+ zhYvb^pN8XK*It{Q>UE#7g6uYPx33)Be44vfT#AB?`sRwH=s)sev21Q_bj*k-{O-2) z!OGZq5;vm>hQ-+cVg+6uRNTD7V60{cCH3PVs+bnrksNoAUp)y-4TvcW%alPC$L}Z` zpU}0(#K0-}b?OvHW31Qcn_=iIGi}|QeIVjc0Daqjc6n3AeEVya%+ga7E z#vb8u%~ds$RkD|1b6ai>|6b<~ZZO+QmA`&8qcy+KsWP9*x1_v@X*m*89GiR~pL#Hq z>vee);I(~1;b2hG_VaPPwfEYkBffm3GKJ9kuH){%P+Nk-_V8N|&#Mpr?NqE?ZRXyt zDmf2;jaeUfSJXwqbslgjX7+CAt=8(yo&om}4y<6DkaZY*KbH3X&A}q;L z@a}iUly=>p(eD|t11IzEaW7e@>0;ic{@1_%-N|S0*h-&3*b4549FNM2n!778%;b-E&Pun<-zu8Veii8r|92 zIeQ#$zCe?;F|Ex4sC{s9HdtcR9SBWh4E2-$6N@OURqZl;Zfm64B_^6bZ94Z}=%3tY zyK8F8>UC8bV>jlbivR64TdX)5Jvv#ps5c3izWmXtJ;LI9=8JQxAk_V9tmGenKVY_* z?cS2B$Q(F`8*Ck|g;9>yMwh2Ttx>An`|aw_0U$+z^Yl&bdq~+6j}Bd8pnrq^oAp}% z+}k$p9>r_P@I(W#yp#2U_+g=3j(hBY)+>_g%#`5c$wlv3X5;3bS9>j5S#+Qpe5@`a zC0_h`@Tz}bioTsR%g7IaK+zi=FGDdgf)K0e=}d~vld4_r;FxR0nmjQQJsKB&)Ltt zfA;2}mM+C~s@BOoWyMQw(zt1HWMpJ5iOXpf$O!GVELYA4{JUXZtkiZm64>(1VQr&r zJ2mj1ZQvnG;BhC1knpPFL3+K|HIA06vgmhK)h!o4e6@vi$On(~e#Nw?*Ef8h%`#a2 zce%efJpXvaCh!VUlrR~Tu<{wc_}eCN{rvnL{!m|fQifn+dxLh|DT9|x9Mibvn~ND; zJ=xjW>F6N4#$l-kjd~;U;5B!d%67fV0q0#cU{b6c#*wK2W2D$7!UOqPuCBIf+ccQB 
zsUh?V&iWulqtlp|F)$cJ$)OC3#KwLqe5|ZW&5cIE(1m|n>W%X#3zTC;3qc_79I6Gr zCAoTvs$4R7(ezRK4`?XA?nd+1rnyQ&2Gd8O0r)^6cA_sUTORLRk48D^rLQz355J0D znuv7qSui)vJBtHP`VD4*4B-S z>U;B)J!*ZD`8I@K?MqZTYr1zI5&e^$L%6(l@b^Pml04|P1P;|Nz=q5$*lT~r&&8v< zAdZ5dsfiM@YgwW_2Q45hnhRCb#hH8vbEU!X*WTe?{rQIHCP+EIl3-&rvwyd{5F6dz z;t30*WVd+O69&1?XGSiO?mo;4d#Yb+B9vK7k7{75O{^d(o-c+&kXF18Zg8$bX^Kiq zfA)5-topRf>~}Q%?Xai+_EVfgCVG^|++LN3*4X8(_o9E#SFJ^VSR{N8c1f7IGeM zPSWam8cAm|#*aiI;$k&B#o7&d3U9$c80>$J60+{HI!!kJlT$zDH2`#?8mH zO?o_w3Xc`IA@R%CtWcTx3l%@!(=068;M%8%?dpHNQ43GK8t!>Tr5;nzme?BKR%2mu zvKAdUgm2qD{nFa}PewnknW4;vuht`O;bGH({lOcb?VkSZMv}t!R!rFD*k|^+_j6T# zS52ODbQ~=wIQ(~wc9jn9A6AV@UvdJrgDXl8c8mMQfr4*PI9k==XSKyTb@mOT7g%d) zoz1gW0~B>45s2(N*Be(nyA}TyV?Nmtt31^!vD~r?a(1}X!Y{q%n9#dI(6hfwbgv& zomlVW8xWw7J5noZl+tp#2&48p|NDhiJH(CDdgjdLuQOd&0r~O9-QQoH|4CC_;Lg%D z@McSVo~RD=X3za|sf1VSa?**D#({E)k}t%`DcCHpZ^c58>Z}MD1s6k0IS)v_3LC6ux#k0xJEjAimx7D{lYo28;H=90OA_s zpw7==?&hxD04h0ZeV#ry8uOlamXE@d(h&|&NU}(AKmJVMMu+KA)o6#8NrbUe_N;45 z3gW4vtO9a_=%GRsJG*QGS+a{Oztgup<1N+KcEzKeU+av-k4(FxE@KMuh}j=UsEwq7 z3Rxl`@X&O*0$ly^xB})6`w%z#(w1mikmdBHyc}b{$u5Yb!0qun48AmFk^Jn%@!$=4 zHx+~!?-C+qPJcXRbdSXW>yk6GLm4ByQ^*6x?p*Kx#p~rR%h{)0pQx?3qH5;lRB;+c zhDA$@7mNiOd{rGp^K_9L@&&h-=0=%Q`O@8;UEhtVjAj}S=Fhwry^~%@60YwI0?mTA z439`Dhaur9aJWN;I_+WJYKrk7^~a8hnHgEpigo|{h%Te24cBgx#y7Q6L7wtjt<6xh zE#HAjZcjfmb-1P6(<=Nvu@mNs>sy_a0Z;$a2V}}+>kaX>nj!jeHh607tOFS41aHca z@t-)LuU&B50&<~NMQ8hor@#CH-YNIn*7zmv%U;?5boU?FFZUuNSc_-$A&Ug6>$J|Q z`0J`0Z=}Ls&kEC)Qo%4o@hIW@z0#Tx7s*eZBDa%MHTSY-%T(P)x8p5ifj)tCO*#a3 zJv^lvHR^fsLh(p-1w&5vKL~qY`oqya`{?dz)f3d^2+wZ|8UjLxjBhFs4(s^l4pTRh zbF9p(;C8VP+xs7=>*ct7ssFxSMsG}qxC>v;Nd`@|k(D&$exiJbE%d08XHyUUk?(CO zTvGKp!)kPt+&gVz33xB_-NewZUT@Z-Y3{!Lju<=H;i8Hu+GAt{5n29?jw~3)WadJb%uYSisdfA{B&jAzi{49LeU1WJ!gG< zGP>feyV2P9GSE={ELV7tI0_%c`z{mJc)D?W=nEWtW|#aVUc9ajKk?5SQLC+Svr33f z)rUWuzd**}AU-81Kl!L=q*&sDw%+dKz^J2_TntdJAkVDhBt9YtA42h&2w@Mb>=G$O zs2QOJ6#dmq%|*8~=RwN{^r%1r~UnC~iIj?q!IO2FyDbx){DpAS;^Xz;N{mBGU3`;UuKhTUBf;^uG%V 
zlE{Ba9+A%93N9~AmjFRt!-~`U%THeLJrhdn35_9t5Vykh$OaM`>QRRO03;Ax-*Y+h zfOx?2>eK@1-@#JSQyoS7t%oqKfBa{V4Fpb;92X-8Hd|qPEAk{31gdJX&5M>&z*;4| zLnVe1%Q<9#RfZ!O>yqIdwh!fJy0C6FZX~1^O?>cJYNQoOuKjicCO)8pGfs{JjPz|Y zffbuBzMw;LOqHO{;wRL_X?GUVYuqsECc-EH>p9cPUXhkY(dk=T75Al%mbRwZg(_Y# zL7O2{Zvd(SvnJC?fVRpCDN~A47p!mwS*60MIF$P#rr-!3{Y;uy!?fuL%IQE-P@(on zQ5x?8`1hQ$K4L+A_R$nrUdsqxexmUO;2V}_08Ke&tAd&uYi1}u;rV`XIvkE~I==#? z)?_co=lCrH3+Q&*i~aPEn)-_;x_&TI5n}+18_dhEyKfg{)%T&IUxmuEkQv)OEI4GTbDC>AtNPV%mZY>a8$k@0D&(Jd; z@2Smt$)1=AEBOPa=>+76~ShwZSF8an2+w!(i0w z+N2~zvL>jKHUe8PypCkw+p&EjRI)~lj@J)|ioY?0S2fyQs4rz1DovMv7vOerjI!mM++NMpL9CX9P-dJ0 zp1PAY3*tSu-iy%_+cYfvHkZIP58t?5Lk;Gb3v+#bb!F^uvJU*5&zGNp1UZf; z?7V}61F*e)1$4;!z>V18=vCl#kV>&PreR?H^3wm^-K-nZQ7SK{e{5?$ce9WOc^U6* z2DBebr}7eptQp0mL^?12hBcj^-t6DpbkZfo75HJ%7s0pBEONQFljxWH zjRl~0rSE@>-}f!n-eq25^Eb_(FvONv>lF!Vyo^h3tV)eXYqie%=ePbkzw&aI{Qbq@h`9sldd5Af3*Spu{JSSgYAu~PLmi?ib<()*KAx7!J& zM8nqgE}vDYHf_}hGA}s}9Xsgtx}wE#njcsrZ)xi8v|hZZyZ_LBA$ogoKl4ITDzJG~ z#i$xYr+oR}$=Gc~ozOr$mHp0&dc*C{gGK3T+|HvtjPRn9&QsLbT4el@U%MgQKgdVn z%EC=~SjNJ#vD5L)-nKFLi&{AC{Go9N=TR8gyGwP`{w21kZy zR&c^btbLxW1e&digF7(y&)n&8kDlJ+uBg6gtyG(m-*{2){M}~+)fs6Mm4;^J1s_;? 
zgz2)f#os!)cslRVQ^TH6Yuw-C_;ZdOCJ z9J19MU}t?wwu|HO&nM4u3~V!YE(7fAxn|{}A?;lU*W7#{IF15w53@|Mf;=6$J1#lB z!FxiSEY;5pgR*#6b(-kZEUnMXH$_}DD$pKc8StM@gJs&n<`wVRrY%1kB1x)Q=0ur{ zfcrbru6w{Y0%Bm$c|r^=p|rw%lbkF{QlO5-^_FZjvwHVX@*Q(YsB+--;^J!~6{myG zMl%Z%t)5gP38nKBK9{GLq#ORVOVSd4*fn~2U@7h=DcDIG; zsDY|Kr`u>X zspQ^Z07V*ly$0WdnJTBnh%bBpIo-Ov796&7d*-ogD$jnn-WFo2K5E4rxz|p@ekt`< zcDVa>1!okl60HJV3dOXg+KKxBHze9Yh>uAt5~^W2TzB={fR~n!W&~f@10U zxk29n*9-g)q)bO~%^Z`CK*ANOn(F+QF3^-fLa2#- z4B?b3M@NZXw!@gAjo03ez+rJC*X)ttktzFYcq~xdShA7GRi2x>PiyP~=N~0UQA^pS#~kN3W1S!?`qfF%ur8FQ+ zPM}VmkpUVnDoL^0M(qA*Yl!!ZD<36d++%cpvLP}YHG>FCMMKs(f5m*R;>-RXSy%dQ z{)*0Be;HEDR%5n2_qED@%}&lB7-9KJeV`zWnBr}9taEtY;v+KyH{TB8YrTd*ox)@X z)2D-Fj=IdOi(b%&uyh2Yw&5$=wdNYbCDkt(ifZgZgfIc^FJ$LLv` z`+khZT8@E|OauzqOj6&Z`r!Uso*&LnpxHaR-nvzXoMB1fbg57Fnik1^PS;%NG;^Bc zG3H>$wwlR8&-Yda5u~Ni5JQ>*(&TZ33FJ zdU{*7b<564hdKS!0HVCT`5?T^PTvW=w&06hl@_mN>G2Yi?hxs|2W(YmXS=(*5syd# zypo}vC_nGO&uc(#z~KQ304_c&J^*d_U*!coATU1*a7hP9D7)8pzkYoS?C$fs{N*53 z9#FL9Ly=W9qwK5kUEN8Wki{H*!4UcLQ&*W%N4v|)f@mqTQ!E}&-Uu6|nxNJ)8t^MB zQA=>5rd44NEY(KNklK`ZCZ$|s>8beXw&=xl$E~$&sBaS#h#1$dwaxi&?K^qtgqp(5_w0NcW`3Xc`*znEXcyNL6 z3m|Y-3&n?P!;Zuk3C8zx=+}q8YEZ*>sI7|xRS4h9f<{eNEXtpNS6{sf3XTr}e}n_ecLdCM;}dXD5Nru(D^wF37BT4c^A|CYdi zuB?IgnlFl>3C9v;3y*29p}=FD+_83YF;F-%4>OYxK*vE{rW3+>fRe>N%J$eiCHO%#s zU0dfvh*9Pxb>ax%bqDMR790M4_s7TIAJkUEDjcQObV7?qQa?*f$%A9wR!^KHSXtSz zM0wtNv`L7Hib{zF?c5Hn90*N~x%irS*iPJNVz9Sn?NnDU<*2V;Yl^{XD8EChBlrnb z*z0Sac?&<0vqh56H} zhB=@{xmoF0yT1?)_VzqBkjlE7TbZww&g+u~ivQhF-(y2pF+^>z8cV9MIg_beHA-k#ahfOc`%B2Q3YMMx3KH z74al$rFo)t#;Uhtgw;8|g8!Gn`jzHJ*JWaU@R8;PVax7yDD>IRp<~+-%6C06X|PZj ztA;0iuQ7z!tj&@ZlMV`SJDH~%cxK(Af9s1m4i>mhCoDxjS+ytfO)0M$BSkZp=zCmm znA~rU#S18I+!x0Mf4#R44!lkBy0-E-)zXh5c+nOvB83*K#$W1e6@GTMO2p}5=DKFr zDLqT0lIz>cLqc*~FA;e3YhZi8hM!FDOVe}Lp?SYwIhghjF%^z;-UpVmrA@2_W0=a@ z+uM=15;jDfbH*!6j3?_jTA8RWY^Ao3$bEYHk3cF=WySbOC+WJecy@xTt1BXIVGlqx zx4@=6HE_t2MmhSye0D|)7m`fCix)eUoIijC4mE)e456sK!7OfvZW zOY!4evlEvspk(zZsNZ)I53=9n$;5QuT%luKnkQeWNAwJSs5}bNJt$UjS;2~>F4zpP 
zFSe__X)$wcK`8*$GAo7H@DTe{2_zjm3v~>I05czq@)kGA$W%d^{m;aoHp+bsZI0+j-Udh+gp4G0k=*EBL`cW z-;Ry@ycbj4p8|Y>Zs?J_V*pbdcqA;_43w#uB*TfSDdm0@RV)41(dzGUby)T(Noi~T z%4^XaZ!qH0;T7Cr;#6&&u=S6(ggKI0lPy~t zAREz)TFqTEXc?v%=ryvwhWb=58gtINVQ3Y^b4}=dcr<*kudmZaz|By6QQ8dljHWiC zJK)EB^{24Zm*isO=CpLQK4+pd-4N{V?oMVcA=WM~$fU38RIN1{xN&~_*#r&+y)&{= zS-r?b0CTJaRZ7#v;`H=X+?fYSmf1_bl6*QIWyJ150H=ihi!Vu$)?fi;+r)BL%Q7?0 zq&JKO^4-|PINp$;ryt0?yO!$`6lKa&YT=W}zi3J)@Y6z5kUi@qC`uP=I9OP#GQb27 z$%W2S&d!! zNRpfP4hlYS;)ZR+Tc<^;rtf;t%3eZN z@d?HWv@sb33Fk>##aOFz|9$Iq{QGhq z6S#HPd*d8UD7LtkHPgsz$5kKymE_@xvBryosk7M}^S0pwoCF`@w7Lvts{|`C&iL31 zG$Q*+xRBFMyTp`1bFCG#~MbMx2nvH4}I&-tU{MEL_i@&|m*ezo&*1~v{ox~dF z$uEUYItb`qWi;A zA#xl~8M zIpgT}L)$|JTg(0|1pd>Q@CV_<(BJ+a5Y9fRegaCnxA>e&2|2vs3P9DQq5{P_c3SD6z@@QLYoe}$;p{Ka0y9h2WnMd0Iv;wPUT1zCV_@SvX!xSpVT zp5r~7i&U+X{X-_2`bG&ArZp%UJL2#X#>&PFqVYhWC>1r&R5B8sy|mg%k`R%IC2-}gsXPK#Bi(}+ zgbrq$Q^$fc+cnS5bacK<)=m_BZnta?^_N}gUccv0yAdDYX_{X>&3eU2)D z-Wu?!2fy3dR#wi6nez!qm=dxCk4h=<=vSl}!CMGzgNw%gX_{W?5#r}efWxC>s0F+_ zbc;VIaVPJ(w49Z^t&fT+tyB)#oTtB`55KS&#ij?w7yS;t-WRV>s{-LL6sG$FMJo_- z6MUKz0#OEO7=qXXAKfk)j$E*XI6NQdUA({O>I`x2D-zrvR0Zgk80uFUb?S_=t7MnM&&mRrk{cf=M zmt?zmkf@x2^z-NDUJ&;Ey52uyo~dOYE-*l%$djhp>#24<8yJ2KIrWpeoueo3IO9+d z;-J^#i27stQuT`^RAI%tasmMkd#IxZl0Z$^HP8t78W@BjkIBe6bdi4qtby4~g4C2} z{oWcIcB9+#!ZKuv+SA`%Kht*6J9`q-?y|6Gq#p@^#3rWb8>8a!$>+;BP{BLxJ~_7E zFB8E-4}xD#gfvZBa9-z62GFQ*Y~Jb}mkW3%d{ASJT8lsP=_{<_sJGgPI%qybr-64+ z7@Y!ytrWOWeCn|gW;jf{_P znAnT6vLu-pyy&n^@Ot--0Y{q`@Eag=xG4-R!~3g>8X*MQ3=B}5QO)DBAVRLk1oO&D zmS2?!9+Jy7^}ya20qG_}mc8cc!NM-toq#XDt3GXWtnB+tM$OuPO!|Q3cBun)xum3+ z$*FA}FK(~fC?^)eFp)S|O8~$xtHHE$&pxRLYPYCfCXppfo6sZyoq242&^)B)KJPlu z*!XlvPicEL;TGIMm(+E!=Eke-aQf>%CWa4uUFUnbX91gX-Q6Nr->5=9h;)12>+KYd zobC=xL@B{XGnZJu(Yq?>ZlYcN&gv7gV@MXA#8Ki{I6;JqAe0K=KfUgOsFqn;!3%z4&X z>LOcO;5l}pRFO+rHHKO;>MdtF`PdOYkR)>1DVm9pTZ9RJ;yRYtSO7Rb{I%_tv)LcWSvn19?PRvD1dj`d!9b!#UjsbnbH z(g3ghXfROL9vBkbBzFDh*Z%InWkkmX$Lu zF-}mHw9I@$M?vO?Rb@`?hnsO3OkOk`+34!lI5EeK{~3I6{0v+`J+`2zqW^TM``@H* 
z*LkiR`uqwVfmSpSrYUuQQ@!x}l|UOr9<1*GVSmXG z1T&+HUQuM$sJn)O`d+PT&1n5TX1-?W3@I%FYm0#vcVU@Ws=ZwWUfOauO*DO=l;Z;A z%!J<|Y}?GpRHZa>G^yC4zn-%Y2C*VJSwHYXg)C^9(|*(0ZEM!Sq*iRnQoi|&2Itu( zVjET9_~GsEGIKYkf!J1eH?@9+HdUdJ`iM%g9GA8aI&;^!_RI}E-}K57UaiMYb3n<+ z47Jlcew58TCmVVa^6zclv{}UHnAI<`mFDOd2+H2HLhh-P{yNgz%1447cO5)`lO!vH z%iI2=``!30UE;H9WKMw^xy=W;xVc;5??!j8$R6;9TMe^Aiy5^+sgOt2!bA#mzf7n> zSu#YsI_Y497Jfn%vBK-zwB||g%;$|O=0&bzX{gydfq&IXMlFYd7PX%wqsp{bS0zrQ z=mR&B{XMWD%g-h13lf9#yh!G5ix@vz{9?JAIP#GEOU`rjh?;=^OAtv0}P z{2f<@4SU>;U6XlNx0r|+^h5lTSBvqh30I+&z`L7E$ENo7_JH&KC9ArBfBQ(Wt8EQt zFPVzb1HHf{Rw{kF+t46j>#R1{e2^>%_&v=Qfg3PG?6a2%kx6--N#VX7)!(nyNY@o% zCD)JWb8`>&gc=6^yPGMZT{AV0NCwq$D2MlgML+4b&GheHM$^~rj7^+v&hD&>qfn17 zuy-jI{|fC8jhKajzh`bCm6)puM`UA2Xv|cegr4Hj&%@lpzP=J{=2YHUuaOghAGis% zuUTCY+Fq0{qrI&N?xm&xHihrM6#-TVz~_i-bF2+k0Qz@dJnbgqRz;N+jE%y6qUnDe z`sa~0xPeq)4mih~`@af?`K@7E?`|O+pR0Xfj|Zh9j1GDhZ{K=fKax-AJJEsT zxq1Cdj7bGL$&+mmTryEA>Yixc-{hJ@v;0$ypBUc!b^oWTkggQJlFUj+9S#bOS?_?7 z_fV&Oubu%^Qc127FaBqQSr3F2XA+V{5aHRrj{#=wP-0z$Fc3qyrur=JiI)T!!qmdz znRsOB8+a^DB3}Y;@|$P~?nCNtan`31{FeXTz6TQ)0z=O-02`k08UT8@n(D8antmjI%=nl z{QIAA%C=IGdb>6wl*FU$g){v!ZT-+5Ihv-BuV0I&p5qxP`c(#B@Y3C#tT_hmFla-s z_4v=>YWEI)I)AJTR$+}L^`y%UL<)H;8nH#_S?;e3(kYwz?DQRpgt{wx&d23WCqA(% zzQ(V(aRO$1npcLYad3(Z*sDojOe&0)>uv8XkJufA|>lZ?L-65!CD~Ge{ND&Ng=txZ`N`M>m>eM z(_bU2^D%`-D=4~uSAGW@As_T$D zwv9^*AQNv<`Q|vU2XH|<>E>p%vLY4SV6-kF*h6T*kPP^?&9g?Wm)pzUEEW&Ng*|l#)qmb{$@X9 zG0Aq_qc_YLH9myOf~EPV_BI@&&hzt~l4xHkKde)SOCb&WIrk3^dT(S;QC2|xKUcUN%`Xf43Z@|oxjUw&p`i^0kS(4WaP7?YS9bAOuS>w>bi6Z}MNjeu9i zzh(@$-%Zon5^kzp{Gyb286!{tNs%Q0S!tv&Jbuq6NAzAI3;?cVv?k`EJvlFITtbAm)!X6FkaK zeCIoYxM|g;je1rKK|<*aja+EQ2K~;WfvL1bhPw;W`nnM1fhh<)ecEW(#hHk0fBQ*% z;5$c#xFc<=^OCj~mIpfk5s3<=(g-E;v~J18v@{39Hvi|}yN#CHtup|J^e_oE@rf+( z0vG?|Ci=kc?53`WdM_Z*(=%;Hs8}W?fv;_(W#w$o&WO@z)){1p{d2c9AMX}?ExgBy zY`$DVFPW8}NT0CX?XTbc8ffrEo48u58Q&@w##1*(UEahu)Rsa{I>ihn-Uj-5qCF3D z`!>@e3Jo$bL()OmLiFyDO{)4JQ*qnyG4=NVv=D10%6rGhC^KXJ-`S#chU~8CPL47| zaXN$^EVj(YP!(Q 
zqOyHQi6?s;F>9dHvQ-B3{@5#OFjvh+)%r5K+uL>`-KL94rf=eds_fVmwRJ~nVP8)) zV8w}#p@I_{gkRDfKe3Yi8OIG>2#|QeS$^F`|0zXcsu%)~o%W6SlS3i*j=ph$vhVIL z82n$Q#qC~h8bE!Pj0rC(S$Q=v`5VZ#M5vzk25y)&|2O_pc~gyO`M?L-ibBn?uMA1KYMG*Jz!NPYKsu zk8I+!ohHcM*Z3PMKol6I1tXe=iHznZY8j_H*xqOjcaAr{;@f&&`64`pX0(xD&p%gea6-uX1

S%iM|Z z1$J4`$K{x;RH9@?!gM*u_u${!G*CqMikI*^UOVXC zPX`ADSFl|I>Swd3yYt)CRaws*ogu2bgN_XO*Uux~W$N9aY0db*y)=2K%?n}!^~k2+ zL7*Hqv2i1eX@d&kj?lvFfLcK_b!48HBn93rr^WQusD2So)6}@Re^udUqq-JuMUr1b_PU+@uWw%b_x}cFxJ0RjQFHL=@_d_tt)q_tk%1_S zg_VgCufUVtv-3~&m&?8b^{A)51VwrD|G-iDK5pUU1oS}{N1Kv)&o#Hd4N#SqzV7Jo z_47gompkX$E-BLo^P&=f<)Qk4nJ`UKM@>8*U@Pmjj*clx2Z-v9O~M-+-fM*h<}WK} zY`TO|8a%a=QT@Q_dhyJpwwv!o5~h89Wu>FNyGKy=^KQwD}MMOOnFnnKp}l^%Nx9pDRF&P;1_Upa~Xhoa)}P~otm6nM_y5T z3VhOqY31!4)EB19VM8zf{qHNh%P3gc7^!RW;mpEnXB=EITGxV#7?!Dof#MMc_%MBSml<)s9+ik*} z;xUE42BqB3VLW?jSwff$BSE;R11wu~psXoV?ngnZnW#+MZu3?$!e{+4sr@!2bwLEb z@NqRbR0}>+-B&ejT&rJc_Hu0eLuG9Jps`Z|jcMp3;k;gjKWT4&ec8bQ*{Q);8JUXt z>~ws&Bj32l@IBlK{b5+?A#j>M4pU&upx!=90m+E$yniPE5&7}rvCH1i*vOdv;eJ@X z4DFo(F6}ZDluX_*PO^O2UelyId+97Pr;u(-0|Lvy1*;r$Je`9W0ZWo?ac(^01?#Mm z`kqU-$X6qCZ_Wdvl=^&k~EBH~`is`s)w4v-f%Or@lq=w?_5F#m) z5r5ZQpQRU29xcU>~S-No- zKkL>#Jh%~bpsiuh;M$~!(M`K;Ym~{sq~9zPR~_6&4{B}{v|7q zu69f0*8uVB$(^JR-D|HlP&dAnXDxSEv)$g==ymand4u~=#iomU?%+4$*_<)cU;_gO z;oi0KY~14Stbd+BLsE)aD!FHM7W)I? 
z7a12Mhqagz4P?shj;2Mz>o?V+5?oez^sA;$E!77{MF-%>q#0`Yyp9EfG+cbj%~<8j zf2U&f^!ap69b;4}1+T;Q^0bx1=l&h|OpJS*Q7NXV7bP<=GZU&Aj{1eOf%j4YG z8u;D!@9+KTw;&=5*+LT5-4N4Y?9|kvg?~$_hnvT=_sfEo-z)JLs--)*FDhY#0`L{j zRQ~r6d5Buzro)+1O*$V!bc50;j~&-NZ)eaBC@gqDi;|>*c5;17++MhKn))PDrf1vI zq{~DxXuT=Q3W8I{Qp+$BLLpn0Sp<|Ue7H|!jAP?5>RekA0y<{UR2q?J}O@qc>q4n-;u z#m_d?8D*V6$5r-4!86U(&2Q`7;V^&A;m@tu!0M*&9Fe?Vh54;LoaEyYAP92Y1U?u5 zbj;rBS#5#>dZ#$9MO?_zh1m{Z2TtLqU=Pcvt|&mqy^18o>4kjhgU`Ey{@r5ln=u_pK;`If@5s7y z9{E$Fw)K7L@2)LfM)@CYMJu**>W72N;xcq&sC7UENt=<MHVX8R11qZ7hUflb9}=1G$aSCgU!mnDkRZRJ7yY9*VFf=}g` z7uQX54t+Xy*Mmat-`)Rt*Rh7%Gjf#Msr7BS29q%1lIiR6nmP5t7?Y$b%FlBwG1gwA zvrHb&x0GyBQ`Edb2(75F0p6Ym{3VTy=rPJ_V%_PGSSiJu+J%G3ab|i#LID=Utj=T zq8C(4EfyMO8OhwGUZf9?`nmV%p}ieScmtn z;R=lPhweue=-a=}+UCEstE?(tTrn%S9Jt)?yWd~iME$T8@3n@OD8*EX7;_OaxtLls z-X8bbTS>jYQnFEGl~71Ppgv}s^Bi42^V@g;SJs;XuJtgcQkvFU|{ z4Yk-P%;NbWP-1)`_rkBEU}^Yz(*CkFUC|*(Mcu$IiDzn5l?>f2CY_c~j+#)K7;ASC zWIO%BH5Pq=RacsKZLmp&v1pKi%#mk$*5j@%;-aEP+#eg3#slwv-f;q{xCJ-ll>ktT zJKSBmwBb*Yqvz&ONSJgJdIoCk>dHh6s;G2eDrW&S-dat5ZkLFpbVp}fmnXLO*zs}{ zQ~Uh+^GhI&T6#`Gl9Z%p_oijvlucV4%gsFr*tMO7mY%5xU#@J3aIjefU6+oEUoCcD zG@aB|t+xUT&ASwi`ZXW%mu$7@<7FHE5jCHBkya7O!25yOcnqhD?&%x;?9V{$RapA0 zyndk}Ce8sQ=Fg7i^XOydH3w1f@097#@<;NPK`_fKLVR*OQ{Cul&HQ&9mtqgpWP zvw~)e>K8jo&#>qu92za40i*Dgv-o5;N9L`P$Bd4pU~z;x*$6LfuJTfyZZqv8GD;$H z5N;&?%W062ouV3#>IDf;V68QA zS=C|{DJ8C&uD2F9;|Ob(+v>uBHVVe5DGZ0Fq-&|&ziE;ThgpGT$Q@F^MUHX%X6_+N zlx9>R*Egp+#{vA=`2X3&A|;=`9P^WPRx4sIs$LKHPTMY^WOf5Hb3)T5a)@6hE-RWoL$jveD%J%1?(G zGshUKP_p*B(-Yuvs-PELsETc%I4r8F<6%g-ujIBiZi16GTL4y z4&L{B?W&B7ikT{el3yXX$u@7Dcy#qKe%5Obak$4W6SL;=H>9tX0-L(7mwWLtu`Kr9 zpKQ&&=z_;D<`YRh9J?g1_K*J_gwz|~exKNQ{58KnwJvo_KH@OLVX%O4qomQOjkKG+ zUefo)`9;%hcE?!<#c@f<{XH35+q>#X@@KCrSF8CB=eLF@;x0pc=}04$iZ_9b4Zo)=+NfrnUfX z(NibiHFhRHWgKLX2+Z-9?OJ3%lb(tKR81Rb-FTi=on6fvSa{p5zNcCkt>B*kmPtfV zpv#iULRJi6Elg!U^byA*;jp4V;lhA}i@y*N+Uz2$0vR282ZRh7jD$0Yl7NDU{ew0>6JA z#S9QpQBFJh@9xe6eQkcz8_+`ew`7!yQ?TS+XW`E@>hP}wRtk(Gd^nzPhT>!dFRx-r 
z3^A^efL!vnuhTd)C1eV%Bdd5piSbvFwX@}FEw+U7z?lOo3#?KbX9u@?|4!##1 zG&e7N7_xTpI)>uBh9#yeq)?OD{N;+|H7>zdV~>o92(u}9kPKmdBIgC`Kb!DybJGO# z4GWHOC4D(rkGKFaX!GI%ORkEF_L^r>7EUjDhq5Ad7uX-L*c$=smlS5!&y=yMl#1TS zremC@-h#%!cvb8*qT+$+(8NO924Ma;m`4URAV}fRB+;i;?y>mGOGb4%xua(KoP+px@*Fl0`!0VxVy{ zVKO*pHRUTG2r>|g+IsVTBAW?(NGu);%ywlCh}vQ*Aqb=GhNH5Y7AXF<*EL7cvI|Nj z3@jJ7L$yQMUpAh=*Y{I=jKBINgPp&tOCBgXDCJA4{r1dTd^Ne4T`$@~w`NoghMfif z4@!uK3#XsDwx%XwBq!1lh4X^n`|@|R_~e-y`fL z;2~0CC;)grTmpoMIH|$UT5nl$Uc(g2t1cufRvKS@&SKx6V9k*$X;^()Xw|s3(90If zT)e=Fr$MQs;;uzNT)i;;XUm^E>QH>+LXs#Q!3yWo`usLQmH={7<6sJ2{?x(d>zub% z1(@c5=9tu}`eK!@Q;aJ6Zcf7j32H4p6?=sr8ME~^5&)tA{y~j}WwQ;EySkc#>ID$* z&G}AcWntRpj13%DsA~*vrk7!r<9@{K!pOGRa8aE=cm0%wc~}q5R3TfczdbolVH+Fp zd73E@PM2n7twJ#h&MCl)gBL6SQ_Y_ooNKR>M( zAAm;r@jwH*he8VG;NskcuR%dSeC^q!H!=e008fyf8-t?)(TMB+5$4P{FrgcwClH-=HQzyDhjFZJBp4C-o;u6E7T z+zilpvZ+8JSSxxZOJCq_k|^u=MzpxsOysULV*qIOVV{`SJTeECb0DUV<+dpbC3Poc zR`0Bx7cXrdSQMn(clw1`$e;afk}6q0P#Ul?w&DIYKVhsHhS2SgN)d39M9diuO?ob) zUMdOIVrcoEALdszqyAhqG*I7 zH&#$39ish?teVyVFG6I%&ZT%@!zI93NKfhIV*?huS62abhNqMN%c}n^RdLAfyg)WS zRJP3kub4@G-Ofmk8Lt9`y6()W^|#f*SfIM_kthac1}hc5q7|Ah@?)V#)G^sn2wHdQ zR`H$XW?N=mI8k<}H9UpgLk^bpGqWI^9G}?fA@Hlch>$0XNh*veo;DupH^b&8tzS30 z!T*NBXntEy%eV9qxA1^y7$PCojSC#rbyzpgq^9PD zhda>(U73LW8V>ttuozL6$IelFEI}pLPu(z1+?xgZ+_bR3+Yq{f9CTr?L-ZtnKd^`< zRELupsleVxBqK&ooSTgK)0!TJ0%U`qBuS(c?{=OSt)!YcDJm$flpK=Pu~)cIpoLS7 z3qoUAnh%sxl{0pk9W%4_3rRAmjo5n$8*RpH7{^u&7w><9XMl4NA2|gBQNPKUH6_l2 zLQ3k-=-mnQdEz|*VaXrAz;c#H35s0g%rj=z@uU()W|U$W|DE-nf3g4N#p)l8k#1ToC)r-VG|{!q zx#>~`BOxuO3n!(eDxqMh0RR#aa3V?q>`&;p6+r8hf&i^^9l5Q0C!>@ICIi8QQF1Z? 
zFo~!$pBIq&tWHS_){;DVrIHfb6A@aW`)q7sb(8socCBsK<*c1ZI#0?{5&@#HN-3`?nm33PuJ&H?YDRD|5hf-H-7Q2e&x=WpQUo@&nf!0uS&GQKc2tPLO%IV zUi=ro-Y&lX+MoX*{NeUsF1LQQZ+=Ri?zx)}dGUQ?<^})&fB;EEK~!62eglh1=Ko0l z+xMtex%u2!X33pW|2;mr&Z@i3nc7n!Eht21*Y{RSoxr;?f-mQ9mB=X zi_4KZcAMSt{)4l!eE$47Cz?*D#+b|ND`c3>W>#P(RZP6yZm;U)-r}Gr3g_ImzX4mK zQWxi&k7E8OAKiT{E zMD^#@uYF3X%B*h=rzRTEZMwQF3{y(eNs*1mQHW&{?c`3EFRw077mLMYGU@xisXHYV z2oyp`-!HFk5>saLl+w-3jg;~R@UWQNJG%3{dz$CmiGoCekYeycNlG9YfJgx=DVT*M z3L#L641|<`Ss`exl~g;$wlrom)n$&sIf1>5YrkYht+m#6D5Qs(G9r)0V=d&i-XT!e zb%>Z{g|(@R(085po|%2_nQ(YWvW93N9Yfo?!8bC55Ft)bFtc|~3E9O65KEEE6h>Bu z5PVmIz#_}RNaq{?eB$>gC4L+hMOGWDP1EjHtCiJeI-O=&*0!ydRtYJjq!4?bK=7bE z5SZt=%!_WMH4)pqtatrnHa(dYP17kM69aQ>+jh6x9Ubf$Q4op{8AMs1FQ0GEA3S`u zD1t~>w^!f){;z3m?%(;;bi8=)op=A)KmGqS^;J2&e>B_k4Lq*fr|(y!%V~qW7GMN~wVvN3Pi_v)4^f>20 z@oIgejfpWTA!oChmKlXU%Z%1`@XYxnggi#-gCFQSgS;SwAOtd@F=l^%pMrnZwY%M} z@0zx4A3bIiu}#I+e=I=Zii)SibmfU9Y>&UG!}-qvr=d_x0%?`wQRr-<2gI zbMLxLm-i1(TIb(+=LfahTAf*&cU?D|O}0&|w3XVNU!GmvT<-1dO-54^2uk)8r4&*t zkXV$tPwD3RW`DjnnM|7PMj4U3M*yM4py3$0Fr|!?iWy^!5W!me_S>I6UtV8b-Mseb z)xuczaXOjweNRMyD3D@IuJ2QbCbL5eW^^Wnl3EKPIK>2<5)ZD5#uy45QcTP!1WI9w zY&M%sY|*)12@yh&`qq$Y5TN8X0|1mVGiG2-DW&qvI_D&!Qc{v&pcs=<N@y5xLa*HTJjR4U3yj*^Ll zS_&&u-?gZ%F#tj$ATqL0S_ux07y%?oF?@>!f&kP$S%61J`xyIqbdRP8GgDTnzjONa7q9-ght#4_HKsi76ij+mF>QWQb(&yc zB>2&8V2JwQp;aan0ECDD162;B>H-?cq|lSh0~Fcj?B2z@H~T+RR?&v^p{r)QXLrLl z+iX^JqBB~G9+GJTpG^1fF6O`d*giFn%EP%*V2HfdLLic1(KG#rRV2T}?{mCd54^-n z`~gM`tYi~$YOs{bLyDE_z0UjKYEzwgWk2}g{$zdzxTx})`f}+ql`ogqc~K=T#(AOo z_O-=4Z<^EfmY6-Ix88cIyXot0>oKhB`uOm`PDWkbNB{629336K^2#d|!?M1bPDYyzrN{>d2ZMS2a=9FhMtkGQbTWVX z{MoMQw%cv*!)HJ9nbpwi<6^Y{l~xZ$KQNwyQ`fG+UAG* z2R?>fyJM~IPv))bok)366uWp-%*uOqz1=R~`@s1)o6WASt}ZSvnK{d{EHCfeKeXCR zMpLEKSHJqzwr%e}eDEv3`fE+o>^9r7Fe%0Dt}e^6ZrT`qRaIK+uIm6OgwS<0A`ZlA_OTV0&1m1S#H}M5)V}gGbc)k6EjO8 z5kLw7%mPGatP)b7_{5$lQUCygx^;bI zl168FZnKP;S)eqUh<0`3Q`Axo+5)AdkAY%BL`H~xkBGp6h*L_;w-Riv^}xM~F*@gz zk}<~dWIUQqc5UNRtR`chA^^qcl`;YpCkiQs5I`VNj42UTZA+X~UdS>}LNFxR__D@H 
zFc~XD(hwvC5fM>1*9p;* zjwk?n6ojZ1>dvDKycBXQKo`^!FM{luC}JM4OSTe1Xle;F*R3+0{kwjP|5l-Z5hNxe z*)xU!m_>r1t!P`od0vgMcdZ|frqaSlS~FU7U8l81kehnfc~1z9>zh8deHUYtD2QU` zdXU0-pJD_SL}W~6^Q;<;ZJr$+oq%_!WLD&$^WlpIh-04rfskk4G-L`WnQXdo?EAp6iLMkABb zAHk=-;QryO?fbvB|MP!#_xHwe{f**_-x+^tEwy#m1@@>jN7L7@fB6gT>0ANLHX+c&HkA??t8D@eE8SL&#qIZE&tp^ z{oUrydolgbumARMs!x4o{o~!Xjxl`XTvNaGY`MOE1nQCkg?`qW(cVmg@}seG@iFD}k5Nvf36@$qq<=gy^i zwPa>%;l=aIrfIa6S(a60(KJnBU}o=qQ51~Ad(R;ek@tf-F^Z?8VvcfTvT;SZRWi?vF@t%dF(y#ZBDY$EkZx|Sy1tPTeGEjDX)QoknHur{eBU=s zGrWGH)VnsuNF0m2LPRNV$Js;VjQ756>mn-=IZ;e0DI^rEOY6N)i75G^C{o>ff-K91 z4~~emQV7gUoB}gfc_B*CG|lPxS(atf>D0L(5TukTgb;#MNFlX-)9z?GpWMHHABAA% zrt>kvsG2_c=&{K&A7WPIQYvP?fA3C~Wx?H~$Za3io9h^3l~;SSMcevGHG1|uhM14b zY1jC7e(+m~Ym=ov^B4c>O~cgqs?i>%FRq_wAn4{IKe%g*WrP$WftSYSMb;X4SY<0=;)iqLJcyxohfrGM$dcQ>AVd4}ITT zn-BD$;mHTeybpuJlb2Guw%KmCMV=oX9%`+3yC$VH+uI*aCQ{0#X$*jO&Fqaou{{5f z>i31v+o;Fm?B3NsIa>X?gd+IZx9w#v08YlV;lqA@Z^+ zD!JM1LI^_?oXza=VzsK*34wrhX8RZlNvq{%xn8^AC$n*uW!vp`R95Y-4k2V&He|UK zS+0dVJUFPEX8ZB>h=^iJ%n)N7kH?$cHd0br-85UH^n5x?DJ9F7yQOn(;zm~Dn|mj@ z!g{^wwfIAi9xdN}@1jqk%tVATFO-mNbAiReOr}avtmCeBf;FH3GZrj3h^QbbWC_S5 zd{Q>!7<)>RP)RFLP>R5$(hbL}#(CzT%egVhJEx4HpmLLIpb3#M_!64G@ z?m#49%Nc`}91TJv%8>kY`}L!X|L66+FSkypzL{-5{8T(U(7ShbK__?rXtL4qz0-3e zc)qWm2mOxve0TV$N0O3*unoj4A!#i|7or34lgV&;iI?~{$IJD=OT5G%U^vD+L6F{& z)k2}{Ia-4eM+n);S9a;y^L}=DCaaVmKd7`06pa!Jp!2RH?TH^u$2mrn#+uUQMeF*n zeC_KKGpVW!G;g*wGr#uem7?Ij?}ZSDM<*t)j4>}>yx8C0zjyCmmSy#Bv)yjb&d&BG z6*8@s*RMT#1rd!99(i@M9Oz(Ao;+F1_b#umHk-{WuRNN~W_zPb8ao;{Dk~F%yS}-9 za_7mDCwhPJ{qKH9Tl0l4e14o2m}2a^*=W36trM^3(NH?rS9#w-=0lI#$<22_S(0<``z() za(r+&SZc-?tv1${lgUI1A1M@BZjABXqXZ#Dh~t6yziD>J>|&6TBBk|aQ@cj>dXkm5 zrvG?5lovuMsp`H1Vksp`X|45r7`QoFYmOXalu~Q0DKW$_@cM_2;^W}B+oC*Cl2U4| zBgtXr?!E8(zU#dZ5`c&Hz&SSrWHHl_=qsfZ0(=a&4*n>Gkbp>OSR@DlNQt$PLI~g? 
zr}UP-B&B2o1ZL*GztxWmtwM~^2d(vO9B~M%&ARQo>2%782ZJL3=(=wB(hl0>?RIBv zo|OfKIEa)HQ5ikturV_wU?vt)O2y=cqolPSe3>W#f*f8$p6A!s*L7WISvGVmN(lgn z(KBPB1OUotkfJP$h#ZI~zSyqGM<<}qpzp-PwJhuMF!sGf#gb#{rOLoajbw;5fFOcf z&r9)>^1ghDm-u}Ffyf{k1TiGZHlwVh%#aJD8VR6+1pr|kpiDuc$P#PdJgSj`Bx0L{ zARH5S%2pV!wbUskz@4&-6q|q7Z|~nWWS}ThB+67Olx~&AAUnhi%p%;(cr#MW%1V`x zy1Wpn0|BU|Y1($VT=vd`KxS4_)ODL1Ym5moG@VPxQcCaKsG2Mmi`m{nE8}AT5L)Z; zY|n+>1rNkcQ_tt~Nm(rx3oSJP_0BDqOCe!Y zNfKx*f>bO(=|oN_gH0o&B!P(;1R$djf`kP05Pu@atp&6y6BK$1B!yBLf=UDg4p}Ez zhAfY{lFla)f&?k@ZX_WFf#?(iks=J0@-df2K%g&d{{COfekd`#xjHMd*rWe4fAgo? zcSA`1cI{o;$5?m1$Yk~C^(7NX zPe0!K70a7{w)n&Qa^B{$h?jr*#oyU;-_(VEL4wx1WXB<#zhW-GBwqZv(b3kpuY>v9 z;;-#*e&bbD{nG6DL!JGd^mdJJB~m}P`CbJ4&HS@Q;V3q;&SJIlS($7f!`zUWbzKkNlf=-+B$Z+4wau@tud9O@2>IfySoV-?uwiXf2&Ul}~2dzF%Km3ZWK@ySB)hrqMEM z8YhMB`rUTBW#-e%n=H$0G(J*Zjsi0|CxmEw-?CoYuYl!nsX7-KWd z$h~vU`#~paOs1`Yj)+a&Ex#T#|?apQ#0;ObaITXMlIHUDomN}T` zNGVZBiYd!7Tjar~7iSlH(>-ZaX0rfY*Y%x`1nj++QY)ppHpG<5a@61;N)TDHv&2uxOvG>Ws;Vm>!n_4K3n4-b#mt25WNLWm!}^PSP8S{&}b z_UMn?yZ`CY$zk>3H`nK<8)Z{FBA} zp3=6dYhz^R{A?@-^Kl`>$;o{d^6Khpwc5<8U1rKY&=5sCkZ*=OnA^-@ltK<;Xg@?m zSep|$DS+VC4iw;49E`+>I0T{s0OPINj%gT2GYz6u9u^PaiL$KFDi4%~AY5j?zP>y= zyXd+u&+{SS?C$Z&a(Qixp`nT=PKi@url6HlN>QK`y;L#=7kuyACd5Wn^8Wq%yWI{E zLvXhgBPmcx02qx%gRRi;mfNLLq>>1NC=Kf| zL=*x9gObLWtZ5xPG&XBoKX}P9bMRgZIb?bRfcIVrnUYXaX{{vzN)ZD|qnoB70!HvL z4YC?WX!{;QBINOS93XDn1^`aa&Ss0f98FdhCxyLPEsu|mo<4mVy+bBrOrUfd7~?$= zxe%MCsq2 z#q_5hzW&Cz3hw&Gb-=s{cb`9h@bE;mek|A&Wf#Tabed8~DHx%oR!!5nJ{V<|yK82dNxPz6 zdv8H_w0-^VtCzogs{Z6gA5SoW$RbKjt@;T$8U8D7*T`u&0|_y*j0_2ofC-Ak1~5v& z0uq3C%B-s+8ORe%=m)=4ez)qo?*wVGDpGHW8Fi#E8RyQqRji>r5>^JBDXJ5Aq@*At z1QVGgzHBRB;`a|;t_NP?CH??|j51^hx+Y0QQVME?Ard3uoZ?OK01j+>_0RtBw12vO z@=CQ|jH>=(ZD<&Q3x0qw3?UxItMSBMpvW)m3>j*mY5=m8O}ZkEPm_wU~8y6);``PQ3nN-3+V zg6Oo?jH&N?W3t(3N-=rwhoQ65TFCz&d;b+I>zba2Vegl}KXqJrt({K}6ER2xNf3ZV ziWFt5L|;fnOD?*6ZI`Qjt5W$^7ru5`F4J~dw#&9;iZBU-2m}U!#9+>uIcMgCIbolj zR$iU{_@(#tMehM%7fBJN5kU_2bJeS>yVu`;um0B?o&c-WD#pkJ2q9BQQW{DSFc1fX 
zwApNwQbGtIZnWN3&wbzbLyw3`NC2=}Z(9o~B~L!m)>2<1gTyd_WP&A63FCI=E`%_~ z#F&VX1mBjPKlI*tPG%&6EX%fzIsg!3)Y@(r9w7whoV6JM+bSCX0D-nWNg|9cZXHFp z9K-EOgEIzP2+4&Ig0UfntzuyN3dWcOGzR17bB;-s=Q-nY+mL9ji75EQIn!DXuAR^4>-BmV zhAhjr9h#KVxnWz)E2X!ApIfF6IL7nj$Z<1oU8h7Rr7p|SnR~F{BIIKkgCSxP6G!5e zF-!!|F-l6rDKUl&1hE(Effx8O17l1e@I(v&xgc#R&sEW+CK5}@VoFH`#u@hli(2c9 zds!%S$|FS0OdCbmkeGuj`wztsyRP$!@#CXlGZ=pKUa|i=!*fDuL}G|8Ve-ipvCl<% zrgj_`kMwxprSh(cc2Dm}h6v90L*F~bc{!?-4?aa91f&E+(&*p=GQP97pBN~k=Vfk9 z&KPqc`WTcl5wRPFz8%)}%4C_)d~a{>D$6oP2v_+2{AZr+e`#-d`Wy7XXCuYpK|JeY zR*DSd5dAc_%_ijfPpOyaX)ssMUdG4Y82;ebXMgLodi#~|uy58~$Ja40E1HUa2|M)= zU!Of5h2=jIUw;SHH}w8**?;eP^Uc@j-M#E97pzzt+cNXhm+v0=FV5_nJa+T?$si`p zs1O6oDY008_4E&q(&rzIzr2p4E$vrpUY4blk|J%b9TU;&>ZTlxqubQW&yJ2xHtV|Y z`;XT+Im%qWv7(+5Tdx6~f40n*-Lqf%%Bz_q-_{}GFsuTaW^r|N>*bB>Mw8K^b7$x0 znAXR;Q?q|3&%}DYZn~~(!+4zET(3_rE~nFJNVGei5>dTg7F|2IFrL`iY$hTBfTBbR z=hN}1EQrV`)q))AaC3dJw=>5S1jB^MyB^gn3AX75#?Wf*e9$@zjKw;6(Ru-$cQ)4} zGg4f2eLa$MkfGtuh1xLLvj_u+Yhs1lteSDDRUr-aw~RhhDJ-8od{m6ff^pe2^?I{3 zMiy1xJA@%1uyxxF&gXf#4bD}9$=t5G4VS`NGy!{%-Oy5L5RnVPC9otRCC))GMUWB@ zq!fc^DQrm?0Gx5&_kBZa-EN$W)7hMB9ymy?)~i})xs<%?`+n#kc?@0G)~+9jLBxO< zkMG=-(r%j8hoo8Rx-Q0OjIvq}!?4+GmN$zf$H`>N2dBaJb=xSVcIJEAQT3MI-ZYJ2 zWE6`b@XQD+7dO`=I1wSTW!)7;k#YXqS%~uxLfbmYc%J8san3m38l$^`31#yvA6F@* zzE6@XWwP;1gbFov!gO^6bbv$jNF$w%jt)|PYOy`z}AfwVCf z7Z)jFOjv4uQ11BHEwAeRgX0*m>%)Tw4>oO6_p8qyeJ8ws9kM;^qfOWQM=ur8l;rA@ zS-)X@7s?_}$v1sR5VY1Ipy6G!zGTcy$K$qNWLefUO<5Ms4O6A0(nm+P7$2HCnx{{nW?6M|as;U^3RlQzq zR%>Hbl^4$Sj37mas1T#wkwUP=Vr{rxtyadEJkPhsGYmr{G$!LrY-{Q5!EXoXZP!U- zlv2oej36b~T5q$d506jQn^lN-`s{IAZ;p3BX7>&kRW|fkC^qjFQv+MOn2(jF(Q=)^TqdRx*eEjhz z40G}DPxQgAcY7CH3KX51|PVE zFD}#23(D2v$z~A*?XSzS06-snp%f!XF$yLaGFO$}I)R*N9fun`DTiL}#P7sa^+^3v zKPvZO&2iSlUWRA!N0yqZ5=#UTnI}SGAid)(a^jFQ4id8U=x5LQTb;09 z?tb?d=l`TxW#9W^akE*fok~!6y2#pi`P%NDAMIbCJ=@EdyVtp*@3StDn zIb*+YZ}$67S3^jVp(OFoxp@58;%kTF^ERB--Il*VAOL`~1V1|c_`g=z)?tzuGENL% zniZPM53ZYkQ{Js4ihuw@aRinS1ZN~DacxGC0>f6yY11FmRmpFv`J{x|`sxv)o+&|o 
z9Vuf;t)rafX?HqKT#a__edoR3UH6;O?)=%Omu>2H%PJ@F;N-Tl_TjT9Qpug^BzCLK z^4cgV1(T8!!Qh+_GS3SnkW#Le3!-#=vna}f7!S!me*9EOnGhLc!{An{)oQgmzq}Zn zJ3T-D{K=!bYj54Y-S<64e`|NI$nwp4gMD8x$-So-G9@zxR;int%RDnyh`rJD)!X-{ zrqD|Db*s3_O}^>cr&pJ##AfKwMU!Qnb9LW!Zg43!P0cufl2eoS^+spfWcup#{ARWK z?DF&%{=#4X;G^F$(ptqOM+KV5HS2o0u0x23NCe)6uJ1#LDIo*a^*u1=gX5Abt;f|! z3(dC~1yX7$QwW5h7-s}3%dFH^nQ>7C7p$>TN#}j$S*|lC1aQ`S*AG2W@~-Dna>fub zgfOl~f-@y0AzGy~ZK4ktk~LNeMG+a}&i7fC6?qOZ0iqIu11Z5bn;Jt@LK>56V-i5) zT_BQN`QSo|Qp%X(H-6$5K7aIH*R4XNzVA5~F{KE!>Dt9+y{I=tV@1`(LzNI#~Re*i`VTmT@z$nvl4Pk!t1k}>w@ zjm9I05cn^?viqxFTqocRUDn^|^_s%4wLj(v;)mG;f(R86i=2oNAqSKk831vHFfg&M z-dN7Q+U|TcRIkV2x$^-y0SFMBaRxjw3XB{lMSnJStAUeAJc%SwAP{HFG5m{%lZTu7 zqVd4AAVQvqlmJ0Ius>yX0%M$GVmxlmYb6NcGBU*&#sQUpue(n%*5{6!ijO070^5*U zH&lhOS`P^lisjXUd9zoJs~KxR?~f-u%kPdPNMvNw5REMZ6CPj~!g{m0Uaz@H>_`ZfzPg66mqOJhH?E17wUiXFMS#PDv1h%pq{2JjIZbRGi0P zD0(4v>KuXO5z(VEM)(Gpjw(tW5iJLd9+eEn43cN4YBP?h2VNvp(XF`j#2)MPBk=cAGp!N?eMB+)G3dERqy|`ldjFYFK3tnU-a7cYL5U&WrK7 ztsgJrY&JW*^CmTqFhsxUzW?-WlC3!u9q3~pR3TrfOeA)?+#FVRKAZJClGRq26YGGN@3}qZJxyYGBmoy0I z!_f4M%XQN&*R4^^b-e;9NccW-RBmum3(dKe{Cv}xTMH|O2%%U zLhP9>h>VuPDg+Q(D@L$4pY6;hL)T&mWl_q^Fcv=l{EL#s*%-R^oSTw|ENdV0##O5F z7`<@BA_uObX!syA?FmV6ArugDK!`5bQZeR2Ki7I@Z7S?Y;$(AP0*kJ{x;{InjX{<|H5~4n_(LfiqF$g%sM_OfVR_UT{9DDq{_2 zK$K#138jXQewfea03tJ$$H8WH zd9&Et-JQ?p&ilIS>tSG^jm4{Lv=kA>$B_Pg(PZ$`-<#N&WO_pU^%iLxO z2?>Odh!kUzLUJyUm{dHZfJlQ2T4#g;xC((1$4C^3#e^v)B4S(qQEIFaj8JkWIc!$z z<;@Kv+BrO&PAlK9`=+k)aSAXv2a=`M-$Wi=w>bZt6j=r!@^?D+Zd*Nal*gV&s&R%hl>;A;NNK8^$@)Rw&Jd zV3eZoxk#hZ3eK{u$npva5YymX2r)+1G%*J7(YK-73`1avfdnR)(usq^po{o0Az%zdc6rN?^%faCN)BcJJZ{x8I?u(@++@?=lkzJxj4Ob|ITDG z%CzAeItC6{>yfFP3yWNsqI5AbCME~R-~G<-t(NO0*3V8aJ&WT!%e3W;#~2xbu~rI! 
zh@7#+SR&4quKNKQTQ&{XDiSg-34n19oF&AB$T(+GAO%N36AXPD4DZ7r6T09iq@u{n zD$nz*ZCkAsmx2I*AjH@WeMB6b+cfojcLx|-x1OX(3{cB1#>Bvf)DC@MAhpi4WegGn z5=?NerIY}~0RaPr0lmrXs2u4e?oa3U$43B^X*;CV|VZYBa*zY(sU zYP>ZoGh?o)l_h}`5+o2RsBBQyk}RlFDEoeW z)_0o-a@ee*yBWA2x}8$ZtNHQda4f8rdF>hz`sgv!Ml%3VN=PA6>Wyf>KZ5X--@zdt zTlCCh;!JWRVw}g6y1qvusT2VSkcmMOK!US`i~zX^i~$MF5y2U>}$CBlZd&?hop%kBwBfWRkWBoPu%DKDuwGQ$U7Yu@#X zn|Cf&P48vKwUJ-H{h(I)s&?~|6Oi-0h-t7ptNeB1F>wH9rLdBN;EbeXK5Vyr|LSp6 z<%gyI!}A3&!I3k8&qty^rkP>HfD-|5WWb4x7C(1u_G=HXe>~sqf77|BPe0v!@Xj~B z@%6v)*Z&=2z<(%Sd!Zh9ffx8O1Px9YVf)nSy(dRMHLHhP&Gm6L7cxIx4MTmopXK`p z)3fU}Sj(*1^lVhhmky>`M)&sjK7I7;!;e22sZwQnyfdR_cy@8VdwiH>*)Vj|o$~a_ zc~KPB+8E>Y_4RDFvvrT{yxVLx)9G}%T<#wp>|hn-n*;1{?2>fedXR8_wU{YAPzEiy$@R%fn>~VIrXqj2uLYG%8cbw48t%C0|3Mr zhhY#~<58k5(}ajXaH|xf)D12sL>CZIaWMq1l;ntvF;1{01Tn^dF(Cx!oDjFpqzH(S zw*LFhIi*yVWg*2Fqqa6Ha_{}_?ta_0ZQDxrT;rTlLZtb89-P~Hgs$sN=bh9lFY@FQ zA`pkJ>nH&bK>{O9Qrr(tFpi8UAtNZVTuMnqZQDkA9!=()^WH0^I07SJ1Y5cBRuw@+ zP16j+5Cd+VvbS6+A;fq*ZQHizA%wJ*V-rA3KBWYLb18(7O7X7iwy)6l zJqTWwCFk77=M@zt+cb*CZ`;)Sd*=Z=*C{x-R zX{3x?A(Mi5RDZrk#4qpy|81cGD7X|OLI(hxkU``?JW~*o4^c!Ej0*vRb4h>hzF?YE zr{vT>6&Sc0>6nJo&HLa-dBzBA1ngU5^u0-zWyNx?X{=^^Z^6QT#jAlWhxK5U;xnO! 
zl&SSliztR>Q;hSYN@-y>D!*w4DY%rJi3oua7r+xk%7N5Dh~ zTnZt$U=k4`QK~D5(lN)fpezzZ#vEsoTgSMG-M~kn6?Dr+m`ri6ND>I4VhTcxH%vPL zD)#N{1j0akRMDkmbY;hW=nGBgb(U%pw&Q#Id~=oBRPGNr8mmPTGSbke^j+9_A9f{> z;o093-~PpNIWldNvQo*FGI15`^68RK$0J^5{7M~<++$#nN$I$PACYO2WW39Iy=izo z7^^1OCsqEVyZ`Qg&^&_WLf*>vV7f>Zzu0{L)tCQ){CEFn>;LVo78m`))3j#@gJn9aL0f6huL3wsrjzzyU(U0rM^<*>SBDfg8ixcEOVR3X5{7k(}SP=g^O?X z*B_RB;F*?$6k~Sovpf#Kd@8lB*G))T~w; zAo0@S{qsB5-+T1YD@T=yE5kmK4E#i2`stH?sm%d7CK5;tZNDsKZV+rz00NL=j2r|5 z*$XBx_U-EOs-I+M@7cVdLiy1v=8 zlgVg2nzUV0H!UJEE*KY+$z*HL-!xrJP?ly>H)S>I`+l3c?7g4PCX;d1we@s1-a00A zT_`dm?C@p!CM)pu#Q(GTvvp6g=0 zUb-+$3b9;m4nmymOwZ41qWHVN_suFF14}wHn|i&FYi877&(ZHJ!}n^LbT`t8%=RWfVoRrORy9y#TO1?cNVc86rvuj1glH zQ$WJlV_swc(5E&6Ve~?)2RGF1<=N$EGDAp5 zdk6KZ6WwXO-ZYnIs+{0ta#)o=tHl{zQCZB{OllQl*fdwfU*}Fq zLMWvM?}lMeMjKL^=&W_f4yQ)E&I;+Zx^Y37OCAUM;43t~+QY2%8bsI(3JAQ&nM z-eH@^mXpvN8Mi|1*!*xjx_@-w`_I=++cE%=iNGq^-x~9SH)pl)4gb8##>=+x{9{$@ zJicb<>+i<=O{Um3U@syjLPZ<|Oa8huVz9fytGr)XNN6fiPKKB$w5KgcEU9g~ysatq zzK_xG?d^H*eGJ<`?+`+asjwNqG`jQI>icW+<%Q|B-LVQqe zvbDrzi~$j2T!RnOzRet(AJDYc1 zyYAlnk1z9EjqG*UcwRA2a{1}u^=}}1bi)T?D$@)Zt@`$()3Xk;or$`0|M+{KJtoOn zb2aD@upk+aArK&O^W0C2=+A05@B%OJ&%}%Mzze*gDI5`#wUo0=X(2fdIH{HQ(wpp$4 z`s(=juwHMZ;>y_ZbhcivCzDB@=XG88eUC8=!$3sFnA^8+cY|Y$S!+4xuH8(^qAW+- z&T8m=jL}*vIPD)Ev|X1{(nf9(LRhU<er)PV6Bcr)zz;na=5HXQ=Ve9z-2mqY(Z8pHx`NsvnwW`!o zZY^#E;{ZT{5To}VrA&mBkRjn#P)tBEm1V_vLShMlah8B$QjvfQlZYoGU~GG4VvI;+ zviZrMITwgX08&cMxd<3zEG$RCoO4QMijg(S zcb*FnAY4Br7qygD>mtjPQW3FjTSSaL2&+>{!!T?ePPt^jFkt|I?Va7wZ=1iulUDraW*=*MJgYzNAq$CVJ#)!a#5ZpNjf)i21L_l)eb0W5VpHk94v}r;H z+w26XBx4MakOKe^vGl{4wBm?=UG?;#Y7x;4y;>ZydMuCnPAQxUhWRO*599-%L9ug)sO{NONzR^;f&ug;+;dRbgM>)ViJ-4Vx((@h<2HlHp~+?($@;z|odo0&ko zlqoPKK#|QfhD6o)GAY(xfB5*>ba!v3%#(++tIZc_Q|SKC-|-!Omhb&%{%|h4jFAfILdiSzFL3SOC~s zVNi@f#1H|18J?){E-CU_Ko3GlW}#VOM%?l;S3_+fIa{ErI-e%)%4x}jYy)RTU`&cl z`to-1Qy`{Ni}OAtN_q<6C*8L{()W{J`jm1e`XABfKDe9Q zWL#?0r7Ep+EDqg#eO~&7WNFbXKVQJzJYBZ=TTlPqr}uv5KQI4B4X;YXOiWl>W&TRU z=82kRcBRt(r|K2aM@pt-Y8s!}F1b-vRf_d}&p}yJ 
zM#8pj8Dl~yYb|3;$w5l#hgN8T2{{PPd5p0t3u|rPwQbv4Yl*@2^>sqBMmpz~iyI-N z_hE1mK$b?$V<>K(eChV_aZy5YDl0L0hA0J7vJeUo`a&AUI3g8AnG&{LuQ|aGUcdMD z56%{6Pn*|o9b48D49+P9mqkX8R@pKgIP_9smP&{Lhap8zY&(-ernHc9jzU7VM7ZAg z&mP?zvkb?5YOW?cm6^4`5$V$p-Y=>uGWy;pA3VFbeB;%(UVrd%p$sDs%0%a^Eu(jv z^|J3=Q53*g2rkRAsw#pTJLd$i-4Gr>ekuf;OeRJHVD#R{7`4_Zp!c3p6gq3$wvKUJ zjYeg)HN6>8Nk8<)`5{+^{ZUpbrr`ZR2{U8*zJ)>EKECzRgIA3-F(Ojj+1X1eZ7&oP zqsz&}IoH)q*A2#)=4Me6ycUysp)05?!whqGs&EtXd=zx?2}*IxVH`yb?a-nPwGzVZ{_d+(jmC?A~AA(RlJ?Yg?IpXUh+!4%^ttuzD(+GJUlQt)AD`c5&`_x*CY?0Ywx%}!2khY%){ z$uM-$C&^4zNvUe1v1!_Rvt-hjJ$34k=F^fTHh5QeBN9gwYBlNLxzNU#{n;KyCXmGJ z1R*Y0S1Ea8tYnfi4P;|naG|cQ7mQKcwu{B}!Qq~^q);j)0^<7S(uc$uMb`vB*kYo! z)+(P&Cd1&*PfzQ5JsyuI2he7 z%cU{qOLtznefzG~S}@wws}RC43{_Q0P{4%LxqyT`D8VwNt(MDSxg{tedKbgCF^Msz zp0@s|#Kd~D-tMlJ%jF++C|lE1Ap~iXQd+G7qEgZ*cKYz+u5S(|^Lx8HL+6HJn9gQd zmOXy_cyRrwtj5)-c7t=y$4Eq0YpV?>aKm7w$!-Hq8t&z@Y|oD;$(G!rmWrN`Est_dS3Pza1tteZvK zUl-PDZc@5|$j1UZ*)8I15$3h*6b1=~GunRpiE)sUDINGR3q2>!jSaM;5lBb2VUR>ehi@%C?64!Q-d^K z?0q$^pN*~`{^~#Z-Jduay?Xqr^TW-e8_##656kYx+L`9EbyDj|k*QZ79PwxP^z%zL zPG=XVQn0Eh*7e3(z1ggkUV`A=;)^ft+&a-(3&v6kS(Z(wQ}2E6eAjh5J3E~7 z%gf75ureQX;4wVy`FTp&#sgCV^Ed1F4t&qQ~H4xVbnp z#7mpCZR@)3V7^;SN|W0zxFXA4KiodKHJ{GD_~MH#ryzt;8NO6FXrd@gZrRT}3nM6H5{g@p z2Bo{|wdK;3Vw5q-F|p4e%EA2zONuH^$L+@)N;L!;?wC-BoD9;pUxvZ%2l?=QyVjG7{5=k(1K%7Mg--HJqM}d`L@6W@tx(PWbA22AqgxZF9k|6AN|k+de~2+1W&<`jU|v4v5y z)RN(eU`K6)12y8xIw#?2C&Y}kz1eaU8y=eh#Vfx|f&h5Uw(rz!;+nRH;fRSx+kM4$>t)X1Mx?8fOm6%c1HjOa-VQA^N!IF3P z%-yU+D?>zW>(bot9-&au`O45j@wP|rR<`y!i2C9D&{9rR=;SWgfLwC zvYWrAfr^im?vH}wonU(Hj*5Zfb??Ek5?NTEXBd#> zX|F!E5-mc9YunSEDahZydaWx|JcCF| zaCl3*ulRyJT^n1v?#7g2rUBP|du}-r)pUIjKM;1W*cI&T-pK}5 z%%ghCytc=YkG#KbPmq|tXlkrS-K-CPKUT%+D3?p|GkQBH@B46eTvS%2u;*#eeK|sO zw9@H~UiBfO0?Ejz11YaiwL$P;D{#Bs=xm-F25*?t7Gm}XuIJcS2m#zAgL8`{J$ke7 zjblP40zB>vP72u-Va}`Q^tOf)r%2-J?Co6_q%UCbEM_%Z_f6v)RIy@Xmf;zUACkg7 zpDaZuXns972t1$8Sb4naLkHd^Dp|>S9q$$nAKG}=4~eI_*{oJSos52~@)=Ui$Que$ z0QEDmHoa%l@TpSRpReEYWTErod63m2$4du*P}KpCVL+UJ1_AtY&pLNLY&CazbOiJK 
zsnRKTT&On4Uu@e72&VlXQhs6JTxE8qvr)XQq#CG08Z{S9;tDCU#m&5D zPZX6`j(#CqAc?iZYmQ|Oyl6Ia@ZnK;x^f2lZ*S=;6S)*K{A5x!sqJIk#v-7EOc~6A zJu<|Kc1+>_*mU%HnXfP$`l^ADSGs(m@*ub#N@*#O8-H7n^sH`dRAF{j*Jy!faHpP% z)K}tvXQpga7uljkgzwf?MvW6id;2L(z)3Hqnz)tni)sEOi>$!2Ih_2!8zDLHrJeWA zDsrTA^1BSo+L{tMM4#VreW9B#>i5Sx2+V_dm)U(*kM1}YvPF+mp%NMd5nEA{pdcp#dBg1pq&Rv0?o}R8jmX6|?$~Vi70xB!b5IKf4 zVuuQ;#29<~P-_*DaN-KP97}AHq(X9fGui;I#3iWb0sxi+0}A2lIu1mgL&eGqsk1Ov$i($;qrClHl?^m6X}kB1GbgJnPm(jFyWi_aJnu#(8@fd)5?WovfK>u!-3H+b zm`@a(W*Sq4UdEBOB?=gH`ej%X67TMq^~WTx299vW2qURBb^GVVcmY zI^s}^-}-xDyIflH^#Ad%qV5?5p^m?9(JIBO)x z^fcha(F@&NUUR6tN4a*|eb9AlaNkVf()+4h{f%nLe;fGC;Jtw9Mmp1l8RA+!=dQDx z1!$Bl#0}Y)o%PfOsTCu`2yZG;P|*8?+sB&}K-S)-?fXePE!C}plvs-KXKG;rg#MyW zz1b1hwDS|S`HARLBr&PWvl0B>QQk`k|9D9`d)f5#^p?;B*t}x8oHd3Q!7}~*g|yD5 z)Q>L-H0MrWU2UuSYE&v@iRarT9VX{=@%PHcqH=vs>LLfwXFR4240T+(RBVhfbFM#( z2>${cv{3D49}{6rtdw06E7{=3_Pqt-jq|PZB?E3XK#y)NuBgHHtalphX~}QC*+AN|kJ7=j>h&67AOn*3e4(6=k-q*MS|Cj=A-q>|@{HpuV zR{PCe)9xQOe+!wQ^n$;N&TMhT3Gk#f$YpvYO{4~8W<6C;*g2GrtTwW##iJ#a^%O`z zO~)(~(-#Z-v3%IDr}vXNVx(>4Up6t~CiTfkFW+mx-lxEFn6M29F+nctPt-yDW8}4b z9o|353--1pT-_cJ8QaT#H;q&Rwe82}MPUy`CwJazgokDEc&9xc;AAS%W65s>ya$o@ z*9sc9|K`$YLS=e#1+m2}yKpvj_Qqb?6VTERI__WPdotYuv#ueZ+W)sul%X9=4CBK8 z9_xaUa{X@_xEpyoE|n6}U>u7ydr8%^w$K?VY{uUB`MHQF)?Y|={LPFP`XZek*ip?5 z6jsb?yPE}(h>bo5)eh7p+uh&yGAAKgE>f%1H^@cr+Nk)aVMMQla( zz+utG(zPn?UJ*^e-PtJ%_VtO1zMXhAHeZ@3+z_FiVOk6b2Cbw?xzR6@Pp!c@TPO8& zdA=$1L@+k9R31{&5KLy5x5oF7Op~r2{JXaym|>e@AWeER0xTXb1@mt59-$o=83v)++Go+_&8M3Vm^gpXrO!Td6wG!n*x7v% z7Fm%nGlx^UH!}hln-|+_Vxw_YcZZe+&fCOD<&}c?u5@OQ1Wxszq=o(NCg(kE8P|tv z;-UX7&t-H-C6;CjYz<6ou6ZxutURkpSGPb+^}aNJR-;iuq5f<=F`QOKvLBC*c8U$6 z%8U0JD>C&XltAOhBJax(M+G-~@`&CVF}y05pV=ZO9{bfjj%|5dPYqved$JXB0=;TZE-t4#-HpbzVB4kzn$?~5vkkXFv>2Csg*xd_t6`rJFn zb1wV+qp*nhRU||(S-TWt+w`#aurzd68tivY{V4u;@$r0x$||nyg|Oy(y=Drqm< zDaw$gK>KH5;0N|scMc+B6;h; zs2obi^6}po0gC-FCv(y{At)X1I4>x*&9uYEM}#nA06{(Cr$D^xD)9I=8^2)U@%XO0 zIr9iQB}r=B_TWD?eXBdrQloo$;lofYTi@arBY^mIMb6v1oHrJ^BFcB{q6 
z(coAp-JeLG0)%0uW?H%&?E&G$Z!iP=0~C)Ao(eg~EpC+US4LS(0Zg!(dkfy<4|SGj_ZaTgg-o-jCz%_T15~ zIJ+?8Bj`Iia?;k0-AaxcWnY<*8vGh=aDvIJ5cRz=PqkEF0%%<D@H=M&772!e!$enVn3AvF@cWCvd{&EAki|U@ z59uHn-O2lsWT7o{tc1RHibYMnX!7Bq3-T<{a?s1DG#rz;I2Ew2fw7Cy$XkDR*b2(H zp(=#)(ZoIL`&RX~fQFvO-RB1YkmzWIR(7`qUCdDut@s=wB+lz4)Re*I;&=aU0rK{; znX7jC769pma2t=fy%@RQuX!B3lnAhH)W2UhzV05!ghh`&^5siZxk24pYo}#WDxa(icChlBU z)O6W-VD~i7{p{nxr2{jd@5i^ap{Cy6&6*LHpZ1d0#uyoxSfEh0C-G6!A1{|m9B-?= zb5OuirrkZfKkM9bihpL_QHY1$sVY6vPea@M2@<^UC$_qRWr|dboE6jF`FZZAdb3PB zVpV;UYRz^GEk2L;gRZdb@klZ%)n$LmJ0H>Ixj8J5j3F0g1Kr!V=3_pF5LLa*1^?^w z^DQ4yasL~b`(8&raz}G?+N{9Cs?ql5#ZG|u!b)YeW?^4npt&z@!C`wPN0iOGJosj0 zHGV(fM1RC>-&kqC$4zVikXK5At2+?%B8OkYh?d=-DzODU9 zUl-j6j>_AmIVbH`R+?;s6O@92$vxuab^KLL?Vg0+G|_#3*p& zH)!sTujCiHOYMCe8d2E6r{73a5NZA$@;u>*9y$UW<>jL^R?a zWw~P)SvSyn`Z~;)l78!kZsY3V;rdy=mZya+^Wn>E)|r_{q0Qs6`vR1{0$y^1-`{`R zPF6x{@sZP$FKmmy zE&=rO1j2B?qD8|Z7tQ=|{NF+AU#!2iyP?j`t*b~WpQDg#Zl~_DtOE09MyNe(JxQ9 zI?{N_!SD2icCh;CD+N{xxT$go;4wJs9a@n<&DUDh+dYJx)v1$>lR_Hg^vp3qP7F1B z*G(G}ZH0lKDShJq*VXg+Oe0Ne*DJuAw83ukGzoLmiC<}F&VkpyT3i^uwRE40C{-g$ zxO8@HzLgR=2meZ?>%fBSgKe(@Ixovp2E1VU-Smg8<$QqHl_{OUj8E zcuI<;`oHK}ulSz~tXz$%;bE4LlZ|4yVKOl8+>HEK%_*8`zipTK)1(5M6mD+QS1rdQ zzJ%DO05Wzcn~B4_Tqpa+)jOXLz_Nq%_2I#P6`oW4z%%~Or>+k(*voYf58wQ5b$e=L z`GZOY$$Kk{FC5zuX~`1Ytb5nbX{GvQ@KzIMAAlRbxxPJD6K5|CnFR^!U=R2uJR(^W zs@$^mbH|q}{BIX)u>vR=|F2t}5W^s;20awQKA1=^MUNfk9p-J2Q2=JZ<$_nD{FIp4 z>jD%Ha521aJ%j(p`&b-YU+zS6a1RYURTaCRf0r{=*_LK5dihNm<`|_Tw&lPcBAbdU zsvsvLqg+JXQT2Jo5M*=sLV6FY&@}KRx-^@3Au_tz0+qqTD;?4{C3c6+>85h6|ol9Y9$yZmZKIH#I)jnZTXtq*wDf7V9Lhr`oR!@2Z=;z@E7^w}x%h0M)#2ab#u#hfgqJ>=KL+2kd3~bz@`zg7p-CsQ z(m8ec!~&+<=LZSDH$3IxL4Cv`ew}4&z}He+M%r@;Je z#&MRpu*=J!Wx-CaQF^W0jfvo{pbzjB-zy|Kz~9BW>lnVI_j~J#_h=ly4cDG)r0{*X zxKT}hV3Y0Q`K0mHm-P##)SB<8{NW&VqrhyLW7CI0ZiMkwQ`>#LV19S2e{irkUuuch zk#y6XXcnv_E82lyQ}*KDUQt+QAe+x@>)2+^Qm`%m8wPreYjDHNxAG45e0w@LBYnP9 zA!GQH^_&r0JV^z%rli)=(j68SmO%?&@mEJ^r4ikV7xaTbX!E_O$8(5{^k&XbkU~~- zZvu;*J~>n_KY-?)xtp3(b~T{mqI7%vi6h3$yMaN*%gYNQ)6FOTPRRFqq`s7*-$z`| 
z#wy2^)%5f_?(ySlQRd%Mt)U*kWvVbPoq^c8=+%Tu0?OM=E=200y7%uVv;{c#jKbNNJ*FK&6%kr z2zDr$wc$%jDMn=Kc#LLdCHgkp1Qr8sdeN>4#hYnXF=`I2FF@-cWWk2GwH!)vWy8w! zQp!_`*JsB7Ar9}Sg8<>A<+O}JN+OcGT0mv#Pj0GPr)^Zd*KVZg_rW6P?%Q1d%jGM3 zpi+^nAr?w(Y3{uL3uTCbn5o?2SU?K|#0TI=K%^F0~1afI3KA6;E-#&UOk z6F5Ai={^}9k+`364*oZJSljec3G zdS>Flp$Lm~cZZ=EgabPr)9s6EmAY*KTjfCq~F{<$Omg{acO<+mu56w4=z60FO z<>d-sRW-FsFQTKx*6gSVI^sz`BYV!~n6;D*f4Xr0OJk>nnW}Vy1O{`D`m)=*7yDE~}YSiiJX;Be_Ijr$wy1i)p9tRzC z_GRT}eKz)C?LIfZv|-k8_0jEY>+zo|D+*u0Q0G*!z(M=a9f^hS_%A@qz1LYMd-hF7 zkc-Q=&k3U%vz`V5n$TUXR4LcD0w^YySCOAt5>url>3pa7%UJ_r7AkS+&17=itNtSk zB_0e_CEv=KO*DSitN=d`Oj6?JZFs{``gWz-?SMaDd8x@AnODW>n-t+u_p zsA_Ilx~p}nK?-WX^LHX9CWp^ zike|sr2|`;_M3O#v{gaXh~5Am5g}m*`qk4!=-VmzAz_=hsA*n2r>ZzZ>^$zmkH$VY zdX(6QX&MxCMZaODxXAM`?|0C_=(p9vWHKbuQM`D}?9B2{go?9qqun=(B`x0Nu(WQ! zn`LvDnT@TcF#zrzPh6D&0Q2nmjD9wdc*E-7WT?*zEy??AK@jrr)w0`iz)>Yoc;{;( z7~?7Z6XJd?IF-XDhSva>UzWG z7aW73cT4V<|JJsk-xwLm+if1j=%vnIhj9@u^4HgZ3`C`Vz@W;K< zt(xjU$P)GL_k? zw(ad1_ElsXYTOeUsOgR@t6_gZLyn9L)cM@WP48iQW^EKn!@+Hy8Ef6a%qDVr{Cvk+ z%^*r!4EX}f6HP_K050*UtOLkvy~{M)^RMXWL^fUEw?ubi;8ln1i5+& z6G)dl$5jn|`lC0(u%dG=wSX5&D9w$9sCxo7cAU|nu4*KPk}%LKv&mUxIa1L};o1YC z_XXYv_ehv+6_t4uX0VLIP%$}12>$1%fbV+FhW})lk2r@m0?Y;nghoB6aFqjNV zYJv-yaF{uR73T(vEtC%^$J3>37FV{Ip_2E=W0+~d8R`qzj)~&VkJaP~xw*UUNueBap0dX{N zQR8)b;}-=fV8f%+FF8EA=tRj0YkJOH4^?qE;t>dJ#uJxil(FX>mnFAFsQYtOU9ABr z`8y*r5(Zpw)1KFYGln@#skd2e{gt`?yU{o#xrhbr!J;%ygm|e%Qq3fP|(VfYy zE4gSA>7j$0Z~@=H#jA#}qFo{__rwgFa*#&pw4MX^JIKEK zpaSKFr+n-*FTz9na8ZKl*5#ac^!AKvfT<#{p`e*Q;YPzN++iJ+jNUbWNiX zyUFSfdM`H;KtaNDnt0$cgN5Z%v{+ap6LjKfEOzy9LeJe5M3WyRbqUtrvYX+D&SW1S zqTk$YetSk{Cw-{W_4xPIK}5wU(rB_e!m8$9b(u{+zXnRI@ve2_UzXpPud=i#Ijtgg1w>eVhOHlqy}dMC6m z(j4qzPi!wo8L3&RRV7o znO*;>25Jy%%rhQ5cy>^|o_Fia;MaFP9;%GYX*%gz2k=ctG0c(TS_&U+47zj*Y3`=16Y$3_y*braF)Z1S#;p)e?hV1@Twv=reZB?d?{Y3)VN;!I z1lb1%f3nC_r~f;18XJp>-MZN?dD{D_%l&L-Y1TL!nMuwy5R$5yrOFJW#VY`E-2hr^ zX>k!`Z9wEfcyQpBYne&<8@%E9OaE>4lgMdcj-$OWjhK!Dz`5E$CxMq8ksL)^&?vz4 
zTR&rq)w0(VCkITaD8pviPtNn6Ad(OCcg7sl;28pgc)cFnBr#vP)SR!=w_%)(H5dupk?F?wO=h|aZRnoGgaAl^s}irhje470q{3= zuzP2NY@TjW7aERyf*qbHB<{22+5f+GWHss{d*$vtLHuSb>|TUYcWZ6!m`MEkzRA(t z%=c5^zhiXQ#f_Sw(K2=1WkS4D;9-S?`^}~+fL(Dqog7+u9C5z;quX^en*EgDtxG;#^El&OB~a6KbJ8~YhIbeQHCnAWg?)SIqu@y;l5ht4#iokSNAYi}p&tu*Qs z2)1KAHb{_wxD#cr9E1>}2!()@lcMPTc2Iw;Z$-F+_YP}ePpy4l97h4js{ zgw>)@_DIXTosDB~D|r0C!@K4acQZNs^wga${6w9iXf@iMN#-QJfnS{YOW*L zI#$C{w>Fy$xZNl?nBNk4l>h?FWfV*HwAGTsytG!_`}fL#8igrhl`WzKUHnHT1XqAB zrxq+4`=`47SEO5x5@(Z+Du`Cy!TOhhh7WzRrYduhcBdkFjm|7d82>wuAXM0mp$L-7 zhjN~oPoS7p_>#@(K0?mhm`W0X(xwC563!as^p-ULM=ksAnJyOM*J#Mf>oKRGR0;cU3-xw|4Hfds7VBW2k-d8 zS!D;GFb~SWG@cq8R>Db58pv+S`%1Hes7~|b<{M><^ybVyxRn@D&U-Eq#>Y@)MA{=V zRdPaPpU|pKhC)BpMS>)W@K_^=Z}oWxSiALhDxuibMa~~JJ0?yvJl)+-JIqt)C-{)Q z3p3bmBb-pqu!X3k*yU#%?K8Vo!c6|3w8b<0o`P`yX6cfWV34?ANXg+bzY}OE8#XYP zm6XN{Al78We)5@wHjI^)7!*dF^5HXHTF-+e@R&z>bM2pdiwD(^Q0&`t%3!itATdHy zjJ|2WrJ>r@VBVEx$JzIAr`NCWn)!WQ`4MqrU3)94*PqU$(+up-XdM3oKaG@;;J5P! zMjx}7+%r#s-kU_^s_AQ!bY%?5-JyQ}bZWEB=-k}Cg_^}6^KSO(j;pKF%lsg} z+Tb0Vj|Wqni^+)%-`PL0>2`7QqXLH44Bze0{}>j@P%h z?`}qqPE93ElzB#|Zo?+T|5yfzW@-H^89S}RWo|H9sWd2S9Il(l6J_k4amGiQ~Kwc9Kb=YTK8pCWkYhS1Q|Op|&XhqDHPd0uV8T_T5fX z_#AO)D%746&PZntRav3pswn*XlFRY+9n<5+6Y`*sHy%cB;foK?;2&_2r9w?sjq`tu z>|qco;>j7(vzwcD6Q}gtKzbK*XL07O&NHzVLK3VEnXxZFoa~reYAIMfV|8UoZ~*DUv>V=OG*#A8>R)_(i;@UrQ?Ou{>Z}BS~pbSVl`dzDyEw_Jro`} z?eMNf`-6sy4{zUmuH#?`?LvcSC!jRg^y`}Bvc1A*feB#Yoz8|#)P>T~kk)i4*e>ZM z#Vefk5YX2UObiAy7`^oMPs3dxylDJ#L`DW=Au8tlSAldRoVK~a4?`LBlraVuPcd2o zCH*kDDtvp;?YdSVP_)edV*9;3_&GFQY*q;475#@~CcLL^F&`j$AGH7NZ8lRL*^5jJ zRvOpbES}C1ZLjEcoL|oAmW{rGFZG>tsS_kCRmH|fq~RXeSPvB^LJt)1E6ATmlb@>$ z_?Bu-U{Qy(xGs{qe@66ZQmp@N4L8LygC8zj-HVjn*DZGNp7>H^6VGIC8Z;3jetl85 zk`f#rf979;7;$1Fy!3&7*c`B69MpOXi|3G?l&xV&j^iC?=1Y|Q8B&lL=SUe@{JA9! 
zf_o%aM8ZV*LwI>9*DH)a9U^chPHH-jtRz=f(cW6;|KAwtTJ41NRsdQEaY$E>$Xqa z6WkNxMgbSC)9>o#B$uRTYvNX?EsCEjpE{_$==gEoW;;V&^(S}XQfQ~5smr074IA_q z8*sD%!EbMG@Ai^L__Bk5%2aV`B+?ZrKY*JAgXElY)C&sE0w*N?m*YF`z^aU&4X8^k z@vHE?QA^W=$T3+?T{&Whq>_>(ky1+8HBLu5TjX^7{ zoyA)^W^SSWs2PjQaerg27?4S7gTTb8p*;}bdhuh3Jml&^iz0_F)O#h+dyc^1cl1cT zK{GJ&@kay4uWbiw-RP0&6Ax#z<-nb=h34gbwe{fh%g2f685aL$bKI#`M}gVp4ZqgR zaTX(Y;i`D2*UQ(9#<#b*=+mE8<##9967H85+5R~7#`lkR+mn;aZk_;3OjrVawA{)v z-1UgF0xSeK6t&|K3Cm)}T(c>2T%ZXAv|^IFX1U$-?)MS$z##bFLg|i#q(`6ZwF3pKGtfAxQd1>JM3@mm!NOyep~R)u^V4rTo8I)wmA_?T?x(2 zJM+>aHIQ&!DwxU#0`u_fmBJy&G)VQciw1bSsjxUHIdLeP`ee{tsL7?({| z(Yuho?CgpB%9SN5W8-RhXw-;Jhse^>>j~i>o-5nFc~YxV1K{w#7nbh^zo)*3u?+wv zF!X+bBFUE?Sy))9<7uO12p(M_5f`B$(5J^TtjTyYW#*X@Ld?S#VK(|4@-88YgbeZv zVE4JWdO|qMyU<@&qoc?;s;lE5^>dX+z|eyCrSw7PTNXVwcUt|L*&KH|Cdh7NmywsE zQ{aW-rC6*G*QnHvC~blemwe8tWqsQxhqhg+&*Y2mat_heS$?&8?noRo7D`T!gcZ|o zxrj6bB8Q*J=NsVGQ-5+?*t*|po|46bX=_Opm%S5Odp`co%!K@V0`K>-@w%^DKP?De z{7~RtPS#_y2JwpBNOn+p=Ya_m-?S&*9$#v#Dj z`CQgB%(J^i2;rz#tQMtn{IX}T_R!=SDM7mE{zU5A&Lsm0xn6DjFaGy2z}Y35;X_&} z7s&?gt}k8&7?ZG{R+5C&PQ1hGUS#gPCg}8z9sVSi|KvaH!$cgf^L=~oiToT(Gqlgn zze;hwpW;FRF4kTjX}pML6JDXM1&j%FVWqxIPe$y$O6%VBr|M$=bYBXin3y_m($VEE zj5c01rs?=d)B&W3!1T3-KWQ02pnq7P5Od*{Q=G>&reIo%;710gzU2yTmh<_}S@G*t zt%whYn_`KHc$k9ytk<&l96Ko~cKkGZYFzo9{GIJbz>FivYvnbrRtu)3-V1=_)fTh| z&mi;yXi^(aQ9u!={BoISZU3gtUcbmAlhQ~_LQJG;q2y&;9S1euGvpAs+s6;E3dCC* z+GeYr?DmPvHu~?hq1xt~XRM__yb+;qx5yFlG2pLbN8=?1IB!-DCa+WIBCn5*yqA4< zGJ^If(|$|ulO|SIMgZ1ETPAEu87m}m;!RDBtYGHflkv_B-U1}$?BizGWmk*PQU$ZS zeadR+TM7j?W(QMs+H2@GV=@t&TRgzQ%BiYuA#YYa@<6%|rX~(r5M-`rX+o3eO-dYh zmVR9_*UM^L(K+Zej`Iz+vezxQ?zSau4(hL*kLnk<>@RqBABC}uz-NpWT!D%J!+FC? 
z%S>Otwldy!JMNp2$GtE)r4)RwiN=YDF)KJ?3 zi|OF&!Xr_X%<_)>O?|?RF1@nI9-XH)Z?pzCwbG(l%)vnS{~e|24FmCM1ktP=49vJ1 zEGopZxV%kp)$P@ef!zblg25*{Q$aVBn1e~Qpmp3=kDIxVi$g4RCOv+ie?1L}TXx0j zIOi=V7FJxBlVJ~J_%LIY$#K`*NBuaLlAEuQAFPsp8*?9)eQJQBqnlj_zOS8%D@cueI+bS{59e*aD5n~?be_OwSiF1FAnOAePpeS0p-J#WiOsfRp<1<9U?8lxvTkIXu6 zBsb&FREfLa!s_Bi(|A6Y!w@eHbtSGvOTltL+H10d0Ym^d#WYHQ8N2)o30JO06}v`S zp@D21$&;O&9V=d$LlL}qcg#qca8M5wPA?uUopfaIXfXaD2I$FCV#!1yj1UDy%)vCP zJOYqpb0w{9`n$%IwenaYS5sT+X_+4UJ>VsoPVN3uZPtF6zV2R}Arf z>--#SFIt`Vu}lw9$28~`5WNG=S)~*4K}Tf1Lxw@hI@W=&k8e`$;8@G=s>O5nDoo0| zG4C!vJ?7tuGb<=wO_?y7jk!Ecddf!Vf7{}BPQNfkp6P82(0T4-uya5%?w9x`P)}j{Q;5v zS?OHr?18g2ocz1r-1i2PSQ$S7207)l$0*gT=9AK15ybOn4fBM{_|>Tmy+!15(APeR zym-A$CC_Dwi%DPMkvgQXo2x@1bdt~%-K@0iHdJ3v#3DDl-UpQFF3JK{)`i)K;= ziJ8|Cwx70oFUV-YBk0Y-)FQ(__~K&xRJV!%SAZEZ3f?GZF4WFA0l5M)M0;;%pm%1afr0o=PP z22M6N!--QUw6#t7q-S{d~*q zi`;=r37;>k+gr|eizoYc%WKF!ngElASDPg@v(pY2s}~EtNq_`-Gxsu>4*`%Sy6>mE zujZ+Z=RMB+yKgVMPLVZ*N2hi>>t}Oy28t)jlt!~jtA(dFe~PTtR>WI_(bG8C_+_aQ zEen^N>dKQ(zqv2t5*rdHU5Xd?3j@&?=0YaqH4g!RB9^3HBJ3O?K>5YUmbh~sGBK*FSc<4g7d0&_=1wTPCw<64IVZ<#}7>%r6LVeJ{P` zPXj3nMxo;s*%#%|VawH&%}9a$F?_LYSzTi-_A z;*XoACyXCqpWSszKRx5lQva|;{1o!_Lxj>-N^R7gXFW%Z6fH-3Mbdb^rY^L#05Vm6 z*;QkdzI8iFP>nd{kP#%t==r!)XY#Z((M-neYwDo(ESSe!=c2&qqDrIWOGCA^fQRlZ z!ZT>G|Aak>Eh6p{&pz1+{}ala-pj@Qk(?K@Re+=C>1Mi26omd|T8H4|^KVKv-@P5p z^l4MFr+Ytoh==p!y;06yuL6h5jJs&djZACXaozmNxb}2n$C;2| z^sp_i%D-o+Dci{uyu5G6VknDTt|e@OH%?Mk9zS)aOEG@>o4ENM7im6gpGfNX3093l zMMr%lFL>CzrTHg<07Qj2H$4LdcuPH3uv0n*sKp$kd*6?xQ^2;G3D6+|SGXzgk(N+f-b~TJn=3%Y;f-E?qf%ZtmP(O9 zIeX}b1p;Q*`(~}cJ|g@t|M-m4vVZrSgH^qrwze(m{$)`{9qQdTBgT2s>8Fw;|NgD7ukY{g|NG~H5~49DnB4L`%9+yVBhRTt)Vretb-g7n zr=2?TBHhWL=J?nqoJCZb8aOk21n6h_8G-otAyjcXu39D91Ij9dMo z_;k(=!5RORIU*B13BFN;_gUI4vFLm$CbkU z*6@64N?*?&(E;LmLI~t{>2d!){H`*ZNY6<6EijRQ3&1tt$uVSLfIGnoZz5X2wuTJe zbghU7+847<6BWG#_9h$0AMJB30ZvZ=oHuZVMHw=4)-t|?E8a8D9ZP)8h@t@)^ zX}gUQpvrkC)r}QE`86gwD%m85PECfp|9=x$?td%)HJx~wh2;1^30eSs)6HYuB};b` 
zTrUXRG;=e*l)Ig`(8>JCgDNS>(Nd?6J-wWcEh|+PQAa>zqj(|t#>Rs1j@!q|huMb~ zm&XKyg7zmbM;3*m;y^ilre2z!ijE~22^G~L%Gp0_Dd_OTqhfrcpU3>VI5Co}Eu`7b^7tY;QE2x(CaahxI>r+;Btcy`1>p z$Tvb;n}a#b^o|t_y3!}=vVwjjRjH(An!k^ZdCuEdM{~V?4MIj%zUM!_o|0usNlk-< z5ji%?({+k8I98;NvW>?#Xlsm3JWg%Yr)tJ5eu$k@+ztRxF8!|Y3-N0aOi2F; zP!IW28JKo`qGk#CXyGTWjLy^3Au$p*PE9C5&zNRZ>TlET{D6-&f)TYjq~GqYlOHM& zLxL>EA;%+TyZpH@;tnZ3Q*2et_r!&vnB*j+O5~x*sbZnKAAH9qmhCPlm;z{*NkfuJ ztX~vJj$z(Sq?ub-Sb${c8Lwv8?7Q6=&?=z`4&yT2DQ(J<@9(v&QMH(%F>t-=RQmu1PBx})0` z?;=)b)#Py}c1k5J@}=E3E6WdI!6)N5HC;hEJJqNgC*|nI{}7f8Oz>9WzIofMJ5lC{ z|M7NGA2`B^L0_jT$N%V;rf$dCX-|*&R~b`vbH{mWAv<8fqLJ{Qe}3=R`p)lg`t-cEUs)%AxxL%)wdcl%zcEo#9?)pl z-SR>WsERqioRa`Gol9{0>ml(Q*V>Qd+G{zn5V9n$Gfhqm8*pS_uR#TRxivT-KvZR8 ztj}~ir@ql|m~G(4!s|uy#Fii0FxM<*ICML)-N6y){%5e3K z-Sl%{JvXFFnIXKR_}l6=GtF`!y1k~`_cBFI{CW@_%*XP+Q2&oL+aPzqLvC!Tn%AwP zIlp;4+w(vXl`vxZox5d!^iu2T4fow$O$*!Ok>KMbswVJAFleJjPit|}Ax+cM^A88R z*>7uaza5XphlBbAIQfossXhvYPzzU2QA5}nO~jx~(ZJwu~z!m3I&v+Lsfe70FnvtjuP~24{PV zW)Xy)ZY4t%*s3_`O1sclzYBr3=9pYU7(Uw;SSNN~n2lugw<`FKAE=S-J~jKtCa}d@ zVlkrR{i5m9o4KIi(>0=QBW|U4L492D=#JwHoO|(fd;{I3^YcTeuUv1cVm>BLTD}k- zv26TQO%r_K0{v3S%SWN@x?4gX1`-tw3fe)whW}y74{-DFXXN}31PTEu%Dn&J@gbG_ zrnb~Jt3 zilxpzKOgQ}`$cb81eg(C@z6yO6Wh)>J%;z2ZMwd4hO?uf^mI}u?=6v_Txw|-+K5!Q z+K-~`ov$J_^i(t}DU_b`5^7bA_1-T1eO73FyD%$o`DQVA8s3s!QZzNaH|c5_i@6&r z@ruyZOIf2P+Qklly!gqhRxzw9}d@^sd|vJ-5A6*Bl5dvz86wg^_I+En4d*|`0j?W>sXB4dl~g(?$L!& z0@%(x+hAp2Yq;h^m5yj?7Q{R^#G|sbRPTiE~-LA`CfNDDdWE-p5aOO%UCv&Tx6`&Lq$C;^TmrK=8Ky)jL1DNMIj0) z82EkJ9+TLHuAS&b`2fIWoSW;{_Nw-BJz_B@hX3nSjo~RANfh|T0s!I%6=(%-j<(0; zeuWcIOj*U?y~+3BBd`mpr=@6%8#@Dqq(4`j7zUdnSA1l_$Z37j$SyCO)y?8~O#k)U zkDZ=^--$aIhScyx=cQ?;$xB&P;5if7^Qoy0KsXLJG|UGE)Vq3(_c+}uzuoRF zj<2@M_$I#bbXU~F7n1pL@9^VKs%IqblTETo**1b^NUMp86e{I;ZICbCDCz5U`+Hy8 zL&*1E(`*7rd;Gcl!t4ZR1vjs+ui+=A^TJ#ksE+ni8*}q5yRHF2&bo2+HT7@-o2D)+@ zK#id{M7f3O+Kx`=msC)?mRtvqR%j$4;QxZd14Lu3HCBp@%$ztt9E=A4-;TE~KMp%E z>xLL2DFd2>X;I55{}&q+q9=l@>$8ywG(I-c4G+)_xDL4Qs(l~Wy={<*hHwi`DoE=>Q5an}7=p z7!gQxRV+i!Vusf 
zZDls4?QH1pzcyIbnap82>Uw=Q6?Znc?S9{uWJDsAR%Vyw(EnqLEQ&D4@?ShUQlFEX zCk2#ZuhRp9Ud2}gPQAHx?dExDs)_&uB5JrHJ8D(%>UTDA=Tgf!Ch8P1JA6vdl9fv z^`GzYZR_FPZ-?7sIvfLPGor=5$@BcyagH~2)MWlwbc@~lTrFgRBDXoNx^R?fb2C>j z?=Ej?OYQ<#7znjARfY?O{BC>n0~L&t-=c#(GF9d+VD^KT@C>6Sr>B$a-B)hLHDb)1 zkxujv0YCK@hvYGs-aH^H-dm8xa@)rdL5jg4T;tpzp_VwQ;A06A>k5^Lx0GDF`EnWz znoIY$uN_u80&3d`1I=r}$wBmDp_X*xH^#6MMIz>`8OB1ZRzG7p{YCpGJUYYx{B$n}So6*vIMihZN7eAbsQlD?-2>MSB8F=b~9 z85^6IV{Nl52j}5Me15KH-dFtrw+h+@%3~m0NuEpVVi-_;;4t7&t>btjd$t7~hQ%p6 zt>f8#Grw#rqN)P+`Lmd^URcvFB`q3g>l)n^8oh42|9$=UXk)5BQ`_z>8=hE`v#};4 zv;VPGTPd@|Z9z?d$-m;}l8h@u1HNDpTa>!;;r7cHXUG@Rfc+RhxX;mYgJ>a0TjrlYO^*Kf?S) zJ6b%at5?Z*T_Rd7TQW;FVdIFtf+rQjb#Z`9#`Bk45jaU2Ce1(du6JESxitBn$H$BI z?@rq=n4;qp5tBCc^!Y2y=drbhI(gyb%mLi?{ET*N(E7hhN>2=FI6m1VwS0RE zdB)7#N>3fhrN$g|p-?t`vMgsX?>=|9i1lB6-n!jn=o;RA(Q%K>1FNe2@~*eq5ihJ> zJ;WKzW@eBRYJwq7>gK)r#wOheCq=-M6OdQ}y$NG$W!ush{y01<=QlQP zesgF3KVdpU%9=pfTyIO9A@5GZF2sCB1(8dxgO@oKt~diyEd)Ly1EKl8F_AgqX7c#?XNhc)%BqpTrzr!F37+ zh14S|h5D{ODHkR_k~Ncm!<0l>%3V&m&U1||P5k?Nf&SX@yJF~B0GJ1!h093KbCFtLd||`j>DbC5RkGuy$f6Iy(Tf?6JDc#LU%+LTFHfip#j- z4p*Ds&z@P-9)%<-TKl=V*i}@NTF^$|eFmIDXB)sO+ie~5`)?vk!EbGcYnBc#rbI^D zkBh87>8+;`eQcB{*D2FIT;WIhxgYs{d#Bwnx9?7}eXuE_&N>z>3}ERNK_Wi3l?&}` zg`+5&P%x1`fs<1E(o#wo&x+>@Sp`|F*q9lBg5x9IgLNFfD^1O$6U{-a1QX6{024(P z(2g*1o{p>u6K6L#!{7002u6 zd<(&_XhAptSmXzwDcPj2&%%6o{_9SOMFW z=@WrsV#4jlzJYfpcXD@c+8uVhn?F2nxIrfg!Wo5?*94&K#DOdV;!<9s_c$%QQ0w1w zhIp3|PzXyQbsvGUpgdlZp-8GI^g7F=8Gut-5e4Cr295|}wl1tvwLPyrUaf+jxpp1SUyeFm{F=tF>TSmwdZ`tubDa}L4>bN!uy`l2I?;guMnR)fp zfrW}gc8sU+KXLKWVUu|g{1R}c*DhSP6WT|_AbgU$EUF?LnuFxsUFgpttE|UGTLt@M zS}#`KLbwjEb5{>9qB+O^<70izyaVU9^1<;OIAczo zq&ReQd!>UW;4;(Z{-QGVzVJ40RZWj>or_QQ2?d*`JcJ<0RbIg^HHq_HtJ$@hYo4~g zMoMUudH4jsh=42|B+RA?XSE&;nP+2V0lMIS+$2Hp~-wU=P zXFmt^Hc4|f>n%+MfEsd9mcCK`S#JS&X|DFf4+p?5>TSYM@}I32ZwOL%CuUpt3QV}| z{wVU}?)NJmPlX<686%{D0GOl6oJ~}Q#)$!?GJyD)bdliiZMY>dRrSX*Rj7qx`+?9V zp9OIsN6Z`Q*Tx0P^86K@<42x$PYmeYR!RQB=@4|UX8rBa>(=` 
zAYo@T0fxEw4^;vI02Ja<$^Ep1Xu{P%H)Uh}8C6%@FmE!gie@s*<0dv;hNyOi9kYji z3+V+S9@_5T(gI;5AfZm@=BDQ6`9pHPzU%^PH-qBdc*FqIwxVzXB%Igc;_t!O+P?f2 zLR$M9ML0Qkj>=VwV2Ilp<*Cdr0xMZ*1K_#S0%VQMM&nh5cJCuyU0Q1!%7^-F{KS|7 zAFNYr0`c*ri3;m!VV+VO=;$Ax8tulMHWg6_c{)%?I}XG`QI|qRA&>h(2x*zd*DHKl zsdHjH<1whbl+k-z-e9cKo2^g39donU=5Ybw60{5QTWRT)ZOJq`ML2eCe{|ZrO$Uw_ z2o%D6n0141e`faoisJtC)P*su1pJm`G*6H45ckfgLG*2YU)+!S0h|cWzWXPzSFUZk zSf~tfks|BBw%*UpYzVk_U7g?r%i5#?>{K+V>Bvvjq^CGDQ$XutBldC!2U@_9Y-Y0A0y*$D` zs&Zy5$Z1@;y&XNPHExm=*Z0HHt$O<}8=gOUPiy3htzTSrpFN=6`O5rbq6I>c{!Ndk z`-jA==44L9xZ#@^~Elr$GdMJ8f{0iwi6QTw z!m4!!!Nrv^;l$L_R5HkjEtd?Mw;OqBwrywCWw@&1>&3svgJI+tfsJC(#_|sA0kd-6 zC41_y)_0dnGWXQhsj55cR#ya@xe{K6y47a&64C zy#6G+XT7!3Wd%}LTAGMBxbS&c+2?CrVN| zI3p>kHtxBU>)5kr&v-L7{|?q#j(?d}*vKnSKVoeuJD-tJ#ebh*Bqi1Uy#?o_zo-|) ziv61kCs#cv1^T(RRCCaHsH&PG(aut_+Iq?>yMn$7R$amh_uBH-&`I`qQrI+a zSG8=v&R&l~=*VeLbI^J>wM2}C4r;@S=}nPx*5n2lL>(fsr>w>vRhX7f&NEm%kkp76 zpC-cfK9gdS#=BTf*WzJO5aJ2~6GbR7->`U(k|R5b)l3|xsLr#tta6QA-D{*i%Tt3;z3{yk0sva zd3AR1tiF+fk)eTsb;W$Y+H+j`#OU(s#?msR$u_+E5ALgU%ci%luG%F&tlFC@_F1vk zEVzcBAi*4E$`VFZo5AvqDd&*O`v5PHjQHXz8! 
zqSTX4@rL20msBhp6@Es~QaP;x^a&lGlaLOnl6c!s!wvLJyJ#pOfK}YE+#>wOjMH|1 z4~O3+21t^Q3kV3R+EJZ!c>`h37OR-??tKeXd_?(*Ff=?+N`a3xbRiXM7RxPB=RHlh?i)Hx;6OJDy=e~6wH0JcRT_`=+``khWA zLxA`iwkj}zVAAJ=&m3lSb#+1F7S_mz7J>h%e9lJCkocqZP(h|-vaGFXE_}mqVhU4g z+0c5y>jsYzSfa_O%YH3p&ri1SxnxI3QwdTFoma}{sZIo@w6sWxDqtlNcuV-Fkoc4X zC+FRvgUG}@aR1gVA&9N#c!YdArdpVK)koaRI*#<{u-M_QczN96=I^VF(%%Smx(zWi z=EVvzreR@TnovJ_$_&%o^^f-J)DA-%K&R~Wig=?m$xDwoKL5f4_YGZ^4T+nlg5qSL zFp3}YGzqSHfuvOU6{%&DHfHhoeg5ijxy`F7g{K?|Mb@mS6*+la%H937{Lh60a-ZKg z{N*c!eit#r#b=w!X$`v>E%H4L5@MJ%&CO=RFU8jQ0NY`-%j{$6fsqBEFwDY2dL5N$ znu6G|(VOX&zv$5Cy*nAVA6Gm0;GssUw*h7m2C%_d(kUomoVZuxlA~+G^7MgwCv`F{ z`t7GcF2F~Epfq5ukYP^9$XZAs5fK1i0PlhHmvnUYLw_bwG@y3`2mm~&8C$5w$X)Dq zO9h`F^73GFuA5g|PRfRJ#a$Mkg<3{mGcc*i%A%Y?K`*|Ve&@ia9bQn6RF9%^{ybnZ z<*)b~*X+3epUg&?k}73l2w6k|i3R5n@jCnDr>7q+0D0MI?BA({`dD+~Bx|*_)3+L^ zNsylfuC!f~3^C(3aUB`0C7xK1oyNsX-lc<=EMEx)y9wZzQW{3L&NSFFS?_^N3ue$G zYv9Eaf9hj1azHY#3DLJ*^%CgbL@T(E7Ul@QS=|3qZS*W_9UTKfz+qoUtjhOWo(CTF zX6Y56OpaZ|IfVbkWpaO31GTgtVrZJFw;tRKvgNTPYp|GTP_Nw3=cAkrZO0^M^&TE2 z{bPj?QV3$2W$W~Kqv`vdGLlKOwJ9Ftg^hqCh=m_;n^%nb=XsZ)qCh#+(Y-^D*@UJ1 zoZVc%d4^4K>w!TZhUl(d_+f%SZ~V!oKg z)r358sKnhlvwwK8y&fMuh}5*gAjMl_pzFq%ep4DG`$v-iU|Yc#O+UPDpbFCCjVIQc z5~25uLWKiO4Z*J=V}mqu6e1J}yDeqQ=XXahOo`r5Sv?E(l*@?j_j&$jOdv~|l(xSe zG%*wZ3kD5_s3(?XV%IN!hpAqMc!Gz#yHp7)%MyiRwn95C9?zy8O5UDcprl##f4LP{ zNV4$Iz7#&ZSiJi09fT4nFBL}Zf}eZVqF+FkoQ3O*^hX7G@X^*cX(qq14-;Ab)Ho#q z`a61H(QFfYi2i=@*+Xi{@4WgGiGk}xLV~ujd$>&KshN*Nk05*?t!;puKO2xsvva=v zv3`O*Hnj|Wa<{_?QiRCKGj~(}2N=PGjxbPUU5tH=lf-Vth^}mfzhPfl-Rp$2@Nt*@ z{Iu9zV|~7FoW{gf4K#(z@4D4$!G%wL%M?cI8u=w7a|XCJ7;!ez>E5&HHO#ob>s5)! 
z_3iqvndDL+g@LLdW+~?Bdr>z(Z{NR~XaUm~p|qsNA~_>yQ#2=?rk1>lVDlDolAVLX zrb6#$kEGYwJcw0MP$eIE#HA-aoY%7C;YO61pCA7DYOU@1EcaKY&*>>ShtiW|K+KYG z9HJBT1~ya90ag@xM4>$vOz|!rFaV1{*RbPJI+I06kCAdj3JS9c+CCn6bsBn*;1D;` z1d(=bZ(ji|Ag$_SCD#dDc;1f;;?LLE>|N(CcdMJ!+J7l&L}GYkl-DZdWC?zd_23KR z=Mdn@_W*4lNQXR{Ai$-En3v|UcYn_%vgYg$R$9un@l7kwZ<0rDGA{)Aauz;b2gkOy zE_?jFd$eC$!>Iq)TDRSQHtH#@!>R-ys=cYbDeGfHXf6u|Hv1Sc#FZ^^hm@SA)_oi7 zI#3Ov+bzzPxDD^PKd8F9#R%MdhTL2G`Z13lZNE@)>8E+#(ySzOtj?` zy{fy}|2%t`h0cyKu(cD6T`Pa2APYL1zdFXrHP!D1dQIwA9dw8DyW6Q?V9FLAg+uv6 z&sJ@C%t|PF;44lwZ2u|9;NtDgXtH$RZZP|7NkDn6U7|cAS;MAr)f9>8=h{7E1ehT8 z)0q-euFYy8!_Mf3u2VPZ;>Zv5>ll;fk}OpNOQv~OJoP4Z$lZ_mI|Il+`tG4jM0v9Rwx)|>|}n4nB@ z;d?*|BC>vB44~%MI%lUwoW8M%On0%!RRk6>p_v)JS(jRGt^E-oWDZtC57nR$v16mb z<>{V8puI&}!s!NZ6h5F11>d@V zGKz&06ORhbND`zw_l6#I#mf+lU0t<4HKhk({%f6x5$q%eY_gJo2v}_@gsWYiC!`6; zWkcUVwK9}~pIb&!TUbed^P0yefh zkqyi)W0V8^5s4arF}_)91YDm{DUn_1WL@STsEB}veNSSrn~&V7&fI2dCFuPkr*5$x zBV>+Exa$SiC#dinwL$BEJxA84Ji~s9ZSeFqz&=(&Tset=KQCRq1)t z@hkhb2D?|E!B?{tHmv=sep}|LH4A}YoebLE+-vfJ7osE!l@)3DNs{&^3R*2Udc}?< z0zicmzV3RT@?GXsyaA=v(rTsb{y=pLh)JnTt@jaARJ^j^$$DBWRuE z&himC(#ocD1i}@wN5H5|jDL&vZeht-pCVajVxVHKiAf5I$Z-J=AVefBo1#d3Sv;qZ zq&`VH{E^$dy8SUqJ`UBs-sb&+Jl>PGk-7qoI;0u}G+Zy) z`ab*M#c^166gBxrC|mMT^CyQ74BCl;&l1wjY-GOHl8BoJ??K-7u_hot0Emk^y$+Pv zSu)xl00u;8#7#%$!R--d_MfIRqRU)io+8Tf2DdXeF#* z!6(NNLJ6fZXMMwNI*@cBKcvT~CcAc-7i?SEn-Tua>mbWPj_$!&qhcT35IrW#Wle-; zhMQ8(81r5qZa~d}S2>%hXX5PY%7j~-rqA3v-)swk z^ZhMgZd=>;Ht$2|IVROufx|UCrOvAdEyiHG?V{g)Qg5Y8OTD`s$<{^`N2#)a{E4Xq z&IYslV$Yvc*krVPwax9Dkn(XCB%5-cABpZoO>Pw#Z(Z9PrDz+>=Y54mt%>z*-Ly-I ziwArC){07Z!3PXL*^b1{pq3z6W$TQGtol4VKTv?20=(xkj>W~rWa`syMAr0S##=4^ zIKs)A80T*SrXrS*t+1n?9?=`#L{DJ;*QeVi24VELOERDPmzpTGYzfLRL%*{XXRkxG zyLGjA5k)rFea<hWw_8Rz3*v zEy0Aq>6unN--YP7)OA;5%i{95A9hwqjc!fn)GO$=`0(C%?0*O-we}02-O(3dC8hkj zvpgh&ylCMcmMEB#&;q4~G7m)>0c)(O8X0>|wYi4Pfc2I*ov$JqSRj~Q`>O@#5rq)Y zEeiI$290Rj<1Tc_3yq&y%F?9;? 
zvwRf{iM9V{H$@E_c8?hOUXDuaYpn+BL~%1ywVd|RvkI$Bi9I<{89+N|O4Z#& z7#l~|s3&E65)Gd@z2*^H&gr1IUeW$fFFqLkh%B{HwDBfOME^}W-RRVC^lOiU|1OuF zTDDIi3=NGkYosBSGOf}3?w?4dfS)xOp_BsDJK>^n(nF@;SlVHg+9mr&d2Uo`T;q=&vcEHh%yuVDzA+^`H}0rTolTIER1`W?n%Y8ZrWl-*#~x3ZAX}0K zmE%I<2mU8zo4^3tWRH&;OHyV4J>rZufB2_GVG4hCcEes#1Kj=^BcW$`BU-^P=g0;BTa+Vq`iMCLZy3>D=QK z0O2tfTGq#s`ag-+hTCu;PDdgm5D|%W8r=9aI{Z&b*Yq?T-(-~jC?ulI))5BDUwqQg5VFsMX(g|V9zgB!i zpHNE^Yb2)>5P=F%3R0m!ZoRR|=kICZkn=ZyO_c3>LQv#@727Zg%edPU`2`@GISD9c z8HJ|~w&1xO1CiettKHd{+qvDnbh%EJIeM6nYJu>?1i7peL@GL(1g7r)@6q5t!bBs} zrC9<$0$8L;V1(9TE6am);(m?{s^`#`QBV9CM<+6FI^Hm2cQRLo&zaG_L zmaj_mw4O5Fo&MRdaD8S!&h7n6iZQ=(`}}PCQ2_Q-S$B95739Scr4u6Z%r3Tj^Zf5$ zMmLDFxM*+*m`YelV*$ppWayCPg7G*Ui?id*(C);wY`xRE{^^>&PT==UN=4FvV5dyUNR!#JYq+3!P zXFURA2IgrAo395B#z^jp58Ga2kGc=se)i_9h?yS()8av!1d`^gTJRES;vYd)UDHbH z#2@oguC9gME*>Ycz5t;@c=7-#X`w%LFll0(7MuWY4bPjYXta!p>|Zi6qVymDfI@iR zm6CjFBEbE+(SCf->-n3Y=j2A={YLms2iKZbDo^=ZcLTp7JR0tO90G5oD(_=RuXZLM zf5{)rp4McFq5OnT5<}JZqgHOgG9n#F8OcLclavG`rtwU8xdl}tPl&|xA0bzyi1J_3 zo4_pt5{DsX^xf9$%qDNYthhbKaZ+{9^}lPDp@7jWU7Lc`nB604cD)>BG)1$GO(C_kJ6|&p6HEetQNtN9Y=996 zSokH4>C(PC9Ac}ydf^fZ5#~>QTS(}_*gr`B0H9Adqi+gRE+NZ3yy=Poc>=a>x)i=< znrrjp#5-Hxkp-QHN3JgV$-@nH#CkwSGOWJ|I~xHFZQ!W|Rhun|=7NcPLPj3i7XW}& z6vi*C6TqzBPb;$`pRw}N4!2Z>0zu!w!IaZWxV4SK`OMlbBr)iGDUwbkVhfl!`ED++ zBravQZuRiSAD!O`=%gq77?eM=+1r#E6tTI$btn%hOdEcU$0|zz3Zd__S1nbBTmb<& zJi}<=^*ywy1;iZA2&}5A#I=2fq7q40-feVi@p8-lT5uN;Is4I%653yo8I+5#_nG4w zn-vwkI9wg4cW@ssGfx-uzqnatJHi9piFVwOhr3&qW|&Fr>Wf*j2C=Vr-JIZb>nR*M z2zw^F*v*GIFf%Ze4B#o`*$F%O#X~CU=7tl)x0d;R|Lp6I;b=>L$o)4`1H9@bvx@$# z-{JuT?Hg}!#Uydvolu^Pyk}o5rh#z8`QDv@``wkL)RIJVfeN-{m9EjQq=z@IA6&Fd z^*)}ZO8Xa+>2sOCvifh3gJ+F?db%xO3I+ljb0JgYx4P298>izWASAQ~OgXu}Ed0;j z@o6n2c@y>X(PyhP*2H=_qBophimX+UEwwAISLa2K{g_@Vd$mwFEs67d^{e<#;`Ddl zi)7!#Q1SQr!A>1+%JPW~Gd`k(hd)FJtLU>=>tsb5eUl5d^@$Qvhb`!rdwX3V<_5Dj z*gKqgk{c$g`HN7W&sG`2&Z-$mw?FWH%%Mu>QO)clPxE;XZ2m=Mx4&2T#Ul2PfH$V5 zWBVL2g#9Uw!7t(w3;9^f%F05S_9|2bj1?1PA6R;}(0i1itqp5xYs#7%o@}^jBXII} 
z=FXeY=NfYnuvQ*}DiDXz^rhjt8?acZJDG87y;)77RIH&uihd~dV*mwBpSw&|P-p)} zk^Ukf8_#e{4Ix9Q%AaDEiFS{xqu*IgK~v5Yrnm|MPk$e`nhyb2_2UvPo-?h)&XQ8% z_O3xNcm*jA`(=+~jYx2(o9ZvZiE0oO3E^Nxa2bRxY zgDyA-S#s%c4fhMPt#A;V%Q>UA)@D*0a~^NG!DL{!q)a#b^}LEqL?F$JkwBrV5dyS^ z8Hc8|nKcJx%T_w*fvKt>s{o87o?%{}f{Ke1RL+u4#r+xc*6TTO@K^A4&%p8J<<{WF z^x1(r!~1tHLP~c-CmNm>+aGroFI-i9_r9MP-7&s|iW}&Vpz5hOW5X;G7?u1J0u=yX zg*ey3*iA5F=K+rHSfh_O|1>1YISFD7W?R8;4TO*9v*7)HVjmTs=aEz8SM!nPxFUAG zx43wiM)$@^`Z{Y17Hj;A%53$ZGq~BiIsyEaA%RllTsTeh*pKkjM898q?yu{A^sld@ zp3y=0`-OPhMSfZF>)YY?XD*h-4)GP-@zpmI^0QJFD>#46-E{9;@83B6#*zpf$6m7K zVIHHl$ine>vu@NOdwF|OXXR64Yqx*EoPEl0u7l<&Hti*Bu|ZCQCw17T_-PnV9b?05 z+45aR3Y@*x;)|Llj)CIdsLz8-C){_D2Q+=CoQ*bL-1k$BugNUhcfY~i9ADE%SUD!4 zoEx4?bu@Y6@RhM~afLc%`DYD_b{BS{WaE5abQiu<&5SI$WwG$aZ*sJ!$lLCkik*hb z8cRMJd%M53w$}Z}5qFFPkNyw%U*Fue48?42)KtKK*0|*lYMZ~^_Cq3w5?DrO-d4Qi zBw7P$aUkwe(?u7AC)5!{dj9Vwl!^Lrhim=jmPoWN}WP=gJ^CnD6agB+9w@!@ zngzuTm+#-(K+-`(0R7UdMt&ewAf}n5Nej-qYmU?-0x3OrZ$%3iFEu`+70c|AA%bt$ z;P}%wSjo+g6@q;@C}j=V*n`gU3u@S&`)lSfv>s%d2e+Qvd4`)GRPU0HK@Hv|;?pN5 zf(Qn93K^5gK_iV*lLJ?K3o$Z_9)esTS-shZDytU@rFjC~0#(|lovobdaGi39`DyEuN2br#Be^la zBCV(#^yXHs!xZg}ntaMkG>EQ|$Th_`u%)b0{+H^_o`aMIEzW8!>s0KVqU+nLtd|}w^dkb#< zN;ixJOc=b$EjFm%M>m!Se_E?}kdK_v7feJY0?K(aN*VfbF&LJ==rau(4#3{UP5bhA z$qb~B+Jp#CY}W0uvVF!*3jx6NSDw9CaZ72Xhp0lPoMw7yX@M+8zVMgWS+u#P_VlM` z>XRc-=MgeA%SL*s3Wy{!1N@L{EtchA;qU!Fg~hId-aF<_?Y$&~gRcg@sp>kVY>v&ex~k#P196^p*_L z?$Y@WF@LrN%I&4hrOZru(%m7S#6k+hIMpN*suF9JQ=4Xfr)mCt?XO-`W+tTK>}q744T}C2F&xZ#pDL z=Tn-YCLnvP%R}qP4Hy4w{l3$hRS|Z#;=0qWV9-gm%ZvzJ@%^$limRjLhGn&1y2=C$t}92V(?xSHDAQxH^tm~Zc- zl+sZUe+=%diR)-uXzak^O4AInx3eAR=NL5y`)vM97%}v5eohU|d(t|sNweqIM6CR} z*VTUP-5CAikFH#!HIhCuhxWEs%m5ENE0s7nMP3J z`bs59YQ%MFL@bxQFp}=xNyFObqAScIwB^a-3p_Uu>hd3e>b{k}qn6k0^Ni(KwQr=O zqs!Xu`=H!4{>K1Qrb6`RXszGlU`bG?5Hu1aw7l@2tW9UOpxb%s7F7zz7$c|g`iP3 z*j3RRF|6CP(opQ;V+|B;aa(&d{@h8~m_%>#TU?pN#Fb=Uh`Tu5)_3THcF{f%iEaw(^DtDJB0e|gMjZ(yJ4b?^^BtYOCs7bFK~AD( zjQuH4`}4IN#DFYDF_HM)6U}j0Q1xQvVd6N)$C3WbDHNz+&t!-gK*M4XV|l-=K<;_g 
zy!=^?!k3gs$i{@95D0>>XFLjNww5=SdaPi+l=a+y@xz@ey-(i_O63`ASChV$WKom} z9NjH)<*0)*XA2Q{%=Rx%d{Zx~^B96xl^q;U#>C}MIAnJmA8cIS{ySQ|U076^`mnmh z@^>qE=Am#^TpZl=ej;o2KfmKuf9o5(tN<@N*TR{#P2Dx}_M7!fhYL$1{t@epFDRg$ z=)>t8{D^=FDh%$*p@WQnJLrEw7?1!bA(hrIwPvMrxP_8#T;-{uZvdN7z{VVKhuP2Z z?5E+X*O@GtuiwpAy6FV_%ddKACH2$#I&0_N_KbT^TAi4ZJBKaf5}DJ|^A7t4A3uYS zBCdOsh-eW4SL@E}J{Qb$HDa%!*!3i}9buoTbG{}ZKBb3@5z<_P8aI^R9nJ4v7q2O|<~>1|IR0a#e29t-y6@i^m$1tLJKqw^8a*7bBj3dR!gLkD~NCw|39EqT&H z6JRTxT%M%SnZJz9XK}TU z*C(18DGw1zKE{Rs3VCtX_dzEn7c;ixNk#!G9}}r&AnfN%!2knAe4jBsfIS|jm@gKl zZ2*TrF{J_s2w8GYkV%rM>%5d2{CX;ijVq{yU zSpNu=J)!Szu7D*vr8!9O?;vH-%yTKj`?Jq}Wn?#N8B05=28pgo**`Zv_R8wH^nt$< zVCk?aIBW5x%cB$05`p%36A=C#wcI5Ck?Ad|t-0*O5x_?=@suBAV4W5Y-5VLUwW*`$ zzSs38TG6A=m(r(S=O0&4M=NcyO*H^+_Z;VFdW&ktppcD;8onrRWvk!>gyiCWvC@Mr zGv!}1sL4ibKDngINF<*9d7(JAl-r=Tblf%4${td&E!2EG1 zlCI(q(+;i(giS7r9YzhcWS_A7)`yi9qGe=rBp7NVrqQksy_34`KQ!?Bw-s<%zu}f9 z`JG827h<6$^mYR;NZmFcHN-~-$pHlAsTPponD@}9JLoPz!%u}LJZA*N^MkVfJh6fe z;>-j-MNBM03DSgD#hP%o+U$;xES-8yU|)her5_mK^{4N85?tS^j$zDl-GM5D5MZ1hANgDVjf0|3NQER0?^~~aU>8-$|5h&thZ4lpyQ?1OtETPU+_Gw2G*``MPM>=>efEeo$`EWGVdX4= zZ(#7DxTo{8_0Sa;ZqLPMt{k&+`&>>sD0|=I-t&ia7sEd;FYdbx zSN(Le+65)CL$d*A%W*yXcQ7l1n6+<}ilyDZ^n7|)lg%YmsMhJ1(te1zl6@ZbHyfOt z6(x&LN-7XArBsS4FKEyQzLAPqQ4@RwQYYP8Fc_mysCp5dHLNdJGj@2={%quMGVm_U#t z3vRV$*OJr7vT>R-XA6at(zbkC?y`YAeQEFUyXT2M>U$2Q-GN`D6q%ex$Ovcy7gjsn zyIda|+{XEE#6vR6`c*Vi5LNr!DhJ2MS8c7B$*q|fIRiJD`+vt?8jD4x#`8<(ja*Gc zP+B?TiWJ0(+|t(?J>JD{3J-hVDSjp^;IOV;#wn8Uh?A+>&{x=>p+IxJy$c_VrFH79 z5-l@p7_f%2Ge?62l1C1USe_$&L&KOTas`BaR1FuY2B!b1#^Gh6$Co>1w@$at!;<_uHI}SZP>|RBTPzp`>n%KpAej;VbcZqOazrv)<{s7 zrbjBm-jewDh`xP`?j)(JJnXd zZ;dPb?H7AFlJKS|`38_G9PBy#ykpz< zbn^8+>03P=5x+4u!9OoWwA{U=pbU;@y=-XW@# zGZ8lQy4b@63F;{_RK+oGij5t|&ip!{`fcY6wrgVCg%MyIyNbtxQhwLJ=-1lr_i7i% zJ5E%mcy!D1afpHj<_euM^IH1MB9R%ZeB|?fr~69^u1Xb+N;sM*$OO4yWou`El67sB z8hPvhAHq%OviR2YMMc~?(dM}-W9iZY<}szr;q$aA+j7y*ScpLELjtO>fwa7f=R|}h zBE|pM1b&pKBsC+g5}EN9N8AU_x-d%urqtAfYOV12m}c#pM%2 
z&{4k>AUA;%vnRvU@8l}kN3@<`^o|3e+V`~uTrbjf)8nstsWF4nPBvxjtO46UY3EBpZR#I_m5)O>K5Eh%ubFtZSI{ zuh9CLG3-c$3B5R|=Z~WzM`S361&SO{ftVQ$_Y33gfVB`4oy<*<+eC8l}nbSsgKg9d1>jux|?G25U znjAi!?>)|bjUQ%)`2OArUUfoAJEa~>3?B!jUg`nbMPA=~1cyX)vRa0Wpy?sPoYHtH zvW1jqYflISbl2SSnC|v%zP>ET@Tw-wzM2I1UxaV2f zXdUh8#$_hLyCk8>_QRWYIt0&dMub?tHc*L8-ZZKg#3ef8##>*bo#mn|xdol>9k8Y^ zh9tz-#?pt1F+DMOE%3*=896b|9QFY7AFfc`C-A)aS!$izaPz>b6!4?y5z6`9cE zrrAf2$8w2shNCIc)QSEdP3Ij><^TWjL&*AYjF1%PL^%it#fi|dk9{~cW$$^MWJ^}| zII<4P%s6Cc7uloiO)`$mWR>~5zkmF$%YUxRU$^^yzhAHC^YM5ks00*+ASd$OsO4Qc znT?9MwW}R&nX4Vr+(F8z^x=A;ukGnt!`_S)G$UP{96|;Gc__p9Az)o98@s(gtXbt%uPz&}0)p94JL54-jd}!w4)_r0Oz! zGTN!tqU^m7B(C#0@dnRnn>IlWjluLgyy8toV(vKp_itG74ytchUzUU8q{TIkI< z81hHkUP3}b7LxajUai5Cs#IB4#qWU zawbalSwi!G@JPXV=Nc`g^*T)w(Q^EDhpE7CE8kAIp#7z zs}C6k$dG_rwzoAq^3%dz+sVR-c-LB!l`1g7s3f$P0^z;j(_i_HVMQ_~LI!6GfEtVv zv|wibHm)uBWawGoZ)fh{!+cVL?4PQ$R`<)P_CHy{2a>@%OJ~_j|IXZVvV)I1eYl$x z%zKzmT>XuEtqv``jM%f_{d2&ztQY)3`NAXkZ}`!t@<&T6Q@BH=v+OVF?!hP0ou3Rj z<^FB6#5D;yp>3Qy9}A}e0hKa5b$lw?)_C_tv+RE<*n+q`TBe>L4@(ttqRx=tZK4hF zSm7I|DBbJTqA;7-w{PAPqc@K9i1_mMZMlq6@R7+ zykrk!MU_lA-LNKPk$}o|V`G&vE5=xaeZmicZ28p6ii)=+bnEEzp?`9{qK76LOjynM zPn`q~tKDjxae@Bm>Qd589h-)|F?rMGPONKwQ5mkUyw~(La+&dRV~$^5{h@|}F&iY( zk`taBE4D6F_rI?Hx(N=VKMW58Bf>wsZB|scJ=*H}^Y>-uJ>yyBsEeRrPyc_vR#ckA z)JD0_mm79(FG#y-N%h#){D#T4wQxkduT@H)Ki)r(d=|Jvvhd#;N$JVXJ*_g&&B?8Z zZ&r}z^7F=iQI0V|+Bv%#WAx&m{uHd7&pqxm!!2rG1hwy-Ext7{RHFKhRAi5~?Sn13 zxQo95DzwR*#3wF3JRyY~9pj64?#Weni~HBv3;qj`u(sp2j`ghHW9@#q7dSXjbhFq| z^$RK8y6IDsS>=?XNx?RCW$}CeQ?=Ly_T9xKhu!}3XWTRvFt0c^7Pc0G9*p<{*tt{*EFaRlCUbgzI*MByArZF+ z#^dZX(YN-FEpGp@O7!u=NJC7YHZVV~3AMuXHGMi=z%@0>oN?zH>8<%5_##t@TXI_6tFAi6$FUiu@x3^qd4nCY#BM9N1 zBBhIK7>T&9+Kc(6_|bwcwVOp|Zp;p;IzK|F=@FeWH{m=jt5g<0uS9+)-}ztL_hu-_ zhC(9*g#3}b`eD2F&z0v%s-cOgNc?3d)1ptMJ3{1CrQMs$wJkX4MN28_j?a|;!M;Ct z?R^rjMpR@e&`AJvU~mz`Erj3Yk>p}qVk2E33^N7u%sy&E*Zmd0Q)&U|V=L`k;*zZi6(DH$uxO=}Gt zGrr=}hM*)3#i)PZ*g%85@9=C$-shkwxbmw-s{fR;I0R6Um(4(uqH#O>wvkp*}(18QpqyJPi(9wg#G!>0;UK*DQ$ 
zD&%TGI{Njsly?U;JHGhJH97*LXr4Nw@f)7I4Ca`ri;ar`WpUc*UQ#E-012x3W zw7(f0a^Jv%FoEC^xshN-$k=(Jy&^^O%~xb(&l6wVCKFH9ap+dpHNMd<6L`3F{GIMk zAO9Pck6#?<7@$k7*q>&J=t!{`BqR187j4iIM3f zPLqDHocc8JX=vgsTkE%~azXRy)oXTCg9NYMK7el4JBW%wl-erma>8n%Urag&!+~-= zT3Nj$BO^py93qfUf&N)uP&N6ON*PJ@6k42k6ATKEmL&tB98)j%g{8i7@!TUxRX!pA`DcQvg6u{pqXvivkDi4BsjAaN_`NTOE2e=A z7g_D`R{i`Ou=@bSK@}-xw{gOHO~D4J^NZ_Pea&@xvVUa?zBf$sS1FpwzW-ea1?9^x zC-2Cg1MT8BfQca?4rIi1T6CU|U(1r~OPC{xZDk_E$s==WdG~lElbU_xD z>IdLsTwlD)-Gd;3$+tS7cfpq%D+x^(%Vp0lfY#mJ^O1?mRLSF zb5jFNSM|LM!WcP-{tnze9!7;Wp?!r$S@*z(H_feMf#S{ASl~vQE*2*0{0xoy`aE{T zu9L`hlJ^5f&RCgbqzB>!v1zUbkdhEFnsnmk!Yp}MhS623%8A!|P4mpcr%^AgG5eAt^Kj7h{rcINm3s~_I%pTZi7PPG`PDNRp_pNz7D7>@q zBR^(jg`===p=;vf-e%KVWmg|b4expcGwOEtBN_{?N>CJnE_7_{;Dy6|xd~kBjLUIz z=VEPh=Zi+ig11|^6fb@axp-^yDO=;V<4sdW3*Qx>816!JWz3$e7OgKG_}ZX-CrGg| zU~M?7aw%}_^tjJwDcCUWCwtpzmiv>J`87&M!-gVPtG9w$?id!vdG#it>K-Fx5xz$% zxiW1)gt&sctJ-(v;A;K&`z4Y!cPtNeN@9N(J|&GHD3;MFg{#+5 zn3E=HX=@kadwP1{*EoLU84F3?zn@i{3lPXwP6Qw*$IqmNHQYTWYqJSW{7tbbTNW>n zm0BY@aI@h=7vp&KbqPw6jj;~zVJhNE!|9}xTlYKu86&VuN`8?d1w`ljV72QcPCp2c zF5hJBxN;+X{E3mB6-(T+^~Mi}{Oa19eieWJ?>5LI_eC?XfHJAb>afuKmJmOcu`)4a zYc;=PI#2t6;%iREwsUel=@o~QE8n`S|A(}=lKyWwa=tgu(H>P0ddrfNSSLdXQ$*2FU*9{iZqN0@7!A7A=GdBw zt9>4f|4@*(cLuIW$cJQ*Mdt~XI&m}~*Wc(4Em+j-LCutIxK%V03KGWQ`=Wi>m{Zk% z4QcB%Sj&YU&BR1QW|i)`iJY!6l`*%L$MmhA!?K3V-p~sP@qHd-QcxBCv~i!D)uc@d zcFhh7p+{x&d#gRN;9nP>P(o5XuM$4G=o6~>;WjSm`(zm^T1cOQ_P3ptV< zYq!7`uH`Rp)I_zpCG&UuNQ=&NPz>Nw{;KfK1zKI@$ZR6}=n z4PmVfDJz>}ygW>TV~Ee|Ofm*yE`sFf70jmqRQc)a>x}VQdyfY46{fUSrcvfobnEOY z`)zx~I`}9MQk{={i5M#EdSeMybi=w75)|ZWt(KcLECx+<^>cS{PL*Rh4 zCXArgZ^@~!(O)g3ek)8vR^IO3F=TL&yjSM>cJjo6D1szV*j=FleZMMt+PN(1?eEG9 z0@lh8UgO2?GK~mn!Mke}5~33SoDZN;2$Ee~dbP5%Ynj)h)Vm|+$%ZOh^{%Aubq;G< zU7l2+Gi9}Xy~L(J1K`d1c58lqomGA>+3E!ie_?GkqyUUG-e~NyTz5|wy(_{tg!7L{ zX5J!%CMkz{TZ`yX>8Nwbi`+4I7EphB!)drSI!W)YT7P)IAnd;it(E|}$%Y@$J5`GNfE8_Pul^(C!E^n(;{Ew`M!C+(M)?ayR( z7Iq_sp!N$=a@X5gpF%%XP4$!8TgQP1@J8TNCEdAx!eRzc(vV5oJ{w6G18}pIQ|SJV 
zX*_dr7;HT@G)l}F5*|YT8BPPivmxyH-l(n%VYLv&9CyJ^?;|WhAmY({43IaR?B<9Y z|J4&nM8O{LF|7_lfD;C<4FkmHIk6mWhYh$N_9FuR7Z#rQ^SsI#yj?z^1tYWJ3%BeX zT(s2{@pj2BuzkBAeC~cZuyl4@JokHcib!^w!m0GM$0=NGgXHia6x~0IGEEV`Ca*yQ z(gE`fGr%dM_(35VG!)mAsYF!Otkbc-w~uGF2Wcqm$>>9{t|C{BW7lpzOBW53w?cmP zt^6T1sDYq`T*mfYV(kUrs@1_16&3s-h>|EzbVX}w(a%JfqQH%bdzl>FPYc9YqIhba zlO>~(BLkb+=Z7Ny{97QDyx2{LrzQ$j<{y~a2##on4krNIZ};%Rro zRN-J<6ingcEUOZ{tiA9%q!=u&Hga-tpHVLB7LhgK>d2mZs_0z0{ROYM3ryB~M^FjE zJAnh(vb~~RAL=R!AZ!#mH>ePC)+oZv{(PHYmQ~x21E1|C;V`LNwzSr-56&*?xAgT4 z4yNzc=E)y8!(2Ua*oAsmYRj4#kI&lN7LVDZdgCtkM|6L)C@e_Ci5XCP5C7Mhh<)KU zT6F#E>!uFr8Yn(&c7|Y^rc6BxwOqn7MlT z630xAmGaxVsG&cs6w9qZph8uK@!J7b+xba5CuHqh6#^yVs4X*PKtPQ^5gzBprQ(Xk zU;xmkly^hqnqPh`aiC+;7PfiR=jT5iQ8xD_?kNf(#__4oBv`%vGkV7$<>zwT_16*P@4+PVDb;`D$m z>i27(_WkAS6YCo;RYvpDPp-B*eHy0bCY*HXHwy$9e6(VvVyxBEwfAJCg4Qmta|feq zgfY7Ss%Cpv5E*s!@>9)>_sZD&q3xml{p{xc-L31A%Z>8Fn3?gdH@;OyS__sgKC&*w z7HY7b5l12^-++4gZ^0X3ZnqOdZSM7f|B5{^I>sfQsQr)ZH)|yfP>x>6*xtM%2^%u8 zfBJ||Pb~_5fqfpueE48#q+i@r&U1HQjMe|G)PW(o6fm=DF>_F};{c)$_$JR?uUJ`t zH__s=XAxDE#Spn0LXZB@Rb$_sUMhx0o15B=&r;-`25slamTdJu3jbIwM=wO3k@U&Y zX$cV5-pY5+T>tK~AW^@P$lNR%&g$Y?z;;_e$X19z;4UcSG?}h&`L1*GINoY;F+>SpM#J9RPR^!cRs1rvnS) zB(OT$Si+)UEaZweWq`n^Z&R|!rs{pxjD)x2BYWwpCKE0GN>5?zU# z$qNU9f4#>6-A+)k-x2?@HGv^gDEA{yiD0(G6?2JYjMxjD-1QPW`+ARddL;YX)#o<8 ziCaz&*QS$9Qj--1mcez#BSJ`$H|58EI(kXMT{MxKr10%Pr%bbC8ps(-DV*P>T8`T$q!6&C}sUwLs zY-0QmHOGGaDq$N{y*>ZZ*WX{9Yqq4!#dBfjmY%V5I2y>_{~9aC83PBpz4LQ37|z>! 
zynZLB`Rs5kvZqJIWUHt`yyjN-!NEb1qi|)VU3hbyffI%`Jbqi8ln7Ighmr}Z!;3lc z--bp7(32-w@H@*>FB5Jp=<9r;z7W;NJY--1*?m#-}Dg-aihjVG4(Sr1O(wJ(Li| zC$RXz(at1+kdvLgbM~J5G+lgEMt!-O4x5n^16jv=jZX3xc3Tos!eb|!FWSnT?KUD_ zfd)#45hn}Ji4$QW+mxQe&!8~dVIe%B%POWw_s><|Vs4j7L&Y!4?`Lh#Br_3q4NdJc z3y3psT;^t(*;*og!1JRZluEio3=Jj=)9Ix|(xSmp9%P1psdy7VL791@LYTWkUVW{$ zPV?j}Mi+Z+9uDnZx*gTr9GYqqVYl8frz{dOU5p%uU483nPdi?8zQhha?3QfX3w*2& zO2x(1XJAopm?)mp=49yVq8ugt3Ry#~*z!eR=312y=7iE9VUC$PWf`^UBx>{G?lv87 zI;VFpL%(ElNTDm>1!uc4X)(`)<|J>e+_0hQ`9z`ghF*BmZ=zf5>k8lg>Nxo;GMhFP z3f-Yc-=|*20YBO_`DGtiAC#-VM}jDx--Nw@zPBo8u=A9TbC2JWoav2CY3+sK z-3Y;mrxK;UQt62NQLI;Xd*i4o$R_q_IYQ7`wLtV5)GHcxA6YfIO6YO?=#b8p9uel$ z)d|kwzPa=-eByht7?q@u$K3?2;!!G&TSB3!lbisT*-E(gL+m|yc<&%IB+BGh#E zl@SX##frJW#w&E0KT4JD!|hUQx-JEJOMhMN||8 zg}hEs3+!deOSt?8{`;vg!J%oB5Dda$UaV~L zm#N}UZBYoJtrx*QOBIto3J#^SLED(jyQOydAyLnhv_(mdG8%n1M!1@?@9Vye)gp)S zk$BeVqp5!x0!F&v^|EIJ@Y^SmWB2D`!q2$R;~2A=F5QFHJR4?qEuNgMA6$7iOUbnw z#aVK>9}lemvZ@j#c=7AWMfH})^=2gX8pWWv8LtzMg4(Z#;79#h7k6H)NEv8a3rb4P zGbdFE+zXaEO{s}NSD&O%kK?LmT@1F+sKbm#{jx&y(<8gROCN!)4Zht8*-N=nW5m5(eD!UP>vRYrKSBDq{?S)r{5i# z7jlxtV!DvGPB-!yTTZ@z3i;Q{7$h37bqnZ7mSLvqA3`|`PZt8~jKvP@>YOsWL73s4 z79V9g(oo;}zkxLLACAy6^U*b_-B-RG5%qb%GzQpnKEz>hK4sZY4lfLYH_POX)^hKC zJ_-7}&;e~iT{@IFq)@dBCL0cP-Lhw|+-0Cs@fGS_FvuD(lmykzy2VE>__t$yw+|*x zWKm-{H)83Arw;&(Ioow?B!$uFg$9gOYr+9pQi~(`e zjkfjVa`(8cl+$?=+)`Jx7a6lN7P#0zGWDN1%#ctAQF@!xB3z4$wlKRaFIg@Qf36L57owBs+y{V&-_F-;?) 
z_p!O;V|9T5$@swZ%(=ifj;$sSHLnTb%ndb(c(38V92WCt78d3h*BXlodYHAwjQ;Cf zut$wqA%1snPbrLGTsfr>)w5EC!{|*!U@#c>_KRb19*LeFl|NSGmIX~l36GRDWnSwq z5`{ot4>q=$14s=PYvztbs5s-whg!F7C{$8$!Ruy1r{^~NLa89(h6Uqw=Vir)OnqLT z?~}1K!jJRSj(U1jg`|kNkTd~!I$mZ zciJH3AQvk*RWN< zuZKm_`sI?*v1x^r4}8I(Jew-j=opOhOoh>Pgx%N8)KaSZJRS1nu{`|kX>ETwEB7P0 zDu2BKJ|@Anz-ym8EE=3b9rIFF`s^p2f27kq3sUS~HD8(<$Qo zc*?*@Zq+&ZVqoCN&z0z!*)`R6EdDK5PO`fd*vm{AN=7a)Fn!bh2AGd_c5BNlT3Y7a z9`nz0_cSa8%>n=VGe-W~{=!{|p20CuJi5Ft>&WERT>@Pt+4Dvtqr4LQ0O{MECxM-_ zkI{f>ufeo(b?sv4$2EkHPmN$9Av;$bTVoV(S(AIVEuDMwOqO&Q=N@3q5%DePxCU?l zgoEmECy^HcKRJs$IRY^aJ60uXOiA$WcLnKy;Ug8TY}pI9*D84zB&xt)SYFpD%5S}1 z`T zO;=vu83W1NjBwUXj!6hL;pXan&azML7(0=gik_T4y=EsY8`jyT4Hqb^t1{NbyorKRK{{dKUfX4Se))ennZ zgtA>GVPT!i)I4$U$yr}@PAgv9L>yOWDad@5Z+9QsoD%lo%#g*P31Vs~oT*ev$kq~e zg3%UoNteFsOp+1@$<@m8j1uzWD(8%!t#F(31j!Tx0OyW-CJ%J!1o3%%iGbtI zre((C>$LyI(iOtz#NOa6_lw|~)@Oc_$&H`-O84L1yP=GU<}Q0*v852KsI%brUsUr2 z({Eerg_#-SrISK@Q^hSqU|--&WVK|{RGdvzR$L`ed@3zP8VD*E5{otvJ^bNHslLM5 z=uwlg9^quD%(uIC7nM=q<@@CVATR_j8(T|8%?eg$LXIo;>lYk}Y7O)=F@snf$asmV zlhCfmSxACjZya3H)Oi4QqsMqkII2!zFr7{PoD{S!9{fu(_-t4>xa!Vj2m8^~^~;8Y z-IjVf8kTH2?T+3?ML_I>R(4cEr6xejcH(e^+)3EMFyD@IlFWLO=5u2(ezi)M4mFfKVcI>b(3s;l|S+6hnk` zO^H4M! zuZ|6uu2!6n4u77(t*3g%D6K9e0_W*>6Tyb*&HoD2U;M(MNF-OVHD z+`?_2L`5amy+@;Mv&+os$0Uvs_FJ3vu31H%!?41ZZ&;Icsd#`Q2k%yDHmj^p)>G3( zQR!gI3!^mjT(VC4RvO}a0%NNjGgmx+F9KP?qLP5j)E`u8?CXO{NsopL@x#KytY26I zXoDKs)nQDV#viL^{x?viUVbT4?vPFQ=WAeXCSf48X5c_WhNWDZ_HqGq>{r!^Q^=2qf)^XBx zu8ABF3*BQfZ);KxC#<;yW_pE~?Yg;-uEvcbZclAJ{Qk8Z|Dj^*CtghqASsBkJv+a2 zb6>*R&lTsM4U%%&D4Q2tX&X$oE^-XFFyuj=N6VJauEt3~M0Q1hs*9u@e0F+cr(?l# zsbcf3LZ5AuU--OPYs)xMHu%t2r<8l;Ps}rHrR?)J2|M28+@mAUgtRIKr5szA5|%EH zC7bJAoB!im7kU>3!VUxTj%pYsacB4PE&g=T=hC`xIiT5uvz1rXptcXzb>Jp#)oxTD zs)KH62FOXB=E1SX59V!nvAawtqs@l4^W3vWxr>IetBbXolf&wRfK}#ii&D+fZ4C=D zGZiAoXMu8u)G5?P&Ls6Fx6JUmgB`>{54_tm6D?3v_?ZLaH8N{PcybKEm5p6owtYp? 
z#(MG%toDHQc1oLwj4Ph;4a|h7!)nu{pJ?7R=J-xg;JYy*npN`(j1@2f+tRn`-KNpJ zc+`shAkVD}OPV%N^qNR(}9 zzr(mZQiB~r z={tvBLmZs*dOsqPEB&8MBd!~oM+qtf;*?!Y6%T*zcubf_ zQXRLZkt5WISr?annN$-^*Wzw8VjFl4Kl>;L1h43jso=Pw}i@~0<7Lw86 z{l0&u5oHFmvKnOKY3Vi9V(&e_H4agwhnf(I6&y z;$E;$n4iBa&&2-k!at^Rw)Y}^L^dUt66RJY{Ea{BWDAWMTn!J;3MFV^{$(7v2mjp; zGTjj%#GhR}U>7cB-DdPS`_s7@CtcBQ73E9T1e^1mA7Lq9Dobt)UU|m#D0gYy|Gq!{ zmKNh(o~U>5n~G{9v@} zlJDMTn#drxbfo)nXG}s);4k%OrCWY7$B}0X?yUh6pHPUC)uH0E<}>`05S}NstalPR zy#N#U?(U0*=Jp8_z+!3ls9+rdaH8gRUL1!V2Quoj4LqW^oOs~VypUIV`B&tsio;r9 z?f6eWN6mBv_Y*&F|M`>1m6u$6wuSmajl@k)jv`0De}4+ORfrQWnp<01wwrdn{h3?b zvnc;}#c_>IctsN!4Or#ecK3#dhu1Y1d-ktIMe(YJK=TrfU}45A@PV0ruMx);B=yZa z>OoC!9HvmTz{nfu?-9gfz)T+{H19%Dmc8Lm6Qg1csIduVsJ=|v8$;Yt@a{7;XQJwp zmg`TSr}N=1JV2o0R9AM`S3Fvdi}Leh`|p5^fENgDJR-%?JKS)5;@LL4(7L}`@_?(` zaX7RW0sE{ycy?fOzndfQV|M`Im`>B2JK~E;S)Fj9#+Mjs$HG`DFzDVWG%vOuklj7E z6SE;Ik@0I#-7;LmcYFScR^ko&EgERk2e-5w#q^0ggA!w&f{|LDTs;{)eF&dPhE6b+ zM+tu2)ROxIA(_W5cQE%Ph;7rSyU-=mz3v8Q@oOV`HPg5CBO#$iA)cHb8$&-H8d7aB zSt<7!iOKLjt7v ztu>T8a2De$DlN5&CB(4m(BelAuEp1ib+2xdJwzexK;YqIF|r~82qe!+3*d(p?`U|r z&Iz+;Pd@IBBB!MRsgB&&y*&8(HbXC}AeAkgTG1)!1E+PZmgP3EUVcOk0Acm9Td^nC z#IIRSNMz#CP52I^#)qF^u?KN<$*;LDXL19r>qf3-rGkA$ zzUO~9MF#w_hoRAU5QR}PQ3DUV_A+#~nPok8$rD&O6aUZl;mg22wKgc&X@$f)xLd%U;3i=YS zS;po+Dw2W?e-CjHBLmra8VyuNoDAP`+f*uHbp+Lb-=V4=>LfImIIRPL*^=i$AC*K& z9gM%C!h{&f9!B*FF{-@|8I(EhEH9Y~jd~oJ9-h?20VKgCB_$)Pf=~KxU6lpz8eXLg z=!%$Nq94W7?DP#SROM3TmkrqgRqxtw%K0@nYdrbHw+XtTgn6z)) za(<)rx2eOy<&Nc5d3562Cnr%-4dh|q(*C!}WQernm4c!9XXpj)vnF#$8ag^Rf?zpP zan_~{TP19DI}#az$*M;>ON!ks&$)UG24{X9xgs?j|H^oDWEY`hGntmG<|0u|HyNBDd&PAJhHfwrUN+GxEe6duOBI z{j$H@-{FDgGZBJ$6ib(Pebq^GYnjQlB)!ZcOSwCbNlh={*K@VT@K|pxnvN{@_SB5P z{yu#ml;;`D5m{1Uwj0zXq?K>SvocCH;YkzMyu- zDt=-@;1ZbtFOewrXnhbt8E@ju#HCVMS5fFN^mNYn z35Fw7s^jHt-=eZsR^Jpp(vl93j_}maWC#@Lg@&5&S@ORDWtCFNTWhF}m~4TXfj9~t z`xB}KY_3l|g{YMs)06YW*7E){j{I*uVKgSqaCbqn-KZHie0{k}xh9E|(}y~bqRDS$ zRDeD(o<>jrWC9e1YrMX5l*QH=peYi+hJJou=(-kz|+d`;w>u^$^s!3S4H_`upM)u+VyJN(uo29VnnH?IaOrS 
zZe+#qI4+S)9jT}c_9IIB$K`@rimqF%JiKHghcaKgOT!AL5)Lb+3=DZ`c_zN(UE;aV z*XRm^I6TaB;*R{Ui7(QAAh+$|db7~gqEGuj_Ozw;=4Sun?HS-cptSn)XK04p`R`7* z=9~z4Y^Qecjv%U}tW40b!MvebIq+~qCBlP)%CxbHI~HCx!KsdT@}#w;F-K)IveUB4 z2#-c)avI($W3_VKc58RvoOQ|$Ybd@Ct)AOS?mpQ0S7f~7zW0bk8p_*g+Ra+}yC8W- z?&f)I6mXwe-~2snzl8AjhIfIH@^7C;Okx!E>ohIffKkfrV*pcmw(`Z{%=3^qDx@4M zU?Rr!SqL*`m40B>j7Rk&@}KL86o^j7@eCq^#wBtcnLIO#xUA>u&6|JAI!O!;CcnbM zgeb@pZFd#A(X@_*bvaM6!kS7rPo)}qIPy#FhJEa;K-37bkjZgl~1%S?#gpRI{lvie2PRP=VSK&AoBMzT1)Td&d1mrUJZ_y-*Wp8Zyv2nE^^o zF;RUdQ%MgZ%K9iXUHy&cj4;O+gwF+$ zGX&wiE4SsdufqKyL2Uw1SWVlo9jXxA%w;ZX7B16<-QSju(;W$n>=qGxcOUO!qx^cHC~Uiw>Cof>5?X9WkXeu*RmgPoL6@N-CTc4;P&a95cO!IbmJtAtbU^ zG(KIt22b6+nyjG8XB%gu`bK^v9ZMCY>IW+X(*A_(Y^J1pl%Q@7ata{q1d_1YRedoz0J!hP(RuJc@I4h+TIS^OR^43G6 zSD+|M?kJ-;_>wNSJ@EKX`$^iB_SK5qZkx>QT^~wga{|JLfsz_bc?VB@sMA-* zJ2b`*dY+sy$^z<=Tg)d2)LThVgPx;k@5DSsYr)VkHZ9!}`nT0i;!3x5;r3bdrFr58 z73;a0X+^G|X6)|Bmt+?&7B?&g$zfM3-*1PMNSL9mK>mH%r^mK={KZoG3xEu{Bl7 z&H_Bx=qRjkVYQiXkEamambY-cEB^@4j4T67S3_apJI00$JJqZ|>G}K%=}Ff+`(4^7 z*14>|aUsRJ6AjQj;T!7Ed_7dp%1F<)lR#)_DRKMPujA`-e@Fc4Acg8|intdq;9QIs z9=shfF+C?I4Zy-^UMe0Ilj1@=>Qu;5qPKwQVFrMTOc(1ta$X5UPtoq77d-xM9#=vS z$BI`Y;Z4e#7a}rd_)aL{VE-jOBzDZQb+HJ4+U7HiFqI~?oWM@;qxmWc@sZ0O8&g}v zsITc}zy+2qX7?orlEL1dg#5i6xp(!WONBilSAT#~z^QyeW2?Rd-{FYb8tQ&OQ!9uq zdRvAWRCB%^z{~pF*E@)bZ?E_+3t$NB7O)p!;EUw24kBgy%+_O~1GrcUI?jK-J`vH9 zGvv5RS-R}bJ=X(*C5-L=mi?Ea*FG7|WL<1rJ0DMLJ8gA8&*lce`?>xnbbB&^r=8qE zTWQ*oaxI8Q#q1_9m3@KEvsaf@--33zdepr18XY%MbF41k1=b=gmSi7GyEfUHVk$d= zPJcB2C@`Kya|t}zJq4*5Gd;Cah26GjL-C1W8_ZVCc00+wAN71Lzagc_tKjmdg4H4W z9-!Ukq|(JZKd6HnpoPYX^LAM$V+=nLLRAKZ8f8s!{M^F1rSDWm zUCur3bZ6<3NaAk&_WMU^RmB}(+#%)fklt7-0G;h}4E{95ro?;gBvOI@v^cjd(DTyZ zkjcwLf~}S-vXdhZYw-GZ&xro)#F2!rP07Q>r5$lLVjVGa#m$|Vz15G6AD+aiNABw| zeIdwE(Q>e9s+*9>YrILKe9uZV*hn~!6;_x!($AUH z0ka3&RhU1#5*n0Ogke!Blcz!LWu=?H@?-j^w>tx#mzPU#85FWArjn&jF20m7F}2Q) zhxzW^1l@q;GmUVpS*Oz)&er0@@1G61x!6?A0xzt%pvUpobMuB`>A59X>ehzyG`->+&Nqx8&I!R^y|KPp4zwN>$jJ 
z#(w?gy^y;dSnIFUtcuxMe1e8u~eT`D1&#zg`8KuW2$H#_|POw9e#Fh>DC6Rg4QSXdGApaKG%94HzqetM+1ot zr4Qv{15>;Y@gZ-m{V4!vvs!F#s&F!YU=U;Ch<&?!iT~OB>B_)oQ5oBM`tRy!AuD)u zwX$vQj$BjQdq;sf*NIa)?X)sU*@Nx;fvbV5756|=>%i4$?v?mfzgMV4w`I#CzQpf- zt-r@urJg;xqJI4imGS<*e$0_r7|*+I*y&eXt8k-tOsav?VUlvI++vaesW`ztt)XPw zt-(3AtJH66n>95WF@cgh`+W^+6@I3kD_(}LxAZ%UE1pxrPVaEb1^Hq9uP$Tk%f<@Z zmw;n=;Kf?Zo<$o6Ym5Koan~Pxg}#tLR_+w&hita**1{D|XjELCY%D5F&!8V_WxEA5 zg~6(VdWga9gcMzu&C&uhsS@8giXYa`e;D>iKX)^C)l^FN>GbsB=ymupkGuVT7$Im4 zPwU1IM7ap@qRP+N+@7Y+pyWE-lS41;;D-6Wp7lGBP}kb z%W8k~c=Khuo(as_rF^bb5ClD{E^AiOYmB7*?#cD7>F9+&isCb0n$`HW_kcJ_fL1lt z_q#|UrGCGze3q)bo)VjjWrW19uW#@V@>s+{{J>}|6?gElI}Zu|5lT*Oo3rW^qVXY> ztw>DegeOXr=N7b2_j9Hnq~B5RAx=_@Og=hFo(dGM%143$XHp7~eo;M}EJLPG{p9xb zIUU;D&oO14N!DTwwtME^Z(X!suwVZ33*4UwK6rMKwc@^3+%6-uWc+EG)6q#Uu?M0b z;iBgcw&v8o+wCBurqoAy`+lItu}21GW{(8~YzoyBpX##}JI<_{5izusUzLAHPzzn7 zg+f_)JB0r2jZnnSn542euoWIk2)qi*N{{HGi9Jy@&QMY_;EBrNU`F0emZ(M!k&}fe zNxWkj)W-yvs%ri;p_bRAf+}9FR*FPnshB|Di&I|ka@n_0gseYYUjAnE^YxbEy_nE) zkPKT&35Te>&eu?omnaBiU1bp7;RIQ+Xx$gNeWf`y{$$JB$5isMv)Qb^;nw7X#`f9B zR&v0pJ5aH8cIPIt|8p0A_9q8o>PUz2UC}hL!cx~5h}}qPjmh0S&qv`>zLnrQaYD2% zryi2Ye5w%-;EXh9Z=3|5CtR)DFLB9Q8>9jY6vKtbC^Jl4nNyZW>*9LQvsycWrjp__ zX>MRHUvrP7k8;9MD&8l}1#x((en`a*8)Z>ohb5b@fyPri2VKVqwPn(BM=J&RXqcd@ z-%@0U`BX_P-Iz_pXUl1Ak<+pxzXzc|%AZ0hgp?p+-xI5x*tEU?wph#GmpK`86{eL_ z73PV12|mrxSZG-Vr49N==cf;bIE9$5U&~V2+2#}sh{)#VTg19YM@|!|T+LmRk$sLg zkRctG1YnIMyZGRLe!gh^;6o@7-*+5GplDS8H@PKQ^$~RTJ*Uls51L$?kB*0mOvOmi zC8jCvQB&Ow)?Qe9O)E%AHX>*m)~h*Y_cQN%nmnzE{p}BCdGI(9 z*bTR`i50r8cSWBVA-==?%MG&U3;P^bHVw882VWiWu&+C6CzmyqS0Y!ESJ7ly9p5fD zD=!g~$H4tC81vYN)1u{s`N$T1YFhUTIow~cd5 z`M{}noEMdmZF=#*_YWq+V!~bKt#o^^Y+Q?jcF;ce+0pJF2c-556iQ$QG9L=n_2!7| z#{3NIN2LAK$$p^S<2i0($|C(h@%Lpz`{j14yQytf=~`@R#X~!RlU^BQbzMf0ZkTl} z#;}a?*!uT{Zp~)q@qygMzX5v@RaL=iTt_mF!7JWZfO9 z_o7kzN?7YYTh42EI4KOuN=@}W`du{|!HlzHxElZKzT@N5Gyk(XmVE%#LQVEQarDC* zYOkN7#$+Zkv+5SR?LD2R+q>@N=M98EaRsB=mG#=PHBQ32bNmfCf8feH?+NFFwo<15 z$JBecv-SUR|Dh6USh%SSs#$cPrXO%M(TJ52wLPPPaQ>SVNb 
z^qXew;Tzr!-7Ax{SK1hSZq=Ed8Ef5wGq(x&ps8g6$=3P`G#zUzQAcP01btzl1X;XA zo@$gKm5>)55UcDgN9Cy#+d))baV}UtLWeX6p&(Oczh$yO`_g3iM%ms236gi<5xO29 zBeWSdlzFazgdAg__Ibb{_a00IS|A`ozg;~!SLxX>pQZfq6aUkZ>;9`AtFtLjqHq24 z0pHh0zSH094fvY>J!!cJ1}xR4r84Lle07P+(maIx=|)b!MW+E zy?tvoR&%>x6R;o})+661NoS(Wt?ZvZkzdq*R)&SPV$no>h;7b)Y1n!!iupEiF@^mj z=TPo9{+@PbZ|W?pc@0y%D@#>53h%@c$7k#DbTt$f1?2ypH7@l#J?R6*1!q}EsRO5v zVP3@zC<~GTM87v}Xo*4&3uO(`%*&J$e!KtI8fp~93ECY7aD(smldCx+%iG&(_Qua9 zz-Z!owX~472e5)-p--qgbAPIj*4E#8d%1c3xmz)sztK>i%o{AK+Rj4cfmTtyA?6)T zWB3RuEQu%6daqgrz>i5)fHY}@hCK>n_#dc`>392FTsy#!Ha@yen313G&1|9x1_+C4t-~|jyAtFeETN(bIPgCpyX38N4%QtcKV8d!rva22{pvK5@C{0Z5Rp=*MBm_ zTjsop?0j8edXhdgZQ&YWU5MwND}8HR2(c_sJN8+tF!xp+Xkn$MQz$~2L}syP^9-%SZr1muozxBs4Q z%h^)zu1Wos)%D=mRHkDQ-i2be!BCB~osQ?w_fbvBAa5X%x{4G>@~F0?}!@{l^}YW5Ok1OpPpl^C*5JclHCpTne-pYTNAdLZRM#{q&Xv2@E7j z0}a&aZ_%Jp*Dk^z<=wcfnP3Q{rMZ zR9ibU!}y;o6k4EaJm!Z(aWgY3rxvAZ(9?Z_LJIJ%Rf7kKJ@ctgdf`NpJ9$_J(xqaM z9$5`uGULHXf!}tlZE$3euRkyBNyzjGgBd=3iK=7u!#Ec&@90Eg;@#!j383WnJ7eBu zSaT9tBVHzQ_MkY;hQ>xTeIB-!f{Yoygd;=3i1l=BqA|ep9*?I>7){fk7-`r!cL%%{ z9)Ez+t{U-8r*to=UXJ^}CrU>**!fk5n=_-}y-WLxKl_h^5%US^HU)@CA-X{jtB1s!&Xg6JCU$&;p9k&r*Mq55!RZB@1PiwpYsCBFdx z+nWbQUiMpSYqpek1$iu)BJ)5Qy8UZw%&BUy?@di|2?i4vn5CL|wHN9K?TTMSeq=jEgwRz#n$8dqU z#OIG_J2fib7A@_SwoFa=A8v-W?A~gLySVDO$+{?H@wjoavH!TlYOr~{SmQ_JiiDTD zZ1CKpcxWWP;r;PZ>$#H{&-ftkSj2A+x9Gy6fuSdfw)9XhW zhW7e*p9)(eHUT53$z87CpmM95gvoao1#L6izmxr$W-=+3BB|aTWz1Y|ST@{-r7ieP zoD)m1r7R96-`}}&gDDa1lCvY zae2E}PMe{%j-7sVL3^SxAM28ej5+EzPZY2#)UDdY0!D&Gx3+uUI$qK&&hLS_+8aj> zG1Xy@dVStT8UNXzgo!R&@te$Op|5&@{42N&cyzmZAOK(b(r9XH8;{VO{}(T>1#~e$D^c)1 zA0S$$;yWx7n$^JJo=RBdu!uvRO8CnIb`GkMTx`c5F@Ry`KmFz=wC8}}sT8FRJK zt=?Vqd33~PUJc}B{IBWn1XkDoZ`tI>Ajt)7DR5%sl&8O{CWXZ1x|mC8*%+y2%ZlE3 z;h^qD@s}{E?ylWUZ^&(HJ7tn_-MBz|y+o7{yilz3jafSL(=}C27y$OT*YnRV^LG5M zJG54U0@o&yb$AnP+q(S{fSs&bR6Y!E{==NvH@Q4~4_6|p}JMD?03k0&RO2zF>i+mf0pv;@t0`|$J6rO8D*zB}HYx?>qd&ZVV5 z9;OlxnC~0j?>|!ZJ5iSSFQ)Tk=GuhI&C_$N*uaw9AvHD)119|#M+xO_4V0Bv<}+%w 
zQzvpuk0NbR5j3_TWT>c`2>cfp7KJ=j%cx3u**F{OxnHceKEXJiX)2{MGbBsT+BxFw zK9|%vD$5p)*p0gvEv+wqvyRsqTJ1cQDJ*A*2Sg6Sw*^mLS6_+;=b$J-aKe;dS#&q+ z3R~zb@Rgi5mu(m@zbfd7Sr9cS{zJK)5D(Pw z>4ZU|rDIC9Q`q)4<@M|-5VU@9Z3>Qrt9xug zvkdc4KNcPD=B}|(_4SLk=r-)arWMGpbXAGf{xw^GZU!kdW8|mTH~-RzNKRKaKm802XQ# zfkiP!a0(A36$^y1lVX&A*4ZJT-5gHn$ZI!9~9GYD2sa4{T>zA}pRj-4{3RCQPR|&R8gnPCRG{4_ldR}4dHZ@@0LdyJw!R$j5 z_Ki^+?(jz5Y@=bVsl;b(c8U9c`IX*~RsMHW-bDcl(t9Jm_mL@>RO#}=!i`ZaGYGFb z&g==|qe_;tPY@6xkXL z_9ilfgfid*Nhrtmnto}V-^9U4b5R~6J-hi;NrBz4VL#4w~p`Db0dDtL{eA1 zm9)3yIr&A-jan`+gMrwVAnUTXuqbwGhCyDW1_eogsERW#^JH*CPc6A5O56W^ndA~~ zGmyYo`zF3}Z=N!Nv0rU5fdT#5Dc!5n7~0)yW;ySft3E1xR1wqFxc#lWgD zN8cM=Mn`JpU3~phQoDY=HrT*nqM98IRX=qRR+9qf{=6P`F#qIT@0`XNW#)9>`F?MM z{@=fUahb2Ava$MxRVHO}i=u}BUesjbcj!|P2^|7#(Z58LE!XKTFE2G74yBcr@o7>! z8Ya$L)knbXN{Tw9NkWV$0jV8#vAEaqXB4$CdyL}kC$+UNLP)6S?mgiH8p4J~k1t$N ze^ZxK-94*XNXT^{n9>QPwgK3g_T`&yxapJ;vaRK)&L8vO-%5U&?;OVT7*<32y#>Uc(EV<_La0sWo5Sh$nT)MqGJ9YIo@Y zD*Zca!5R<)s5+F%tn?&Esx6o~tCB;T*2XsSQ0rG$k3j1UpLu#3N^gLE{uW9KQ|z(6 z{{$}2dPDKWl+#3Qv|r!g!<>`z{~qT9aRA#Pl6&EhRm4sR(g@;1N&%x4?(w15(g9bd}A&Wcb zjkeTb-;6vLM7Lubv*Qrnnc-aq&w=6;E&KcCYoSUoVHz2j8}7o$!q@IstQ=j6OPJqo zGo6JME2+SvSOp^Ra1NDs{xoUkUqC}5NnKHSM;1j)vIl)BAEVspr&gzZ_W8dZ+Q@^m z!PC23eUhOvtb*27a`VL;Lvm!4n%#o8UQ%SDpdX4|uH+U|7;P7-AuN`a6Qq(cN)X|i z8*C+59XsP^(v)=I4BqFxs*MS4)8_{sOE{hceMsa#28LwPY$_lPK^V`st*)i}E|3-~ zZok#^8fIzb$jC<2KsU}Qx(}bnrz-a(Cnw*JWfe}!Sh*^UOAY2I5Mnv*yU@(M{41oI zr)aJOUsEGVHUn>;G;XfLv!sB#Vdk@X^h{m<+G1!0h=+$%`-MwX0B2H2(F3hOEp~6V zoGO8;FsFck*Q#AduJ}(B#Y*4Sgh`m=>%qYNBqBMz3kc70h{r$mo z;f+3R&X*8=I{hBt$41~aMXJ{qyX%t9E%l<9HX9aPwj^Fb1xyp*ai&|0dO|Np`YQU7;`)Z?Re1N4T0fgVH>|2q1SWNJZP zn4$swYMT*RJch(PJ6lVu_@@eiD~h(M`qo_@7^8l z*RzVPU^PW0Ug>8zi2s}Y=*m*!aph93ogwhHw?t>Y$$x(_e5iVI%gL#fKE9DJ7-Vc+ zBF7}f90JK(ba<+tZ);GdWAJq{Jv}io(Q<5RdYXG^r}>C@wAEk!^0)itzfGUC{3W+~ zml{S2rr^bs<7f5VT8}4 zWVp$uMz{*`}u)%y}fUE}}jHph|+{*h8z&v6Z|X z2I-h(oHER@E+sY@aWLm1`BOtkW)OyfaD%#e&@ZaIKI}Z|pL}`3Y?}FPg3B1B)F)zC 
z=#cYPTap8qfO2xUv+&dCQM*DI-_bQ)S^GL)e|%$Q4kj^+xd)}!2-y94C;WfGC!04P z_yXSA+MUfj9rhmHSN?j50LSLUynr*K3IP}sTTih(ObTV9R}CbuC?6Fu^aVAG?N6>P z`EP9QR&Cdw7?gd=m;JyQ)P&+*!6Ef|nFL>z8vt4pIALN06j#4!B+C~1b}v-{mb9?< zM=09WDaW<*T z>SN0ErT>24ob{)6-NoaXXGJX;1EwKPnX07!X$jhPzJU(mLyQvl^(&rtjC~0sr(}+o zB4Z}+El#`sNT>d?gxb3#_`wE5Mo8^Q6D4{3EM@HBerZlX#)|INwmqWvE z_lPMR{7A$|H27TU`W=*Acla+4)T0b^x7;{OSP%?dh}Z?ikZ+4}+U=dunoy9sz9xxe zyqKycRYj@28FObA9zQ%h+;4@5LGyNm#tpTvD#WQ+=6BndWMF7XT>xV=)~!*Z+Hq2N z%dW=jX#L)!3akK&ywp_jx>Y@tl^iZ_YQ8#}uAi!bB$+)WkA|B)_O=_gV@L)QT z^u$toLbo-O;aGZNWJ(BQEOc|7x;$4aj-&HuUSBh{tSoBcTTh%||M+;v)ZW>6(F%=S zm2o(uI(CSiO_EOeVs<>#xJnf9QfX=TM-^@J?sjsgpFydbkzD8HD`l_ur_?|0sXduI znsk0Bv53PM*HI=F6*_gV`mJJ!n+1^GcMZMDrO#*r>4GRy;&!QdvX2ge4!Mvg%br|g>=+Ba|A>)M` z6sid?p3>yHSEw1Q9w!>(y=c>+g4;jzJR(^7>9h6HkI(Q*7X7JhV* z{(5GD^-kvdlZ@oD&n@44D)e}L4h;tSC)rTjJG2;}`6?H?-e4Yg>00-tp>wl7ZxX{ZF#}shEhgiss_v&2i4W3 z2#Cz0M@CRR*3#BSrs_AWIpH$i6Tn4O^ci18isZPX!lpYZx%UIHds?#tErZnJOaL{EQp^U6^J`?|@e zoeqpFgt=9P1tE)C0^O$e=ZjSti;&>*Jv48ETOK>Pie@;=cqPZZ$I&^F5HlsJs`A1f zNs{=kJjGvMq72;A^iY83FTDDf{4G%7&7y3Ndv0a}BTLSyy4PR*4|oo@U!S@aUzTNj zKPysH@T?SfeC>RE^vsu_-CMNsae1x1Pj6miV^Z(-U+wKR&Ec!FKeccF*_Z!&(pLS$ zWq92rJxP8Pg?F=!NbjHw?CZj0(N-+ZBwxzxpX@nUl1NL@vEwqFCIaAY-;-Uxu*8Ki zR&vq%{YP>BK!X0ADlITdzaaZUIsDP{HV7OaK+YO7pLu5)H*<;=-oVql1`Lsmse6nV zXyKJB1mv_IKBD0DRrj}!Xz`ftco^Dm^Lu}Psn?ZdLJqJtS#leX$R1v@`i$MTSRQe! 
zn%Nm3vOZS};@pg1uCUk&N1+$e>S^pUf)<$^eL^zIB)OA^Dl~bdEQvoEI}d5anhE>7^}h2!NgH&vTu^cW2^96AG62b zJ1XkiE6?K)c|xy`BNe5{w49jUFA1n>a_buw(Sa;JSKWdLdhf`k&-#L&JhRoo)i=oJ z(ZQFoNDuemu$n>EWO-@g`xHXIm7v9 zA8&L86vd!B+JFB>RW7z#m_n5#HB>f_(ebV32=c2dPBs^h2Y-Z5fM31*6%DtE9({49 zhtwBxgmr)gGZ%~SLyD@y-+fL|`iaUzJppehf3U%Qa0)**aaE&43z_-F&c7B-BcJ$< z&$d*rdPbI&r>pDN(yTP-ml$$Nl}AP%=o{*!Q6*U+rH;JEx5CJl*UrRD6jGk>O2BH0 zxiA$;jAmxRMs$)qmtO7v8*q5GTMa@l1AnazYdl;k{d6zYC?>}o6dziEZ!Pe)l9A;j zPm*S|8kEne=)Bt1_BoyLt&-q*=7DyrF>d(2ZtT40;7Ss)EJZb@iZCu`;J6$<%ioJC&0Nd7A93HSLb@lpi4}DwTjYty&_NAe^*Do^{ zdjFqqIyjzG%=*?V-fltv$Xuy6e#)3vA)^M&AE7ZqHhA7^fw05wK+s?rX4RRQ_9ecPUPDP{or+og_BAu)nnV_AEL#li}(fa(`yxC z8OWjUub4n@R0&|qE!0Hvj{m>(meX`o?~{W8iFY&E8ax{`W7>{Uq_YdIpaqxO$6-RM z_9Y_*x6F}W?R7Sej)C*D-tzCxe7xMEm1oU_Q-LihBu_xlejy5uSsvJQFCQhYai|Wv z#22M+IHg^*Wjel2%ZE)SPC4l^bJ*n*k=pt>)f7~1qZ74`z$A0;9iUvIyj+y~sS~{* za4tYarz9BxP0@r}w{#dC1NnC%3wrasx%5Bfi4#4 zl1F{#`%x|C$f?fz@LzZLsT}crDf)7@T{nHpylL8K>hI}~WGHg(J2OQ9X#(?2;mz-AMcuHw+5EbR)k)ooYmCzew`^$Y!|{I{x;hGcolP&__+5$0tH);&ggD z=qMQI1j2qZd#}b4z=ptDnR^hSAd8{7Pm)XLc1RTCEifJqQ>1LhG60x$?5KexOXncs z3949dG!1u`o;wfE;4)G|t)|iOwiw?NB)Zs?#p$ZVHG=1(fqS(`1nt#s@i{n-c?r|B zA8l=|jd$2h8Jn2IEDRdFj9pIdUGh;FP%B>8D@Tkw45&FdIX_OuNofH zYM6Qx%?SZ6T2Y*P?l>1gmVD=R|L^IKI=E$hUxlm8H z^b}GGe~BCMdf`ii{Gl{aqgkF9cP-{I`9u(UpO)5E5(y-)} zR|6EO5S@C!GL&RC*asm$xVMTqDnqCE_ARPoU^XR1crQOs2Zs_|2UIgcXU$BMJD8HY z0uf_NebVz~P}%X1Sr1KLAcv*tmt1xwk~UExRlzI^CL)#Whg!;F*|$`yBJ#HKPg~VE z2ZgEc%R?;U5^h23CzQHjd1YJI`$f;LZ=G20thYZc6FKdF^6Y$A_n&=cZCiwTzbAr%r`D7k!!2!0dy#Ncx5%H8vrvFpd%L^T+m$G-we@86dqb{A&0Ad7S_O zgJ*OeKWMk$VXFAcjYm*Y>c>=6ccsg?$+h;x=WTjZ%;>vSYNypYKc(v)t$8mMT;Dp* zc`jlt6;l)ZV`Xx3(%J98(|<16kK-Q-^={{q7A;n)vvU7_>_n}wL0zOAEW5^pFIoL! 
za}x-2=dq)(ZOuEtdzzsA&r2aTb8X@G51u_M8|xhzem?yU2!oZDmbL5#KU>6|oL_o0 zY6ChSYisK;(IXj4V|0JEn+aoQicbov92q;z4jFZndH?gxjtAOs?fN3~-(90$;OD9& zvPf9rFN1XCJ8&e3P@#|URWvLJ z!oc8YA|i+m6@|DFAV6#G14U7GF2M5^s|S(HXnuBC71)QaC#P2pI-0w(TKZ!(#(l75 z7ED_HTKC$iR~1gWi-Xx-Hdk?3D^1Q&-=3Zhu4V$3`nu$Gx2mY7P;8AIWeXe>ABnhg;Pc2y6R_J&GO~ zoj{;yt=1$N!+gSXuVP!WuOg;BH5fuq|xvy<_ZEuES%k(!?5GzF$E3?)`dI zVXSMLSN!+}m4<_IgMa~Zg-RNCCH3#$?b~Adg zQ-PpMepkhR4PLXtCJQ^#rt)vy|JzW%yg1Li49;h1Si?K>6-%(KwDq-8$1&#B^PFh5 zUA$wWm+T@V7tLh}-8CKmD_(nkvZKH{x{_hYRPX-Q?C?V*R_y)&7KK8lDoflU6fXSo z-id}!=x5G@eE>PINmClZ#mj5LeM(3bVa-ed!UKOlq-{x3TB2`pmA1C45+n%vB?;1@7K{ld?SupFj9(rPN1kD z93-2I#RjiX!2|^ZDW0A##6i!l*{xRUciSZdL0|fm(;1gh)Qn8@Oq|dnVQf9$^F?4T zWGeY~ByAh?1FUJO3%*O@!dM~yG(X$tflor=JHsN=!OnsDUF+ym&)C@Y=C;Nofoao? zfs%EK^^N(e&!o~!-Rz{4itLhdS$x+mtjw#(D5Z_y#~O1lQn$9winSsSTux1S{10?b z72JN=ve$??W>)7fdcBv@HXLy@riY63#rE#nf4|N+@tA2>cz>x9#T=~seor`Bg7mRD)Nq3s0XKRxIfc;;;2 zjjDW`j|!2Ax*z))Y<$W;{nx13>(pnEFT=5mEpW)dd*|G?7>^H7@vF33wm?c>>D33y zlCkA65`*KxK@t=XZsk@Fu~C7jF^7;m3wy&9_N2)C%gn3D>mmN=+RE2z#OBlJ8R5~& zF3}j8q1r8Q!Ozp}E2RC)*AyD@HBO~n;+vrz<{uO-DCLerW%w5CY79zWOUY{Z9jXP}&mtiYQ zM?*kl0Vkn*n-|?4jev5=t-iX}y!m=Ft%-ip-q5J%#cTpk~PE ziuzUTV*K_YM}rD574W?5M2sIPchLwk#Z5(fMWo0bc-UG-Jf{06nl!Xc*<&!V? 
zin>r=8c74~o_k!wLa$<3e>@dkG4FDmByBxuKL3cvPy3!6JTv*O(hVeW-c4WMklbB+ z>t4A&>KWRYvVe;MPW*YlOH%^@HuMA{(x07cq)nXpd3nVJEiG*qykxUnw9&J9o_pJ|ZW0v{JN=D2&%)iYL`tjMTGguQFH1~&g$gV=Kt z@C|ooy<&G2eHvpJe>Rs~7^#t{1*kkqwkO*PN|7hU5iI)?Cle7EKpKu-i-j-Uyl&E= zy~SP0R){{_8zr%lMQgf-z`|)=&#=mLBG&*-K?^N{Hkg2D8WMhk^5&!-ObHb z2C7u7MjB-e0B}?f5;rxkWJS#ND!sq)r*4n=&APzoqGE1nd+c#T-m5b z`ik!gy}CMha_Jveuyb>XNF1t){X8{e=eSDuCq=IA(Pw8rUp+sMj>cMR%eMyAuXqp5 zYw!ta$0Dl14{zs3_*Sy>7gR%iFKF(y1^TxXx9+?;*;~lxU<9$hnI+A$={`7XC)Xpp zM-26S*G~r|H@yD;`ifj|c^Q|l58r8Shepu>wR6}JNP?3Ie@ z?exr3IzD5Bg2Mhj+8!941Y8eeM|&>sPUkDP8XEND1Pa}@45EK~MAmz5mrq7=j~X~C z(k(yc`&kUq|ArNyY2SNiVU<%|BTp7pg?Y{fq8n5>Uc||Sg^AuqQ%c#Ub-V@i3f7HV zUg(me=}v$M!lI`3vli4I;XTPgIE>=>2f>NI>C4QffB%#%9^W3JOM0dF7^R?LUR}~B z{W8)@(a7)@`;yB*XNGDxV}#8c+24V!7#|T3@>J-RsDu^2ARWGuqrH@$In_H0L(GlGU>pG|cq}R(&cd-`R9#YC6%1Hy0Ra^OkWeoD9GyTR zwY4$!a8P)dwt=iPB^rjSgyZe+N0u^Ql7VOvQ2TH!N-72;sK)}HAqnn`A|v?re`1DtCGn!FJ^I- zt7W@P0n(txn9*yog>|)N76!-#2}TBl{U+Y)Zc|Cs-je55Kl8fpm@4pw4VqVihc~7u z_tcNVlINpLL=6!SKZw*^aTHwdjoh3r{r#Y4`{wn*lt-qb?rl%^rb#90#hd1%P)YT; z`0G~Xj@SCt*n|iMd>^rtBiR(;bDo)bql3!(vQY)g7vgp%%+VdsE zfeBFjm-aY^Y{XV7TOq`+HlO180z0st&$&PelzK{1U0vjm8-HFeH3(y1 z+AA0sK(MYTUiK(!k?NkxCNlbE8PVF23I66?D$3`4pR8y&_T5#zh0g zBHZ91QRR09N>fFJwJ!mzaphch_Y2Q6=sYeG0*Qd92r_lYg@@san2I{G*FcDOnaZ{k zXWb8-H$6^($;Xf5&FYf%eC;2xX|vGH5Pgdf&Q>C zk5-S)h+F+3toeI`!=y;)n?jjTOaSWM*PH$hgR$;%w`L9fM{^M(?Xsm_f}PP=3(3PMBS)9RAI3|Q39Y=t?6&8u$UD+4NqFcCB|9nw1veHyWCE z%F3R3HX4gfffqbN&r1z!`YTKNyqeTt?vec@6&;+u8lv-T1kxrW+SdwhwO@0rWMw6|_S%-=jx9=MZg7WDQb>WrGMQ z+$Wt2??fLg5aP;dJGl~T}#JNIF^3H9}Fp@js|RgV%!4aVEEte3%pQq1{R zvl%!_I{?Rwj1hk_QcDEq4KgI~lQJ#qF$hLt2V-++TXC4QUM+R{#|1DTsTx-qQIjfo zvh1E;vOcOE1`dgr;m!m9RIMA5q!b0l%JfZTk^4&A?k8rC zG+kexuQhScd_P(g?EG^+tt%B1>*pnQx!&`~5>M0QoPd1wY8ldbAO~ik4Kui3vXwx8 zV+uyWzy@K2a?+WbwffI4%*wbj2vBpe&-K}o@sd#o(P@-Z(zV}$hAK$(xg^F>V{&%On zVxwWn&h`C!<~x@zqP32_eaMP1UZ%*S?qdasGY#JNO(u1Yjd~sk&b;5T)85Wi#{HUW zr!dD7cY=WR%HO3eM|3qQ6@+&*z1Gp#_$gDbR;7T>N5-zC5AfSIHE^qznU!xZeSJJ? 
z*I4RVN?BR(=usrxSv%dgt1BKtu5&`d3nE058PWF2iP zTvH>m{QCrOI3z1)Cb)7E-ifmCst*fkYc}G6l=uh+6#%r_{GOVc`U?yxDr8T;9S#^g z^aXdiHkouXAeT>9?|`b*6=fL)#flyfT(6kWDnx+Tr{5d4C?Us!fdgL;Vu1b+0dEFg zCd<|8=^Sj}SFj7BTOR+pwzekF9eTdOy1e|$$9=V#vR73bVl9cGj~~x=x2UOo6ZRHm z@zD6}LzyRcR8Srp6GYgVNS7)pD=E&TO!_Gk90;PTzrUfzp#0HXrUtnm{dwOg;X=awN{$R zlx`7KlTl&SHKPVA>#%nea6)c^Ohm1lV7g&=cL-H?m;kbhT}|Ux81_BWsS@pO@f0Vy zY;F=1YDJ0Y0N0DD^B!CyXfNqqk6(%xnr3txEt~i0-?-{7W~KLk-SPY;;a1-uYnKt- zJ%-0Td2vFFqj>?OQWndu-*gGtRYqgtKO4A~S-34`;)Zl0v1a!%aR!khgEP-7Mg zI4UZde;>uknB(RQ#n}S{TBV?^;dezN7@4yu46%< z-`&-oKh;oEfTmB0`OnU7YkFqljkO2-W>)k!TKujXD^H!5pJjxQ#5i%k#E_@tFsuio z4aa{o2M>Ok;iNk357D=PY$$<_HdMxOJq2alFTEeyZeqBBM+v=Z)_b;bwrHa*NrEdLfr?SA zH#m8*s*1$einby`5%&mL0>h1*LHsn$ycMD^5NSzfe_}&eAX1zYc0gOU_|l-R3R8T;7{rGkX8E^|K|!$Zvk zzUrsaz(7Ms`;|SD<}D1E0_f6mUw1&H_s3PjiLvkNW`H|ozhCF%Ey>SGk=cd$7v=qD zNOpV6G_OxwU;zYYNr^Dgd#4(Kc-r+rrQuXTRs^~rVT^-)f& z_}N6`&PA`pX_IsF6~S8KnrED~*flm2Hx})?Q1aYq`upkZo`M0|SUW&8GcyDn*~ZNQ z%llQ3gXb;ss+;Uzw*tm(%lv1^jman0)(S;&ceBJ58*P>&`c`Qt0r__Hh~&5OeA@>A zhA5#@L47dJONbLqpOuVf0OB0575hVfD?_a1)H?|-Rk?9836&hnZj4zmGL(mhM?ieS zWc&!M#bVg5dzn!^JtKxd5rdAu8VbLDo_sRE`T`4l@9m{WO)aio8}DkqegDqz!)#ZU zQTl~MOVt0)t~oit=kwYf%4jt8zTPNnJ_DAZJHETklrH&8<|JVTlf7Fb{4r5qN+vvB zs&WI87^2RBHfw0sRz!ms;-$E$=_d=0{61!SsiG90Oc01a=+tLA!&8=#;PR&~kkO|n zp$&Y0)@nk{SUfk5Y^&TR zZ0)6E&~(-836ojQX$0?`kIs{$2wg3{k6>%zFYX<0;}0RW>`T-|G_J^;%>5hpvPJS? 
zNVJMn1FfIe?ZPf^)(e+v1F*HGx6O0F9T!REE+;F$*4D6=DyiO|#$x`Gg<7*z=s&{} zYpb~IS2SWK(NZ1xjP+~nB$~1M9Qen2Nl85px}BQe`A>!J_(sK37ZpjHEwIT(2R{@V z$EmwUWG_B23=N_VmzT($Rn`>+6PmFPl~$GlYKM~H+pK(k%jcoeW3)XLH=25=wFkia zz0Mha75}Jsx1-PY999!Ej(o&=b&+=aw3t%n;HT^n)X(F1I&sx$G*kxsw^YX!qOz9^ zg(4~U1la^hZ+&#JqUUHs+<-xX!}uMugXT;NnU{i1Rz0PkP26~PC_O`x7O=Vdtbu3O zh+6Oll>nWI{HXE-6`4Gaj{tP+W7QH;pDoaRKhrWbW29$gs5gB(sA~D+Ry*`owvO{SrN(> z9u?YdpvC9Xy{-Oi<@Mip&7G&?z7sF0_1kD2- zz8U_ids-n$!VP`p7z>i>!Vula3VxxO(a!xV?}2IVF@F%5BxeepfLM;raVvVwp|B18R41oj#vHbk6Lb0TAi4rV~2{L`qq6XQYZ7+fs z*C%Lm?h)m)+|>;v2-t+t2LXUa9A1QJb6ritf7ZPnAeuw>!;XYa$&smpMzMQbKZI9_ z(~ksb8C->ZOGQKFQDoGz>S2mJks`4W8`J|T!G~5TStJ-yMZu=SfTEBs2}gqhqV6s{ zT$|917Nv&=EH=DuMM9wb7@$+ItqsadR#Q2^`A~k8kwjBgCGZ#NU076tX4oYWbcm>`G=F*kNCWvs~vx$GWIF(r8db)Y8{`~#<;!LLhVFj~TYLgaYunpQu5J1SK zGA8?myd{Gr1JkZ?)lC^t+aL^Qi@W^Vg_fS5j=6EIhHG`?=zsP%T@$DAR>H5@cdY37yhUgMA^03x*=ZTF( zkBPb)pX~<8oy9h6USF7(-W}z1CJ1NoGW_$4G|{Y+qBaq*wv0sGn9TA8d_tU25>sMa zJ}Lyq;Z3jrKpG0u=2|HZlVrM_&Qmsb|>^vMV3jPfGUFAYn`1SsJcje!fW z>fASF>nFV#O_bA~#!sLMMTxE~HSVnr8BUWrWJOlZ<0izk4hNIFBrFp}9vh(*4(oI` zZ2QD3cL-AY_OUTU$y(#F4#-&W;HX_34z0<(OqX-h(OCPeZ#1Q{8r#9ObpAPjM9+Ts zNrpMLZzEDUuK1~4iKCe&(oTgi{w8;3CwF6`t$v7$J^uToBWIn0zV7gwP6v^@00mO@ zMFe8~n?Sb~r!GYyLHI-KJKV?7S-UftC0x79Y*bYoF+|%m_$B-%Aq&XI$}Ikgxrx?_ z@@liqXN(vUD9HbM=2u*lG+0&9#4o=iqFeCX>7g-9An5<_bROJrJzf_cqKD{AbRuE& zUPAO9ox!NlJJE^Wdmo*Mo?wU`J<;oEK@h!n(Yycst#_^WBh1XYcg{I`?`Q9>hI(Ot zG|o@!rQ#y8LzYAS_TCV`isI&#FwGmn1-%CPquwyoT{Up%^xprlxm+-6-K*K#u0s8p z$;evgh%XVP%1X#x>ohh+PgZwZ{kZnjE9|$qWZ(GKG2h`kHjs@z@5`foI3H_!nC5ii z5bS4?uMqLN2Lp2iX~_tsv!5N+i@WYO6Z8p~1BX**Ol5+`IE$bCbw?8%JlrxwO3d)l zoHV}pZz2|=CK2M_tC9pnzq{IBo-T^gtX+OLU!Ql-E@+r)`O23l{F)UeMdkS`Hz`6c zQix+{{g5W`X5ysUAC-h8{oc|fFYDB(&Gb)Opx@c0H-N185gSmn#UC*=FcH6C;ONlw z{KgiknW+0} z(#q}O^|6k#6Z7i8r@@J}MX%0)7VuSfz^u_{XWDl6Qy#ZB5ku+8=F2bASR)!a6V{e^ z(=DMQ;7jS{*-tmsj~i-_hfnv_@R?kEdJA#$-)1cw$XF|KPju3jkNd-U+*ZOOy9-M? 
zp2vbwlXpvC>$GOC<4yU*&*zSvEU}Khbe1v}8iH6O!WQC4YmZG9Yf=K@c`hq^O)|%{ zsPT(D5|`}31n%tG+z7x{=x9(DZyE2(0TI_ z1GJrcwWPnmMp5#cKzc$OTL?9$E4V9lPX_akCY5bi3XStj1P+=S3vIQkVM!E*cP}g1INM;9ieO5>*-+WD^1Z|?nnGN*BK!~lC zlo-pM+XX>%N+fjAX2r~?F?@t9-LmZ566Y-2*dqgU*n@tpf_=bf^i=X`g2uam{8NE5 z4E0X=GXpv-l_wvlU7vWI)l(GhbT5e)cJNfgI-NThxz;RTixuoRy}@+8A2+D}OoQM3 zmeg`JwvSe9X9idm{9_EMpfM_AIWv)6%~NxO|4|4&dHn&_aRpGv<&g+e#j7F@;A_K_ zQij}s34XG?!^_7wPSacz5>U}H^Vz{*(9g@ov7@t$Ngog ze{_vMWubl?5`Jd;t(=8*LLKGXVDs)!Z8-&K09DTRkd0+%uI)zu|?z$(8M7{DTo4_ z2XUd%nvW=!=EiLQVUQWfVyaA8Z`0EtQy81hYTXEK|= z9TXhDSX@d&$AXQ7zX?BCS~?h_6be)vqu?4)rps);D*t!}gZ`G);lo}3z8%2V{-hdB z5l}#W+s^hWoi7$7+>?&amSP@8&Q%dustE~I$SRJ!zjoDoLHj^iO-9R^rdj?~i8@P} zr!Q{lO1hfQ&OFX-u#6yNrnoYgr18%GoxNF{@SBQ;x)4=y_FljIgy8 z85%u`3|@?ai0l_MQ0DJ}O7+*~u3K;-+u!=Bs6VNs#%(Le(l9%?TQ8iqM@^2%t`@rA zzdm|3iu=bZeUWwiC}z3Irna+IW+-C_Tj%#fjF(`u8Y0)XFC= zHQwkYLL&_JhNyj7Umb;ozIQ~pHy5=msD%rQE(;WmMX;qJJ=2a+WTn#f)O5EO*wQRa z)866siaKV)>aACwbBv4gq(m@!1GCmA3y~dVDx7L~YE?8*Y}yCPDXeccO6f_Lh1onu zDuhRQD}QRoP!bsxV;rj;ClhkK|K!i^_+`9#?pT4K%ICV`=76%Yvha_W211xrLZ+zwtaW0uEzw@ zBABS3<3hp^We=g_ymVk5!e4?23BUAtJmy(6MnWA%P_&yWcc=|s&q!Ap6H|Vm?+6I0 z0jStD`c{oU>0Ok8oE2)q628o>O~9u=xj88vkJowYQ}mG%S58O6_FO_X&6+dd;|VMtklGnWYqjt;6f*95^^Q64cbN{WbLHVo=OL zCE7#(#t*O7xlAg_Vxby;b4#UAgx+v6M0V3}59AIPetaJZf(%-O3-oRHQKOO8$;;3d z(cZye%*?HiToq(ht_z0(a{1sm7p*na6tclPVA0|zGx&*?FxI8@Vu){*TLo3+tY~T~ zj{D)yMMqOO5H>WmT4~E2?Jo$;ZDa}3TGKU zE!HdjSyxDpT*43O1F9-s+uNz=9zJgExi>8kvM?w?p!}#T%=JZkL$qR7H)Bs5wAZ=` zI!zsqfaZ8ujo1CLp;ff=dVo2TGkd69Xse~Nb*A4Z z3ea8zh`2eOPmmlP?T)72ZfN{q`NB*}D_Bxbptq!Nvz%bUEGFjq596=k#cBZ1n7r0d zW_9Dfbbr(T)YN)*X(jtjOB!eWhl)kqPNIOjNM$GP`|mZj;>EWPi@Q8{IRrHC{DAe< z)!n^EF}Ly8Sv)21!HyOponnU!?8UE7_Jy+&{SV|ju6#R=yP2LMG4bUp#2$&CCe?tx zHAHMh|H9?kuTcCK{{0N|*<5eXJX~wv8duSX`9<|e2;n{pr3==)yzJ5m^#51YCCpc0L!WsB`Px?NoTIBrq3{hdD@xygvk26kf|`Mxi1Tc>tfB{mq=f;vhvD|qF=qGO@AaIAz@>02>EyF z>L`a7J){)}3*sVV@USW#lWxW4^zFau&N@%=lWh+Tm@3Y*4`vc14CDHkLxwGwm@h?e)zO?_?6f1`=*hFrR9JTApTv1Z$_P=Gmt8RI`^ 
z1y^A=PdP!{ZsjiUG*Eb?zBytQECa~=@P>>3omPmAZx2v}sU5WO8!NrSCR!(Fy95J^TPf zlNC-2V|L$bH&vbsVuB!GmJqfs!(&Y|*9MTtigS9YZUTKaD%^5bGYT@=P|^@HH1k}b z5f-b}6dZC~F+aB^^eX<>U5AGRq@>K-n%Q`LOMpayg;-y6CUeKi>-z-!nS&sgr+z5I z*O!~sb1N!Al4rj1y-GA!;MvM8`mWiscvR)`PD#X^KV_!HmV)OK_+Fcus}U?c zuFtxytR!qLBT>j`i)&2a0K1W%ZH(-7a6+g+^zAh)z!5`l={KBlOqvs3(xEl0zLk`b zo{Az(p#uRJakh|=Y=tnm=H4faAQZZ1sCA;x!I>ZCgsmmfB&61?P2LCC?{#}zJk3@3 zAnAr6&nDsKent`?g5(f&89m15b>B~ZQ%hhlNT3T{)DTRhph`AxRdCV0A|&J{2BGWP z12lG%gXKV@?tH`sxx81<&0lY=>cDznOvO7`+7+Rt)^IN~r2qGx|&zTM9TbbR{xfN)Qlh3{fm2SqG9q&Jk

1_1Q zgT&6hyefw|;u~QODe<5z6v>t&L_18Ht~_A~0|1 z+gD!&QzClwS80Tx4RFhlO4&bUpKmS>t)C9o44J-IKJ>ZDmEJBtSRX%l1r!_p9yQ~x zlO;4KOCGh_&J_`9U(HR0w_ib_Bk9jkWn@7T3ZCV^-c?Al$`HD2YAbZvR+5$3Ozbm0 z_ogwy9Y6)4PFWM9twQ?gAcUrGp5=R=5$`vqDaR-)w>h`2r^;fUKyWnQm)dl*I6>zn zWdPuvQ_eiBX>kyl09ebTnd8>iehAD=^GK_Miaw&|@eW?vy}Kp^r4PSC?C#lB@Q>;a z!`#Z@^A0m0J1PJtc@}r zc!crmQBg`TI<|c&LhjhfAL8}gCEsGR+%TjNcq}FG^70ta|wi>DKU(hxCAs)a6ZZVi+A{){oojOoe3*wc)@OJ>p_Lz zI@+?>&C0pBbS&r$RL-`2vl=aDJ}hV*@qogoz5hfUsE>v^(BzA=s2#DNE89~$wOyw5 zB1YGT^L=-1e9G6z#s2nJtWQ_Wq%C~cnQ;(kH5KJ!3I$>)YAU;M3J;ohYqQet9fbs2 zA9^p{eTw)uhHlD!ydUZ)!1wUZy-jS4K6ZE_E=oAksXiWC@*eHXPZ_<7D=)yhi&bTO z!I!=!KF&l@`+Pyb`t*=ax>a1a970hB@@mba4Wai3z;DuCf3E1=Mh;j^M;QK9KVEbe zYb-ar`2nNa-Q&OYGG?xP!NF;!;q~qveE+8Ac`T`EzQY1%FJEALDZ#JAYl!^JSK6b- zfqY6exwqdubS2ztE%PFsYWMA@MbI`F<5;)WINn{__-M~wOq_bR8`HAu8sVWx{HX`<{WF@L{zOTtdQK zIO;QpCLnV|#Dd`O7aYivo((54$Ko!7cmQ^BGol~@b26uLiaW6~ZE&p84fyJCwPDtH zKQ!>< zpd|g(d5f{XMPGKc(u#>5=;K9^P&%fmQ%b2@RMc|m%fLOAS?YG!zM`cCMKPQU6BE{b zXVlgQNyT6kaLw3ag1C#7zr;_4Pa#RU-;;!GJS4f65BWfUhdS!thKsfPyU$g1yTJTv zz%!%XKfa26OH4>Z;in=1mkuy$_u!m(wY@tra9>=LK&bTbnTE^Mf?TW$$d4MxZ(oeZ zUjBUEKK|T)n5dqG$&!9U$^5NG@3xir$N7NLtY)!@OEb6a>(atWof2mIz0=Uq)C)=U zmVe7nX_#kCw4MvnX|Yd(iS74iUQSd4-W}le?_{0L6e#gHNb}B)wt6J}noxpdEyv>C zhBsCJgOgXiVY~Zwm1g<=Tg%bie~QYYDjy>WKp-q)>&c-gH(3qs2A#&w{EiE^RB5ci zMRX2Vt?Sbo8YHFw1c>KGU6<6tW-l8JZpNqt5ia2Yy>y+oU?e*?Wh|+mQQC)Vo{G7u z&RV*a<|;r+Na(RczJm%HbQJnJ#u%mwWLb+yRH6b0E8|~lo|Pt#z^c^vmwYX&sNI%) zb;*doHB#QPjnIwc3KaoXA}(MUt1T}FZ23ng*ZTK_cpDxze5 zqDfGsaqA1~3Q%DlyrnQ8a!v|^Gog7CqFex@-&lpSoiPrUs(=x~Lje%dp4bL=aOUPv4z3*rA+gpNlQPj-r*rzPdX=87qaBBCf8y5_qGCo?x(V85=2NKh^VqYq zW^R3urps3$rH%$Nxt!!p&dW#uVvtvq%~hu`^wNT=J@HrBj^_DNMmS>6tm~DYefJ*& z`+TN)wN4uhI)(^mp(_-Z7Gjm&z+AnNW$ljqm>PZ7a6UXzE>wEs*$3?E1993hZymfp`Sk)gl@=f zr4;NDQBgsz$dQ*Il9!|`hds!27ccJZqE2-OsMb?Fec%TtI~q3Y=dbM~<4W}l0(Q-& zTS*uJMYivfYnU)P?IkL8jyv9 zy}rYvi*EZu^O*7OB9ihY&WkXm9Kvb(*9J^=gYV|Ql%POMQgdqcKRh1}JfVH`e)v^; zvac*F%0^APb!xrUpYQM7UU<}ghDjUnaW?nym;3EA|7MB9nS^Yv!o5OvjZpz}+SUsd 
z>ZgTRe5X;Fzm$UAVp3~`qV^OBJUghsg-1V@P3Jsv znMZ-l;~;2+tJ@gQ>FIyj$97$?Ov)&XrPTkUAo}$)?hBk@lJVzA6o_qrsActNw6$Nj zOq#QhAPs;0a-6J+qud9}JW5)mUdh({z$`$taevX%I)U=T{i@p20HDmfJTCd-NZLZR zr{RlGXxhj5Lr+j4u#CI5E()lm_SLWrL)0o3oe7g+nJ>BRU8Vkd`Wm#tQ`|0GYQPbu zfj>0mbkjt{#FiYcB0b9*bcWz1=34}Ei4j?d2IO3aNKd924#;o)oH2~ggb#9hG_Jbx zYEVP7a$ghe?iXu`EFEJ_m40OcXrSzC{bn3D)8vM-aJVeLN~)VOM0mR|0Nkevfm`4G z*)~TT$u%>x3LoH>P2VWmu;=B16iH~FGoVUI==~^c1t`Sj<=H9Sr4?GpIecSC!}780 zsY4?;0;#8>bQ{1}2sjd4sLGzlwjAMR)w1ok?A-+el@7Sw^nFn=Ae3qCPt#iW(s3?v z2uUN-2De_Trg&@_nbbLaYV46cIn98>qhbH$Cex9wh+J2150BSfbx?lR;ltZqL~GNw zeA(gS$yR^Ic3!n#v*V4;kpkUqvSEeFx53e&&0#O&h01EzgyN61^Bs$MY~*Pw?{&|` ztX&@_4laCS%NAcX>FY1~brw{26>$b`?#mxX)QYvbI7?brmpDzmyN@cG>^>SGbLpH| zr_qTZ^NIWYZYi>Y2Zx4y;E=NHTJ`u2#&@{-=+E?qbZ_nP!(Q3InXy&8+U|YTT*hI> z{l~e}J-gJsob#4VG#=-eYTHNcZyj&nE{Q9c_aCKCX~JRgmSb|YA5+S|l4kn6GZ`^$ zT3r6Yf85V?`*qm0bD2qz$+~IDd%dAQkz-vbbIAW>$3Cgqw!GIbK!~c*{OkU$4%Ech zXan4SC97fM7*4uP6>3tol}P)JzfaG;wJPxT@dhWmdvzsO#PHCKTySAAeseS{s~0>r zHnv|VptC3{;?_vAW$FfnhHLIkbTd_VKHfS%Zf#?7ejNg%#*z0%zj1E(sj( zw$H)D1onn;JPGTSZz)1`i)$qmgb9huzrEXvBTZ(z_gk`T;eII(dCl*FMaFXoC4qPJ^i&V|G{H%MEZx=toL!< z_eKl?Qb@c-tl7zD5NgS9Ci9VL&m2NmB(VFYO(>yAh@3~)F(^d0Cw8q2@mR|-GWh#6 z=*ZT5zwqp=jw!51f9c`wN^(Z?B9FRJFjn-#C5hKgf|`k^k7gf z$1rTxFFM2&I^kz9SqnNXHoJLv^d`w_!qVx(fB;2aUY_Hf3M^f?hiVPjHr)IuVPG>e z$)E{epk_@iiE0u{8_P6qf?26c48w{{iYHrBs_>g32f%}dPr4r{rj+6?K~6KGNNf=v zFzCpLu=A|$&%b=}6Q54Z1z!l);Xea`zBE*^RltcrWL28QLeF^7^4}zOx;de~j{XXm zkFJdVqkB;bI+f2AR^pH}x3ug<7T{~xKkLFFNaN9Gr3=9L2%yUj&h;St&10nn!QZk> z;~+}*aWDG`-=Jbk+9EWSQp+o1qHrPhjwu!+9@C|4?-3X7$Xp>1cZ4a`LSrj()SPh} zjlRDpE3Ga~Go2+7-8_}?yZZ~J(Cpy4uF4#3#yYXDo{=ok{2=Bm44JDuV*Yzze(=>9 zt2xQ#0ewlbcrn=dNBFT0s!ss%WNW^PK3gnr`Qt(HrD{vx9%J_|pmW*!SMC`XuF)*F zo9TgBb7jRGV1l6?+{uxz>jJ6?7M;kY6rxdn>Qf5+uA2+P9ul_KwK#5yY-fEwID(p7 zCn3pFQ!(wKnmy#n$+E?n-RLDh3u5Y*6P>uelWb`cT&DZCs zSW_}?6Z*tB8`}fqjFVp!xKRpL!eT(-%u)Fwp#MEJ++$1+5aSUa-nUc62ey4Fj<^B1d&%AObo8v$W>H_qiTK~07WU#K{T+m zV=nA1DN4XWo_eflB(-aC5YitHrVV%B(pVBMOgeo|bvY7|cN=%Bf)Q^qIs`wLk^Cy$ 
z?=!A%unoNY=H`Fj=Nou+eUy{`-NXakD{LM~)_)~*PHWWgaMV_18AmXfA6~um>xZTEji7FVukVYVFR(=D@ zfaoiABXWDVlnhZao6@fB4NIF`2tlv~O{+1~LpaBGd+W?Hrj+^}Y8q5S+ijPzG&#=B z>I{75*-aPG>a9=H>2VjIQeHHTs98*07mzhZB$&rxA*-ym6~Hba`|jZZT+ zh`Wx&K~C~mY=Yl>@&DP%V^jXJyf&*4;lJ3Mr*0)55sg+}(_ma2X5 zddEUWOYKa0$$pCtWcunMk~ZLah_zbCz@aHIvVi{g1g5UGtOmeRV6rm(MLcVXp$ps>Nbq(9T9FOL_PmNucVOkv7?xM(g=*Qwy3l)04tzJ;{IGbE%)KU+{4x|G_r$n)XTlsqCzbmYP=Z z2d(pR8x*z$Cy#}Rf8qah;vXiJ{A~|lgipVhW1n9c5<-?o@fxvfLyq=_Ij>s#>u=*F zm*yYV)6+X|J{VsA!>1a1ds<{9+NkmpYRO4ak$yA+KU>}7*$Vy8z8m>;HnAUgeZNn8 z*4>y;r{(7tOeQkmB#0&`e#yPa)<&k0xzui#u{m1WBOt|Evp-+6{L<^z>1~I>%+}g$ zr>n%c;C3s!fQQuks>*^PVFN_{Ox7fqxB1^3=MJCsgH2Aq8@JX6%*YyQf7(JBTKv}U z$E}SECWnIaV3IDBBO77+n@EeX_Dw&vpfeFv~5dMIRJWTS=0m^|fG z6`HyY5scD7Q8hJl@zS$8_7w=JUV1{p0;hg4**7KVAvN{QEf$0Q{el5U_)ilL9Y!Wj z9p}@5J=El?%c3H;OUF0fvxlvr%e0O1ql50;ia>-|<_&IIc{vV93abwc$~_WGb)yB9 zrc%?tbPNJe)+Fi*G3xfOl_*P!Cgp6|$Bsl81~IosEb`}@Fd6hltOdTcRJA8ov4jf1^W=Og8fhGmq70IdBQPBl4h^beMR-Id!=m_XUw<8{rScz& z<_*>)YcL}mlww?S&(uu`#)>VBdMSmAuKxCJ!}7wUl|SG?)2O{^;b?L6%H(l2jq*MA z&a+~nESeASzqUZ8+JDtEycO}uKcvEfqpMnTlT-QmG)ub2K9IDvND?8o-=5PQU0^!vh=^4IcA3_5d7Fl6R}n^mqt)% z^Ree$J8SuHg$MFxW5c4n?JnTJyq2Qn!3lBbw9J&AVd9@w|LJtc*4A?8wvhSq?!mon z?)XXzYrFm_bR@XnzmxO*R6~($nh=?6a8wW@qYPFi2CW+sBAaifWDA(hVUSJb0+JHA zR1AICh_Z}ue=Z}o8ryG(=ohx(PcvWS;`N|V7<@+G(tRH@Kea(1@fCxP2dR_-#Au>D|0z#|LtQx7q)`g0itT^iKJsp^zs-b_V}!bwY!#;@d~U-f znbBQX3`ljGnb~49nl0&x^5M>*W=~Aar~tHS=2n*vgsMJ^M)i?PzWFH81H$q%ze_Vm zt)LpfrSZ^nKesNMfry16*JfWq|K%ZkVuY$cpxKaYW;4_3OyAyLCH~dx7?3tnK_)9H zN&+RAZc@hPZmXzTA;Mbv+0;{x>aQr7ur92hGGQ`O3dPujpKC%12uaMWK(EJEw3pAb z$~Hor{d&Kxct4Qc9E%juc_MgGOG*EHh!xXO=8 zcW{Qh9)Fz+!VA0E9L{aOVv*ucwa>Kvn=ZKLWhsp)J*wquCRn{UN#>ak=4$&?ls~H6UL5B?5g`*Z8{WBKq>U=F<6X0! 
z;M4d|3wWs-zlFAdJs06Ed>6i;wbXt!uZ4%*T?jLUIR*Y5S6pA_;5iq)3&9^SK#A9X zZrti0{ktAq`-Q6?yh@qM95gmFcxHiW^*LrPi1?X4nw~4Fh7OE5R}bS0-2BvHKo?C-4+cmd2YUHbd&iQJ8j2I-jlL)*&%$Y$5xg5xY%dqi5u2y>x& z;ni7qoqjksD)7z>V`h+-mj{jGnJ3VP)s&ZSe=T%A!V?W7$N%+S3GmIm-G*xhv4pMQ z43D4&VRb1&EaT=H799k9ADtU!mhJ`@kL#uQhBs@E?_3TL4Z{80z_+)1WkAxq!ISj~ zTfhFQgR-?~ntgD0_a?NWVz{X2m8HY1hnDlJvkN6IAVb3QexowZm^#0Yk-P74JUA(F z5dWRc{E@&H8-5d^?fD0&lBi7Q0$yw{i!815(R{8#$L-PrQ>V+ijmLWRV}GEm^yBBm zKp71u|aaS~F z)^7ZKr{=1nH(3gN*SP;;#Qo1fgNH78hU{S+Yc1on`uXZXAN$jai{%iYK#w^)T;)M6 zcgtJ7Om5f6JNeEf?Hx&u1a*HZ$l%lXop1Gv*&-e$GdhY{8}w<|w?^j%T@QS%+3DYE ziROozZn^B>XhG{VW$GuVPU_uy>*wFw%c7NL*#{*}@)?y$6o8(qvBDr}ny4S_8yY;V z4oE0w;pp2AajR4fGafLI(k2^;1u;*J9?kvXGv z^d_~eY0ODzvdhoYbLO`jS@Rzn)gf7Q7GFv6-xMsdVvgpuul6Z_*$ADc43ua6qMy zU)HNCr(NxrQk~luYjIcqXMZ5FdCkb3VUvc2@kDei#unQN{I%mR3K^H&Wf!mvd$u=L zw|(023_Lx*uCTuM^2{Z7v090$c!yi_+Y6%=qI>msOK*imNrg!aIe#|M3N#woE?+6_ z&-pFA@JBXR?N@X>=caDyaJ-9UQZw;AZ1^NbDMjU0X%(+&SEKJ4Nrtkvqow4y7mgmT zJ1mUTbmxyVq0esXJ_GRl5)+#i5K-Rd>c4V;9<+8qZoD4uz9}lpspX0I`_HdKA7ORY zLKy+?2Df3A!PQ~fVTFtlnNxg9<#4HjT<+q5ku2d7i#U3Qd_N3H?YLMQ`RmdZgrY|KID@s$I%dIt7+uAkq~zy+q{OSvMxy2j~4MuazTWxm3#km-D4 z!@G8^PB^%~e{E-`4{yRv`UmktK|lFKHAv7w`u16yMKMK+PqCT1NQ4z-#1PU0|MIs(MfQpnHL@fBE%w8Uyb!& z4~(w-rcv?c7ZS3lU*hL@nAyEG{z)8n$vOeB5x)!?Ds1sF{{=}jy8F)#6FF?&qkLZ`MdWqFB510ViAsUQCOvJbmz_g90V>>u%*{1ktIskhNr`blN$v3 zzQV~E2-u^R6~GbuNqzQJaLti8)UbhBfwpZSx*+==XAJc*7XQ6D5DNEmdZJ{yT9E2$ zpUlrrbkz$g-SalqlhJk*!em?q)boQX%-Q5;AheV)7+P30B{tn`ge3Cjb|yUx3&Qi` z+dHn;yp(bZkkIkBn1gs{JSfLQSxWBwUt#AVBj4V*!wVI*Ngh`i?#Iop-5zdVU7Q^E zw>;w0a7*?WXyw)2A7uHp&@b`qE4lCe+85W{Ivr$;uD`2XWUW zD#`$>C^m?Z7Yl^Sz#T;p!XWu4UK{M3rh9CZZ7 z3b)b!R<4l${)}g%5@^NUn!oqt=za0P9VmYM_}cHvxZ~+#2ipg3uUHcQz#kNvljSkN z{6y?XKPU6RkN!_Xqq^y3)lUakPb)<0hTG5BCk?*1T}I{ws{@wqB!TR?N-8c}+O3!| zSsS0@(b@Rt{;EIOqTrgJFG;z>TOg&3281q+Lc+qkEmdCrO7wk(M1;U))3u$Z>W+0x z4igl_YcV(?DjSXZv*$L*&!~a^w^9MtaCpnrKLi|Naq| z-vcsFWMuc>dcQs}h(k9$TQ8O}EQQ-?wsX0aSgB3V&09ObVdlK~x>c?%eEi?(cnM6A 
zR9zBiQp^A`^x~6|&gJ=e{eBDJbMWyop5-keaWhl&FcMJ7tKG=;2k*7eL~>n!Q7mx* zOX;R1aQ~hBN?-lsmcgaxa*JE-JVEAlJTJkGQsz>uINzWe z6lOIY^a*L^rRyJ8`GpWxN!Hf#R8+F1;N9e-h}EBR5HMg*@BU+CWCQ>+OQUSjK3KAW zHge01!(ZbKFuasBa^L{g25?dSiWN5#85zN#s0SvB1m*gxLGd6{>y(qD^afaln@+!x?^^(OCWS61yy1lEtX_5?~Q8qRcn_XlS~0ot_+&c&90 z+hiuiF88Na_e1fU`5m_kEqqLvaN5TId4&}?+d8P}qSMB_s5O76lOem|rJSss5mGU`{!jbDI|&rzre zLI=ukQax*OGo<@8(0N~G5_o1Fw1*s3!wEZlv;|kI>U#gFaNw2+xL@s-rxj&J#QOK= zf;YAw$7z?G8`sCBhg!QIcSGgnnfj4GCTO#Vpqp%%xEii6(Or#Z5;R93W!EeByG0k- z#B7J6?g2U+jSGiUN18c%N9`H;#+CVE{`0@jN{xlK{f5*&i@p^Rxtlh5fYO^xTN-6d zc-^s&w+ZVn-P?H6#f@oH1#!jK(e2q<>FJ5lOn+WO?7gMZ{ML1 zatd$%ZK5t?8wvUk{*Dw@(jcKn3+JoYZ(&2B3;z&orrOHaXhiPX)v*mPVGh?4Kf=+4 z8H^-Qh!gQHcRm$pxnCVQf?_}^E5L(%d5EZ^JYxfEeou&l?esY{s?sI2ceznut;Oxj zGP8KHrwYG^DboxETp50x0vkFCKt=gSu=*f2X^@^u;-3Zeeh zzoVg?a5L_U=$^=wi0QbWxE$EwOR)MllX191gozG=L>Icsv1irZQacfdIwuF5nGh19 z6QC)6=Enyak2G|Sdqo*jodJgi;~fLCeV6o_P@Rp-4;(~2$|ViE8T+&X`%O z4?1+Z`g^YZx8tz=cqU`wZta>8cOB8{mGYg=V)epmJ=e&@FyNxXgx$!+)4fs=s)7x} zTMD&UA*rpLCD&~|9L*7IQlsf53HK4pw@1w8R3xoc=Hjh`g}0ZUMJ2A!B`xCg02GRt3%TUyR9`wGE+9^g3ygsLi!DkmwbSAh!V^yL=6Jgv{rFCU^^Q$f9i#x)7=kD= z=V~!(;U>>-NQJ`$6-a;WC{Xe~V`0Fb7E=friPBIAi|PrH2)Sm+xdwi>eBE=lH)Mn_ z>D|cC81M7MoczmJ@futZZuPNS>B&mBj?BnN#IpP`aAtFDuH2vBnY{b4cyP}}l+3S$ z6`o3awh=q43Mq-JV_=YIWHZ$01#6_bM7UKc&HgSy-Ca;6sgSDU!l5HlbF|g=Hu|;= zFxH{VZv;Zz641i1WO+e$;r}RK2oe5xvjQneLPI08ZHZJ_1*I-NQ6}c6igj=giBF$P z5~5<&VXtGxQYwV(EJ?c;Ia*~X0S?be&98i5gUm#+&&UDQ8j|2%k&aV)Vk>KYPh znCV1)B|&{@UfL>7REtGNpn*&R+GO^+dNUO%9T9XmO6hzF9Vn--dg{T`S{kEU1alFCe7U8YJc@kH(pBmvc0+4p2c?B-f`1t|r zcf5D~U3y>bcPk$tL$}!GCUQk}cD>(ulZ8J<`!rA% zcqAgu?1f0_-LamC0rjHPcpuhIcG4d zlq#`O+3W_?7v9J+wR}4jl_^lrl`$oMg1Pu@>1Pq$Qnu7;VJit46(xJ60Eea=&(!g* zyjJNLV@*cJB;1s|8qm)m|K-RXR_i>sDW?-*$bQb!IS8b{=W%LLSYON+f45>$wn<{{ z3Pm(5If;ya63bWXoxy%hVxFsD-!BiH(zfIthTS&4Dl}+v+N9#-;PBgNDE-y+$vA=_ z*KJDT8RWxO)qYh~l~jUC$ub8x4aCg;GFU=VqW|zVcTS4@3zO%oPEW!Wm&Q_Q2E=*eH8_VRY zpT_YYd#dk4J1>V07w~C}tCTV#0vb|srCS$vA6AEUuQHtdn>&1~Z3Fd>A1;6OHO?RR 
zeS8!;+P;u743SIau{(Q&$d&J1?xK^l33)7TW+Z2{Ix$Z>e4971Ihs9~y<7i#_&Hmo zO7u?aUl+TxVZikg6Zy1fT=t70#NK$l{`0$|(~0-fi&U{i8^2sTw?pklKTe#GQs?jL zm{mS)9g4SRAR8C*Kiy`;KKhCDaQgkD4e)*ph}JHbRrohp)Q_u43dqFBX#nY_v3!M3 zBOXO1e!UYqZRhgzD>B?Qa4qq^x!P~{Q0QFHIbY0J+;97#sVS0u!+qb~vcb2lL)5`5 zgPc6~boU=NNcsFTSKM%~Xs<@*dxIq*k-7Q|+IN1OoA&HB=We;n!od3Uq$}S=-9z!j@R;COhpo5gL)d zf>H&x*cm9^w`0gXDuLgvKFVT{u^50=#A#L6=$Q37Y@848AOF};q6@b(=!>HP!b5|k zo2!SQlr5j2i(fBd^X-^K5B>uQo$enf^ap_Ta5->8rtvG$qw-4QyaEE7JQ{))G4#)Y zq5Av*v99$>Nh({f;ny7u%4B+Mu91HC5I}`Wh-Vb&>B{G9rK@XS;Q=W9bN7QsEOCoT zZN+HP7(tFPb*=()FK#<%&#`zSgd%pRSvcq3fApv`G1;wcc5&5*xV2_`3?&6~TVps~ zQJ-oQ=+-!6TDrjsP-WuPKaEp!W21(Mz7KlEz$d4&g^qx0W=YDFTH&&!fs^RVM5zny zjcODjGSAQ=qm)1DKStIBi6jUbt5nZl2U(mglq7Oj1J*sNWMQ878DcX3d08dAZ>wP!a4Q?VI|<<)Wfaf-nXO_Aly$BRC;ZXT9!Jjmy2 zPU7t%7%#=UI7~_KFB8XVB=EYc(_K(8j}@^NTz=RIrSC60ymV-6+f4{vDWH^So*OCQ za;pBEjx_rF^uhm67(x1QQAM{?^ILtEmcI^*ei|(Pe-eLA?6%a8=1rL!{BHHs8r_yP z)dKr3zbfP=k;Qub-rb;x`_d)k3s&r;s((UZJy!z5Pe?qyG9+1g z);h&!ab)|9D%IvK;0W!ms;tmVrt{uv`A8pDM!pT@omD!%bAP zV+i@GDWm#BAKD)~=gQ6c1wP>8;{LR2-g{%w;b2GEvYLDlDP{wuW0?>mJnj$viz_vw z^Rf)qhD}3^m8yYPlDF-?j$DkbXt)Y6-9MZf_O%wY8ou^s_h%}W+BM73+T3GUNWeG6 zn$ed`m>^b>)F<&vmfEi5)wfsq7BcWwzLj?7wS8YpZ1+TMy4Z>`x-ls%y!OK=43WZP zQ`Zo2NlE%5XxYZ?IY_4FguRZM*9u(Q{GUvM_IUWtb2v z%4@8j%1Meu3dv>f@t+yaHoQau#ZmTS=_fdi--dQQ9CvmF9*w@Z?6BKQty}LkOEye8NT3lC zNT|~>rbwWma@ENanPRBV>l-NN9dA5)`;0Dy0c0aHskJMRJOr@A<5lzs&k|85Z zB4MJUg+;mWNxZv2r`as*8&U6)jxKTGj@P%JL}z9HstIDkby@aJa|mB}LIoPLx~-YiX49U{xk7P}YXe z^guv@r4sA%R=!d1UaNZ&O#d`1;a;3%`X<`qd(777y^qYhv3q2F+M%^Gy5Od|)CKP0 z|8CoR;2fvkm|FoXX zo?Z;c$2U>&)%LO|3erSIGm;R85D|x~^}$+eG+u4a>rl_<^W)Q{-|a#O0kg6i0J1O= zA`*}YNQ49#07!rk36LTJK|(|_M2J%WH8Qew;d-h7c?#0dMiYV3iM>`@85%hTj7}>K zf@Yx=h>Y6C!@m32tKVa+%{doM8Akude&QtpeDv-EIt`gKt1{`}{XsIba)D8yO~6S( ziUTFSb?fEF-FEZzV(9ko4V#XszNFC;Vz$^+$gG|JEURo%xPA;s!-b1>&+(kIF7!yl>tov zW1q4E0g#Dnl)~<+O`M$~Mypim++uljbKLerAC*zF>ZqWy58Y~h{OG}bv}))^VP%a5 z6+;NjzzkLw6R*-*>wrg&f?^7j@;JdyNZ=r$D4dTB)}bxhn9&(qRo0ZGOxN!YhpTZM 
z5meRG%hj)&~Hv)|O=nh#|7~S!q)_H;%C?W}pm#^gi(J^6s5GM~m6T#Yb1` z_j^C+((U}-7-USOl+l2UDJ3KtV<>g$v_6cjvYMr~&XU3QZ7mv?(YtO;wm>ZdgE&?x(SJ~(fFF$(sqjw6@pZV;Y_b(q3m!JB?PyC60 z>wj>#*lLRBPu}sHC);5ZI{yR9*M2w8jJt4J$Ft+rKJ`xzV^f!d=bJ0pkCo*8Y(~~^e2A_p+aW5EEZR_a zh_R|`Tb`Z|5b4nFA8*Rya5(G_hlQINa4pAU=0We@Pfvg13xDy}$8P_@qyEFg;WzsY ztMJbIpN~;4+VRdE17cDZSD3O~RK<~x@S~spSBBvbMQ^&pVHmFV?I%9|1Ap)j{>U;78Zd!_l_&7+48?!Wf(554lrD_{BAufG1q zTYZ=Q{y+TNrmP2_mDU%#wJ~OlI>vaix;rnHVSv|PeT|}wc~lZFuh##cZZ}i2zyoOt zDMlX7PM2dBUp~I~_=6|b)DIr6+deB(K0aR)NL|(CLK&_4Jbv`#QB{=N&CVI45PVLF zb6MB|v2vxdP#DAkl+lt>GjC8~=)FR0_lK%1LW<)!I_H$thw&gF0codF$^g)~ zVlu)LDP^=|HqLCeS7ll5_uHmvrYl?59*!1E0X2@JDI6HZAVI<~jLtp7)RxBW6 z=MPRnT{c?TE{q{_$SDj5(V(m-&4m+1@%-tBi}}h`bwBv?t1UAdfP&P>(dUF3LkQVt zR1lC^5ES@+0IRbiDK+%5NE|+T^rLUQwW?N=v z^|yiOn)t4arEQfS-0I=KOU*i@(Yc3+6)OY8c05$`i;PoRWnk%1r+5CidV zdmij4723g&G6EAqPUx`Dq2_PVIW2(j9HaP*kq8i!(?v~%_Fd1TcS>hT9&-76Z2K(6 z2XV@%5L%@xCLKi*`8u0#_e=X7kz(4bI3rbL?rlynCYyl+{{9<`P9zFEXjU=;B_#;Y zil3T+r=+HwGbt*J1`rhX`1{L!*{~MxwX%w;5K)2SQ7u*7eRozxep`iP6B|laq0;FnM0*ptt3O=@1TU%`d7hK=bCKF_t->Uo$s`9CV+lTN0&;RGY+5W{( zr8n;W;D=8yN@8Up$td8e#dqr6ljT%f&0$mkRayva$8iTu4GltVU!!q5dZ*T+m39_VzDag zYP&g9<$Mr)=h0(pt=7*lu1E=#wPx}?0f1JKD`gk+qnOjfDDV~$d$!zcW}yZ&Kkie z9ei@mIa@Nr2vr6k9xJDgZj^UUPBBsU^e|SP&pAk-N?&caUFetfY}w49Pnv||5Pd)d z38O{X>^3z+KT2BEp&v|)rBa9ex*aYz`#87eM&HdduUo#q%MbhG4-dDbU3BBY+HuKd z&SQ~0*!~K0piC?nGGrues=|Tzq1~`jBDQa#ZfseVg=-2?>tQ_y+z5u^*EA?z|5okTN=> z$&d*opt8zlS8-W;av}TDSXMSe2!fSy@wdW2zI)_zgU$g9>Y-=j8qB%^?CAGbd${nV z&!W+D{;zG_NFd{N2L}b!{lu@z*O->MDnzaWJzFtL{F-rmf%<6h|35f_n7QxH{ z!Z|bYWWF=TW*}66Aek~SiV_r&D8#5>vMsBrP)xuAjMrlf-wZzyP+2KZ2#^7Q0V4tc zB{HUDF%VG#PzFdSSyGHeS#`shF_yJ6#*LD5h^$b}(~t#?3u06_eBA8+y_+BWLKXg3 z=l34=#gfy%dGqS$s?Eo0-S^{;M-{9!28fWPATqYl3Wvc<0neFE6Do(9bSCoNK%#3a zJSa-cjFJmoYXYN8k`zLS!;XV) ztu={pRy6n+g$q>+rAVkOl~iid%9@u>Z@+SS?>=dm&ofZG- z+YdhbpOuVxNaSgcko_1Jgya_$6)7f^r%^}W@%Kf-5iy0Mc*s~yT!lYG>CHTNz<&2FM6%w@CWL-#1 zQ8}e;yxiU2w|l1zQ=f#Q&nYD>n#RmI6A=MI%$zg(F|+AYK}rj7%Ak$YWOCxnve0Vm 
zS`e(N24mpfE2Ofprr(9`Fl;W^cQfbu{?J~mZ{4~*U*2d6rL{K3gb+5H^=`NG-h)a_ zQvq@sBI;CC^`ncYQG!l94&kxCIDh;kvXph5KoA5`HBDJsmqJXL35g>k=%Unv??rM7 z{mu`I#ZfbJnpl%%6qW>N6rloT(HTu*lrS&L<({_+3TfA7!lE}s6)zx_A9?|VLWe*V#XW)Hc)b8=^1Yns=;{_78K zoxSw)zwmS0-R00d+3nAdkDAT;Duxz#sEVa^H~KMM4!irC`>9CB&2oNc_29vSrkNQe z5g`GiusiHWpG3L)?>%i7O;gowF(-jA`V>QkEJWkF4MZiHf(oPz0c&xKWqrASGN0d` zS0_ec75mrR>gRvqkIl~NzxCJtqtE{4*A8tG6z7zHBC>r)*5GP+d%0Q>=@0IIWUakx zZt3iP_y>OK{PO(fjhDXmwYUHJ-}rz3-CzEtTeogLIe+?zH-F$y{_B72GoSf;FTecC ztFOHE%1i&o`Qs0M`IrBe_r9$37r*qGAOEqR>W9r|KmW^%`SEPNxVXC128z-Yr2zmR zx~t7vDPxP-mww|nidnhax0&O;TQ6CC-VgrJt(66hI^|f^+K29YzxPv@Pd3fWeDTY_ z{>EEx&Sq*HN2Ljc=W}O`i!mXocGnO40T?H+m=IUBMU>I^oTOPShJF_~qf&*nWm)!p zueD1lIcKaX4u^wQg|!+0VorppjnRskIrspeiO7!(04U_gh-5>^+K{tVS=2;|$P|SM zF@=C+b6F#E0%Sz720>_=CTC+kgEB=z zJ3qOFN?q;_yM3#X7-Ss#iGws-G}ctT>|sI5E}Y zs2#?EQ&|)VK_LzbGpEv3r=@>#7##G)*~`Y28L1LfWl6$0c?WEsMVB+POc!Y&Kmr3u zS_=XyqevO6l*$64fP|S7qaXt2#HmktNctF+Dl8gXJz-f8j>l;`-q2w^pc{iRJ-FEQ@AgC02$sw`Gt2Vr*fUIr|`xF|k2X3?PI`{O>Wl zRgTtBod-0kwCtjfi=y((BYtP^^M5;4iI%nr2d|Yv$mOijP&#Qkir?;+_PZ(rPLfcl zD3=E2QA|c*1r-1QXaqYgR6<066h>qz6coSD+|xr0jkd@%c75ZjIF5qoLn^fWcE7aW zRiS7D){4#b)fid?w&@ZKYjJ@w!5DyzIenDA}mcXPD*{2+NZcxm&M6R+#1RfgCF?D zf)6S!XX$fSM}O=7*MCgqu^Ytbm@pweKHn&%Ldaw=vSzdccl&;?%fj=>k*l&iw0k0I z7BkQ3;pLOPKg67`c2ZiioX<4je6iZ^_q*Mps;cZ;86|`m^I%<}tag=+5-_EH+;8`0 zf#cv0RjotcpI`2@rq#)DzRHguJvco*Et@)V9EZUeqqWK+F;4VYi7_G}bLPm_>X<{8 z3?vz{uw=;jIyaBAWQAm`si}%FW=Rs+6{ac*AIGcBRok|+<)W&K{o!zY^ArGLj6RGZ zj0%-Os7kZn57tUyGy=z%4U$#br(v-uO6O{~n3aVQR7@h-722B0Xo&f6WU#8g?f#9l= zi}upz1PGa_4a7-BCW(Fn+6mP{88D>?nGFQuXZNXRftLS&uYX}kBCIK8;gFP446KaS z@_TC=_b*DBlLi136s8OYQD=aVVxJ6XphSs{WI$#Xt>C*gidJa@A$4JNrncH}60L2U zb_G`7?w9tvszl__q|Bu$$1ui>=(*J8pc726ZvcR6t2q4j*Z=Pnxy+r_uqeHlyz4V0 zW7LiZ@!5*9WE4?GW&zETLNtgOAR{nNg=QoM0DL~PjsV|$r;L;n9igBZue?@Vrx|)Z zgV-Ty^06OdM?^l35=3hq(>T#l4xuAWrYwaqGb<{l@^Q#I31?MiWs_K*PQjW0AQMiI zAJM3k0fBB9l8@F})QUk!D=t!iH1U8ZLV_lhNK(k4jW#+Z2CF4ufNV+|Q-_GCi0FBS 
zFp9_+fsx5j24K#BfP{?bL?dE`oS1WrtfpE&DDJd^KyW=Nh6svjGBWK!G5`Q0U6+alK?X_iPk_-UxQeLhkr7}96cHgx z$WdZqpE)3sSc0swz{nwMK!PrfPKhdw))fqqxv)`=+5&boqt zEas3rMu=(Dpc^v-U43x*WN~xpyUboH*HrFg)VtDF%Ie5*NJ9t#1+25mRWZgOKIOz1 zv!pae^!?qlQ-x%;VOG<^AR?vcF!lgIbRC!h3JC!bl}rsGVorRGxtjW14ge6R^oyy7 zKrE^M535LpwR(9hu8gHzj)tGCw^&%mFD!1eeUZP z#irC^7uo&#jbHbkd+s@V?{BTW)*5q;G4l1QIpq4U?a&FlyYVpgX?0nZB+!Y3W`CAa z8pkmiF~)eTODVhE&JicbP1_cdJ`K%st_DH~@p66h=tgB{=zWaQx#r#PzPa0N`(ZQW zK7^!#);sTg(=JOX)>b0L&hg^m>Enyl!O?M_BLGoI)>YPcGPWdQOHT$_KN0l z(b|x*qlSd*X1<)YV~RtNQgRjn5bv$Ce7-xEm}@cz%f)=YtcV)xE8`w+E{N#-?CfB^ zym4@_?RH%ljb}>xFMsikKl|tZTw5(p)~6r)bmuG%x_J5V{r31s@4fQUt8XY%y?^lF z;ym;2q zn@WpTo)53z+}|7e^9MGhnYD&^q^vRT`c4ViS^yxN+Cbc9yH(Y0=ofp>QdQ~9JUl;t zH`f1)-}7&;SKA-?Gk^5)!!sbQ+d8CC5vr!vAubP=!!Dklc1PEqk8$k!VcTtf@~405 z`n9`6dt)3=PEPLJx%ERo^g|aHX8>AMUV7=%Kk{EZ5-9iXzy9*eujW{ufA;RL`@VnT zRUXf3|I!#@C`+ih4I2hVbC*&H2IX zsb}s!dVG%yv6j82zIAkX|8)EK^jhHnvFB#HiiLI+-$Z7h2n_qZ2$7S_ZDYulTwO9KcCMdXbiD-KILGH zQGrx)jsXp`kcc3WsuBXhmHYgaYn*1$EJDs$g*>JVJ4M7$a^?Ln3`K;Qp-7Ht97m0k zA(t_;A!7`RhBN}|W~bI#V`iq9aqLShWz2>u+jvIx)=tu>5JJwGnI)$wyW28Z?+74; zprwpqB&SShooig1^Jcfc82hawGi%y$9HYWE45_4H7-m%V>-}8Di}l&wY~BP&Tfq6v zqQ`RWxqw(eRZtBVW+Gzmh9tzq%#&Jh$`Z&q+qxoRAtJVHO`f{cMH@i{O{hR%Rc#W= zqQuq?03?N3$dDnz!kn?}FYcuIUjH5cYIpN}ySQg$*LII1iTNsd%aDUJo{F*pi4rOj zjxki`Yoi{2Ij%f95%mb%xFvbzF2aqBcN))S_Lck)?o&dDn|iB6SeXOe(&%3bARE_{`P<8kN?a6)DQl{ zzVd&dVDAC+a>yU$d!O_-I5|?PH0w86#Q1ewU6}eZo7T=L#so=Nt+hD5J-bIE16wM2 z1a4qV*Li11?!%$W`5*SB{R72_QkmN4iyP(QGu2a8kr`}8On>*!{VTDigC$JLT3y4= z!>$$`*4zt;8=mR2KHdquzqh@B7q6L(QG^;~))((&uP(-XTZQFKzAB!-vKsLr8qJRQ` zVLJ>%RaJZT?#abxTkkYmEs-phOhuDeBBH6Aaq?TZYGfM)5wC0|M5ZJFB8pPri7_h> z8e^2MlB`ln05CuatTI`b83i$`U_ndPq0LDR3Ny#RAnakRstF@YOK*1cr`g4AdjpwFmS{O&E0xrMV+bYC0VdXy|x z^)9;`cIm_{4PY)%ILp?)sQ@*4RvB z(-{AOdw2eW!KI_a#0I3#V3&G(_vQO;-LN#kIy8eCbHk6~6yREP0m7q95RL+nTY!M}ns6xPG5CAg} zVAiXM&dFAmo;($!VW3%XxIgT6aHDbO$Mv?uxf^ygPjxB4!g%B7=~p^-jy=cSMQ^gr%seI%AD7IcL>kj1dt_B?y&q 
zXRPaK30n3z#9Y{39bZ*-s#{EJoJyS7)+tbQTBJ%Tll`|$VT4ckZbUG}zf~SkgvotZ zRhjLTy_YgN@LiFpN1>JR^yNBFvwI z-HVfmp=6!nf4AMR*jr++q62a@LYh&Z!HOXfSBVNxDPpWgC2Q@_Z7b(dl$p1wV+WO* zaU8*L3b`1z))Hb$5k#lv6CyIiA%rPHX?hMh=ORMZyrr%vrA%Xnh%DJjDF7&nI5D43 zT&kXowmXLw6JF#Q|?Ob}Jk6qUnw#r5kJ5*4dVZ>6$N6^H4kRZ3Q|q%4jH zqlExtu7LZzsf{0V>4`*)EvB+QC~QZdD&KG!xz9Okuzf_IR9c)Spo+XTi&ZtnLPS6s zW1J$&hhYE!(o|RTqU7u>m7H^Ary`jFV@jncu@Mm^bgqgi)i7PWCMhW;Vp~@;RV^Ws zHAR4}0V&;X!zgv-bWBb2)MnVWb(UC2P)L=mv49Fhl(R7NFbvLCh#1_|7d&Y+pr|6E zwN9f<0u5t`nNv!t>b;L?FvdthQ8dnrNX*$dYf}V(5PKr9)}~ZSQNvbo;IYH}iyMbuD;IWicem_`C5gQb)LMO6u_5CRnOzM6h% zUDqLmgT-v=_S1HqB<>O&@D&OxqYs~ac7-EPiwewktK^3iY(|db5U-W&y@2i3o3t2a7=Cfh9 zMGgIK%|gzbQj#&W>2^&s8^#O(s4|-^#~}mZIPL`1Iq$0`hCZaoY?!RIKBW>usGC_y z9Y7IG%e|w=k1h_D$3>DMtgXozL^4oUWGsP*3}Enfe>Zf-P(SQ^Rh6QM9CI;lR(adh zo`_cGXR0ctSZfkODj}poWW6__LmXsY-zXHf{nk~c8@qPq5s?VB6stCbU@NbxlQMov z%8(Rq9)0K8>JvysGCQuE*BH<+s%i)U$xP_F&N(Mfj&1_wt%ROZiV`BCh)4k<0ufbZ z<|t+Q1AXZ2K$qX>&oUP@e!p=^i$A&P4Xo;m* z@2#~wC3%!m1W5(KVv$rzVZNGBr4%BNV8JE?&XNeY_QlCv!p#tRRh?qwcDvp5Ub5D5 z&feR8?B~c#?)8weT^#V<8V91cc0C|krh6G-AKvGgvLJMTHe2abjOZ}bZn@l}Wr*Y+)ANx!H`Jeg^{_qd~-XH!`fBL^RAe?FH z1~gg37y^V6BRjWPEMk&h`ITS!clYo7!@lx=pok(K38DHw{-*c+slWchw#ymGu}qrQ zZyx;cx4!Fz^UY`8JX5g@s+D#UQpqj6JyjH_z?P(hSd3@)Yu|lrP=5CFp9TEp`rIuW zD17&Te&Bt#{?ng(0i!D922K7dJ*G3J3i(Z7H1c(1mv56L72AFS50>$+>{{NH-d)gfq z;-%H&F@~dS*T-=rG|t*ML{ufqo89*G>>L0XsU(@z&B4LJk!?!OH?QAFV-S(5ZH%*B z-;beV=KcMB?8*e8E{URRfAo#EoWpDp&8p+PeYLn(y}13=Ds z9Q$z`k8a=IZns24hC>LdT1x7=E{aYWZUQ>($OS=F6~$UR?I5R})09tmu(wdv&2Be@ zk%){jL{v%%DH`IcssO-yzrWm5#bFqRG3~m`G3{#KRJO?>YaX1{(~Th(VvRWlv*$Ne8HK@!PR0!D`DhRm{Ag2gV*lhrO>!4hjyc$D2 zyZh#DUHYH@KRmTLSeTO?oNphiYTLF|)l4y%-qj+)3T%xvP6@5`MFfD9@Mzxl<0x8~ zc`7F$s`p+~QJj2f%*1$JQJF?cDUO{M+3j{f=;{iI92Nk;s-nt*2m$~zi8@nteL9@i zeHTCV@y}a>zwX2Ds|%cOuN(6*9H}U=quk(0ECiw_ltxpNUM2(HQgSl~|hiItnOwIydn1m-aUfFS9__7WOEM#O~ER}26MJW1Kc zC$3=s{U}{URJMwt2vAla)YYq^3DgL}RI4aOnVFbMDJADEV*v$1Gh*r^sE#3AsQmod 
z>nD#d&bnldZz0dhPHSqi!pl7*U;g0xp89qB;qu~@-4=F$+p>dIwAMCF9S9unjQetb#Ff$8i-}i{va%G(ZpxJCT)j19h4kYK@X1l+?9ODq;*pFio zT<#ywmwR9M^b1+iZs-MyNX{u5q+DWvJPjpM3d3lC_ZCaXrXRy=Z@#`7Ixqkrme>)J z!ufWzjwdpAyp1$++ zM?UzG>qkegy!QF~4_-GGRf|*gRm0?)+3fV>{Q37h|N0xRU0j?Q<)&$z<@3|W#+dnh z4gf#~DxYI&d^M5Un{Kz>ZivXL0>INxKmF+OX-bKiFE{I)3R!6Pnq5!c-S|yE_`SdQ zxBv3?^aSGUEL}_$O4X7?hha!rjIp0E{s{bNkuZVsY)p_04AU ziC_N2bI(0<|KWpN5-2wF>dku(f!y7vZk{~4hhqPizxQAM^b4Q<>7V?q8(yvHXq^aerscThDW2k+F zh#FNunJG!xZr4CqRc=hN>pE4f+lGh?Vdb0_acpWkubTSw^l?9SvE&$XDOyyN!FxY# z*sSBJIUt2dZn^8vOVPHi=gl0T|IOd~dw=QUKfUhPDW%SX=G!~OGz`5(AVg^BV^PvSyN)dR2WG>3!_b6Uo`Yb@80;9 zdGnvW@DiGW5kkqq=NR% z&s~IE@3*%YKsAD401E)d5KR+`?BpVt1pwLf=^Oj__tnA)4RNH(T@GO#Q!W%jV1xbJ zb9Q`1jHBHg9DppO&UcL zYQX}q2{G1x_wH=>6{;r{FD^9Ms{iZjYX_e?`98NgwAGudwL;&3&X1P=#xeim%Rm38 zPo7Ip{rb)7HB8;eGZo=Jye5Z#=ChZ7=;_z*9v@J<9{{kFJ`Rckh(fwevF|Qtv)P;X zzPQ+1rj%lg^LEzveNM?%9$f1C9+P---7tppu6g>I_4a~C)|gXD{i-KtW{ZWUfEi7| zgjGUcQfB6YBVj3N;JLef>u4O-rR0+buid(P1FE{)ZHzI_xs&s=5JFwoTIu=a1MfXC zr_@_#0A;h;AtzGl$IEGl9by>A5df;HN{~~=7{VlWZmZfFG^|DX^VMa{iI@Qi*dbBL zK_vQijueNkFU5MV-g{p|v*3$YPPgI0G&j-uT{rH!wUqvgk0losgi_M1HhV1}FPvjB z+XE17j1kq0I`;j@T1YN0mwp<4sYlh_=GGKv61;A|*4cU-;C4x6yqU8McX*?UrT15#%<7jtuy z*$fw-TeypT&nfv-GMH#DeG0POZuhat+tC@Dh6_)&;QMyDQAZ93(x}#AN^V>ytl>+A zfiKP3uhtW22!m;wu#0KTeJnY|rkVk9|2Fm9mn;sAkVl0vY8nz@>XME*M*(Gxl z7&D_3d7G9=lu=^=jbo6puGd2thXk?oUn5n@Hwv6`m$RVR#gvDUGOZv|-3AeOVkk6$ zhyc`ngNte|aTIh-RMA34J4sylOv{*Lh`9hkC}I_eJXj?wT_nwoMn*6f)+$1BW!f~Cqm4rQ!PkQw9ip8 zB3m-Vf`SSHGSSP(K1z)J=$ROi(18|46$Qb@RCOL!zM*a$daY@3*Rsi>FT*C5OT22yt-=tLD(3JmYr2C3gyb( zT0j*+sTfqeavYlKTL2&cDCY0*9-Jx!jHD<66X`!iY>0vmAv8@xG*w516p}&##GJ{H zBw=!vd{Hoai(h{6^MCU(oZqLI_x7v1YUbPBkWkkeiyWwwANhrc_|g6Edg}f7=3`R0 zRbRXB-V~9HIcWqD1lzu#nPr=FI&AkvpovX1QoKpbGVSCwc*x9{y zzmE?K6knkuQ%X*0Sgp2aXBl+1Sk9n!>#?uvcEJ8*yI#!q44M$eZ8y>|TJMCJoilZf zMPewHDpIpsZBdEYj@`%vmub6Q50jLE081&`?RM!@B#Rcd1PF+R*q5SP?Gykd#a+xf zw{6|jRY)NP@pa?ob5)Fh3Tjox94qBCj{QZ~G|jx3jbp%|b61CohhZR6Zz;yO>vjO( 
zy&w9%?*jl`yLO}NR&TuV+Trn0Hw;;Z*>aIm+Vv|0E-4ey{&IP|So*q(G4`PwcHL~Y zaJ&~&S}vEWx?QbO$>SJ|Zy7iaIfQj8ap-qSil~TCHH~C5?AAjF$i_I^mo5(>mYp+} z3@?tF*u@8DCn1gA*p}?xtTh%=3aib|IlJxF^{n-+jVV1idGRm(G9Dm^c6SFt8cymT!Idl zeRjET+csFNYbUKA#}P5(#%dg6)l{MmW32a&&d<+tjE^6jj${AayWVr_=JVd~z53## z#r*m>rp0{k{PIF1#}u~P^>#BbMN5@xS<^|Ml-9RNbary@!Ts{`D{kBEw*7X0(~?O9sqae4W6l}GY%X@&^$v=;esulg=~vG# zF3kXZrw9*M7a}`)x2>Wk{*tRFnnH z;kEtq^YiodxfHP8ov*fi=%@8}Qol`Iq|972$3j(A6-eFKIb&^AjbS&g*OSO#KA)?W zsb*%ajj3EYH&TNNV?U;38($&vuJ3cs{n)80J6`okRSmZaI*enHltMJbO;t5jog@#i zy?6O~D$!PckXS^CjE2|+U}kICs^v&2r4YjU;$*(4RLg3;Szn%t?9deQuv@P;tCgzG zmTv5~p&w%2rm{QRTzvTMvp0PG#j$(oa_nFlVd<{$!ZP}Q?YCW zK^W85S$oYo=2eViQ7oAwfnp9>D3!OVxjGlR*o`5ic{5+{`q|-uQBFzY+m+}13NV_4 zv}goMNk)atSl&9w7=;KzKxTR>imV7j*7j=@&GC(bO{LB-rZD&30d%pGwpqi-`J1ag zP=S(W#iHJ45Mj+l3?O}-MX7wPz-=8%ASP8DVn7GvO&53U-=43#F>R1_hB?> zvQgU__Z5oVh5qjEhx6RcAzO9%@TvduLH)BAZ~o4svkxt|;p8&Sk75jZc;`30yI%jjjYuA2NMkxY+#mB@ zKWn7$ZaWO2cg}^B1<-(xK~n*18ZAYNrmRQHc^XkN$p|Fu$!y1dyXki8@SfYxAfm~x zgws%D|2R4*H@ z{GbD&BNNC>R24B*01P#AB+8A@5d)4mqO0;!Ud``wd?9?+vnhK|Nwb7;TgHp+FwDWm z@yzs_2FBLMEa+_&bUi>$8PCIdt)>gu8qu)5)|>~NBQ2pgi7Cd+!c8{Oe`y^4{bUc= zmWR}PZ=oNeRrG!rMkqpWQ!hf%oC{|bLP155%tozc29O-G1_o9nGDzMah`dczbXvk_ zb!!C49vfPEr>H}PM!&iCKWi8(u9{Me!L36HVk?J3stEp0p+J$TOZOl4CI5Y4ZhW64 zYZ64uV8}GWduoXlumA{K7NbRT5ETJhGz2Iyl$?oCNy-%On*jg>MS%cB@k%wGu{nJh z%*CtU+unP&-kn|;My>5&b+7jIZr5ii-c_EeSfrvE&y0w~7*T0`v2)H$JoxGMVO3Ra zTNf23@ZP8jfDm}l0!jipRWOk(0?o}*L@*LFH`o}qitJ1_L|J1V#{l4n$txlQTyRn_{2lQFz=S}|Y`|(RQ>Q#Lu~-DpP#bJB1X3ihq(BOYO3IV3bnWK_ zIHbHup<+k5s1|0>5@m?aG+58PWJo=@M3P)Cl4DO`iHT7}a!M)%R3^`SV8fH;gosL> z8ZNbnWdtO)>daIj2@IL#F(c*g`8tmw+N(RK%~?`?Up%?DUB%yHHHv9 z14z!Pmw=`@P-V&mm{5$yTo9^-S2OE|{#Rdk;o+ml^{jo@v+qh`jhV%KS=%NRfFMPO z@0_R4-EUw0;PKLje%tRxKwEkCz}Yb7G5DdovA_7KS57|r((6C?d~0Ht@TiqgTx--q zDJhrO_bCjO^WJzx4&$zmJ4CFi%6kulU|GCZKv9AMBA}`nupqKz08mi6a=iisL`6*n z02C3(AR$h97pg!-zf45(r5Aw&G-YahMUkS4CG%vepOYYhA`+#!qF+cRPulN%U=v|=QlieONTXW$eB43 zXfCSS?*>(!&Gs3Ih)U5Cizpn;k9Xa0_TVx}x!KImPEYoa_J*O?ssi%eMSA+F=k4CD 
z%gb}nA~B9*KtyMJiaF_Ob-7C=H%(JF#&GU;my*Nb-f`mz#fhqQRNNG80z>RoLdpYS zQn0AHzpS&`7=o0TB!zKXO7Y%1XWE)`>=6}F03>HgIT_XzH4-6EN+m|As-~$9nx;-E z_*t3Fml^o{awEC00m;!54W(agR_NKxY&O~+&aXT_`>fuUQo7w{7zS%CGk3d z!Tu{ReCBl+uN@y8A0OX7xV_$;AGG_WlwotW+wEp;y;@x^eZ^HMT5{;THH^AlUk=0I zy${<+L|SC(WCCQxDrvG2y_s(=dlYYsk)*9%5)!t3?e;R54}bdy*&3~fgX`~qH1rSO zJUu!(T3-z7%|;ck-MFrqZXDkZ<2DRoRxkJF%fIr|Kld%)^8RoC$hZ9B$JYH27mGy* zLsffg`Rwc@r39?Ep1JWWFMOgKH_n?+eDW8bzWMxne#>wDjo<$Ri ze(zJuy~RKCyMOpcfAl{$Fkh`!-}imr_pwiWoSBcVxjT1m-@A8j+vS+E4ux$M$3AuH zxjCHIuXk^X6z6QoxL&VCfhYLoB;)mGkK8rmpHu*Y(}DZkEQiA;G4LF;Xm9!vFxlmaZzhhyXfy;vtnH z#1_dRp|Pc|o4T$=caoJ*5s|4XB{3NQ1vQmtRZS&_D3-0YRc-yOnLWO^+-&JJO_ZRA|fh4hK+N^m=s1O3uvsdzG`BYl2TSK1&xXi$);$a z0GI+o%8l-Jg+^6T@-qW~lw%CB#7v=|$%-#7#KQtPTb2w;U){ca?Uk)9W2~8(Iix6! zTT{0AYr}V0gIO~wFew>i1`q)zDPLdb3xc?u?-dzi8Cy3WOD>93v58+_*ZH-A5^H6c zs85~Ut0E@AG)Rx;8~a@I)nbHN3xZluVg&<}me)+9-w=>WMw)i;5Q_j38B1@|DB7Sg`>;!rY_ zsPkqSa}w=Wn@z(N6D*qA+`RGf8?V-lW5=n4(~HN~=hw$D454?%8Un~tQUZdCa6OC~ zV%yZ9dbC)Oqc z--%6ruPfdi)PD9<4wnxv9zPoYRs}o{fBQz)1Ga&%RN(t zbWP9i>#dz)Q(~KTm5$0ad1m-B6PUk`S5!&KDtkp16cGV6KowDbo0{q{Arz#7hS`vb zXh4c2;i8t9!Bf$Y1c?zjrp)~I+z%+J1q!*V*jS{oAF+sIgm1F_&y1^>RflYlC5P73 zT}d8H`Z7)sh~=V16b!P z%Y~8@6k=y*l`AQyl-Au2x?Nq@`}_L>NkJ8%k3%m*kl5vsY9=HA5CADsij< zG{U_)I0_}7O#(_@gI!7A3+E~30;YkfCpOdGiYF3UWHHde8p8mr*;JFP4+*$UkUfMb zR>Sh|`33txJsLr9eC9RI5zWEuk6{LIxcE&+JxCEO%>DiU%F>_O+Ku=7af zd)cB_l3|G_)mzN`Ee8#vLd5LPO}RzJpPLVD1?8wG?z}Qi^5lWOtHuNOfpF4T(r|iu z+O#U2Mom{$I7-ig@clry?aL$%6k$VZvC>R}ja&@E`2!pB{CuOTx>eP6JMO}n0I>c& z>HW_{8ey%+*s%%)z1c(8-K3jOjH9Pi{g?ZyZ zJJ|86yFm1AM!VqVC;@Z!ME248n}y~k*$vmf^L5h&xXS{G=EUw@pQ0~qd(|wM%9f#C z!PsO{$@hJzt>Jx9Wj+)9p$0iI8b2g8>dvlB1p`df2aSOu^=?d2;b6u%)VF{C5lV}o zVq>A;&t7RvfLSn@kospv$Eb&uEj2qaYaJ=cB&m_5KTu7Lsz{Iyz(oKYWw#ZL0`?fFVaN!8iEeTFd6^V4F!@{;51#;?w)-6VMp0L?HKMz7&PV{H&&W}bQN@PmFs)KUnt)Xw z)bBnwqQtAoU-lu38Ou(f{z=K8KXL>?maNc{st2`Uduc~8VgUQo((dE;rK5x00GMLa zJWFoF%YCnqflqlb`OyQFKowNWl27_xm1|#!V>WV=^HUe=`216s)GPv96|8lUdkp!s 
z4j*HBTm(CDDq(-kN)+uG{xeVfaBq}>o|`;a{fE@L^i(@4cL&QC!TGBtA2yw#>-q$5 z7;aa?C^t+O5s$?7y$5n9)9I^I5#cnIk(_#lbo}-vb6OocV}^~DPD!7ANRILROC|MZ z4rRrJ>JZu~>*hfTW0giXO<7)hc^aqmR_>+E*9)KLa<63nIG--YwA*=xT+UhElpYt2 zR~|$(t@z%oiud5hLKcmWx)i^!q5R>D;a^}=lx%vO_)smb)wYgAI?~kCDnG`iC2`R1 z;9s9bz6=c|X3y8O>vNzPj7!lj_Mmkk?`CBD_AX?>?UEtkdMU7=jr`)5kT#3Ir9;|W zF7ay-Kg(CIEOvdnmzU$0Bt!oEGi*&0TMqiwC?<=waAV2O4?16v{rfZSjGx~?MP+8* z9=B6ga63&Ix^eVp&mrWxKd4gW{=>LN?^(Q`>%>*{L(+Zb$(dtn|(eTxMv5uPsxxa(jx7VXg+HTX`m1h>iy6Ue^ie2pYBd#dJh+6<6+nLImR+=_?&6 zQX-CRDvvJ5|5O!)5Bk$5K`2nYGik>pKvFt7z{4V2(hyf~ zUKcD?5;{%~7O!MmMO9y-Po%*e?S8j9z;tzhZ3;P}p)~3Lok<$LJtStca^^3b-_{8J zc_#C;@K>sh%Z)RJOO1YbeJOyIh<~&1$FJT?NAz5(IG&eVvk)acUGplnz@x7&=yG|t zAmGNM(PVAkmr}c|xlW(2Bo5!rdW#^mi|whl?VcfaTHC=14LID`3_5Sx`U zM92}Sw1fni$tpXm;y;nK(Ao4%o3X({wwFn{jfL5@Vj*cdN~C}{bE>o&7*SVJ)9|K> zL6joPL^zNBKAP%Y>00&R-_s3=4(hM8P%k6-&qy^XEB5YlRc@H_@jfYQR}xF&dCs0^Er5*n1ES<9L)jkDvY>Mw zInkdMlCTObK)Yg@&x_$KXA-3RRI7)>9G~;vqRMT`!Cy;I=+5WkIH9oVWxv*)<-i;B zj{PjdyM2b`6P)bLp+1XTxRlH|Jx|B6d4Z{=D|%ynTXxpl{8dKcNufuaJ?8QDNP^R< zH|f>r-ou3y12Z;(z7r1n@2HCYOcn0T;ib>JH7}kW2}g*=;B$N>o}FTL1MQbxL}OxN ztUyrDnS{7aXRIlWQkU2IrPU60cxs)^_i1eO`~5!m1C1xTdNWNmE3#Pe(4EqZ>NC8$ zKv@6Z`9IC->b_K6R4_;KZr`&gGklN~s$AciQTTMdJW&Y`&x$BP^>n7Wk6G#2+E`C! 
zsyInJb6V1kijCm5E21ihap86USfd=k5gk}t7We)hpzDxz6N z?Ll7f)h<@|Z=Ke{Xu#E<(@B}238{LL)2V-gVdnEb@U7r(Xa9yXrzftmcdMh8wybdu3zUD!a8-Hzl+v5ycVmOrQps7n#P4{qDwBP6R8b5}4 zjxE*gDEaL_?6?^_#LAsdhI3^4qm80Nv`kW}r!(-g`-h#*^V$Q#sfG{%ZWC#>cO3P63PqPOd6Cq2xS~zIX^?JyTa#S;r2u%w+odKyWUSq&ylw231Hk4YvMbM{3b2PMwT`B!-| z6z4#gT<53qb`Pfwk9k~TbZIBUz-D$H*8FOkhs!ilL}6;iAC%t@S+dF#ZB#Ozrw=C~ zxvLDJu_ehg^6wnR9`SKFa*rJzoiIVR^gV{=VwIrVPJ_gm)o#KL3$wx-X4KRS@72kt zKm3y5YpDr6sBrl27emK+FGJ{w(A|jKiPW%t6|=kJ^QF8@&y$nKUvIl(4&B;m)vdV-dCF1Yr1D{U z(%$U$x+GJCM1A4)@&iBn5moM`BtW5eq&y6Dmy^S1~az_!}hH57)cz z69;c{U3wIak5^NwIQ8%wfczJ{A{X%Q_xZU%>EZ9AHmoo1QA|-MY5`>Pr`H)2&8(*< zlGrmjoOn4jk6hN^9=Wjs4*t??Sdp5QlaQ+BdC}8n6z@LUi(F#r9~i4d(f|DvznL6i zqZHZxw=dJ|n>{vP@Jr&TJ$h6^EDaaqy)d0t0qb=fQXULwu+8CluDSes5Ae8zsyxh- zUxPLYRUNIVSbwBH^ShdnI;nkoe^=W)-*UF+XIgiJ5`A)&0$z?&iC-uD2lXT$ zON0wYev)nx&G-T07Zk=2ua=#CilJ>Dpt2cg-T=Ruw;$Y4dK9-cJS&10jTHB-`KgXN z^_WfCo;VhxybhYfXAfP%3y$!7 zMdVYN2M6ye{}x2BWu)3-vG2`hFxCbD8r=aNTZElD2U-Mdouv`$vQ$F-UruI7>l{`9kPd=Z0X=;ri3_WNX-ckoKY z8)iMyq6RJXxBhk7(43`dMhiF5LdoNk#dGF~h^qwG#gK#Lb(;S?TtqVP0HqTIVFPEi-Pmse(Tcd|;MFiM(P{hW?(=bsbSGcS)VUgN)t`Iv?R4tM3#h%qN!> zD@naH#XkHgIY_@>u;c14ireOIHV5AI*`JJPwl7Fu+hBb~f>mHEQ~URS8HQ@y3tgKE z`uR`0lEO|%l_TxHizKClI`p)Ks~F;o`0Vz{;NYC`*u{HUv1JR3J?o)p=iTSl0)146 zOQubhA)9elpq82iX9KN?NpG1xD$hsXK%Pd`Gb7B3na-q0qE@TDwal3BajNSJF_bJ% z_V7`VEKInAC9LmzFY$u$(4&ly3YUeLXZU!UJlcjmUGlHtPt+w; ze93IB2;E*xS;Y#f2|j02Q5E@JSl%V!#JBA(<#fgt&W^NYyO;W!nNu(Lz*g?kINMTt zDnDRYlcyPxG=RSa_>;eVPqE&AXUd0z#gGrC{@AW&eBosl-~DJ}cFE6;0h<{A{CV_E z&1+klV6^!9I6m!jlPTe>ozAs&g`d(XhliO6z&T6|=z7N21MdYDV_wzt02t$+dZ)6Q z*2l6Y(*Z~a#Z}-2DWW09zCjjXkzohE)V<0M1<2=|>4MHLI^ ziK{@Ex9n9Xs>SZx>A-8XioGR4hg#_!iqaP};%zc#!noDPtq~p1bK!3U%;=_s^Tkh^ zTbd25F{6GyiPPcUg5A8E_vU=!*E0iL5vht0rbe(9BJI1&XQT`EWNb_}p{& z-$g;d?OE^fs0eQ%C-LJ)fN;T_ES(we91a+_TC#bMUqJgx!QCxx{%Q>C>w4LPeA4MU zoskG5#Vid4hWh#8E2VU*Z~Cs|5@K31PEO$O$u=XUv=Q7$YefCBpb+h#gm%_xX&47) z`q4Nb(T2M_L+!*}w7@&WI zSJd7JYEp=HepzzOVpOX{d5mfG5lU>&UnJ9IRh?DP0y!{Y@VC5%=7;6srG@Y?RaOpg 
z_a{<1HG)_G550e--IK=D%&Tvjky;ZQR7qa3$G{|Ab@jK&f&wX?HRU~-R7kH0AOLYf zX2|y6#GN%K?Du6+>RRq@zXj~idfG->#%j}uWOP`dORM7KZC7)v|lItG3yYfSM)8L}@c2jP{QcR3iWYs3fAR z;HB)~b;O-6&Ckp>r#u&cd@MENwwAgY2R}WYXgKGcBO6Vn%Mj?W7yo&kNu@sy#Rk@Q zL`iOwqrA;4Q6gK0vOCC+-}`xq{xD2 zio1hMXN21o^;rq~cF}Fgo}l&RWefhxH#6J0_zJ?ku8Ae4e&N8C`H72I4#iIauW3Qa z^)Gndar|e})^v*z?utA)>0QXgXarNd3(NdOhQ*b}v5bs`^v!{9=oRRNrT+ywR={#> zL^98!9VzB|9PfaWg{$;WQ7I*UU0GZ{4K|&MUl8isi#Av9-jY7xtEBauvD}izH1Efl ztGn0d`g390Q+t!%xDckD8R{9>S>nD~s2!Q!)hPQ7}{@&hj8 z#g@w#`p$N{KX2}ySyXF}=h&Z?-J$OmtV0f6?gq|+uJAR|-I>Z=5WWn7yDT*Hnu_6K z2dsUZ>mS%OfBjmu8(>0^V=r33r>7pxwVkWmFr8q2x1$}}lKS_2gN?8$uKBcXxyE5Q zOK93e18P3QXrnI@DPGNr5axrEGAsVMS~kYx6&~y|_gjJRre0g8ok1L-BJ|4$N2zXX z<1~l{ub~(o1LcxrHe&v2eewrIyCbl4y}3^=?BU+4`wOh}@Qb-6nY+)aChhA0F;)cz9x(#2&HlE?L+6?fkP?OVXdgj!ot} zj9y@#SE9?daWcbYFU%|)>x1@pzT}Rg{NGZab&f#_Q8qvI$jJRKgts^1 zL+ug1hZMI@&SN(}?5G}Y@(|!%+ot}zi?s9ny82Ur+K22Aa-f%qV-kXU#g?S~fA<+5 zq`GVKCpC7C2KOeX3$cn2T2JL|2$Jna=ylYx;Y1%c^ zX0hjeGslF702(YZHN<;9(jd z-!E--a?}qQA2m~yW|tzxq#2qtk8`j}85+UkhU;A+fyWXZ(%-0q9}xshZw<@2Dz|%l z;$LfhX8NCFEGbh2d1_qcJK#$eSAVK!%aZaijl>&Zuij??;iP=_=4UA4yOJSibCJp0!_MvUl<&DZKiu z{UvF^H#)A@?fUbjY5pHOANP5`Cp}@+g;^PS<|i_}jeKm%zkk$5n`48ID1$c+&)Uz- zGpD2A=aJ9GRm)Ze&X!bU0rrYZ_b0@hMtiXTsbgt3IG z+{(gF*{H)cr;PK>bX4M=o~l|sm5#1j zMNbIUq`IncIWXj#UIMo|2=-c7TeDw()&OR2u*Uz=tqS7G+)v&MdaAn>y{eXzgrcY8 z6dqxGO#MN`>%^N1_*&JR5&%xt(5D;jGZyTN1SzA5hyjDjFj^omTEN69Q+z`Q3LsVU zWE?C;HL!6l#)PyC=th#3QElV`v+w~fM!RZ^2sLgC3yUp&0vbv5(__u!$NaxqNLOWqgi>ZqYIt4{sq1nsFj-+HCbB71;NnRo6NZ_@N$rz<3UC^x zYD1pKL;x;S=SYBErq|}BGE_A)MU`IA89@UaG=?hoh|xU~H3Ifpag{=ys%@P3X0+^{ zw-~uuBMiK)zhCaVVlpfLID~FyY#(nPkITJaliKEeNP|dLXYhe$ZG0A^xEpl8epoAg z+k+7$bx{ps6TFXZWlG=NrF zphlc2t|jYnbW!9Y6%ErARUgoUyH8xC&lGH6&1&NGBKx(L>AD~-CkOwXY66oW3 zfJ4yjAODz@>B1C%WB`XZ4HTVQnL53!$qM&))ktZn;u|M2frEgs-y+ZYiJak$J-=A| zZ!d8dx0KiYp`K}khIBR`ISMPB*ot}v9ksaJ2W+2gZjLXfe6~AlySl;q?lLSw-s3h- zF8=OJU(PjhIFmWhXw-&}8H`EbZYE>PQ0+B7*vG(L2w?AEX;)7jJ*P=m-H2D4W0yp= zJVQb#w0W$SuRgEu9$&P+#oqB*EbzpSh 
z(_n|{yh|Hh_4aoU-~1+7f9~Wm<4xYu)RX}&yJVqdj8^chnCmfXh5hjJsK**vdU2mZ zN_A>1J3?B`Fw^u%?jtEAq?8E2E8d1&wieRmE?4i-Mkb+9j`Tn%V+%zVxeoUlyeMDG z^dFU?-E%39Le}(5Pff=nm-ORzq7>#osCCWuG57m;7q&(%W`ExPx;^!9$vQiuCwW`z z@`~ROPMWSa^v>THmnIt@+@Uu@tl1`PUC?qdNg1-@_I4>K)cqX4O9)+;stJ}mT6VDX z+ibd_c@Y!~8c-&_EsJ&9;o)fNs-+liKm6tvvdpzj$Fy(Aspj843n`_m$K}sGn zW;&zG8IoK2dMR7a=J;!3q{n;D=O0^Q2^_cMZt(0;AF}uL?Th;cczHJ4z~yQe9ueI= zbHEgn<7(sK>vncody~KNHBUeRG`4oD2!xAvlFL$9KcjXy+ zb3XrMBFJCh;p51kpGI$LL_T})TW;x zuj0=2cTZ|azJYA~3GLI(6iK@UBB|uP>W%zxbhd(L;}UWq*I(T+hAx$tzgIy7$a3ZyY935>8sF4f?ta8A4aB9zMf1`FLz{SQ|b zO_+K8=SHH(oc-$yJ{6?Q0~a9w;tSY11;|N0Ry}>qt`0G_L9XS*ejTb*9AX42lLBbN z6?GfQqhY1w%rvq`l)S}R`m}TeN@KVy0Yxh27$;oOK|?1!;qG81;Jva0VP}R!-gWnK zY>4-FJdtNx%J07Av-5%t(DA0!4+uhU{8J-6a0T3<%i}`^eqp%84`2o3$glo)hds$0 zx)Gc>@k*w6%ZTWhdusDPmUA?rymp zT5GoJn8j2>=~i4ZC5eN}q$U?8o_T+v6B7EYSI42wU%HX}lROzrCdon3m%#CXD|T(R z{XHlZ2)cGkN{37H z<-_L>s7(ZW<7|oKxpj$1MQVWvF$8kxd65hyJ%uT1!T+hQYHL&V$ylaoQ+~&yGj{(T zP+IcF>Evjl_DbqTfH%jufoR}w328N|1OO+DU0GfJ{r`iKyPa~c0b<$)1sAW8Op?FF zBZBj00gwKZd>GT${KF|Nz5j28*t!36t}mZR;SY((H=skGJoOC3k;mOkcSX029fT{l z-~I&1v`XsR!&rRe+a9u)l6{W|Ec~U*;M~f8^j*E0p&1y_6 z(I_xA9AXR7Pz#GxB}_(xg@mqJH~~;zFqo`LuU4{HouDB)t5blnQZjbHRSX|C^tNa` z1@P$snI@)~%W}w)vK8hILjLTHNsexM>Gkla@1PHv#41`?*~a!yzX zB2&NF%EzM!e?XPD&Ge@X@w(b)KyJNe+hQSvl|CRVoj>y*!6CFlLdHI*Qp%!Z(RhLb0PK}JQonLaA z1E`|o*#rwWs$V^|`kO|m113t+^_q9l0wxADioG5pEVoN`45JO>hL(KIQm_}zQXez! 
z|DorT#$rtcgc5PX6NjNJpV?SR^bhH;Aa#Yi({kt4<2O@=?dSCjVxApbJ|H%%5umif z;htVPf-qbj-Ifvk4{a$PdZ#ik6&Qz}TNa46#Z)% z`rcu3Ur^`|Z}t)kS>#(riu(=Q5ZH5u-`v=ZHM{Iq-2VIhWAJ2mW@h&!;y$J8O1o!J z(DlEGacMjPeoxAw`H24`MoT8T)uQ%?QQvd95AMuU@ALPXX`|~Y+3m$~ zckv2PYp*xf%*tuicGLJ<=WTJ>1AGGc=g*&8YVa(~EGk9yoz*u9@Mx>wEj4em1zy6c z@@jXpIqy^djG4jz5A)#W&I2~F7R#Y@cs^0TPaVZ-~QF#lpZqr2z zg}(#5B~-aoVNyyS`wcB2>lZCD;vIhH#~tS@=C0%XSUoVcT}i{Dr8D-Vt84jUEOuib znPWJ-ogX@6csg^Ny>dPwcVj3Udgu0hn={=RGe3o({u0L0(embone8n?emr&^GguV?Y;Y;~#o^8)D zR>`S*h4M4rahnJ@0N%Bcs{{hp*EqT5K{&UoSW||n!o;`3s0eJ5*bpO&l)pAM*te@l zKD?KV%FivR_;YlmL&{dR(dOw~{<{iAdU#k-R%4DmJ-n?PH^XiP(XdxzZ~v_c$AX&; z9&ZRbp>8%mut`x^foxVNWr(02n1*~zM)1B27?}!s0DbXd<^kj_V-ptVhr4ncp?@3P z{PEtOszpD!J#$~>dqnXjuRp31|1buy5^OW;%5wu=t3wsHrNMv1!PW$p9wej5HW2=Z zZ1LCgAMF9k>;M8wSV+l@t$nQ_LRg1c&uc>Z5-}y@Zqu(*dT9=~6<#)*lvQ(H8{;ZXQGYOERs~C39QXy08g> z>9Orxr3Wrj(v)D$oN zdOCE+d&&v*2OeK`KD#$nI51$#gQ4h$WQ0faVwOP>tlS*>5hS_jNenv))Z-I}@}Pox za6V3we?etB-Lf(R$ag1oFhf5eAd0U2QjWqdD!gK)hB>XxQ)SFdC&?>N=s& zCdL^UKU3>OKl8IrZPTa~#THKtAWsf@ilbfW!dIVn=%lFrc}x-9;4!CM)zI4X_VUdY z$*+ipc_VFWz=+P*ua8+mH`YogHct$99YlM@N|j+7zZ+|99FnPKAT1i@p-zE+i$w2K za;p5zzsIga`r%og0&V+iCjJATAAjT@vkcI_UF$fGV7dISDPhI$=2zveNFCoQw{r4c zmDVH`7Fkn|M-!1BwVg>g;P?t7#lKcaoy5Ar0i7fdlKy+-HA3#y$i_g#AFFEkq=<}| zRzqhB{IQGh9v8h@VK`jgIZrQ*5LK)zuO<&8f;Ga$^@<`0C7Z#Z5r6=27oG{@yyPv_ zm(qsw@FWk@-RCFm9D6S%)&~$|Ey?xNu_4#@1jvKep6lELGCFzwD3(By7Q@w{M1|mF zp>h*@n`->KfmRJ%=a~I*bNqsb!R7DoVM1l<*vVxuDxK&xkCI-L5eLK&+0dP6gYX_5 zagJBnI(a8W-t8=EV-NcX3IB{i;iq@*6zU_(qbNx=aB)jb)?720z8+$FeqO9E(>V-o zE6GusiigPO{iE*#0kb4;G`Oua-SSE}?>|d=zSfW$p84#>^jm=r7`xxMN2K8LSX7BV zvi~HVb-kJ!|B&M0fPtQuQ6+uZ;uKh_<%U)~&yd@AV{Pr5pR{=UwD4%l!ba*N`(eDX z>4g&Fs4qUpP=smAh=^UHWHngn_7-mfIp}U$xnc=Ayk)um_c!$H;m%6X%HKJMi5Kk` zhuogco4*XNwiy0UUgItG{s;KYpsB)a?nia&4#P$pwer>0dq}OBGUg;L;9qZ>xv&j0 zL9qV}l>*zFm#oyUt?g~7jObv;UO2)|c;|p2$8%+bjkxb-(cxZMvw&~oSsQ9A-ARq@9@Qs%pfj~)+Ilo_=}X@ihH7VanZ#VF zz_G|w0SADiLws#Bt|-;ES|QQuH-3RLf!Qw!shbu=Lr(OCGLi4qD@yREMo>AT5tB+C 
z17v(68*u#aMf*UFW&y5D&R61XTBbjC>EH#WH4j#t?9Bq^>bPy8f2RI@5;GFT1~Kj) zVJ%0E3BX^OBiu$`*z*?y4A<}JKy_dr>%x#Ab?)*vSp*M0zOw`tZV>{w%9Y9ID=4fo zHHOf=7QDt!#~(8S5Sta7Is-eGqf##pqFZY4@5AGZYwMWmaHk% zAG!f?vGq@Qo9lW)M;#d?0LM^w?R$I3a=Cidan+0;N2%QXEx0)J4Ozv8jJnq5 zW{}`)_u;`HEX2P2Vz&R*M@sH;42RFly>h#g(_RW*!Cm|c-RaGc70MAI-NJjqHuB1F z&iyJYN9K2qA2j+zKl!&=^k+VkYOmQnJw3%QK`DdQqO{4o-&CJ1_vb=wF^-0nu%V*2 zZ4hL|JE7h$Ts^|m-ut<6kS$#gGJv36*~i0wqW1Pti>W|~$AZLTFe|8KTnU1xdx7C@Xxgm9(BNw+)npLP?|HaC)etT_VT&_vV|HMk$?m$<(n=EAY z)%6Pl>c(It+wTU@M=y0BOFIR{xczbW zJ(G1PXd5r|^zsr8^I}i5?S#Ie>WT!=Wf47t6-l$7CY)X%F!_nZakUOqh%Z zOnaN3dG9>Fzx|k{>g>~sBxSat-G0$F+u6;h>i^id62sHqaI$#H0ltURoBr;1FBi*T zi|j2vn)tHddN%EZTi*k}%|_yUy9I=`CNS01uh>8^dHe*-9gze}@^3VGyvK4on{YQ< zS=&A|z1-~SFymcXiOD!C^8jl+7&JEZeEX3YukWK(=xM1+e4ES34AXBtL%vtm=>ZMa zHTq<8-V2(ov7M}8`mWxs$LSji(mofHbDVZK8J;(*1sZYI(K_jJku&r}6-VRFm@G^Yl61vCq#Q+G48UQ&FaB117Q@iv!+m5^g&wOYE45_dGFHvWJ6d*tT_A~iV z$NaOghpn9tuRWDGBjnZe<D+#PM6 zCwl)`Z2t^wTr|oZA<|DHc9G#rziHZAz8wB~Cvbopr21k`MG_UlJQ3w)BYW`bUE})Z zrV6gpH^>%0fxSga)wKWnn6cYC83SQ-FgKRec<^wm;f<+{IMZd8U=HNIYf0)*%S(m9 zA!k%EwHCMX6mMbQp(e-3@_RyIqM^gRVsb(?=$2h3f@Vr$=1J0nDbP#BBtK8o^60^v8jH+{V&cD< zgyDEz;u|28y{rVKJ}I7|hK>~}huXYg3lKGGE-p7i2!2{$4B-W1xMB&Zg@g=YR=HYa zj#@gjnHO1a_3cCY13Qd(p+7*~BbS=bLtPIGny)HVouIIIptWZEueWXhKo~#`s-O=L z*PA*v&14axbQrnfwiyZPh=fC5Li35|&0JcRCy36LQSXU*<~+m7(fG{c)`SURI#`x9 z!YX_4#kSe2uE3QuDJ6tz@QTzs1cdR2$!^*5RJ@(%`g~)CWk@KftcVHL8E0hwv4P2F zI6AiNWMan}DB5v15b}%TBB5P3$cijOte*=-p%1QEn8a$?{32bS8#PDXP0X^wSM8Q3)VVPOqH|7RpIs!jb zj(gqmd;sJh`w^nb%ZuUUaCNw+eH3+hAPCzGOL8WMxNoZ)w%cnnY^ zjr^4Nlt0&d-{Y6wqH|MOSy>#0!-mQ9oHp*~V`gUkvHbiel))b9z@3}316et>>P8gE z1`(2ttjtd7E#@B9g%(rdO*>sWe6o<$Kyob^CHZ(V6C3zP?(wJb!|N)mmjeQ|<)}njWubel5oa_6Gv0Vo z%>=C?SFH7xD9{+%=bk@MKZ4;&ObvQvH#Lhq3L&EE@xoSoN=VQU#9=+p{{01OhP-JH zz7o~K0|^gzE|>Em2Oek%oRmHPcRI|h3tsB+YsfzHQrSFNf$Yl>zSkX>RZn_qe(NB2 z&QgeBSn8EK-_e%wySyCz>~_7@t>NH@o?5xCck`{hxpHg0O-cw}w>$F%+KI^;vn+$L ze|%(yzJ~hE3Tu(!xxiDm;H?^yS>|*Wzx`6JH7PM!zvIit64xK_Uay0$JeG^$(A(|X 
zt}=cMlh)3SuSI(xu6yd;k6x>JZ#Fe`J*zXhqa#DP^Ln@QKc=-47ydIcbg>Bqt|+9W z#N4MY7V`x+dOKV*cGg5?7t?n86R%_O+a2t_PoFiL=UU*g(^ql4yb`?gbp6$BpXHrf zjz?(Ya+!NzyM%v$$92?;JL{P;r`>cy_W9Z4x#TzgO^07 zO@MPgdF~X(Su@NWxNGKp|x;H>$^lH{FDYnoer`wP7ldNcFou^>E_~IKI9SP25OM1RGvJa zan)W>7~VAWS?4g*p0OzH9sM1sZDI@D22I^B_?%%&Rh#?%IJL-5G%BZw3|t%wB}avi zn0@`$jDp+rr-*@n;P?E6vBef|w*fzWu=Q2_zk+ZbSC&UtA^j68m$+WC+PteIDq7a( z0NdBmwKGq`Yx`%e2|E-HhllXc!gmFMF)%vNmc(@GPhqWNt%uT;|8jlnJ^4+IVe8^Q z<09^eEmn6KagT#?#OB6{BkouRa-I1jg&C6O%KO>mA2&(1=x>gC>1(eTnPX~rK3VJ^ zwlY5h%*h0390#yi;82t&5P9ikEP0N{oe494Y)d%SO_HyKxqr#R&5!p#j>u5;Wvru0Q<1dh!jzBN#%o34E7aMh=&4A`dHrkOen2B)ObKWcn ziG5AwJmc_)o$;8jO6BnzsgsM7tH)Cl5R|3q2x5MxZc;Gk))gfJ`>@CO zu^E6wz%jZti5hAWn}bHsZNXw&LI6Q`pZGQ{q$HDlhR(DO@ShnS$#+es%nF(Uo$qz_ z4oPHD-+J+|DN>0Zs6;WGjpYH_{f#rv*$07WO&++Y z`X@DL>Q`MV)&V*Pu2uK*)jzBVORV?$f&ILw7TQ8IL`-t~)lDg&WrO%xvD4Hc(XU6Q zV-hKZf_ATkSTZju<3;c_Mr)@V)nwgqeQqbcBrp3x-4lgqZlPbw?*B%+%cdXQUUr>@ z?)*-1x!>{i64UFonFMDLxXEni(tF*0__b5uB73>fV4Ruz z)`Q5r&JsIYy`Fw~9D($Aalg3jy~SPe4NJ}JV#N~peRl?TqA0hcQxOZ6@@s;T6|Sas z425Q}son@NUUw^J<5VJe@kmKB->=`@xp}7I-JfVP$!RfUY$M(xtf)iG9AgthI03ktrZ|-Y?xxQQ?c|2%}}H6<6XniL>Oy@;}GO>kdc2@fMA0t zTa7?%J@<>y{nOzzsDfidPvvt8U~+>3AVqiR2a}@wP;ultAg#Oybpf!GRq2$Q8j@;f zMNZRwjNEM21r3;)7mlWJtVnFna-~2xEgtY?>SohL&9SC!dKvhlP<3{{a)rhOCpKav z21px5gosp0xnNTp%x#7&eYrwKY}Nql0AJ`;2J=tceiTXTqg7TfoR(?(C0!JOj&P~C z9JUy24V!fhG^XussRq8K7*YFiw&atGWV$crqRjRTe~rErdNrX>JFVUQ=a2-Lm!EHd znYXO&43A3$Ywy|$ozcshJH|GZvq?s|2?=`Q01GIU)@MIt--xIfyd zyZ5|8<$7}lo3OWg|J2n8g}Vw0X`Wj!<&cvJ@+&|XSD%{cK;UPxTa~e#f*dCLJw$-H zr}%xfkwX$Wj4HL3utHv+V_U-LfpYIEZ2JBP(Xv^7KbkNFh9;zF)D|V1K{>CQRVl*-;&;+BEOb<&GLtyY3x&Wgyf!{VV-D{GS4E-ea!ol z^@KM-=nY{XRk!?ugV5D-D;I;^#Cxw~)OgL&%TklSU4B_>4&=gYUd6t7m_hUEM);Er z3})BHt3TY}On+6PJA+s458^laBE%`>T!c8E1tu|imxX-hf>J+>$v$7ku2tsu*_;qql zCWlCr%3w18;cDJxWrG+MqtvK zkO%?Kk)OQj-saj9!df92x-fmQ8gpMqkQ!2e!-`YnCBZBRHg$cFir#XDf1mf=)vif= z&E){2gXIdrp~rSjIeqm4n4SQ7CEB5DeI&!3=7%P^wse1dbiD< zZ>*=zi6=SDxbC%32-TpJcTmMVITPl){PvPkL_xNKwwjEw6~+CxOOc6%%R&3kvN3#c#I 
z-4g}c4z@B)ThDsjuKSuL-X5dxUYfODbm|4)Hv~V}AI!`y-~QQjZ8PO@*_)e8U5^@| zz`qe?{C#lK$r)46Gyi(!;vY{TC^UM;Tk^?b8fo-KUS%0gnY*b){zdy}rT_`7FK1og zTmfBHxQU8~r+LHq%W*1HBA?Q=ztGl_c@-&!p_fB!?yBT*F2yO~r%#`z z+wAm~pa86UDOvK!$Vf3a{+k@F7?RXJi{rXQuguX=L=Y`uorOHxv^R}M>o+m+3aaio zVdT6Hu{r~>MGiX3yU~6z!g2Y0X{aBLQ137eA7N()EfL*`B_OpkY<39YUaC6!RBSAk zv))oAS)R%RhKAFx#3K?r9^}iZ9#y*qoqN5NT$*R-B!6OB7g?6|!yuf^NSw$BS*`N2 z!m4Wo{;a4j4Y!^EN>VI?$39Yp%EQ)wjQm7M-SPCXItj$!5EWo5`$13+TjzP(UM6y% zS?%!nfN@!4hv3 zI+u?-26%l6f!Le6U%m-NNZyHKhm@Gu+tcmjfy2r>2D4RQLV4hqxahmHV*Ef$L@AkU z`C`M1Fe=z>^gpS^SLaUq^`9vjM=!b}o_XGV>OKGO;xDn+)Q6o)(HpD6H5S)9(WY|$ zX1$lWE`(#;H)MygO+h7$SBbaB@)}(MI@_YRf9w{U{C4kIPOJ4zX7`4+fa$2DZ`AQe zT0%^5Ba$KDoU8Je%X-|Bsp)d3_xc(zHQ#hsa^XnhVd$C5AQho$vHC@GZDuc+JK8Jp>le~7CVhTtRyangd`oG{Po%V9S4n*Dv4Frvln)~wZ)MTFt_bF?7Oi92;^zrtg^^w zM|C2+nZ2H?c3S*FhUpQLzEy6kt(6u*iKUDt8>RvtWb} zVI^a|)_wwujT@jw2*;slr;wjz(An>=YqaA>n%r^wA(#<9AhJTO%1kUpQ1|SajQJKs zPV-l6fAQ`M;kf8^u?*wUrzLt<(xX!4%TWR8Z`1hr_{tm{hhwk?UG0pYVpN0eIPFL# zcw#Q|whP7*5s~0*ciLXnqj4en@Do=8%);V@VQ#I`gWbb;Ig}0K`|y@J7sffda+1U%KUjSTZWs3f{gmOUEaD(4iL9+&uuMRaghT}K#@g6gk4yVq@Y~XF6P;%qogRl@T<@EA0R-6n;%92H z`?INDmp~SYZx6MMkt z9!%M77z>Q3?H9XDV}j2_tQBI(g&cXF>Ud}Wq?MmIVf^N~zSe)mbs!nE5x52EvEruj zrk!g7E(ylu3I=J~Vt1oQ%1$S|P`<@apBCBkF=F#u*FMc-20&*;YmGm^W@yT-sWHTP zG8G+dV<4`l1V$QFqIKJgq`SWT_C~geNs5oaYjrDq8f!9K7Mps?i#fNN4tsl@2ENrS z#;72XLmEB)c5f$sF*!8M|86e=nOBd|v|lW1DCK{qjg*c5{I`|o6)5jNy^*GzvYnl& za__aQZfX_gI4)C%_l26)Pe|kty%Oq=K-BV}FZ0gB^&58dF4AXkM9pqP>iXDzv8(ai z6D}4Kxb{}ujP5`hmzud4nQ@!DRd8Lxy7l~Mt|J!3&pe^fVs62r?>t5?MTIqX|HGemT$S}*2|KKh2~z2Ei= zBPC5$<|g^N;)61zwyO^7tR158f4NkH$#7*bSv2DvB-eUkOv^m4_GWJt{Q2n%w|QVQ zXw>rkEg8_fOVv>ayoBVx{mqpM&RG_ppDwQS*bkNW4`rG1>f_l+92-^d$Wf>xuW}g zrfpZekE8a4+5RY`-1Fb+Nxz+ubj_@C1Q^l8HPx2Czj}+unasM)8*bHCsZQ|>$Eb^H zj1a6myw9bkW{`kOzsk&A`su5I>sBLF_f^#o@kWRKOB8w zLkxptpx64sgk{}wo|&r;N1wv4pw1NFW>DB*Ln4>8_&eSPXxzi5n4`8<;kt4s0D1C0 zC+Vl2l5i;-CaDI;^%2kPA{wh=@)VM7&8jYf8g@}A)Ua;^ zCrLK09rT5mn81Lsv9YzXsN1p^de4Ur(H`_h3DH)=G7+}90Bx+vo7|ow?CnPLEk>Yx 
zAS$Mb9RLrF)sfY=o#s_nW>$BA8pAY2%Lr| zd2^aR6W}kK!_FT8)u&eyyfLBC=)tkj7~MnYJ4e!M397yO^$@w zl8=6LR!Q&cUghmpqO0lEBfFIo7n^?tM3SFoF~S^|*X=j{QpA^)cXFny zxItX^N5|bKk1h;63b4Qb=9B>;z#&OwSWJ#8K%P-;Ou@aq<)3y$Uk=URNFB%M>z$-yEEt<)+rRcDfL+lzb{^YxjG1y>ffxw5PYb z{>U@1q9D)ZI3|X}!+?CR=^!%}mqkrJ(}s1zrY%~Q$lOTRUzAQuZo_4^t=>8{cG&MA zYUq)LL56}Ro95_Od&;&wMV53M2Lj)R`#ZX*C_o*%9nw}96LnpB8g+TPvpev2qA_gj z2v7HAbAoHWaXzp)JaW3&k61#a<#aUF`Orq5zj^q$UhdLXRY^I}H;OYrlgSwyW9y}3 zud_&M6= z;1y7tQRNO%uH29qySEBTY-9NKNdzce{MQP~^_E;u{KiX9=L{%&aX^pIw&GY>9QCy| zczQnZzEO{5Kr-Z5JV_nb+CM7G&2n9r$~??C&X}62V^qTjnt|f`Vh>s$iYgEHlPThT z{hO8D>JebZJjw<`b&O;=1fmrsPV?l~5yFscqbQwQ@tYq8YRs zr7F!;u-8;}T$*HVkT$Q82D6cr5?AM{o^?X|b>XCJ%jcCTdzxj6q&FI-+3kIXSAwb6 zt9XC5v6%9y#n0TK%cNglAhfRr-VlCLpl++<<`zsR=-)_-*FyJU4@YzViSGHHg^cLd zr%C0?E3tl>?vm_zYQ^6DU-10Dy$C2J(qZDP;*a(WtNGkq!+I)*M#28U!OI~`?n!G+ zx_oqZRgpC2fB;eJ2@ajald0n%9B>oXY@~W`tkkkInJM}17^RU`Xw{OgCV$L zX9V>8MP}c6AFO^ByMM5mds1$Ez`|=uUp#gLxR@?eJN?hl zBbkW|xeJoC{BLF-0}Ty@-Jgu2?&}Y%X!%>u7aDQ?10Q6SiNO~eQ6tmdBM$R5wh^QA ztq1-j4n~*`w<>Qm?@T4vfv|bBBP2^-xQ-c@NS@dkB!iU=kJQPqiL1-pAP{Fm|IPD8 z%M!wE6s$E9vUSq?ZHW{#Hy|z!l_+k4w#7Z2Mq}Yo=NLQ@D(^kYGri^OzYepaP4yX6 zNE)KdniR0;jBiY_Be!b~dwBQ&fe_WEG0z$(-V?#f*#P*pmA>1*mTmZgjTOVq4S})h zMmW&MGuJ=IJCKJ&N6;oS!EJu7%J-Yc?Odx3$3%l82)}182qi-E)JhrVo%hE_M-ARp?gbn(9U+khpnx(#wa^tK7M|= zQM6&9aDOyAaj!iOmK>78y3ka7ER>tD-fhNZv8HmZUSKvbnmOw2@#lgEBkzgxUaR8M zor0^KhLe|ErISx@8yT;TGTo%EJ_Vne`9Hjry5Hds-r~Rg`EY{iJZ;bp4*H^DS-bM~ z$||-w@!{{vW$eQVBTzsVD}7@}*lbsxY1H+3Vff8a1Mn-m3WARZ8gpOnIxT0KT`d@O z9jpg$d$rwZKTHST4v0$!Zyiu=YALq8H}U^Uj0YJ$8MDu;o@Ik~m{hs`?kolO{tDAl z40`MLLUe>5)p57kGjb5Tq7k^ovC?|`cdE;?%c}OXv^cy0v$?c*;kr`OI$}OX^M!>W z2BjLx9Lk$RK{Hmzm8f~bH%(GnaW?SsIoAEdM zbtW)6jjb^2jYPun#%v6*3R!sSab8t%Wdf7&hwNng7UQxsIWX$470}0OAp{jeTcCEo zXI1jVr)`zIz)P#*dd09VOc*8$1glTO-1D?JW4=>kTkEAWE7Zd68Jg@Oyc;+dzq+JU zfxH`(H1E<_Y#6mhi^I(RRVUt=SDScF$X^Z*Ee>S0x;Be3K53Ky?;DELs-M7y@lh zi3m$hvxNWtG{g!%B}{WOm7v|^N_Z`Cc^Y_3CT2dq$1g&GyGQXvuD&Q|qGI4Zv0g<~Z{M#7 
zT(T)6#@Brxd~1VGiK~K-o0laz@Bbd$E+1TWlD9dHeLqkSqjYpF=*>Mp{j-_#aJHG- z^@ibL8s%|$k(I&|5h3x@Ku1}|2 zbhrX#jW)JTxDZPf*$^tkY^|Bu!GXpUP7cP!%mgt1F96JrA{QgfZ|7G$CVqRRMpo2M z(@bW0Gj4hX+K&cyv6%{i=p4s)Ju`nc6yX&3!%Fahjjq^>7sae8^;B3Swo;PfEX5Yo z=YOZ))paSGs5rzFt+>2T%H9Rk>rT(xjTw)+J?)vGU1Gl414gbs+YdlTJE_#r1v!g0ukg%HE|C^w+$u**)##drHXR8 za#pFVy*fRu-1ss9^hHf9-*?DqVu(9xC7z1fPrgY``o1yjFIv-abSle{sgLiZG6p0L zD?bNP9OG;j;TykPcarDtc$gFVn3oprlA%WZscpn0<+C}TGfCb*u0o-ldVOI3{WDKKgCK;7QiQ;sYvl0Fx>4JhmZdYM$gBv zcR2FqufMeoyA>uvx1@oPRWK9)TCD_=kwOMpQxid47($RWAxq50*e3dgV$;mA@w5u! z(0Ktwf(1^CDFzpFu_0cM0RgX1S_#ab4Xp@^Y2`s%e$6gu8gKy>)p%#n?oXr$6&uqB zr8rI2K?p%C+~yZxvo4vt!UuVvT@0*A*Q@MRpffEcF|-{pR7e)94Y}@^w+W>J?odO6 zb(XdI0*P8l#k8f`fS{44DwmB~U$-@r4OS9$63vHrnVB>>TCG(bP!VxYc@+@2!uH>d zun->*bA936H`3&&8top#6(xq@`k0reV*?+2#^MRa2LXV_NcX@m06-N0??^m^7f^v& z%sXjC-arK~IAf-^*rQB4*?ZRpS)?y8`6Ax{IS6Iw^x=oUbh$tYYm6oZMe>i3jY*_C;KhI>r_SWr(~{UhQN6qM{`Wq?xa7}y z-!vMI?K>dy%7)T5-*reykDsj2+G>&fQ01X9-id<+J;^}Cz#KC$sqzPk=2)t-TElR= zX(qvoC@LERkK#Pf`Cr`dQUngV1*j<0NjYDzSvh0+Y|^+S;4Df9VJq&poOTnPp{$%{ zcn_EOMk0ruZQG`W_IpB-WAmJKm>UNC7hoB;G`$abU4Vp)b36rcathK|lPHlg51h7} z5n+Xti9w6K)e&fZeqe>*w$v)pg+h7AaDtSrpO=umF_|97nn-|;+2GLvC)ji!!n!KA z(q6$@T0M=mqylwxdwbqUceC43vn%y(cRi6&x)(6_ALTI`xIj>er@YwVxsd7*l5 z9{5#%hPj_(Fp=Na2v?u+UuMnUsGghTq53HkU(@LU-q^hL9*bAz+P?%gZ!T6$t91)* zH#ma-+67_L%*n*n9r(Y>AZ*ar95Z0G(<3*)l{qISK0lvX3-u{~uSybT?CR^X_$0L; za)MU7xq6feexMCTQ^0Zcs3S(53(nd`ue@zt6UX3!vL+M;3d}bG>OrdTbOjM&BBqkc zia@8>Vx^RL7<0b^{cyc84<&)0eaOV{cbADUcG`Y42 zmKS9EVDt}iEd1`rg69fFh^5rN>Wd9}{E>?(yP95pb5FL$_{1UjVlaX?Dot2%$(HY8 zWLoQchUI;EKn^ei1mj(%r#^;k#iv=NdL}S4z3#zV?I(EvjvqP_J(Fk<=u6wx!o%L) z+cmQX$zC&!$F=RJ!%^`oAO9^a<*GlqNVM#f=-R%$E=|8$IMDYBezgY|7yt4dA#sA_?RR3xYF!?xbndNij47YAT^atdjUMDJ6-WbpXYg7ycHhE z$vM(F{%8t^OAyNZf0mD)XcdVM7U`Qu%P+MX_2d)uV<@blfMlcfy{I<<2=N6P9 z)tPs1_wptk-OmTS_OG!{LPy@NHPR@A&hR=kD9JxqQy)-J-K^d7Eqikg;HYp}Dr}<$ zmY?m%?`&W0T&+eAci z_;t1nYw2pLwSW>6p<_9o1Jzqgwos2_uGfUIf}MA#7eQZIO(v%pj`y_)i3|D0vX5y% 
zS}NWq;uwH|({;9tv~f_=v1Zak&MaiHI_b(jiiso&g}HVjq-G~5JVoeXl7a{q`SI_BPPVjPmHQc2B39AX2Pxal6w}uxC;mMa6K_kZz5JC}^h5#I53fn2!b_qLoEbp;-!K9s`8Y zOgNk;ZxP&f9@rtWL}akgqC$gtPPv&;yxd~PVHD3R_89HZ%9@E`S~_jCLGo*D+pSi< z+%jcY3^*AU>8TD)N<~=$-UI_uO(3A0FRr=bloE0Q#vAXl|F*zXRC2rgVHC3_*KjHr zY?(79)EeAr!!i^?2$ggG!1=24U9nv$u3j1^fdDhL;`1^cS>;fWvLYUnt)MR32e1;h z8bXKG%K0dMphcrbG4-#UxF%b|K*Yi-23W^eoN zvb7z`Y>D~e3g|^S;i3GIouz+19`BpYaPpgK zQYgTChP-~c^c=7+NZC{B>07DNOA5UQg9PY26lJt$(oSmVE`Zp=`8gpSqhd~!7X)8z zB~j0%m_SuVNhO*){DfSB>AeEI0+WprAQQQ~N5Cshk+hGng=j&L+bC2K3CtFSY9027 zFaP9py+uci>JV@H)IW5^j*aULLy=NDA=s^$Oor)%6;znZksSzUbmL|4$2H<#&91ZL zHYI}nkenIGx0e;GK8=rcj2PBidCD!YhwLzev2;#G#{6U7b|eI>OELlU;?Fnn&cWB) z)(y&BElMNx`NCWD$ywFeLtc86HYQ_Z9Ru?fSqfYEAgB2pD2Dn|S<)8u8YIQt$45ix zL|7dpa8YTeyzCD;Tpr&+qP9Ck2Y|*<#FX$r(pFT2fWaRWH(^5~J$u1$Nm49I80}i% z+;O2`u>8$k-g1Gp#e4m5b=BN|o&@VNxI18##tQtVh6N3`qezx0P!ZUz1C0v{1_mi^ z*pbLbg94^^KFX_H4SyAXSK#qP%0{j9FZr!&b1Y1eak!pmWTD<-i|eh9fiN%W*J|Dv zIbR39@U8|C;;43eGKzH4^)w2mN{F^3DSeYlhpOSvCVJ6~gEslnNz^=oUQBdNF zU)Gocyr@iELUhuRDChebjk}6*9o4iIsHlq921xWttzB6U2r^n7;uL=;CN-Yu?;>`n-!qp8+y{4=AzL_qt;z~fo$@y6xb7z_0iqZ+-q zuggX?#?!iIi;0YiV>ctyVhLqlFy+I~JdW(*y9C{70I1qZl`CiKt1wg^8p1xniHZ*4 zPKdLyBBH?=rt)jM$KK-j+HBBCPsz`iXoylQre=XbG}Td1!lVXUQaMG0jACy>c9tia z6UFsRkVebe^3_bpI4-_QiZBg@E$B6bHC=s<0xVvTCp*_CHP~OtKTXVVoX<#9(3Rfu z$78}dVWMPi4x?SzHE<*WI_$8Hjy4~~)~D2sc%7UT@Wh6^@%I030iLlFOPEC)YLVwP_WzoB85hhlE8 z$=Ug(*VLZOu`p3eS_GRz)}IAF4>aCqE)|5i)3v*pv~-v6jjq8=P#&moH+<>Urg&uGk3#FXfHG{)s&AY`8WTC^tvVx9d&{%=w;#v$H-3DLRS>t> z(0^m$`aCK~Vy79ISI=AG__BXG*E_C3F~vRaNY^_E~I4BB&UZUh(KYX zTO0IpH6BSyVM97)tmpsar0NQ~Zqjq7d_Yylb0_aK;veUl0^X0`K{sx%m+ z1HA(3XZdnbS(d7QpFNk+Z~g8so#}~|%#L=h-fKl0I`NE6%t5h>K?16u{&NT-pwRiz z02lp%Y=u=AI%PkV1&S96jIr&iL~PJ8p5fBm%w#5|XHYPxf(fLY!#hm~Ld}y>%%Dhi zM=ARA02s5P)eq_?%xG6x1h$-gnocQiaTF8BtQu`$>hSKVehmBNV%X)u_;ixU7lQ1VGbAworEIbM9HBI zBBEb=;Nc$1*%MO64JX7g_x?awLtx=QePzk&L%Ezg^r?B}j^UWA&zLy>{M_jp=O64C z=W2_xd8j1eqmWTsN2pa6h5Z)n;qk``z2^^CR@QPzkdH`}t+|ALQdpGx|o 
zg`?p5!^&M-FmcFfTe4T6T-hH|rDi(czSSsI#Fu|>?R^A?V zr~T&N{$=a&-JY9i>A_O0*$W1buVal7@O|xe9?Fq25BcV!psyLpJ#f2fZ=28X@qk_G z$WA(h^>%BMOD-xuw0Q>G18i|q66ISC6S{t8r9moU8 zc)+?oN|qdHHU6I}4^LVP{@+E{96>Y3Sw$XfHFmuwH~OWOv#y7iou_VrgEw6#6^1JF zVmIHqjF&njPdSR*8f%478rseA7p3~eZ@+>tSB!UoI#R;d`IhcjGx^o+^T*spta z3fb$ug8cG-P#A2nyxBUsn%DH*Y_kL0ahm#Nf&f*4wA!erE2giC{Jl;VIrAL z>OjzV%8fD zQo-Ql57n~S4PDk!JJE6633meMa-@p(p+P^*tH@q4$%CYeC0ph;Dhyr&Bl$9IOkkc1d$J{CLA)ft^$j^*3yCPWo{CX~nw1YIlaX@A( zI192|jd3Wb^FLu(LT9zyM;SKaYO?)4fnVdFxuoO7eQs(Safq8iMpXO%jhp&(mmd9Z z06#nyd2>1eEc>|?cAhVZy@O7<4N_Z~2nTMPSPbRFeV;ka?X3CFQcU9ait}~72krap zg(ihCDRr2)+LWp`{WU(DI6&^*ap%dS?Zwpu&H1$X=1XXm^}lm#qg!aA?1mwrOk31e z(*JZv@IY0bB}unwx5A>T8-xdxy^v6Jv-U6DvvWsH^0Di7JvZXk2&(G~%QWA5(4uD;vxoUQ+eL&*_V{ASZk-Rk~BB~Fa10JE=t2}WvSCBy#}JvV~2XGx;r3g-*q;AsV{jq$wtgnZLLmZ`_7_NfI!_bB{EKtyfbcai$e zF}U%!5hFc)X<0Rue@$V*eRe^+_pagS4*H{bplINAdGJq&hF8f`+M$C*v(CTG03MokMS;iqXX^X2=Jg1Ea@s4Ho(-+EYpK4UoQ$Nqp zCoQtz^T;UD=rvdBnZ%h_#0m-KR5AmBBnnBfX>jnu39}k-#-6i*UgLP4$(YOBVZJ

Glws%mkB|VGMDZ!7{z%9BL&3GiZ)0c5fUIu+#QX0TVi50?jZ1Hc ziZ4|^om;2p;NRIdq}6lcMp@*1ZEHLEM)@s{vgfO-TXUXm0*mT_j*RDR#^ImE3yih5 z>{zIjPNA5i|9K5az$D?5frjPO*VcUf`STwHu3@~G4#*`V6$4NW>A&{GPfQ3kyV9?29&KjgGusY z8}TQBNUG@3(VT2BR)Hp62my^wLm_Mwy%9hW>Q;?ZN!iGL_g*Mo%=w@>X;`Bmta#d4#hkA}0DU|Q?u&z@(6q!Q zpPrBAfs(RItFe3*a_NnnOcLgzwZvfzTpJZ2gzqAUT+j`Tsqy!uJrZ*q$G_VpJcosNV=E9cII1Cp;q0#{HQ$-GzCClR{l$dPMS_^?N1fx6E2Uq) z9j_SmkBi~SW@Ua&P7=yYfg)^9*Gss~ zH>1++_LWs@OW@6PV_?oj{H|Zij{q0nqc2%yoPG%w5@KRmWkdebqDK19OP_&XYpZh= zIZg=&<+T&TevsdiYsX9y0m!3hn4ssZcdxLZ=VFtMfuNCXiQU~e0aG*x9Fk%c38G=@ zx!qe;{iBq!ikn14HknpzE&HZ;98Axa7v=~_V`3pU_A#jVjDN@?TDy|}2Pj>+9nD?j z3s-66C9*#Jt?GzJ>?fS!^+3M|0)=i1GGuZKoGTbVdBSgYnJEyLj`( z++)0^md_X3Di}<;jDjM&&L`%}0KqeZC8O7Or@41VZfdFSCyM0eJZ!|*zCqaar7MI4)Tliei(>+1`B)=iBtaZ1!h~P zKRVTmK=egyHo4mJ)qaWCBDeZFLpihKKQozCZ!60RQRr*wHK2&Ic+O|`kt;5G=q2~R zMyFXIxMb~AZV}C_@6+t|+v_{21gK)myP6`$P|LaOWVhM;=~do(qce@A;flOL1?Oj*RITn+8w!v5TU4`|iYTRQfpx*eP+IN{$ zY0g1~REs>@YI>+}3QQ`TEPz?6F%#Ys-Wp6)zj&h8r-M|WR!FW|sM%GuIA7?9A|8Ka z0P;pVpo_V-3ORd)v5RcTK6!j?FSbh!z*gf(DA+MNO8wqL#?LJqtxR+fe#gxVVN zWdchCR&1?l0rj?}C+WB3ixJh8s5c4O3f9zZpYA@LjEp$snV`xD17JBaq;z>+R^dytMX^zyBU^~LFbGS4cE==dZl>fqls zv5t02<@T?(CvSV(f-UE5N7r$@6-tY?|7(glE%pbF>Ah9p01x3&vIcO;Y(sG`5-)mY zNBnMqJv(;S-Mj4 zCXuSn|K)A9Q|!U@yvEbasj&XB#_DQZ{?3&8ofDJw*6W4dz`I|)w|9>hFL*be`E}GU z>Xb3L&+}C^;sFt;K~<$yf++m!rKCVkR9OYer$seo{Q2U^)JaoGeV~ zVTE8v*~<)hK&exHZ7r4FbewYzWOEf(ktoh-X4V?+*bzA^qOEh(sU=L6{k^D zh<(9&@8?pWvWxasG7%2UR?9|u+2CL) zW9sHQS>7pX=nHH)v6i0a0(HMbCGCH*;Id%m)7$UUJg?reX3;$7Z|;iO!~ZG4TXZPL z{PsQ71I7Ck?t9K@PZnFdllc`BfRJfVTT%WfQm%MbjmQqY;51@mZfS`;c((76YPmjp`_yv^Y zu{FP4TbzSM^BO4x<&0h<<$XDw{WY?s#}tx0H!2C%1A(joMV=b7kF#O-JKQM%Y|u@P zE|Q1fOq6I8fm1{z@q{3m1_RUBU|A zwbV>yo~}Ci6={oLf;fG2_))%{2OIe3BYq$`$~E2H5F6ZK%6~C~G7u>z&w|$cqHJDxaY+u(q*q*d%?h3d24SQEr9zFy#OKj|0WlhtgmpBUH#L)jct%s(4$Pwrmr zy}h6Riw?d$pD$=XLoZv@#fmvDRA%J5KVmn~Qyu`yf*2W0fowWdiyYU{2VE-g0n7sP z?0KGWe{0$6obCBT&Ym(hbe<=3WNIpP$yKj;tf%Ka%x6CnD7bZb7D&14h5WAv5vhil 
zX3E(TbWo6!ljD=cwJ%gq<(*due93>UmU>?P4K8tfcZKOXz5F=4 z-gOgoU#jshd!#d}6?gLcytNav^Es6RJ)N){VP@5p)j^xK>A=JOVDNr1f1=+*yjthY z_0_|%=#og%d%w6M;?FKlvfG1eek;nOAGGr9u!rmO$!#U#L~=#O)lcG!Ko*0OMD13a$KQtYc`V7Dw*v$KgJdxJC5=~zdCy@6@W{gUnti* zTeHNud;ISo&(>GdTH^DIkG!8W#;KGi2?hRh9`78_1OMWjlpqz1eFGm)#X+Tbs&j4T zVW=dcrc2U1Np%DV{s@6cxT*iNR&AvwBv)_NbMo=~p!I*`$OF1xHOnJB1>pg(0k06Y zDsU%nTl9;Q*>jgA5bzbiFqMQky7n&Ov<6<0{SZP?)b zmzrI!NNN^raSUJNf9UI;Hbz1CL2JWOzqY)<+csX#*1Zbkm-k5Sz*FR|z%()m@aR zKz8m*{kMd8Yr;%J)bvy5V{2l>5SzH)CN~uTDdg-lbzKY^^1fX%!zY3WS{b}X%EeS9 z33o+z1im*Dzf7-U&kvSI(CFA+JvEq`9fbV+zf{5SAGiw@r>Bnc+%NA?VM6%{VdEDCWep8W6BB2(fqPC%KZVwv z7y~we_uSdk(WXz^uGB**f54z`(DA^>#l+Z!;?BSnf)r#g4->`SIbCDCcMhK~7Iash#}(_4 zkw@o_*AyQ4ZGS!}$*wzF>HM_Q?a79UDm|Q<9ZF*%m_iFT{*~(p7ve}U7nA7xa!&dd zPTp-t`PqbQc2;9Bllb>kV!-MrVt#aAcoHYa!eZs7m8Jd(bXI@m+s$Oo$!?lBwOS)8 z!PVz(e*YtX@Ry)B*Z&0{g>P;wxFMrNbC12Q7O7@}M}~->0yq})GquH+W|8v)Mc5B- z?x+9v-~yIH0gJM}jAlp=2)x5@C!}3Qh!d)$8ZJ{TBSZZQ8j8j2iY~ugU9*$YRh2$8 z;qPd7^d0549Em9VBG{icu@JrLfesYi$kVHwR#i{8fva{{d!Z?3rfo=4$E9co0f~6WL!>%kjCmis~Y-iPwO|T%0e3@9tw_?%#(yl++0E_%;+R?rf@8Q#ZU|n zHs~YSHbxN%P+bx4f_Ad@nz->HACx>OH8nx?ddPR9Az4WIA5hW(hZ$zC8kX10P}~1N zfh*-zwt}d?JVN%fZi7i!z?gO|Ksd-TxGomIKG($S>HVcf$s^%2Z){vZ&^+Tle|J3A z#LcSq@AI%apSrTNJ|~?*&hJWiuu8{MX-_fu@>yLj@?CksgBGM&^qO9#-^w z*cPW|gT}-l=*NuVNDWuKIT|MOq{Q=q`%BSq3w0^atAxC~G#iIFc-ObAGOjXvqI4jt zX=5@QO$+$=e;`FLVi*zFgsMs>Mn&z7cYpRgo3!1Z@hA391vW^K>5u{}<_9vF1zJd& z+&CHupl$KD8%ksX89cGZ&mST3v9if^G_feCvt^W#B999sCGLnr4v&;@f^X0V25&7V2l5Ba8{G-4~JXLSKXeQ zeG4-aA0y6J^;7-}fTJv;Oc1$<7j)sgY7w?|wm%iLZe>+FD+^IC9ueAEgD(JKl zv+~fh%ligWhm1#Yl`aXh6=rWg4)OCczaQg~>Zi6+g&Islpv(q(n|U9f5gz@v3MfUC zRWg&>HlfpO;NxUockF#{DUjhjP-cLuJpU`hKx_YI89q#kd6y0;tLLe##|zS(Wj2on z96#m{UuqWo>J0c7t{|%yJeUT{9qm6Y>}(heFAy|Ae6KiC5qK-*B2ek2jFS!4C$Wq( zFxT}N?DU;^jugpsyJ&NrJ4Jly3RdFQrY4z4u>mge1Vb&~X^hBmp`&V|cVa$w89$++ ziTPkq1laO;lwNDWtd@VAe_+VpiFZ*fem)B4SNYuYSD*yuM@dD0?ER=GOF(#E%vt*M z9Eu4>T9q|9wOw+vdViidTXY!LB=Shchetdc{*{~K^t~SC-W*@Uv*HaM@1_`I+bj%AQ`^=SAS8VbVQP8l 
z{Zth;OzCplm3*anhbheEE(oluAp}IQl~K)TG8mIkDM?Wj#8TCD9QyrRH=lNwps}tj zB1(?k5Sp^`ajNRJEX%GRw%cvHs0DzT3#6RGRe}%@V3r)c$DFex>zuRB&C56t&XKSM zTOtzBm~x0ArIbMtaqh1M#TcWA=A1!EBopgAUZbkL^j$aHzHzf@-W^qulv9Y=+Ibsr ze0laKuX zk}PDW^ukU30>5_fD<%&ZW9I9wARwTCisY+I(-(Mw7x;~V=X~ogVO^Prf;6UBp{21~ zHwd?GY|qPk3%-ok|ICwjc0cj(4}ZA+NO|wcY54N-TY9qP8>^30`rG62pNmJ2-L-3J z80uhz%tk{$wtK5(^vi?2*WY^cn?CvtFI~GqSr>~1S<{b0Q54R(uIoncGqb78 z-rnA7wdyW6*4pbwhs&WqJwIFO^5Ff4v@@RGe$`H&-^l8~?b75-Dl3;a63+_igEB8sZY3QCB-OukSOMa?LJhJgSSvZR>R##jTMVe{3~;d(y(_dmG! z10T72^TzV*;fZh2GNV}u^pYa;0;N;5!!S(KWR08ssv;tiR;xwyskCJZDNG||F@}o5 zG1@tOf|*6Gf=Y~`5CXHEP5KH1a|0)nwU&rPAZsqFGRK@!LL~2_bEdBA-ENms8iqkt z%d+f;oi$Wdl}KcR-UlYjh5-~%07OL*1g>1rN=!kb#E58T*STa+#4K0(@ti7fl^~R} zC7km*oO8?^W1Q1wW}9$XHAJ-CZdFWOxG?#uS|MUxH;PznOIJ1rM@Ci4dU<}n-Lqrm zETm}bm7m5Zrx%q2X11nqb<6rY|xk_ zV2qjkh(=>jAEw}QRw z#<*UujkOOSJ~XyEJG}s){r!FCoORY(1^@!diqF+|LlF}rRwIymAWO)=c5d8}gsMO+ zF(n{k$7C6#^a44n0_2=Q)e^I1U=zmaW8d&mHucZ`+^0YE>T6BYY%k9Z^XR7vi)`Zl z`c5$2aDR8E=KV|lqpR?LzQ2CC2tFI)O{q3ovne_ti(1(2Q@=R)3+~&#Rlay{{Bj>k z(=O}WM{r17w~MA}&d$!-s@!e1p!uacH&G$Rh^j?VnsZr} zDTT#ivE6QS&Z^5nWBhf1yoePJcmF4Sk1Fm=3~qPMN!zI z$~ntb#k3(JB7;Dx84(p%K3)(6#r+Dw>TYLjOrcEiw#zT?d#o*D^XA5GCTWqMVrgLn z%7UejRMey~jvK*Jn$(R*!SK?;X}3A+{l{1E=||OP&wla8zWnsn8`n)q$9{YJ){U36 z`D>qh`r}`C)O#aU?RbdYWvCWhA56W-dsRAP+tLZmoHpBqpLF{UI%QS3F#gjr(B7?;Z> zK-$~elN6_Es)}+>S4Za4)6=KNPp##;u6>SO*R2*4k(*!D{IDFu{IchKb^-_?6h&c$ z)7(UiFwX%9fT|?PStWxCUdd=#CCfa*#RiQ?$|+>UoHN7-8I;7QNu8^z3PefanOJ+3 z4>pb?dQgOUglqn&Vve?O%jMD(Cg+@!sKPMyv+deg>IZMFVONa(P`d)M^y7H<`ZWg> zHeHlRL{E;NDso*^*6^ZUfYguAA4wKesEZPqduTrBj~4I0f4Eh3n%HN8`n4YcgB{^7E|!i`>`8t93HVkRPa8Mb*6Ar zNFs9U){SSiz2uxTwlc=$?B~T=Qx?&jKtNLpws44ZQk~9)*dfHpPpXQBnHf;Yn4C4F z6k}4=!Z}7XM$gX94)+hnaSS=^uht^6*=%BrhIqAFX%_EC05HZV3kN}+1Unw%w>s)kY%{`YV^?3t{l%#7oRx$$nRgBJq74; zRh6lY@uV?xInF#^UqYKt(QV|-DC_X59$@GI9n zAXlgBOha9%V88sq9*HGJ0IFrg%A-XWfFC4(ld5U zMCP_hm>C~PlF6AOg(RyM zMp!V7H(vd--R9@M_~?(_&P^KMziKy$65EXq-@Ur{Z=O7u${Pth5xWQ(J43Y=aj!cM zP~7I?LDfFYR?WS&TvFF%bUS8V5sp^fxYtt|+ 
zs@4zFwS#Ntn@bC{=}s!MKt~|4bj#gvzG(LnyZwXJ&wu7;sembO%KGN%>66`No4~kr z_1XvCcd)n2A-?_9H?Cj5e(Tn)-Q~`QwA<|*z~j^7A%)tON4&Zi#)IX09ENt)3etAy zA;_)8;h3DoXv=WkS8?-cKK#Jt*dD^6#6GmO;{{!esAOCbV~A!n$-JA}CJF!uKuBP8 z&L2PoeYODedpuWj4$J_M11pW&N3hvv%6wH>a2H_|$CDxSPfaYAe-}6le z3mv50o@{qyY}CbiEh1f(03hicm{YtU%~fF#IfqnL)xmnN?|Y5fRxL9#8(TOK6#>YA z);d=Tk_Z49>zpwrN_H3oK@mNGsv4q{WE!KB-nGl^6crsC7s3dHsOWuH6h#Q3ZQGpl zcDuFKR#mmEmWY_L8mfSl9c5KRL{~A~NN7L+5dalIV$LK5m#Y9|tqoy>0@)*CAR;8A zDlrQYTFW?>oH|XDrzEW_HeFAQ#_%RU-^buLZCx%3yV~17KfmlJ-KOq+FTae19X7sM zw$|El98n;J;f>R?y}k8%y$p4^*<6-IZA^LEb<7|#)^#01HsfTvR20Q#GmQQ2@Mw=I zn~IsyItwMMs%9T^a&?`J6(w8O3CWLUce$H}o&cM+A~I9(tJTU_Ft!jR2}wjq6+n;_ zwb+c)(j-a25LrYmj8PN9s4Ai$QJk@TH&)hMKiIcrv1r=eZl{{7wo(KlN6a}Tx~hy8 zWKt9bZkk3wW0*n+h{}9rcLuzY?Q_m#7=cwYDG*7N%!Xx>oO6yTg-KOZbDVTUT(8%r zu0Vhg=K@yCoO4#m$^<9^Ip!de2$dArSVBi^lZcN|oa1;}6h+KF48E-n3tMT90@>OU zAhW@g5V=BRkuJya&aI=(<-PyU5B=p&e)2o7UB7mAe6px&>k6I15Vq;~lrmyj1Q)(} zb@B0o{LXgYnQh&?wsl>5aSBo;4a+fk98-QErueTz^M5R_b-UBE%r};Io?g6}s-*)1 zW2bF-CdFcXyX17*M}Ki%*X=lku^$dr%eHdO`tad9Z%tEp`OZBB^4>Euq=ZQQW^1g? zN`N-2#1s*tVM5_uH&j(s6lH&TzFaOP_ux8Om!G3xn1cB$0=sCWrZY3aru(bESf%}nN3Xn?v1ZL`ohEQrP3PB z(JGzdTf1(vDvp>>!J5O}gWq-}KXe9@xiR&R`r+u-jnx}xPXq*D$j+_pdKYt5mnw+} z=6s=|X%z_68!lI772V}&x@g#z*#%-Oie#Y_aX8hZ{8Cxb|9sC4HQQ0-? 
zRmq#(E{Q53u{kJfOWa>>Bc-aUNi8u!=CWCA{U{l0R-Q*Q7zuE1eZb7aFlBjykRP7B zNn|8KIoZ8-f9xkUMN!t%G-b)GIF}IvKw?qFxjvSN=DrTj3^5nZ#gI~#D0$X0^V~ZU zlh>#qh#AlV2r+O~#gv@m1hDIR1u=zl&UKglZrE98fDjEQRcA{8A_^&{McwM42CO8> zIS*6c){A}^LQbgEcfGZ?Y}&r>OJP$MVH!+fTGIaHyxcn^Qx9amnx z55A}>00pL9*X5MR8pzCSFD@?1vTV3YDx!Aq&`o32lnk~qdtEp9m<5@cFnen(05G+p zu`bP`+5^qDFqm>x7TYO`<;jB2DfuKx(eh1O#+-Iy#uzfgG);@e;_l5m%gmqs(ib*k zZ)&&o7ZWB^JM_G}=+}GgdT(`l`ovG6Fb<+2S%zU}wYAbfjM4kmV&w`(9?njOz5ONT zT!5vz=qK-34WL*y%-xgAy?Q@kPI;O`zf$IqBAdAkoHpkdN7oLZsA_DUJUxzC6iwWP z<9HU5@8r#Ewo#-m1WR=STr3I%u8J~@{S@Pzv11FDQtE~=LLw$>5Jfa%w)+2N?@wcG%hU5d>>d8Y8uob3 zop0S5yPC~rlg(yREs`Qd&7vj4vL!;cwVtm_W(#P1bF4TNy<4JYCt&`*zLPJw{QBYvX)bbBVmd|pJQD)r82MG*~O`9 z8MJH9@!ui zfM7KMI|G_yOvbq+rHi9yj^Jf?;hNemZyW~UIJ`qYc(zzKigTtxIhTYslE!879o2>r zAp>N~00JhfGj11pMMUeJudAxcS-N3VRjR5501cfPcLs_OXJD@!DTdCu@ zWxar#jqZ|nDTm9wHgg{>Ty9Q&D1Y;(Z$3D?+#f>Pgxyxa0u$r`Y)vF2maVZYvGn7R zF>8@n;`#Y`Th(v9_13flb$W3gLYT_FEYyX|e!ovO`^s!Kn^*3>HVnh3-uV9YdU^NG z_UwGvT#Tjg`o{Xv!zT!wQhD#a4}Q-Vzu5Qv$;sKWn$7Fx-krOvS-aV7zVWSZe&*Ak z$t_^PVHnalA|e`Q>&P03h03rECGGbmZ&uPY`;upQYy5krkP>~tJMlr+O{2rVS90@LhJSV^z`K6!;hM}_TE>%xxBno)M=_R zCHba5rIdn5>(z3(T&8h|F;<>)P9cQZVu2{uX-bKVBHL~VMfLHsi^`d*sz8e+bhV#h zbYRa~2Ri91bZA z&asp-r5mTVXIs^Pm{LZ=0OsmT7G9 zt9kB`$RO!dif|QYQDMN(eER!B2!H4A{GGSoep^y*s`}oYJ9X`})WtZ*$O8-qd;jk8 zU&rdd+jmC@=lQZ4JFsm^8&4*>W(e_)#V5n?<<iSO_Vl z^X}H&d!d9wEYj_m%)abFsj6Kc`Y?8F?eE;VM?_L|BA)<&_x^A=kYQ^r@>F-Bgjk9c zP@^=T&!^L*ZCh2%Sy!v0ez%2*!8m2#MG(Lm)-fVL2FW1eoTiwwFp{81%0Pe?EFmxz zDU!2c0u%&P6+~pR#1;T|efkIQ{`%klc>Jj!edG7NIeYKyayb3gpL_e>><50}fAJ5$ zs^jcf^c+XT$|;((S5pIAHK*v*k_<$uyll{zLymnp%#9bK54O)+iwmx49EURd^w9l# zKlRyP`}VU#|6uO%sowZMedXiN{~JGjym;L8y|0g%>(fnk7*i@Kj+RZ;)Yes5VGPMH zs(~~+2o?I$T3e0%XuY#VC`)Br21cg#E%w{uHh$#`^>FH_Z>&1G(jHegpwP{r0$+O2*)#TJ8@;HVPla1au z11JQTQfixKLgz~^V+;V$RJ9cC;*ioft9vEf$LeC0+yqZk_Fhp$4|bOVP@BW*z#0tgD!nBJ!HN18;qLO!D#br^m;~Ll~Fl2$}Xne>fa+N)O-rz_z|Rny;#xIpWGv>aIYlDlY*TV# zi#bH^J%F@rO@>1bSyD=gkqC`yVYWv{M=6$43RTw4>k{IaLe3e-^2(iCl2bEl$+*Yw 
zzu(8?Dj&nZ7vK!#oEfm0&71k`;^N|}o)>Xy##Yr3(th6&dXQMM2`Q*5BNCBOP9;k& zr4&T9mWgn}6Nzfh2@w$_rK@O*Aqzy)6}#{IrfEV56I~Dyb4;pwes(r?{j6?iwFK3^ z@2&NS9E*7GXP_aCRaH%O_xtU3y5U?_->u~4+L=21)xIjf#%4k51Wk5;Vryde0_(eaS zqrX=h|Iz$r51YH25BF*5&F;G`ZMDRz2%=Mek01yEkho9SmRFI?%!rgVPpJ#EY{=xC z;bs4yd?)Ajci02|wf+Ux_skx+>If3it7U&cR3ZYPBn1hJC|l#4o0?uk#27Y!kP8VY z(-fms{mpcU=_+3`2!bLYXfA|WAuyp{+XL6S*7rg*J#V6dBCsk#QH2}xyIqt`ckZ}4 zs_t(2{Q5UjN)7Z(KJ3|sHE`+eQ`7~?pOQ&xJ! zV2BP~&)a^!nB~IvZZ)2K<>$-AjX(I)Z=9ZZAF1D7<{&IKs<_+@p&c$5?FqNHwGg8{Bs_OcDw;A?_S=Fv*tC=Q2!Xehx z`cub8|H;0+1eZ**V1=-NT2w_*001QYodd%`q?ez29P75t34w`qs^!!WLR3IiU@Ao; z5}69wT*~ldsK*TZ{rMeQS4bnZpkxERee=#}mK`+9xT_3b0M&DU8}X2}%$KoY?iR!8 zb6(V$ZrFJG?XSNdU~{?ME!{`a9?+l|M@m#!i$Z40CerY|ey6T=t!w>rQc*R?hze>1 z0E|f_flVF`-TBEEUJXs&?y<^Lr~U#hdGDL5$vMY3qG~P~Kom*{LyT7io>Q#8D4=0i z)yVu30xu#8I87%@DNHzlzg0x12`mz50RU1F5s*wG-UygB=i;ril{1WLsGwoysMI6U z8PsBcO3Kp&Uo^)UFNdV6$Ta5Ecim>Yx0PQsO)N!3#&LAc4Z~PUnaySo9z5u}UNkS4 z%Uic@sOqz)PfOOiu8Zn0j435V^4{m1V@y-W>FE7tvmq5iQSl<#GIc{l6%;K+9Z(V7 z>~?J2e9?~GA!u4OP1|^1SpZ?9rdg6)<8>$VYuQi`(%UJT}^==00B_r{AE`wv<%+iiu0kAtd7~o@Vz-)2I{?R8?}(B9jFbLIhAY zVw41g02r~Pv|QaRLr5xw2|*B{i1b6UMF3R`fK0mxS5^v&rhfeWKmPmPeDlriW_R!2 zy>T4B`qi)f*zf(3MjiRX&~0j3<-#WyXFpLjKXuFh{bzS>!iOd=RYv0w$wb5z3}a{J zj(Rfx!_DvcFWvd*PtM^jDv! 
z*Pb0dzl`T6AN-y>i+}sazOOe-)#v`VfA@opkj?D884o+eLSz&qNmj)PTXf~;kpX9# z<;shPLuV|IavD1D_VTc|T>i}GZoN69pTpJYjjyR4$M?4V$ET;?fAi@5OUeo^$&syk zj%?!rN#ZtE#D`J~l5{sUUn?vc1v5MxPI_^hJk&BX}wV#=VA9GQ_ zK6hml6fw*x1pw*0{eIZNREqAosv0e52_2^p&v(1soxAsMF6Tt~@;qmOz8k&wLRpHc zYF(MTw_bVg{dae|7bwnERZ~4XeQYdM)l5Tvdu%ww0?%&3=0@)_Pk(iYbQ@ zldY=NV*TEG?=`d5SZ^wqQOA@7iq#TNH?3U&@>_RbSF_P$Z**oEVG%h^CpHY3COsTB!;1B8qQTp8w22(nx;t+`feBp zRFPcr+}2Da2z50>PdPT42{fI4R^^JF=ctT`7g1%V;MbTn2~)z+~y_eJHpk?8;f?<)O82}kr8yVASPRVa=B871PCH2L?ZIm?f3hts_M2GV<-ew+jLzw zS?N=i5ds-w`Y@JKLQZ`cVd|AJW;UN+?8CUdeB<_=TSsfQ)xLcbxRIRDHK0gS`8Q=P zgo-3UdjS@O(EzmGx~ipWA*LB|nEJsdL(VcAR8?h200JyXD2_`KR}wntX63u=ZD~kK2otQ|FgwG}kTrU~{a)W%{uZ}b zssIx<5Lp#t$*A8+0+jD85xmy5J|X&^*#mvoO>Q|&%PLf+H7hi%Ap?M@I%BH3&ZSIN zo2n|YGNx%Cij)nD{1XkRzj1=)Tv-Ctq!1+&6QhFWYj5sa*ZN-QH=tgq0syQ40Kv|x zRy>9>8W}H-tnAwMA6`EA3*qtV_}<~^ukT}XKfL$z=?A>6j655tlC-h42L&mqVayW0 z{qd7e&BH7A7E1LozqTBAeZ2hm*|%s3Q80uMOAKrnftirlARQeYY018>4~GL2oSz@& zi$&A6b=|07DJ6^KoNYzOeAw+$9AZxU?f&xeysq2t`~J`T;XnN5Pc5?(!Ymu1rYR7Zh>4V=n83I)mEvTxBG9DbRU*7InYY8OB1ghRSXy!R* zmy@ZQtho>MzJ7I9>DlgUo6y}`7#)YAX;2TgdbAnRT1(iCXhX?wH}Y1KzOug{tkdl1 zskQjZy>(T8cJtvkwzHE>I^cbG=l6!~$3Bw+K-NN15CyZE0``-ZNxjy!uJzj}0zkx! zRM}J1@)U$kqH41pnytIBt;d?QA)qWb4swPaLbB5&+|f7ow#0I-}ds@mk7OA4A(R#m|qA~&^RMp6_)f)|q- zB_d*YQHG(0k%&^vDm?8L48s7quy+-C$)qI%i9!lvPXvlWPGlWGD^wtni8&p*ff)%^rC7_7H0RVbjWJe447<9n$8oHU2`ObMSO9^@uu2ip z%2kS>nh}u5pw%K0iW)(l%1TPkVJIcDt;pFh4n$Xwdbk3J8&#bH{`hAfJott8|H1pa z4?aALq5A`W^h;7uarc*h`1}9ih5Scf{cs^z(x#C-n${u;h~Pwov`UH!stAddVmcg{ zU7`ZJO>6w#&bs~c*KGc=PcQz&mu`OJX7l>SdMT0- zIf;&4E(N%97Exkgl|dk7RVl`haduX>$W~R+)L1o=yO4vMt=;?>kW{ww#VW;dh^c8B zEk&e=NM-7y<$Ab%@5#y2*`gJZy0$sSi_O`MwSgizXNAH@@UlS)F_l89FogpE5HX}! 
zK#X$$kU|uZAQD1Q#BIL|eI@|5h-^|RC95D9;x0sSj+u&-7^5-f?YH0V<8X0tadvSI zq`AZ-$vHEO-F&eSkuLVG@u2hXXQm?b^C+ zp63un`(YSkV8+VWF_)w;UoNsrQIS$&$(C6V_q*M^uF{Z~Z98vgo9*`T`6^ zYC&W~V@nZWR@8zpbbD_ddE2LWy1g92m?RPN*eB?zVUTn5(GAEnL&#&oO8@EQsK(FZWye!W8W9m#bNRbScd<;FQT41;IBee;;ncw;5yFs6NXU}w9o%Tff?*FMIGh&jf^VsY>I7BfFR zdA`|gbIt;a%wV|dIuThem)q^OZQF~B%gWf2b4sbMYa*(uN);xjWEcigP2k@7`mew8 z>VuDVHZt^GM}97y5$pi7k=mdk2&z&95D_(_05X&J*sQh~tJRz}nm%whD4fsEwqZQvf>uI}xp-5YI}~x>?Mc^M0do~z z;KZknz3=sn%5BN@I>rPdgitwVE+Jq_Rtu4xmT`z^TCSb`^NIyTv>=j=EJ8U+VHH68y?)QH zb*1@x6QZ4mo`L+uxoaEiU^#BivZ5Jh9|5 z3`6qH01d++ss*%w`lgOzOHwQmW&7pf2CSCz`Eg%5c9x7m5TgpDMTHT6`wNBtbCnc8 zQc+3+l>o#jkWvN!OKhA4(JWdNQppyfa(*Z}wDz3t%on%sd}_IW{7W$nXaK$5e*Nfa zzWB;!Zs$ukK5>N&>wMX~`TG6+qKccb&srJw{_v$$|KQFzJpa@O-~30jp$w-VEUn>N zi-$ufSg-=u3@>stTZe4CV}p?{K46xKJ6Dr@@TkaLy-%xs)Rq$w5AbzRPRKA$fZ3+G%_ z`4mT0t!HgXVK$!`#*|V@Wwlrs)XEwn9LI4Y;-qj`%;(FdA(a?<1aZb_P6`Y}AcTxa z3;-&k1gJ2KLsglonYCCI$SFh+b^eM*vREt@^Ho()h+I`6a!LtOnr89S zrtQlN!PI|ijCoONdllE12m4fOI4X#>Y!l`by8V9a`=)7{$}!;>4{cq= zm;qTt!!R0SfHcSCT=Q~^lbM-WVRBY{2thu!6WK1zhso5eEssgBhPMy+3R2-)eFpD8ZEQkos_Aypj4X7cojDS$2loS9U zLw>PMd^ON2rIey+zgt(~Fa6Z3|I`2XH-0tJU-{Sm$Xlz$1-NQ3zx?nYZ}(rsN^#B&-2t1bVy>*Q2q`58 z&QRTlF@bG^AQu5x#I6i&) zbiG(Q=K#Ubt63HdSD(Af0 z@2s`XSZA#$VXAW+Fh_G{%sq^!xoDwrx8{#8k4cD`c8n5 z!#<)_A!n*?-;X2D7xi-4_QU>g=&DMbuPryTl^KJu(IMuN61!Tl6wtPr#gMn#Eu$gp zTH_T;&e>WMV=%_#T&DGWA_^glyThHMqsPymTLgt-yh{YncbE07jVXClMan5y!n&#f zv~hlVKeLJ=q$$j24Xc+^4BBF7uz>HVbyT+I@nfgQ%zAU9=3?aiX3&MuS!^L*9TCS(B z*S2j`OG()}8-^i;xZCaaeJ3LO{eFz0ozJ~7F{g`*i=(5X#bVL-ea_`&_`q7Ls#DHS z6~=Muzrh<0?czp@%U552`*c%2kB7Fc_mFGqlChbV1sp>bNkfn!+B)y7F$i3s9%2X# zmc>%?p(a95g_1=xs%S|mB_kjVKne;bBMN3fa4xna+>Z4eW{sERPEX9`)(nS8NzpEv z-1+(taTSuLiX{O5){nZG0RT`zq-a4cMPmX&_r1Qc{7pi{EE!dq2!TZun0@Uym+3wM zUThKofQVe%1lRiH=zC@lSmNn9MD!{dF#WpbGTq%8=92T!_a$c{GQ=iXXLkSqfB;EE zK~$-f2q=B+zIkJ*VlOx6M09%b{MM~o&Acw9DTk)5Vj3Skf4tl69zA`c-hAry*KV}Zb%Sxndmlq4KxDGk9d=s_25#Wd z>i+v5ygTx^oVWkC;mw_BO*@)k7hK}1cg0eMX 
z7z-9>s7MLJm~$e)l%pp|l4)LFcDyY7+AFht|LF18f93Y^oz^u+X@By;4&VH9wQ2d# z#}WwP8xJoQv)fC%&>ObvPG9K`E$~;~O@}q6ZMPgVxfrOufcB5xTaB?iIo)jbqXK88 zqGc2@cpV?O*0p}OND)ebpb`-ZfK5@tfML8h*FXJzN4t=dTMtc{7u&^jqpB2Lj$v9D zEJ+!)NYWyP9g$Imn5TM}2_HmkCKAa@Vh<6K0$4^O(klXnfE19Ts%#A!ML{Wo$c9k~ zRSU{hsX$68g-}#t3Yf97K7d29PJxQg4l9uL*D!57A;sH>y( za&d%+%%*7?LfG&3F~)v>SQJ^V7O~{@dTp(J@4feO&TTt891b}r?;Rr4bzQ%RM*xTd z5J8M_7LwpoKbO# zT!ava3^UihuIr=2VK=!orD$E(vsqJlI}Uvu`rRQk&8(C%0kWsyi1&3I2SqZ@l17;V zqo~GmArJsFoj!m_$ksA5WhxRS7sley8NwopS*)wB01*n)Cmv%F5%QIy`PnbLQjGt1 z|95}qmD;|!T09ve*~JDf%>bTqvibh;8^P@RxYw>Lgu)K3yGj}r5iJEfi7s2?G#l`P z^;mYFsrA40FMa>#tK|o$hc1TKn)3Q_d*Ierv)Fa6^?c_>-wi3?F4pfohX4Ct`)V{d zD~)yBe(|k4fAmLwAe}$FeE8n?z4`jl;?Aengj!48oSi>kBft6Dr`~<^o@?mt@y+A; ze6!nf)=*@h^=Ne)^LT4{WAuHCtH{tnyccY!> zm`)yk^v#c-U!0x4ayPN<_?nB_&35Y<8OqV@yhEFue%0^~^XMQY@hJwxOa5#Tc{S_ZPdZ5VTEY z!8-QVd&^u>bae#)#@UIbmP$q@V@)Z=GL@7VXgaW&*|H&`uIo}x#*j5s`$|OWiqJ5U zscJL&S>N{nV4V@wZX7}tpKdR!%CkXdd}ZAc64zb&g%*-Dm!%gviNMB|~m%mb|Ei_i~*xLCI1IF5Z-trpw36_Dk;-5&Nt zSR2m|!2}%$Un=w-eB|ZD;c~ZPFM+DJ8~{ znY9!~(juo9=j-)aKv#L?DnG_pRn^7j(s~zCJUKbZxun!j=Yq9Xb4e-B=W_-W(aJh! 
z?2Q{Y`m?hicyP;6eZCoLH_tf@U|Rvxm1GRNO zLe`CGW;j@?1>$dc{}xInKt|+g=TJ%I<&5DIq#^>U42TF?L{x26&6cZhaRKnMQTeL2 z>U;fSuXU||Lf^~o0RRAym*JWh`5I&-C}NNVL31%k$Xqm%q7k525He;#y#lrV23+Vn z9D<&SS2xAX6Nqddx>8W7aLqY=Pv7usUF&zFP(|^o4x}KU0s$xhWyZs1-PYBj4r7LH zZxhyRZ^9O3<#o2M@@amQ{QOVWkG^`i8T~pob0=CWRi=vZ5JQ<4oZo%*VsY#J-4B1| z@i%_${KGFb>6JU{1+*L3AX3C4P=vIACRLH-ycf`M9FNw^wyM|bWhv$Tk3RhUKLt7G z_3BtvPfkv59N%jf)#b(h`1sDZKm4HE_xJAI3n9Gu=3Afn)LXCJzxVjbqps^pe>gcg z>Bph2>sxnj|MIW=@_aS_q0fBw+aG>#GVI>{;KM~#&D*9}N?jl*hB2n7h{pPqn`>iJG-;%@l@~NgP+juQ2Ile`FYaK zqqA=upMC#&eXD7z`7JyO-=9yO>^J|1Z?A8q$91eXRqI2vHx{;8wMQ*fM|^s=-|u7J zv1F$uU#H5hb**(@~mvxo}D z#yOKx>ihom^pu%Vbid!P)=NYjW321?_U+qJfY~krzU?Fxbi}7DlWqS2Po#)oehS(7Ae=-{lMn#v6;w#4s^`rWHPwQj!#) zaU6>PmNJfGPAL+wZ#J8aan5^hjOqGuu~@WiJEqYXlXFHU@BQ?6KMjngRnr$m!5|`~ z6k-7YMv|)*hd3PuQYIt8Fd{lTjcaPJnERaKxDUCfda+p55!4!!N)!>v zg@_nAmpG)+`+B$AI9DNZ?fu2Y#qq71nrOSZbflH5b4aF{Ej+2i(@uQf|M}|K|0=vX zgso#$Yem&+6O6@jkfCky(W!s^=W+3O-*5l>ubka!^8VuSqvz)yXE$%Hp~BP`A;AK< z6xA|s<^YgV;;Qm>HL=3Sab)IdwTdY%7K;#q;#CsGJI72=@-TMBRH~Y@TzzVYfLcmP z3dG37Bn}ZlR8gR0QEkeo4@q6TgUTPU%4H}Jy+)GcK6r*`d^P^?%%w9 zyj*CcNT_aLe`INFMYJqt>n@X^&s?h>K zTuKQ!K!6Ye05EU}v02Pj_0V^9<=7Zx%2Mjt%vcdgQPf&H#JJx8PC>E0@2ko=Yxe!Y zcq^zOh23uVvQN<(+V8u5e?TN}-E6jK>!zyQf|enUA#`l8l;SGyo96uT;{2?i&*zqz z01eyCZWqRJzFI2SZtTLLCxhfT<~)o!rBs2I1QnP*W@F5JJ|_%u?7NI5CMe>K+aC_z zdl6B91_%smkvx`$D_^;i%H`#j#2ce^)i`To$PCH6K@FipS4yb9%O0F(g9;1?lZ6fPo%rHU zl!yq5PKkd+6vdd*Wc|Km3SFU7u63fBmi8+!{}xmGg&Il*k{{GhEvP*Sgj}Z%rs6 z001BXAfPKi4g<}sD?{j?j}>w;rrX?i&(}*6FjUxUwJ7m^OqUPp{ZH?nZHn8PBQh+- zAQ~|g(A<~pW$}J~_rb02yEuCF7caj3@&0Q+^5N4jzH<8GlU5{IiSG&7tGU3?;AptYLfm_RWyuWq%k_M!{0bE}X`6 zc#>y-=i1 zAD#AN1|;J_uD=(qb*$*;3UaeN$a6o{G zY~Y+LC7qm{cxTt^wfA09HqK8A;4#L{W^*_ka+0Q*J$~}=>9faEZ!;S-mSP%D&&H-{ zCc=i~T-PlTAoCa_B98qq_r_T^#>5QULmz;Vn7i@ zgD}xW%Kq|jdAVIJo0r_D*=z;?Q|qIu9){j}gG`du5S3E;zULSEO_LbnoG*okacw~JcJ}5Aq2tHG|sqY z`h%{9R!Gw;$!RKU%_5lz4Ur;=OJ%8_8@4j{C^(NU~#~Q6e*kpNiHNUyU@OYMX`6pxdAM43`)y;8#Qfi2a 
zI&@9gqIF!^F~Nc$u3x)6Jo~?XbpGFX&>nG-5_1`uddWE7SBItc<0x0XjzVbax}MJ# zeTWnLWwBUzKdp5`KLnNRtHxF?rL^Dg8No8Gm#ZY!IoJ2SwH82(F)Cmz1ERio0=?QO zFPdXQ06S}vLM)jGbIL5xI6n}hb=EtGDw0b{q(o$F-O3fAkN^ch0fopUm8@dj5y$e@ zTW|mB$7g@)N50?20zh|WP}Swm`tI?vJzGT?PEV0#vmu0pN{R)5Kmm{m%_t7~CY6-% zqPzI9@4NYL{n#r%dP8>yjWB|2H`^U&$)--gO-eUzA5lRVyCQw-=QB}J&DHA%+!*58 zx-^CuvkLWN1|WD#7WLxM?B3G_`J>n0i~8UBx#w;9_Lpw|{Xg=hAN}=r{_%WXf9n3( z-~OdvefNCtsQz`mS>a}0zFCz3G5U5f>JJof=O;-Y$DjM^x0sC@KjJFd8f;xvmwQlh zM*SMZT2DHjJl;Oaaa^>swrxXB%w()J*5)jm-Q}Yv4?_rKkdaoaRqO)8)hzPLkjq6x z04S%dR3;xz5e#9@c^t=_vmr(xB$IL|0EfPN`1nzTyk5?%@eG_}Dw3p-=2B9QF~%4H zpswqwM&4R8HNbxZO91zrdS4tT|khDXAA;g)lQFVHzaLyTL z`(fB#T-0@|nGXA&$TXF!>vp%>5aH?B86s9yb-CM4*@e2ULyFt|rgFZmrm_tfhHkM~ zIkCPryOaz=+cslP=cmu#{ouoDG24y3A+DSQhV6cL7>7+7?zXpc8S7?F4$pTNyMAEr zn#GbNcx-`?hzt;q`L=B>*Bh|)adr0ed6nqa%~y;uPo6((n#Qb}!{HG3dktAR?z=$% ztnpatp+8uM&Z=Xhg1gI(scIT08d3@fZ48+u&gPBx6ob@Fm0}pjF(>W2ppXrFMopzG zm&<-QOkVh849^#B*L7#-CuEFsRnA!vjM0D`Qy7O~J@d}Dm2t+Hecvq>i*X!JpFcld zAHDhd8=LLf^QX_M+HJSno40Sb)@%>^x@o$hN5e&Bs;T$hua-vwd^j9jRjlPi%4*T@ zm}1v=Q$g{5zxUQ%5iC?D9AJ!*(9CB`>lhSx+ma=ccN5n~nEX zT{Vb!d2yktZQFWd5BomFxL7P=2vSO2*VFIQ_x+u@`!FvbZR%6J-A6d}ua}a*SIs^Y zyA0I$xOuI7_*r`QxP1h0_{Y!wYUMx2vmZ0&%>}3{;{cc&*JONAAxiiit>7CChR7f% z5viSBt-LkjoXcJrG6a$>;SL-s84z2KtB5u;jO1s!brLeS)pu`%qnr^KUfjF@qLKm< ze!^DpnWxB85fNf0#F)~!*)<~H$rPG?7!lzbZ*r|qjJ{{~z!bv#4W`hGo-RcJ1SV3D zq5>~BV#omil1y_(1OUL2!61Oj%ir|HKe65QLn-yl>BobrLeKa`W;CifEQojfCK=f016BMFWiD6X~wco z)@Zre;77UKHt~Zk7g;J7^-n|!IeXO~YRHz8Xy#J&!-sZvT{ zY!)~StkCw&)qP=xv*X`Q*ZSv$Bf3`Vs?7a75 z*SA&unYTZ^>kf!`=jJWPWGesK>kszte1qG?!;e0kExv!bTC9qmrrong+qG*cWRtV; zHpboU*|E~oZ+$SF;L#e0u|(@@g*K0&+hysaw~XMWBAivYM(gp? 
znA0ym$bbIpsebhcURk|m8cMsfUwPJF{QCaK?;pLj*gl3w20i52NWGIWo3&H6Dn^`w zj7F<$0e{ChG1t1*wSJRCL>Lv(0Nbpp8VQp@cQN*}wU(UwGfhM44)ftM;cLtM{;B`T zH3b+Bha6^Tt!n@vcy_}RmzZI;U=vq>q&I56`#^sD8a2M-=p zRdsoJshUz6i3C-@<#@fC&zBqW8kPc; zpNDwX_{vvi%)PO*7-+vw017nJZ4FFpjVdMqZXH@G28b&}%&ICywWy$~7J(7ffmKtt zqo66rb2dg|5zE0R#p_G)92{B*&2vNRmisD{HXQDC0OvNvdj)vNDsa>sH%(=z0{HHLmY_ zlTFygNj~%(!XSV}A(%JgPT2cfZV3Z2%9*fB_88W`9 zNX-~VW59T5$(1bgrm?8Q_R=@LYMYn?6kl>IIZj*TF^bmZ_&KSei zD1azfMMR^5);I!$B8@i^ai_Ya_~lOt#2irhyEd(MTCU|T(-@o-+uk!*Jvn2M66>9IgBc0UbUnO z7*dWFivTH7QOHSDgh-~)hKf$p{K--CxqDQSP$QxHWNXH*Pi!=$L)?xs zII7|}jxkmS8N|s*hRD+xdr()Qv8LV(antR4kb7|R&h6XhPoDd=BTskl9W7=})ihhr zbaQR3y*%vB_xmBmkW+=mARn*Wm~>mBu+DFpqdDI>Iy&E6&Q@(qWp~&QT>^=!diUmW zNO4F>nHH;NKkTM`Y8K<2>-s%e9^y!bJo!@8<HW$m)>d~{OI)q&Ea^9wryKd}cBsGg= zRX{~c)-eaoITt`8>id2i$EIoK^F@p?rBpNvh$0}6_b!Cc)~0P+>nlf?x`7N9-(Ge@ z)-4oRRCMQfy;!b4{`jFH{_qz*f9UsUUCoheFPa{_djGKRKRP=t0E^|)TL^KmjD1MX zIRQA^T#jR`Dp%LFBBG=*jk8(foUxW;&Ru`7*4Ax(I2?+WLr575RA~!|4}~>{oZdLT z_nBLF)W;{`<~KL)tiH9ex8mG7FteJ*`W3nfu8dvW=2(72e)R#Lej|79Js;uFJ+0>B z(X02WL9{>MqKX5=oJ;WJFppi`q8yT$&+-t9X~|bIbkvr{&2V30Ab)d#Ww05#18+B+ z6Qd;|1{Q>16(BM|t5Jah)wPcB-AVifX$p}5pvaXwk#jbmH0Z7t0#qhcQiPHV8ETul zl#;s60kWl0OHwh!So9P3`mgo7Sl`R^z;CaLj3;OQ#vMDeF{e>O@^utzb??b!;)BMu zqlL&}K(5vxVQLuvP6XIB4(wXj`e)F~+}!`)#9RS00uc*>D7eN}m)ZP{5Bq=gjc@+! 
zPyO+?kIDtROuXDF3K0>iR#k!jHW7@%vpGK6>=<4gW?0*p=|!!-uAz1A67|Jsl$(>@{6%!uuncb*+{5CED4N z$VnWqoJ=k~Bdyc78O-?ae!@@RjY^w@ycvxBtNXpZSwdnopf# zJHvi<@+9uhrK(Ft_H_ZcQQ0FC@62&kSM%}gHv>zQpFgG#4a3o0TalSQ_I z#7I;A6_I)B%9~);boJ}g{^@k>zJm6!nJRyQNM!n!#GK0nhXH0)R06U_ONlYXToj0{ zA*8EeoQmY^oJCMXFqNOS5vPs&oa1*!0pMjX7s9koE-(I-YKghf6_it_m75oIA!K7F zc8B+VsxBbrrZxTkK!nUJQdC6*w%g5oK96w}(YmhZvzdr2m&>ZE&d<*$0yrUh?;kyS zbo=&g5$U?lIb*FaMZ9I_ocA>Vq*Pvi{q-1Q&T=^P#u%c?dpn=c-+1k{r%#_=oNo%~ zVzCmD?S3;1!+spv=8iL#85Q-xg9jfy`mkLX?>&)Wv{C>KfMK!(hNiWItIq;a4Zsu{ zh|#(kIWvTz?|VgHYmMU~XpObb*{UWs85qGRP>Jd2=*ZYBIL1U1B1FT)1f^u0{uIe5 zj3T50WDrzU3<0yrDnwMWh$u6v6ecoU#c@!<`D`}Lp#X%L84v(c&f{)x*~A!gh=>?- zw%$4CQqBTeQkn`{1(1xXs%qLZ6%hov;&p&B*#eqN$sk#ZfT+^+W7C^cN7}k6D?(UPDA}qCaL^NyK5W=H}AARb0{k+oW-TOZTk9798;+&SprprOsP>7rQ_F##=LV3xC)go?~8&O;sm44a*nqCKXR+l6jmNHC1)PkiS7-+cJ(F08A5Wt>-# z5>us$#uTA0CAG|ogutpOf<+7%RV}2`_PvNqhpsUO$-ep4Tf5z^8+tUhswy@X4HJ=j zK}-8hDVgbLffslF)7hbtjWO%>^3JUr81>=9M&T$-#F$|JQQue-X#yBDl<86QVwGp8PIqG zj!iIm9G!DjUB?ji`+bl|imUlTa|wsRBO2o6Y*k9Z)@{4)+0)BhQsw+=wMxF4z9*)c zbrCVflrf45iOk#9TT4XC<&v38DYM1=-6tQNZ7%QMzT+x?^XARt)p6hV!!W?Q8M?#b z=JCbu;&3<&eOJ}DlBnkFtt(Ujzrm z5Yl!}29J(bGtX$4nPW~_^!WIAh#|y8L|qtnhrO-5Ar9k63XQL02x_Tr8e-_fkWzBa zS+*iV*6fFlnVHE_rBMvQ_HyIdOqanTGKTQ}M;|u6S{*G<&(4l+tTXDYZc8cKI6%x> z)AcFro<0GlTX$~n`ffiSQpsevb+vI8m`W*tj7$Xz8n(z&mGNXgO}3LU#u!d1RaG+~ z1dTD%BP27^^z(%a^TlreZ2zRT_083NKy=QHF;49f&z?Tr?{>}6GRI^sPv5rd^|~7e z1wo{$s-}{Ivx^Jo8~_|2A9vmUs>eGF(+dkBIOlRqSGy3*RaH%|78Nwlj>YC~EXLn` zaPOvv2@``g_5g@j)kDB$rFnZs)sgpoRR?>cukX(=-WuNtu=^Oshq72* zJk%68j)ZDt~p z7j&YRcve;Ygk5c}b*)ddUgieA(-=U+@0OV(YXXwRf+@xU=MAT$JCDs5zy3|YFaPq7 z-gZCv`R{99Sw3BSHa`8}q7UC*-v8YG!@9I*um51aIF%;Y_;JCzsGtV9B-;(_aje?y z;`Oh^GXLh2PgmcbHTF}>+aGT}yukT3=(0K~=3WjNL5I+T;KDXj1URK|a`H?{n6*bC z_Rce-D3nq1J`5$MBY^SH-B_-R(yE!&b-meaCZO%v*%=X?K6{=+ti5X%%@~t24gidC zr`z+({oXsyMH#uz7n|3Nvplb$6eXMc9>)%g@HD&;MBo>Q 
z$KT!%A_1zF1PTa5M1Ty60J$$yy&_6-U|A?41Y!GKct@&;fS`&11j?!_qMtZ9R@A=Q41f{RS5GhUEccHO>dexb1Kl_LrM1=w}O4`@;%9Ij0nuY(ao&R3LKIkx(`!gMP3ye?09s&r!2TzFd_;s7&)&{C zHZJF^MBe*2j9u4RYbS6}-}g_SK7IS`x6xn-0g)#-(Nx>J+wJPQ4&&&oU9Z>E_v1K@ zF~%u^004bAuGj0$elxLR)FQJrhK+I4D=$U8x9sfE(b`(u4MX1_6rpYAckkY=3_pK< zPJrGH^Z8=G-*30O#d@8y5EYr+9AnrRFsKOf8&k`xzn7d!&hxq|q2FGf?k~>B<7~Mw z&gLw$dO_AqQ_&(+vgTBZ3P4#bXNU+QWN7N7Wy7dNi%5=BU{XX_i$O#Jl~h9P`cYMV z(-M&oA+w~+Ow$X^DeMjdBaxJxv#J_nr-PxCqM)7#0aGlZSXEUj*;YQ}V7wz#L$O)`i>0^bW#CnbM#D9 zNJNBHLA8_!mH8iBeD;4nJNy0j;)iF?wp*KJ5xeKr5}jBAw$@fv)pcDxYZYY}xpkt(vM%DHP3(Yps*bW)pLA&KYYI!CJoe%KZ?+R3AMVIn#NOQc?j?B&JxBC0o@^ z9uiqnEhVLtN>w$6-IyW*5bAVaL_|z%2&DuCB{V7LD<_OdRIe-Cl zUWMV{ES^3+f7Ii><*oe)+ZerrV{V_te8Z<&%)R?}?;JhIFn;{ecDvtP?00F=f9{C> z!jInl;!?}yNg)X#8ZzoSJeb1hfF3{Dyz$o2pMI_S-#mQhnX$`zw}b#vTr*IPV^*nJ z6jdUHGM(M2ibAT6j`}hLT1i#T)^5RPJ zg8G|FU?u|1Dh0t*HKx+-QQF(Y{40;1|FO?j9Ja$g_=O)ku51;@(A4e3a;av^X0}dB zoJ`#|h-zGs0t%(}-df^%=_@;-fd~)@G?x%YVIJbJ+wc24h8RR-IiF7>U6QD!Ofen= zO(~7zIF9{tIk(2AJj8uJ_QPbV@qvL!RG4|{GC6cz)zouA6=A@(onb1@xz%EMc6O#x zw)=foBBS87qm$Oi7ca_1Mn^5rg!%$Ox*ql6qK zkKMMY4q=RG%tcc_94+RP8^MNvN-kj>A;z3iN+kn8EP0H<7~{Ra++C)Wn)xiHRAOqq zGXmoft9ljUpelI+{y+*4w19dRW0GX=y&*tjLkQl{Vc$4?dm$;k&1o z+vJBsIcjF#eE;L_bKZI)Q(JNvPcl)cnh`d2KLFiS&w*TM=5BmkY7R$l&UNc?=#@U8 zn>iL8H}~SVhu#uoBmtC+2%w@={uu_5q6k-8XFy0(M9DCuPZlJWh^ECpoi2qSnl;6^ zeR?8ctdOiV88yRnVO8Yof#bD4Df$=I9>|~^`uzCAD9Y2yPzFC+?=61M zgUhF1`5WK(@<)05U-;tdpKTAn?4B()yU)hqQE9*U^qt3Up6!|?fp3g%bHQ2(gtKBP zgHKhcZ*A_+{??P@FMseK|LlGG!TZOTla#7*M6lg% zW9o)68}F=TMqc=(0D8lRaoB7x8e4~)O3qZn@$vEH7 z_txDF5&2 zt?zJ%!yR+}-}~49jW=&M2F5rXoO90Dj1-44ja!EW3uq}>gsn$g0TQqyX3QXxUyPe+ zA|y=3umCWX62o*$DU+#-0>A)Mr4|{U#^NOj5=`~LMI;wVDUC54_T$tvONgjKh=gDa z)RmiNk*X!dbhXh;tN@l6m|j#JC>dsJ6gi8eluIeoOPZRBL9P-H-)Rp_V?uaQMPLXi zQUP_&tX8WR9m`ivz{$zU>FMdKZ@h|##t<_*Yp>Rj5v3GnZknd+IuRL$A&mh=!x#ae zY3ra6LW(h`R76Apsfc&AubP+&fD&SsB9g1NY38%tc7N!HaU7=!OUdKe$<|v}`P|Ox zzE9Q~)nr|(NJNI&vTpeFsE19f6jaG(~Be z#u$@x0x6aAh@?zyr+dut_r~l#q(|>U{e|6bk5Fiv4D#!$g0Ku!N 
zs~i8X?X#bJg>TJ|c{b~lXx+HBg3*;UUR+${oI^@%Z9k6t!{N0zzi-~m!w^Hrgej(2 zQgo(Is1sFY`h2UZnix2Sa2&_kY}U?aQqtFCDgov0TI0KlrDWJ8u&(l|7mZFa&Ie(7u7v)#$@ z`bO^dYqS37FeYtYX%Az{TsWn4|Hg`a^Q~_@yx}R%;U|9d_8)n0^x2g$Tn4Tv#HIK9 zK1N39VpTz9Wv3hg{;l8tg~z|})&J<}vpchVbaR0ocgZ?+#%7Wcr2+r|QUrk_I047N zD7`GxhL=e1B4dfnY}kmDI9w6Wnc0x}CoUKI0uP*j>+zhRSj7IY+jl7xvCbsn;%c|u zqpcoa;#iv9_SD&?A2ETcJT^^3)(pe2m>(?{N22U~4H?zYAq}OJqoZ}>?2|{2`*Gm0 z)V?A{@BLx7X9Q*>G#7n;=sO@pqM)hd5F!z^ zZ99%*#nu=qSuZ!2)>^U#q_pk)(1)S|M9iF00FdQ!9zr-?teDw)cY1p2t!vwP3egzX zb$c?*&Uogks-Q?oCC3s=1_tMxwdQcKUA1#tS4p7=jpJyX9fkn_s-`y92?&8C$b`B+ zULj&X?CY7|>^4U?*Zci`2;tCo&3xu6JLa@I?AP^7rpP;8wcHjJi&uC(vT!y1M^IJ~ zaju$-#+QZUF=j;EUR>OmuU0pXW61zIo6W)y5b^S`xxCm~+O1aW%Db4-Y&J87&URf0 zV-68es;X)#PXtG+#bAwdRo8X1`OI3&)|OIomagB|b&Y7OvDRAe{l(ciBg|&A{?HK- zhys8?1EP(u&i5N>^1a0kTk*Rm;bEAaxyAYNe!Ey%S5w}^FvL_AEs_Lk<_q)Y(ZgT- z@%)v?{dqU6`fbM$x3TQ!Z{+1GEQfHm1FEVtmRVchxC8pY!o&Sol+NFH_dj^{1AqBP z(y#s_|7YETbEr$FZns@*o@k6cEiyU>0Z9c#P!tFi2+;`s(?VRYApN31R}LjG(I;>7 z-m!U6JA4(t?(3LC5fu83R)vj;yu z%g4RhGDAsNqM&Z^Z)0|kN%a5_dmZ}U2e}Wnprg@Hil}TLRiqU zXEq@WRpn5ruIuIT?DS;A6!*Pa$R&s4dSwdK#vO*z|byI<#x3h%0?e`@_$8}xLyz%`wdNzZiqzt6n?Y3|ExQ2wvU|bwp+-CwJ zm`aHi2ykM&>Tma5gdh;00|cR>AR<8l5zXD#ixbHM@Fw;hq@(^sxlLhkq(hNudd0y&b}I$Y!bu+NJOa|8Ha&T8QLL#XnQg##&8-qsas{7xy5|H zd1ypP%GrMJp|~NB*=FzBANqsK{DoH!zxHeYm;d$p?ji&V&8_u={_wm%eBCM0^ktGvSmHK%Tq@MzqGW^)D&mFRO>A@jc*_?)r_l>uNy~u3$u} zNQRj?3ovtr7-JBT)oP_`iQwpH<-N^0d+)8a({8ntG9?dGt~!~|&(9Z&g(Wt1UDvfD z8DkJgRo{L0-T8c8N@?4c*}h~Vi3sx)!cZV6IXTxZR!d(uXQvlqiez2r`?_xXF5JJn z8pqzc+FNU_zv7*cQ8r+woJUrKX@K(b_FhZYk|n31+c&MNni}gWkmjOftgmM=X96Tr zKtv!kY^MNUrVxT?Ht4iu0AN5#Ip8K5nj!Q3jnHWRs^O}GJqOG{b3{mUo}MCX0y?p z8t0sI$mHvWtr-a~A_cwNKHD!|ee2JC-kpEY|6E#C%NY)%iA5EKnH239Y%XbzUyh&p z%m3)fgJJVuc^ltY+1>h{j&u^Ps5NYj3>Z>ODa9D$Fm_7g7`ww@m<_QZ*mnmo41iP9 z)pSx!hrKh7tpUQvj~|Q3jT<)*vG4n)^#-W#4{7Kjq&S3`CuBqvqUj*aAV_@W=cOR?!p%RpzN?6eX&?LAw@UT@m%Tra z)ppDB!=N?ny`SkF&wS1|-l=YlUDb2DyFDS=O&nr|1WbYqQIudK2pFRlf)L3J2~%Vd 
zMHVIj93*fY%UDRUO>FFT$8PsjU31;Kb?0yX&UeOldWJo%#UJli)#cC)TvJWmues}w z^T&IR*4bub#{E55o{X_iV$?4LkyEpID zS!|4G%qp1@o*&IsF#~RypHM}WPyrDMk@Wc^p&)DxDm=%9XC$&9np0N%UOG+qF5MXM zZ!3a)^%-- zan2brA^CDLS+wh`%ZsY1s;0@cXXXjhQ{mo```{pelLtR#NSq)w9eb|oX zur-sSxxT(GiXwx?ZnF(gTdbHY!lhYk){FJZ7^?`A$z*45t^j@CTQ+4`IpR&f8QKmk z13=6BjBcu1#)F^e(!_5o!#B}&S*5cT3;h#em-6$ z|FL6(wHB1D{m$G>%EnoPCQ7EPsv2XAG3K0!Ob8*RWYB^tAu?bFaE1}_dU08foh=*z zFE^`{q6=>5+oG_W&8DiV?Otzb-awQI>asG%iO6U)Qq`EVsv=Wq%E@Gsb1sY}A_6T- zmr_d6x7~U)8Z}k5U25C5=d)VH5fBc1e^%%+#7-k7WN)9<KYlD9%lAuu&z1+&K#p2uFbLil z$HsbDT|DkPwEKrrOvtaCk!4)xC`~bzuu)V{&EIK+P}Quz>loi3%g6Hn&qAmr+Zc05 z*>ZLU8&Ib7kj-eZ=}~!7oK;`=UoGzZ6Ps`R3nrCE`Coo%df5*@3{dm%zt?PotD}W7 zd(EIoWEGR7o&c&|(M`05v*L4q`C|M>%JHXfnK$pf)49fIP;xos=%Xb|hE?(~3;@Jz zXR~SBMadjfATaB7ze82mwWchlqj4Aga?>K>aaH8tQJ&99XqKWVh~V&W|9o-b$|Cwi zfQ5CT4`J|q*F8FWR^P5i#XJtf<7ZFpom+0+T}|uw=wPakh66L4&q^5{ zf9owhG)2$QuW=M?JS?6yCR{-Y@X;Dp{a8MhkLCY!5d;u@j*AW2d+&!K_r8m(!-Y@& z{MEzs3qSwUfAH0t&wRiXi8XXy;t+-bxj+OGCCLFYlZ}c>3`Ck$B>~8mJQfl4J2-3r znFv&$-}9d5UNtkZF$NU3DN95|=B%1T0HCZ#A%rB7N|D#*W9h_KCb%_q~5uW0Wv>Npl*{vjvX`i zSz^pN2WBejGUx25$~kAy=c#Uh6eXoZInHJ~qOk7zuIp5@_uh~g^PB^g5q-))qKYx* zqGSM8-DVpV0F@Y!2w6}8iOA&bb|Y+)ZdchnNS%i ztLyMkM(? 
zTl_OG9X$qjakcydpE&+!KYi5sPq>8Y?#4_#g||PrKCEmovt4bHLoveHh~$n|>U{9A z6iGy@U`Bg~%LngWth%sTzx>+mf8%G(zx%5X|AU8%H#)Pse=;wthLb66Xa^#LWDtM^ zSp*e0tB1S|5nEufZNO9jNzY3i28o#gG-*nbo}-g}k8IC?IVbp@n!$H}DpDAtbEd9q zv+5vbXG>w&NRxCpyV~5pfYKmp&vIRB5JDIcdcWiSqke6p=2zJ z!dMbXKBS!Umgi#&E{}*QB@xY8`oR-O-cr#buGecn3?Rw^jjK{oUM?@a_tkhLAgjJb zqLErt2x|3k+vJ6f*={@s%!{NyXn36 z-)`?!uG-nzDN8qF8aD07RqW*&BN3Hld9_+k9bc}yVHldG0Uwwd2?)9K!?ulRv)PE~ z(f&b|su<&Xxt!1E#<|18ar2#ezjDgoyp$y)v_pxO}DT`4F*%r zDJ5ID5Ms#5v5O&w!4n{fk6j|crXCw(+M%snb$z|`-Wy|@s=6KqMrDN6^4d8!olKsc zovnwi^+Q6L--M;w=uP;5JA33dWb?vH-F$Kb6ouf^qNotTaKpm|^ zb}wE2HJctjcqX-^tBDz>;oWEbog2IH#*g=HM?>$dOOhjk!B}k;Aw94Pl}T6S*o}`a zH@3Qbuultb1^{wc7GOA!6a_Gmrl{MN1SCL0231rQRrru;aoHYQaCX`D6K5K9=7li4h63G#0Za$SDB< zaoabei*+@gC>epUx$4Jzf8YGv_4Tj+H|)2YFg*09*Im40-Oq%#zc~5ndUXcLAqi+i zLB)t59NKUlX9lilf9eK2y!C%w)<0wI%e(EH7x!H;2}49?DkMotBBe2^%52)Ui_ZeF z9gT}&-8QZXeYYid1FIKD7 z&fad{cB*+YnqDmy%l&>FdWK?P?qFe?>8nBkptsWV<`z-qlAHfhdQc4L^KE_xSc6M zP*emL2$;aCI@FwuDCbBSeBm@D!x=)(MA;-`5Cq0r3a=J`)15oF@4Tkr^n7`(lahWrj6zTD%arZbM|>5|d7LRSLr2mOkCEFa6q@{cK*fOL!5gE1%XeH=WbJX$R} zUF_=BpZeK9^annBce7ZLjsTn=qQqn+F%wYUbY0_2QPm-4L;uk}-!0YZy-b7f9~K%#jI?qQ7C=CWgmN<3=Ds(8Iz^uxNYtEzVEb>AFJ zoz3(4wD0nK3}o0i2W&E6k_1VzL{JhD$x%e`dEq94q$s&YRK%hxwR8%kVv`3~5ussZ zLU0AMYnLP!G7>oE{xTr5^?yN*6=34(TaX0yq3vEDePn6*$vw$2(?7K)s6 z2GzdnL_`%;)fj^as)Ej7MwI7-Xb8-h6^V??2#e)f6x(h9jr)0O40m0hBB8T6Cy6O3 zuu)LlCOWqd?Uro7Zh;Dg6!MmMfYjjg)^SoIRV75{oG5Q=SE?#8^@$3#*xcjIxLlZ|Oix zD!YzY`r(}eT(#BgFKqbNzy9>keR^|zbi`~gm&=s1tIERIqO_aMdc9g|mYg%NNlA<` zY>AXbWSb~t<}G`Gs&4V4wnd5e?zp~l`@brUtcgi6AYD=p*`w+_Js3PqR5`4z4^ujn-b`-Vvs0#Z~-43p8$ zIX@HI4c+?5_2$me-hcNGzw`j}Kn%Z6K0W)@2XBAr`u=-e`9O(T?;akov9@T~I6?*_ z02Pqmq9OTCmTem=ESc|46m6?t%v@E~HaL6_JLvN;V%UB66VTrZF2*@XS)!;TL?neA z#i!t`b6iL{fAg|8Z~Gsg2%Zs}qTHFyXVdv~+*~bIPVN3|@4>ro-MVq} z)_ni+>EkEfCq$OPd*62*BbmBLNkRxAN@=Vkw$`rO6{DGq$8}j0MIk8@yUG#U)|y2=%$t0V>EY*UR~U0rWho6UMMogm`W 
zHS6_S2uGvwrrQ*@C=3e%W?3zl2RpkV1ZR17zVqVlp0Mk|D$O^>xnD4Sh&aBq3txmXL7LG~34>0F=Ocuc}rjahV~pdvQO&L}Ip;X2GuB$``vD}+Cev-{x-5#d@0UZ{_B{}$D8n!qB4bS9 zNR4u(tXO19ZK z2RbjCv(@#Daq+mhEQs?D7(T>bfpQc8l1jihbXQ97dx$svzomvvh@3$g94u za@lWs1elCRF>45+>$)L$TNG7Oi$LL?!=`OE8%Ee32^NdRFbw1I*n5u(o6WkgrnJr= zo$S6YlTW|1zH#3kZ#2$KPkoNAWKN|F!vN@qi49%38|;2{{l@L?S73D9^$(Zl=V+(? z;;AdSy779#vKpQ;J2I6fpO9RZDxt=&nN%Q@UI#Pl%`*vIm*d@QYlU**sJ#V7dt=MV3e^IyKYe7t`D^J9Et*gJ1-&bxDK(Gq#hN>EvL*+qaM zu6IKPrJQoc>CLZPZLZt;2>LcR{V)_eg=AT&+GR2hplFwT*cd6&jVnmrw*z0VO6T z1Q9{a|3BbMcS(gzC<~d-c{EijBtnil(xk5R2~wNbvKh@eW^;L|{M#SPRPq+lQwep8 zScC+MF6Wd)R1*P`QKC!$qAHS6$yn0lIC@EeMgueoVXEYET3kG+>uR^OqiAbm(&6pC zy&z*xuQz*>I_x@2yjRXX; zSiITprpqTgw~wdAxGv1{U~0U9$LEF+p^=ZL2R@dM<%JbR6jTP-lDqmCa*QUcNt+55 zpS(5x^vf@`T^dp*Mic{49WkV=DcHI$N@uJYQb00f3=sk|D2aL$$T^v!-10HP_hj2e z1cA1Ra$^h|+Rm7(>Nf4RB_?2`EsUixe6?Dq=#vjAMi6nvGGK~{Nt(iyg<;e%Y(f?V zvW44%?{Bf-MI>fLL}paY+hjVkMW*c^D1u}mwxD@iKOhkNE}zZwm46o`1QF0!8$v)t zW{Np&e?vqmhb?=-_P2njkcnyA#EpnWQEa~iMUpBw=eGT}q%aJ_-rk<~{^H_733qpQ zSF2S@*;v}y*)hgEe*AbCLI`1hzgB@5V~Wu^6Ju_3EQ-R~!Z|mD971YFlbyL)T(xys zhS{XubmthEa5|d}onBmARO3lAB}ZHC9tPav9Dc{de^lzaMlvzyqAYDQ>U}V15Un-L z%!st5(G|&Ae9oaLsx1<|Y=QhmVvISab=#_9J!*1H>*Zp-Y8h#@TFvV+=bW-`$(toZ zAG*HpYg=7jUee-T#A5Yr5LKrMn>v; z#mzct7}H_PXV>fT{AARx-a38XU$sM%M_2yedH>2)H8h|# zf-y?Z2S*B}ZA@x}iqB&OJ9tmfQBKdCECv2sNcZQC{j7Op}j8oHDcsT$^{u4Cj7!Y~XLv925u z`QT%Wh-jH%3kEd|2qd70Xl=1&8bH>jDpeJcx=mXbyD-Lq2*v|mqhOl|1Y^5W?k0L+>(iV|T{RlBo!QdxIx&N)hkEFlETCi);Evz-wo z>4zbQIM^_$Cr>ZWC(S5qhMG%Gx-*)HNRpUyOe$V=*=;0c=bVU?bye4OhTJEgQrex& zP!y6FQ*812Nju4Lxw`)JC+8`ODv~l}9FOK_XHO-0E{RKHvY_D<0%aP!7f`Y$CCNz; zIA`6iJ@_OUP<1kyU0*D%I?cHnl_D~#8zNe7mV0}9qtWQ?M~?^10R#jr04q3Rqm)t- zg>Ah(=2Y0Ss;V%=^>SmI%Gx4?P&&8Ttoz`rrjBi2)KyUw+rU+oWeTxrs%Cn8{_JcR z@s1r$>ZU&2y?T6F6a{Z^eZ8C(^?Wq(-iPFqYTtHXS+i)$R1y+HRMqTbO52$=&?-@VO>_vIcLmdGTHc7-aZ?j+U7i+=R$iQODyV22AXqBzFju{^0;1~ z`5Pk$`=`HtV^plKH}^N!OPMsQ*bbB3gQ2T#4t<(K3Q+ca?@WZEh+QVFTK8&IgdtJ| zG4#HxcBW~QL*r{Sh>QO5i%hs_)zlQNr7#=l9T~3r$p|f 
z)q%gIqQUQ3@b(<#QB{Pq0E%U1MoC#xAoFoQ^2hQ~5hK6=&u=$^!UTmvdAa(+m;d2| zTdzDhy5Yib9-Xzcx)FF!uKY5i*FZ~UKjt}geh>GjTW zKeDloO7ls>8KA2rx_O@K_?4KRp&D8H+oZh^D1_1Z(9$Xz9n_S&~_d($=n1{vQ?)mPW zpl0{k!{dwlMWT>X>&Qyjv|UhhiLNH=f-%c_H@wA48Ai_0H+j{U_H# zan^h$9p6GQx9+|5qTBVM^dRyFcRtzezZ7M-b^nc{^Tn!cp6%V(9RBXE=j4MozgJHU z{Z@|?``+ExH@9Bl+~0ci_WbI6UChqsx4!b~&;P^W;XnVazx4Ub`)@qHKKZHse{}pS z|8w}M-KXt%c?r^99q%mqY> zB$T%f!tue4VwUb-$YSM~`Q#woJh=2>@^BcQypb^3*KR-V)<=`QJFS{7eRekf8~5M4 zXU)#*pB~=7|I(wCJpSh5^&7P*u9Cj|;F~Ai1LtPf6LUQ~_T0Vx{)6xJ4*?xMdHj#z z=a=95x*b-+dVO$md2lN$?mc~Q^z>et$MqB>>nP#vfg<)QJOrsKfij?ke(gget39d7>^%6-M#k& zpnLl2r=A=fHG+rt9?h@LR-BhdH=f*hg@A87c<09HuXXO19(?X6mM?#*vUuy>mv-;H zu?YV0OFy#OJKpDkAN&S9yGq)^r(Sz>=N9h$#O7;nfAXupawmj`qn9rmneFV}c?M-bgsQdHnCS2F72)T;?@bTeE;BC`{>LSX7{C=?#5nkv3uhKf4V5i zOkTNNzkWMFTzu<6diSYn9)0$u`qdlI#^IYEEFYf2q$=(n*;~6s!tm~+)x&4-{&~IM z*qEfTtFOM3FIJRfeCN1+q?aKFQXWubjY*-P{gx?`(6vkkU?GxmkYl4)>vd z|55w!6q%-XZrIx=Yu5PgqxkF^LaJ{anuGD;`dl7gnn%}E82p?{1=@${Rq$WFf6htw zYd4>c1+RbeYvKIb6=i+%$lW?wGx|5)T%NB>GCQx{u3tVDj>|W`iI1*QXxAsx@^CVt z>G19Q@$&k*ag$eW=J^PH+I;i9_0w}(xP#a3+LPT*LVV-F>d6_TJbU>j-QHg;7QEiuI0I>7YO*)<@qv>nk=$>As zHtf9qvblTI#o_WRZf8JUcy?%G{%55Ct z=BV&0Ej92lu_|-m5q9WJiee*B`!p@7~ug&tCnhAOD#j`SEeY z<anUHaWUszs09|H}OO;hKu|-QA~?R}1X+pFG`r z@Hm&|@oO(X+<*Bu%xAy+<`;ix`L3Mz>E`a<-B;FUZ?#{2I9gwO5BX&O1(f4$lW^++Xs)(@Z|2z`nB5|2<^A;^^Y$~u+zII z#mmP%NO<_HfA0yof`8=mc9PX{lUFh(&*)r@}-wz8vOfb%MTtibMfkJKG{)* z_MLm1`{y+_^wRC|-NWe8@b>#x?|*rNGXC`4;m#3vi{j0Pt4C)tc6#^F+&r-%4v(H( zzJH&Hc3*kf-n!Ad8GZd5a)O_}}pa0=6gms_Zd$jq* zUyIW1-aTHv^vYo|iVse&KX`y4?B2Z@4tNb3-+U)Lxo&JVxw*I8EsQN@i)g<2AXUcS z+{-uSs2SgV?9MmclhAzX(?PY&XwDbm*)H;?>IvyA@s!ydaJB7CxIW+kh@`%jCr zwv!?6RB$}a8q23w?*4f(pPY}8?%bF?X-D_ZCP9bn-OYo|*xXy5-(0kKx$z%d@yTv~ zbJm7|pI+{qwt*qs-nnwVR6Sk{<^6Y^qbrlo@L(!tx}eR&XNJh{jBozBa z{GctvGWA!B4x88h@XquozxU+u(UXDLH;OlAICy^VqUgekuW)3pj*E$h@^mw9f!Qug zTJDys3ST*m^(usZDE8;?CvT;ho?h>CS`03}R+TRu!QkP++2P|g@97)mgE+BMfuptX z(u)pxR<|=g7yp?nsey+foSQTpOgQW4+3IKmLk#Va>kljJhoeoj7pqnW+)MZ7&4itO 
zaJnmOEe~}L2h;P=zx1rTkxGBj4oz4xKKg*CyBx(}weKfxb5JMHo%7A!RlLH`?v2)yg1Rt$*11JX ze)=vlebP(+;L^9d>qB*RDx)GDOgCY5E1KyC*J9G+8{FB+D@Qly?>%tmOUJaBxaEEod^>)6_0nl_ zmFg>1yMOD5^ix<|zqPpL**kade0BGSqQBmK^wv*2TM*kPFW)#nJn~-dUcPbk);Dp0 zCpYg5H)af%!yA`}fA??f%xCvUkC!3jezAMj+T$lUbCSP~&`*1$YMSbf-o)4cM9q#2+KfYI9euF~T z)7|CqjcanB{NSDM&w2I1e{4Ot_xSze#~&!z$G2a7a(ttV;nu^qXBQWpn_SIrUYs2K zK+exQ`(yJ{KM|=I=Xi4e-O=J-#0zCSQa~H4-X!nS=G%aZyLSL zTHd_(try&`52t5Y+-&zI7kL0ni3k)4ehcXfqh=#pQnj3r|KJNUPMGpYlZcECzJh7LtiIr8Nw0hWGM`b{+Yk@C-< z_|$7(Udxxx1_7$_lC^_mDNkAlFZ{m$c6$1v?29}KUC~Cx_|u=iU6s?n{`K=J^)Iwt zA6Acz+?9JT9qs!M!QXrHi=X}6=Wk^Fcli%4(zNs&gwR)wFJ_#$hzpj5=vdHXw#pU&#QUM&41N2D z!e5hm^s2KCN5qHYhN3m2x;fYhLuY)R zxGLwY2KNtd`eCrDl0p*kS$dUbI$K>XY~L|y7?<7_7f;VpyIEYFjV6<~E*{Sgb`SUV z&(EKw)f!U7an9cK(Ju7$TMxhR@19EB=JTSg3Q=kazDMuK&U@9426uUmGo{&4F6x{S+fA3%V>`VXpFMhF* zYfSW_+x20;2a~1$+3(x`U;olK2alU-jMizS`UU?U?CNo(K-rtk`s&H!NHK&oFK7Pg zZ~Ui@nt%OY{FkocnIDxbM%PQDbM4Ptqcm+;AYZNXdLxW((tv9$LTp=0sH#OXiilBx z{g8Wai0pI}D%1Ia)qS?isoX-qlm=P8jY3=ul#((@fow+HZer(u^X(@Y4F^r;Pz z<|7>yo9m@6+L^0dU1@2dC^&0sTM3?-xvU1Hv&EI`hn>-6adma=JDpUkcB6~+zOAva zG^*AyFxjKYbmA%>y2WO_^6BJYA4MG@v2A@Y<54r2xUi0!E=Pg7kZCOdyos()UB6z* z5FO2%rs#uiI$=U7T zcLtUu=LA*NtV^`WoaDP6Q^!JxeXl;nkZCgN9WItvSMS|F;4*;J`}3WfCjii$U3C{% z&Tv^5qxqCUHRYOcBOumATYD7^sV+N97Kzpa_1;UM zaiK+%dOsPBG)qd!)m6~k`=LL5##wVrqmzA^jh35r6JQ*X>cUqpmd+?~AF=m|Kpa|Y z#j^1vo7EtxnN1>#PCg&s`TNGWUwiZ0($s(C_y3`;Yg6n^H~4-<+@N(-}U z{*{0KFQ3%SANq+O-*a}%9K%pfr%RB?+&z5iuT~3;dpC}I98H`li_yROmCL8qt$t@# zty)=as+dsP36(v$q2w&S@I&o;VZ9zr*UdpTv!C+s|JmEm9$t2*ve>EbIDd9_xoLdg z8^mUGl%sW2R;@9gY2JQx$G9nM4p%msFTaAuPIX)1`u8T4#;4>53t` zYU&+BQ!So6IR3=%TfFxzhq-QCF6+{mv}tAN717kC8IO9EqHR;#J5%7eN|lYt>(#pM zGmI)T+m+E&K($@v^%7L;y7tbYD25~_HmhKh+Go?rHlPA{)7isRi6F3v2w zeb+$Oqrt=5Cpx)V73HC}fA!Z-|57{{*Hcs3A?Ya4KmXa?Z!I>ze6}7kk9j-|{RTDy z?e`@+LS;2+70pR z^ULdDNGGMk3HQqAN&^aAH}w60#!Tn)$-Dr7A!rQO*Vh3cGGS5V#f2ngvRarHQV0Xb zgb{l^cd`xVmagdFR&ay9c*_?cHBiA56+tAgM&e$ds9<#2B+h4g@WvqNzFN 
zN@&x!Aw_J;^UbmX-r+LjbQw2LHYYnfMNCHI?BX0pWlOM(;SQI+>k)W*u~PRhrjbfzzs)Crrz>&EX2de!XLf+w~?6ei(+6latA0()mGB$e^cJ7d`75lB&+Y z-l(Rkd6@>!5M#{kU=_29&xy-Ye6~QW5Mv@|O<4rZMX!VUPz-3Nf9&V~)K|Xr*Hhb0 zYX0Ik-`r>@c1DP*!=S^UglO5!EaoR;8A<@B0ZC>W56(*6ym!kdwxMBF(7sA&tfpHPoJ6P3ne-uJS=#Gl9ySC($NH_PaTwxg7;pPkory?eZ`rCqn1Y0%APL)J)Lwwj8n z%*)pLyh<@Sj*c{@ake+6yCpIa75lqizxS3S##Nsn$GH__Dsk8Tnxk>pgkdvizebu? z_1ItU?M$(pRWhCtKI8JOu)lzlh28I=4xAn5MzbQvs!muyCz6--Y5{6iS+B9uZ?zC$1pbI=wLWo!?f^ zrmhIH3&Vf%;1C zz0F1O!PkE2;a~Yb{x|6}96AMv72Jx;Q>RJ^)QvRvNM`gbf?kLxjz zNo}yRVa!S~y7>s0RbOF^DO^|Xb0RINjMYGL1%uCTRoRgOcE z*Et8tjd6>`Vl*8UlhNh!GAkOSaJkUcIyqZbQ%4$8;vo;q?vtPY%-{O;v$xLf)pgB) zBB>|YI048LOTDN`-Z$g*&LN~lgJmI%vFI&-U;M1O45f)iEtn!I$CT0a#YDtqgEk1J zY;48*$BP@Y2cG9tK(-NG8d(4^QrR9saWfiO9QmQslnPvnh%f5Z?%nCBa0U)QiBzk$ z8s|c)1c01K`nnR^Fe)Yw5S*7}T`Ej8<$!bNW<1t)>?4-1){9qHzZ|;v2CBNyY^v7> z8t=!i$9I3Tju*2@G1`0Wal6;!A^LV*i6z$?YeC_2$uJifR^M?=e2TzWE z?5Clm<(2)3#;+(v($H_=S1j6-+r`)eXd|I0=;{9mEi# zf||ijx{4@5wvhuN!V6^Ahte|?wMgkOakh5IJfsX&UpY~Jq3!yxdVq5wrl}9w(Lv8j zXfcIsa*r>zUF8tU(hgKDsocP_X`k*plh?~DfBjGWv;Ulq9WJd7F$3nhFtY==O2&H} zB!mzVXf`oOzzkVEDl}DaW;+RXLMW& zqDOQ@B|A{nkxj;il)|!CMNtg3C1ys#C<#?8fh|o|lR+#+WEqz_D*WkX>@!+Zk;&w- zFba?X&}XB{)=1$Rbc^+hMQdC6xzVB|<5VOk%|y8_Q{_A;Dv?5EiUB>0YS7@%T3EF; zvD@8qb$z{D)(J+Y;-buT4U=)BqXZzFJbyn&3tT-lL{q8L~xu8 z*5e&*TSPU0$??WOWsN1nNYPMY1%McH&e=vVs*BB`kff+wX{?a( zj=8EaX^cpsk#e-8u1qQAm<&M4rnj|P5Yz;USxkV4xEWahu&NLQnIB$X9vtix8Jq$l z3PCiim2=LOO>qcN*s={Po{dad8e1AcK^&T5Ge(mP8Fr#Trmqba0Ew(CmaC6jJgI33<}IhNG<~~O6{~X7SR%hAu4gI%4~9!6r2%Bj?8Aa zbmi#B4}S9Tvx~Rhf2$l+1l ze{JXJAPn85=4LkUfJw+9=AB7kq8uTTv@Oo7W{-dLwcUU3;zl6@t*<~OIwM8n2lSXr z6crA|ERA+h1d`gMxshML+IcIshkLl`Roac@oW|3%GYRWU@c#VXBQ$pR=)f4`IJQ?; z{kmheQrKJ>t}P*gqD$Cuva_aciX5{>>62_?T2LBsnpceku|iaqyt}iyI31bFS*wIP zE_LDrG%68N31po!r2|(a1xe8+RBOQ%W@nC2*o10L95q^z?UP9q6%wdahQ=ijrJN~e zOYDs?v#HoJMqy&gS&9PpIEMtHCQCNd7o?2=)dG8a`s&xJoHti1AacqC?r}uy9?>+ujd3Ym++M10m zw3%#r>=C)}J$VthP}5Z3pFBX$xrVCo7(HlFg}N5JC9x$r5Pi1c 
za;NEpk9&Utsi<7c;X)DQppd?7lh0|uCRS5TURLcz8?q)fR1|Kg*s|4_!Z4U|1FDkBZF=DZ|v(B-u z_Z7fNipzG%CYZd4eIgiQxDK9}>-|~3T=IN0i1aB`fL9PM)E|y^PRjafwTv-diw^+7 zr!;t1S8g^JP!dhoi+R6f>bB;t@!ZLFn&sqqT9aU8m2fyrARb4kndopR{ zytj$7(w(?l_5s1%eJ;x?NW`*nr6M~14yiG zXUl#_5kdi4l1Y|*NnTMUlR)<2Qsk1*S!=|Z1e72+vy4FMa}JVI*RA)b)1BKV)9JLC zjhWXyt{trLtKugxQ?9~cj7SkWtbg5xmwo6Q*`TZ)j#|V`-NxC36!<+b!i&s zMHN@Z5NM&wk_MYnD0^F&)T2|X_p&=EORSE@-uHQG^(?9@HXFHh1n=#BzH9a-*VmCa zx@qJw0H9%$v&rE_Kld6SNyx4|4EY)05oP$3~nbBA|OIq4EYD* zA3HiOsC3<+WCW+Ns*{L9MEPj#*Y{5k0MKA3-DKnxYU2QqelOVd;qV+8md?oAbiF%?!-g_8h)Bv4kx$da8vViQqS5e)zsO^7HK6~rW9CE=Wl z42mY9?XayrYI?G;2nuB05~3u|$tq_sXiUU1TNIqxD*D{Z2CETfAVtNjt_L8K&_!BP z?=X`Qb(~qVf=C9jVfDP=y4A3J@o`=2(^fsq48&~T+_lJIEXkN434LrRL&|p2-UT3O61v}%lzgS3+{rR3h^%oDGnn}Xh z+`^_b)>&2`Q$KVHrr@EYl+wsm0C0J|$SE1JVbjW}*2?70Pu=A8gWL0FvRE!BO`SqK z**^?jdwVpCqsjU5lB?487gFOSvu)elEiv)L!X^>9?~k7$2qXj*QJ-}HgNZ1j;}i-t zPV+E?B#K$Asb-2E5~XC6*%%^%90h&moRtik*tf*VR0aqPkSZ~iMtY?I3+4eN3OFPT zg@;Ol01nnxd9X2|ooG?^c2t$6cBhBtow~oD@7;UV#H#Rfe-Tq3RgAM?@XY@p2_}^Y zA_Cgh&b>2v@^{KUck?f-4u9h68<@xFIu7A7zWNGV!X^_)5v$)HJKBE`Z|1cu0h zU{vwNh+`jCk3eE&Bczzn_D~E)vg>eDzSwqs=FJ0u7>rho;yp<Yx!^u}KOO2D@29!-$@G;Z9`#bniiP#(}`7%8FE-VcKr{d@QLzxR?Jl{c?8_l6WI zJ0|sR$Vw3VeljZ>cA*<`NJ+qzuBqy(s#LWC$Hol95OT6M1G6(0l{BW3%c3!>>?gKz zXiZ^@>7MmeWmOrn6QWd>#u$sq!O%YsNLXUSS*v`R5!mWYFrMORp4hM|K# zX_lz)oHiL5GC&qY5WpXa*Z4^{E<9M^$4Br){nZY}?rh!^Hut$)JTN73xaZ<=BP6YmNwwu}K9YGIL0>2_6cQQ`qIEUgycI zuE?BUUmVZ&cZ%`Jc<=K1%2h6>m>0dZYAp$56*5FMnGixsG3T5Rnc13>lDG&HwVOni z>`qhc8Pj?{L6>}BwwAyb^{Q>V#VXlx9uPA&T-YQ4WUG2$9h-8M*915!%h38^7=WRN zTqv<&0b6%p^Ny67bMoE)NN={GH^LYAw|(MGs-|urOC^`ipePP0LJq15IirC57B2^^+xF)1%^Y*zc9T(4*zt9@ zTrSs*t7A??G-FqTIXgeU-YjD`T$>C48pK$`buCfAi_6O7OEy8XWizYl(DgY=#@w?S zTPhmIuH>RZ=L>tfdOB&sGQq{NExDXGBWIbb@reEW@YZELUh@1teeP*GG8s#BrkX2e zjX6chiU=zG5c^f1yCP<4#-^yToNCDi7m=EW_3`1=*ORXgIKLOyMU0c_o%iql(>$EY zmRUF`WJE*|MFJsx(a(oApdzy}_exOpq zC~eCDh5!Md^JdVNUq~?#00ux1W?>B&GYTVG#RTY32oYTd01B!g39!g9G4%uh!k8_x 
z5=WwsnjQrYNJ6X(0aJp^2&|~;B7ra%)~LQGMQ~J)D5XT$qarC`Hp)mG0Ew~?Bt(!% z{h9<>m7PmM83hpyW>t+U1cHh#=Pi?4W)K4cLOFwcGWI$3yN?;KwBPB(f5Zw!I*FVmCL4^_~qk{=fSwTSQ2elrg09YuIpr|I)41%mY z=%qEgI}qO~KJ&jBK67R_lm4lZGqou*gNh<3WkfJhO{7p-O^O)}Zg1aW)LYeMx$`#s z{m|~S7#SjKl%EP?9}7ai}Pe~G@mk3oBF1yAcVd7%uVZO z%d0WDN8?wWZLp~+HQt;NrDQ{u@cZKDEwiyf0?G-JAgUM=>tF~9(LyXH1XZ$y#~w74 z*(63K%47vGW1mwGJ~K2jDy1xmn1$2_w1%7v$2-NA6UjBNOAJ>aFsC$4Yb>T+kXdfp~xfm;#23PW6r$C^=_t7SEIop}2mdj0l zaB@7ujPdN=`+xaj^^I`*k6qsR={TS7X5C16O0X`4?zA1|{@KD6X0J3Rwj@cN@aO=f z8(Ocbl^tbnF7LfJy*RJyax?U45&B##JA|}3998qY_BRe5VLfzR6{ChkMvcWtK!Yk{ zDikEBDiE>|1yM>b%KMI32vg#eA$Uj@Gbl$y07WBO5RxKf5d>tD{R?5&hu4!LK_-YK z!pT5F(jfTa+x6k}ps2^(7s&=P^212ZCU07qBCJiZj z6-8oA1|)!xVMB<5BoIl0#u6BFh6Lo3AtKA@EE+~aAX0(>w3Coi77+v}jDsvWXUQ3( z8X-g!0E5T?BtSlsv0^9#=A5N+n1obPOc}r!R%8V7IVww10DvM}Mev$KP7WBsT2xdt z8Cd}UlBg8y*Z^Qok_Z(sW(Lqc1~;m?ED`|{Z^wV%tMS(#T7+CZZ!<8W=ej<(*!CixwgBLSs1)Unz)7-`WPe*LdbQIMRSS(B5aZY zKmeuab0knjQ5}_4V_8I2!{CCKlo_eCmeEMegZDlJVq@4CW7xS(&gCUyl>q%j!4ZKt*|T$mzg_Av^_ti#BblCpxB5_b-FLQ36gczk^|-`l;uzTP|B zE9z=;um`NwsMM6LuvPNiuo?Qc-MDf$hl~K$7d$`e3#)e1N^gg#k>0rfWN&AF_29`L{NW${(N{n9WO=^2x=cx? 
z)7k2B)s#&&Kk(icUg0hcPpmCfQlY4p zJm!!iKr&Pq$Yno-8U6D0><@P7M|W&03MFuoV%q~7wLFko%{~sBq4S!9<5^V#fFmxf zL%?p`F1yYa#iX=D9I`RU5JikJ2CZ##F>ZzgO;zTe2(;@x)n#&;bA~KT#7YK?GKLhR z-psN_(1pU-ovMxFZ_7(-}%JRfB#Frxw`XdWo>O;*o}+cG7hFl zp&nMfExteTIhTwf7*TLGl8FLNWD2ZY3?PYUVXP->3eLuKepAk7bsF|9ULsv*sxI0a z1kTkB#rM|Tv%ycTeSNeTr;tN8RLyi;RYj>W1usMeq}-*UtgA%O#blAPg{VNKtGP5H zKJ-~pQAeXu@!X3b+cIZG4wIGajhcDbvrBSqoWfLyl6L2 z+aV0md(E=EUX4bhJ`Fw$)>=Rm#Z~AL(H2IpbHTVS4&nXo2qFV3*;pQhIX|~DUC*>6q8s} zSHF8n|7t!sR?hJQFvzKnc=0bPRC$A)Z`i<&G-YSn)8KaWpROJYUO)>8LQkkMe z!5}#W8ImSNvXe=9c+;rcmMi^SgiCPhOd<9}EEu zC^=1_0Kf_&n3?Pge(n{KY*32am%($0Mb23;Wk&g3U5|iwVpv%)M;o&Wh!%-F|6u&H zG(MZs5F=5*7}=Ac6;AZg+OO|h-Ve!OBF@;B$sk}R$(#|k&Z9(tD8y=r)fqubLZS#+5d;CV z6PtyEF`A43AgrQjgiIzu7sV<>3kfMJm;p^4!bi$Kl#PwX1V`c!NdYJuC$K5>1>g7I zUXPTZs;t?diYBBCz<`+2i*473*Mk9BqS3@wsK}5rXK!*LKalm5VzQ>311lInL`)zV zvsSyjSs?SMj$v)*x>2TRqxD9t$%vekGbyW}Nd~K=IVU9~W(CSAp^|00N3BBr-sm%~ zPoEi{?oBqWDTb@oCatO}Cc11Fn(W@*&feZ$Y|4kjQhYQ5nHVMkIEXcKK^Zxz&wydt&g)3(%_(|1xjb%nu003)k*L7^MR_5aP+2n_Sjc1Dh z1*|+B5XA47pIJ6&3>s2Z$bbn*RV9RG-WH{>YB{FtfGP`lhf){{uSp3hgMcKAq+}yy zMo6&&rbLuk1`KJ)9AP@=A!|2)&uGD95WqmDR1Rz+rkZ19+vRq2@y&yaUmUOA2nMkz z#-oGU*`>&v=fi)rXv?ah(vX|DQqyoTbNSG@y}IQ-uEUEB#3}$fdxwDLYxaF000V0HkwHlFe#XJ-V)D87uvsiWHQTveS825p zL3YKEy&sZ5o;vh^nE(-ikmPx1opo~Z;q5zpd z(Kv8MRia4FSrrMA!2%RjH4+*D0*#VIHD@4z0w^0aL=I691VK=VxiSU;GMNM^1|3+l zF<=-K1X7GqGKz>YkzqwJfQnj-N^(|33?LegIYme$0z@8C1_1?QOj(z++4O9^STEM0 zABw_q=@Nk-0x8(Ku%K2kNk*n2ML@A^$WqvNwMDR+Qp%W|F-VXkb5;@s#7rWfD8LFa zXQ~_$X-p!*XasXKm^LkmmTVmgS&n<%dCH$);vISR_{{VglrX zxNsRD`D7TQh!TiFWl&8?gqeM4NyLIA$We0wtjb0jf70wk z(Ea26yivJ~g|l7M(#b#hTJ!Vm$+_#H=>CMs?tLy$MvW(24YMg z%FbqHLBa%>`Qg*k!Zm(~wW`PDH4@9tctSZw(1-|-38*ku1ud``YVT>4~nsSWf3>Rg^3Ir8fDH=qsr?V^&L*AY3q2=lBfsas%dm09= z!{hVi>vs?SCwcuB9)AP6dRXpTK|^)`8#Ef$kqUpG;`5Mmi3Oobq-}5|bx}0PP&d`# ze7;;^-}MGbtzB2&{*Nf!}@=!2UH?G%bb+WS*yRbBrGf3##JSl=E zpThv2EL8+U98Bb$qFJog*WQycWm!&(5>i~OS6DCz`QD!`mz$w)Razf#-6;YZjw-96 z3n2s_W6UB_6HVu{2~$;+1=`BEkm7m2ZZ8*7+0DgZjOm6xN}P`;26XPKh*lADNCZ^d 
zN;P9vCX-dq!zM=_atO;!A463X!=|%U!G(Kr{;V)|Z!)Wj+8RShrLiG|Ni!;4admZd z`sArG27@2jq9_U?+9{f;DbKsrWxug?p}m6SoYQ7#ZB>?KA*uypjIpqVwKj&_6o#z{ zAsEFNBROZQx+v}LY>G^+_oeG!xp|Y5?C$LEAMZYTMpvr~&R_~l7TI77nzL$3lF^XP zGYo{i#Do%pU$+=>n3v6~2RF09boF$-TFa`tvwyHtH0Re>lG1!M0ZD9au{#aP2aRc+ zLV_G)j;WZ8ZC!>CG)M9Qi&4rcgvdk$U=2aeE~Dp=Bz64?;*B1Ts-Do9F@a#a;w z?1#?p&gVym2k(Dy@7=fGJ=s5S&A3kkSZ>=ElajTHFr@5BDqB^?CZD>lD;%jJIbvlN zL_`3{Sx2*xCF6*PzJ1hghA>ptVajKIQ5)Cy{k6#@=aKS_!=vS-yT`^q_ouGp-L zL{iSqoGSaQi2z6u4MzfT!2So5Z^@RUqfiKuQDh)aY%*`k^pV=H@7uoD z03Zm-NM==p5qEx6=a%z)B#5qGC)EOn{8r z3yfq#iHINo00^>I002ZFAb@C@4OszI$^@BEjVh_&M@x^Q%}NQ7Kr9jfWs8~7syYP^ z1X*h=vSE&{^PQ9qL9GG^BcTuqiyGAcq@W0y5e#Mp(M+09xlkkliUI(ZQ5jGmsZql~ zNSP=h2`2y{`KbBXDk2x^sw7q=00|Li;1_-0e<-7l1Z2nkP$#(0u)xyx$YHhrRe(p(u{VdaS{#2o`FJkm`RNn z6w1We03u-5XA9Y3h6FLAB9Y0gO~MO!b~wKAV0CLwul>y4^;?^Z)wqV3B}Hc{WKN2g z=Vzm8q>!tmv-xfwf=c$G3qwE!0$@gD%mlq3Ldr44w%??bFo1y1egE?9I}Q*fPe)CG z*!RQEcz$_yK7HvY@Y!16aslOV7Nq0yWL5*Ws2oM&^Xv7~Z~s!i`|2AX zdLJ%z_ zfEVI@7jOa`Ntnqb05ZyyoMi5~xK`FrdQ=630HQ9`6{D9jJ=QM1fthq(%i^ zY`Z?J9t+VZxQKQD0~oRxYZ61m7vy>V9rRR|7HP-;LmS2#ig3a4?4S7ke{`=YA3k|F zZVrd&lY}JM7$YHSj79(%B;^z$09XX8=&G{qdhb0mm*eWzotx81QxsfKkz?{hpH+~c zGQ>ozkT9ZUON0n{+p|Lejx%5~AcF_vjB|);*@9+_QB&5O0n`wJDx{pE3WExu0wXzB z06+qbISw(Zsv?px06By~MI?h002O5OT(x1$Vnh_TVC6!DY^{K*DkCOF0Z9OAh@6ES zlc=hW$Z5_ZLd0kcGb4cJBqk?N5+MMifTEDHZaYA&Woyu4&Y761x~@uB8k<5)Xk1;D zt}0zYRg;HM)D0_GvVGqdNPx1vi7@JXZ?1|_G8sexA3AHf7KZ{N8Z?WdF~%Ypz@C8# zHD@AY0G)GQ0ufy=*VYx*T2Tl&Wkq6Srb*qD#-{M zU2j&)#b{K|#uHxj#7)yTN@A!cyp$W}BNHK)&>G=`6ADVm#m;l7WUulsLZ zef@`S-Tilda_`M=JUlD$U9a9EnR1+>!2g52KZ~`j+s?zV(adHwyE$ETe}DVSOI`Ak zq7=p=DNB*uKoVj*PMjb?l!sW3orlDRk_SV1NM|5`)4+y-7Z-&ED<_ui|S%@`vO>r~yJq$rr;y}Z0seGi3Qr*_RV=Ui*fHP`sY_`bdJ zh_L;(?9W`loGV0D1yme>I6fd*+HW*G?ACq7E1{_-Gi%!QG*QLld6XHfgYwyJ`*e5f z%i!$oSK|E2hN9%DmIFHgA}zPq=d0}zyO5>kDXWTVMJtr70rpMrV_+mqWtxU*cj)gQ z0iq#fsV;_fXjidc`F3}+XHUiCFpUbW;Qow>MHyLQlOxjcXE`2KO%)s$n` zc<-yoS=-&*+*nH?1Sax;K6rDaX6W$h=6c@MzH4IBs;YP7t8LfYr;k5)_0cOo__;55 
z^3#+aZtj(FW-*Bas!^_C$}9-x1%Z}4Yz9%0G)F`#${tv3mnT2LmyTZDL;9unzDW=L z=U;i_`pJ1ZYc?UwBF(zTzR8m{fDXMnu3V-%I0aG|##sPd3|`GEwG;z50VSj~kDf_L zD6&nN5KE~s##A#h^xUwgQVKchn%0K8zrBH+AKbrx_0iMe@;ctX*R}oba4V&(e8Ubs zI3Ghr8dAbSj))MQST!&JGcdJCp^XhS)Od#mZPO6(Zg-n&aco+o2KFJAG_JaKp68V1 z@x}R#%xnq_ebaVLW2Fkn<-D8cd9&(*^EcPm(Z#cqd(FD3b0MN%`S61}UOZ~!RmRKd zu-{+)(lyQZKakmVnSA2h6he%zjcqq_#XrUZx@L3pHT~l2o zqQ(FbIrz!#Ci2de7H|3LMsGf>*;9Y~)8WU5kKWYuZVYho@;`pPzkV$bH@D~jS&$7C z0Srh18I|yR{oET64b3TZ*;BSwLn39O$-cAa-jKMUB{ouPE3v}jf_|5_{o6i2Jv2aW z^nirEA~OTjBtSO%ULV(QwjB~Q1|d~YHYEdFn1F9p516Te zLjnV5xG)6+N0IAhJgsg1{_*jd5@g>YR)mL|7L|Pz8-41aPE~EF~G&b!$#i$OTc5S&6iG z0?CTx0R0>g0K6GXRVib$@r_W(s3>!zmCPmw(}3Z3)ciOVUn-`mr0PXo#SD}^v3yTo z_ur@;Q=17e1jS(J<`5bLthHD8o*vh4zMU2UqiT?>QnI0$c|`m!^=HzS$a!QyOH|AZ z)J2U3H>z%m5a)Ywx*h=8$UBa+vr>h`2{3s>AgNr^?AIR2C2^t*#*PG}=6ty|KW(l& z=acy16@TsO68$(KrWVJV8*>x^8+V!a+Q!z?s&EtiAJ$V4D9<=Z{#0$uc5=ro;q-yiKwmZme z(lgY7m@PQXS%(87TJRk+nlMqZnk6A{P}ft`-RzDWAW^N>pgH7H+K~_{B8sbmufSBo z_{|pHnz~QC;#xn;Ji2dOuXYcA_|t#*zx~FizONY%ClCLT@x7mV^B4Yqw_SyE)qtFcFr0C~dozo~|a4+9qmu;tvSFUPQx~<`cyZQR9AH6sKi7$NS z&wu0cmp(eT^(POtEwCAFb?Wd0syqI)>2~L~|e2J=XTS>$)?ez*oLB#4Get+t(#FDdIhQ1?AOuHS4s*{a8eFskoFon!+C z#cE&ddwN_n&YN}IkGUF6yNgeqjQ^v5>W}^S_kI7xa0Ag#xs+1W!;Dp>G=T|;kpQ5Q zLUfK8MYPI(zqcx#3z~D^G$*I`&`Bw27%C8&BV%MlH1&==JFkdLO2kG%&`boRGDHkP z5vzF@nK{iPA{bZ&BS*++rC1drg5ZQKA~`@Zuqq-_Rn&|DmBGZ=$QzAm&PjzGA7$3K+brS{)lu1yw0fst01GNyMtM znTeQL+qNzSRVDAt0Hk{M6$pwL09b8(;I3CkKqw-F)UcNj6oi`Ko90kbnkGhQHy!wZ zYBiUIb*NU!foBC%wt&pSXaZ(0;@OP`ecz+#iW@R4MVa|)Z@&3XPXFrv^nZD_T3vkfl?2DkkK@x4c1 zc<_IGr};27>IX&XtpRf*R7KaIGN7Plbk~hL>9Jxv!$CnBT4A~+q9M)be1kR5Ga9(2 z536pP_XH^O?AgWGRxbu1U|A{>id9r-shva<)YW!4K8CnqlAjFDP zwXkCjVJ@Oj9l4y7YKDfoq(zj-ftbz(#n3YbCt1t@L_riClS4%iE6%S(q<|7A)Y`39 zbCs()MRO3`JWdc?AqpqQT~3;l%WX>ETw^$M4{x?pA3ytdZ?AtWbgQm>)wEUyli4y` zG4!3TZ~5C32iRDv#T3xUkkvr7IxwoO+fTce+OS$}+Nk@hdA9>_h;WPe{(gS)bo|tM z{^Q^O`iE~{w^-Hzh{i}Wr4KtM(VBDze&w$=x$qiXeo>x*5P3Gj1oeDY@A 
ze{_4*#<-?9?GKsod_1%4Q1>k2aM_GT8_XjQ7iHue=PdjIx2Z(WtKU#~W8KTp#nrQ58|Zr4_- z+pH(8MXSfa2BnJk-ji#(c0Uf|oYw0#B9yswFB5z)Mg}Y|mcBNo45fEeJhsqIK1+>Tnnw_iYC`O(n6`s-lG8ag#FOoePNO zqD+UH+SMvf2|clN>aPy&A8uxaQ@`@{#ny^ zW|j?(S4R%eRf~=^7jodEuUbpPq{Pgska89>PY@Ann#Wq7>~~cK)nfEbh|1JuI!t4c ztou@i;iZ>f8pkm*-N>|Q+qG{1fxdp5H;iz3rj1x8%Ph;L^OV*!JnII<( zNGhKtCc$MVUBQoj7MR(wBZRHDsb?i@X?!T^ZH-0|Jf&QE}G~sdw1{2*JnIy zX>TR4Ic3a_%#4b#f)i`%_s09qZ`6(!LGOWFR$oL7140q0y?kficV<|DLx+{cP)z|U zf^zcZyE8v9pKvK;Nx>B$S<2#3sp9Xb>-BBB?p45XCNGB7GSV|2H)D+2?8CeZ?1OpfVlB)y& zNCtpbxc~qF1z3T=@2qy}LazL(b_}d3P$@VB(OHp*d8DZkKa&D50T^h)w6x=4m7D+o zN_hTz0lWwxtEnkfL@uBSz_lSZM=69K`09Uj`;-5A{ozmLFa5a>hl_PhG_0C~o`BApQB+dxMDQIfV%tA){9>4CtK|6}% z)VxZ?tW-iR00V2F;`b->v@8vN9_eJlm`cpa8W7&=don z1e&FN0GOkK5epeX1wg9^U@iv8%K}#i0*iVYD3K2@Rn2{R;`0fl?#-`!`IA5LIqU(; zoRzFiCeGKg;*f@eHrB8EU7B3qPNiH9Hxtz+hO3*4W_xed2NO9QZkVlKcSD)RG#|(2 z=N~^_ot?h>(ff_j%l95lVk8mKW=X5iu)*F^KI}!RGK~iAaJf3MnGdjDZ+Bs;WHeOg zK0ZH-H;jiq{093oAU~gHMKH4pU`P|Up=*Ra#+IDs;o8iIpkM1@zbkc}x4CWCL5AMi zivbC4i5#o9R9d2W5Rg=0f8(U%o*m>NQMa!3npAGrFFQYzJPPe`luM-sP$!yNaEv+Y ziG{WdS91H+Cr>Uv`_l2}v;WEae;l-)Paia|p1uCYmmYulXXN7T*FN$4-?17_zWBrc z^zT2R%lCirFMssTFQ0B7Y|mc#JHPn1zGk}?s7KchPte}XKr_B}lcxr77{=et> z?H1zYlverY#}B>)`ybu@&_^c^9{Ktm+{gZszm9!R&rfApWz({*>xm*G6R zR!?vm-aRn%FMWS0?mxY_#K%w9?h+{Ea1y5X2Y0MxOoy$5OVEyzdUPeY8=AdyN0|}< z5CW<|6)2uOLJS%~%I~!Gjz$HM011GB(Zo=V+02YBq^iu6tC0tz>V9|bhXqv($hybJTviPL74dmHrY0cjd(eFOmA}!r zBZ9a8H~*dg$?yHX?|Ygr@*$HDLWU5=ai0(S&}{LHF?+3<86$HwLqtK$1|aGfgR^eE z5~(Q_R-Y0a4!iAk8=VILW_HAjAd!NR5gP$O0W|<3ACN==z(NzuR6sd8GeB_8q}EzX z1+~Qt84+DTQv$W30!+Z91xhIh=7512kc0?;ieagx)~X6bRH%EBVG!1A;GNWq`)4|8E+ue*qe)Q<&Ds+8*&^fhT zoNlfy4Tn0OwkN~wj@wRi*^g6<%z?eJ`k+;OPp9n(GYMF9URixZCFR3m@J;KypHfjn zFi5E&qM1~y4Bn9+=b0ON^x8{vO5^^}w=J_XgEW^q49uJms|pd35g1i9Qw0D-08#@m z1v6u23mvAxKyqWGzKoUIhz73hWG>t_M1&%Zce`;ess7meUrkv(^?l)_#QWOXfeXOQQG{#2fnr`-O4EMKu$*lUwvkYWZfsj3;Nj^}WUF^jg?_8QE10|wV45(=xsS1KEo^u8uB4&n& zfxJ-3c_#D@kW>P@DkK#$fC7Rl8X1t0vH=t|H9{oEh-e6)fT#|g5{g38wa)o!mQwP> 
zno5RBL@Uo`XaI++>n1cAnf!Se*L#tA1lyB)H$!^4;~9qk#ZOLu`k&a z$?54_^DNVJ7=w#xl*&jH3YBaH=h9N#9&N@ndhbD%h%mtLuJ<>LBw0GcvKvU%a*f_^LklZ@a&!6CsqJ0C%~j`v+u=Yg4-lPsO~GAinU4= z*aY@c0{A!Xy@DPd?=P*C7I>(7orE1?mSgXH!RuL>u|g6}?W!$OnHdGSMhIdE>{-B) z2{UZtx{L{^b?lL(Z#cAMpc$|rNiJ>MjN1Q(Uhk?vDU%cAxv~%e_Bd<>`s$pZ(@H z9^Znx*B;l`%hAgriYcs)9*IcFBd=Gblxlh)j$O-UXh|C1nBy`RLkHZ5FTTO1>0D`s zo95<9O6I^FT-J+s001RJ^q`zcR%xQ!ueJ|9TORz8=Ij6GIswJg^)!q}k7^$Iaw0@f z@nGa(swDzfN8c0Y!{4YKU{Vk?V=l}^tteE7SlxHHKL}85X+Qvg!2y8icPrjff~f;U zWFuCGYz8PsNccPIdVRav5rwas30t*$^C(GeeW5OnHfY0z7|W zzreQ(1g)4rsQ@(~GGphs3p}aYzw$%y2me=gdR#O6X+)~7hBbm5#cvr?*K$HAjwt(64kaq9ZDmTFbnqqdepVtwDowh@z@vv1s-6V)*HW<#RE*}^&q*F|Bu{f7 zoAZs%hcq8BH7V@6<1@%Nd4E3dC#5>Ang_4;(6M>0V6_xNLo)=vaz|UO?(!%5o39-E z^ZS43m4EzS_~`xfw+_+0@zNLm zi9dCEasAU@{@|DPbo>23(WLj8o$E_UmpuItwn|=zj*o4ho8CsPyNh;mu{}kw=%?omO5+hslMrQnqm4VeQ7+fuIRo)L)p*u6%~83TJe!{|``du-wTD02ei*OL|8jlpR#&IzPavQf zfdQ*?KxU9J&nT-GkJ=Y_f$suTcca}VrQH`EJox+@ugYN`Rfbwo6--i55pm9W@6Alq zzz7jkRb;W9GS9RuTR=q1hgvHF-)-P# z&prSEs3|N2gL8|(v8t-7DIg*dsVYd0bRn5&bfJ>0SM3m01>f6BtkM# zTABp_BP65cxBJj&CAGZ#l@SmTKh|e6Q&luszJR6xfJ{r5v{qye!7U2uL}Vs+gM%e~ zmQ8lJXYT3UbaVC8pKY<<R#nGgf4JRlw=qUDyScex=9E$h;hhrPr%*pY8_U`F1fy=wBQdF!2lJ&mh}jCrT++^ z0Ax!P|Jhw_0H*q@PkMN_5}62TS;lH&wX9ctE}|ss^?DrUIVCk<$4kmHfFW_td0BT< zx|^6z9J*YSv$Nh|xBYA1xcOkeKWOE)aU5z@QbbcWcxF=goq~uwyGsF)mQAdS&Ai)j z@5Bx<#wi{Bz2J7=;rFGZQGOAPU765QQx#AMu2VGzWc#D zAM9@U629>1&w`l%aPUJKKx7lh#WwV`zPGy0IawdNql3P4e*I3{J*X7h?Gd%>U8#vY zLqnnYY`xLuD(q22V%zq! 
ztebA0XL48yZI6!}QZ2dUayT5C5Kix%jdQ);?fNzXLXui6wSDIZo31gcV!Al2q*`lE z8i^d&S``ogrfG^koE#s|^9*cN1QDGJD!?qY2s5iHJF2xBuyfH&nkJ~0rVUN(&}g31 z)2HX_u4|gc2X{zQ=$dh^P2T~@n6meA`LJ<3)Ol!~>uv3G zYrud8Hr&*)>zdBD<6)qtJKCJAIh>CdecKyy5>a$R(y`XC*?z9=8mcpoJDPoiyIRlR zzpi<_f_iW#7xjnm^woa-(AZ0)Ik=Xc5$oN?K)@~#Ip@&8)YJ^0?H>z|n418B927U~ zX}7Pr7@Du9WTu8h&vqfZ^Eer<>eV;Kv(M%3Uj(=I`}dTB&a{^aizI+%-w!*_#s+cY6?D2M8fCh+27MK(CJX(w>}-5%i!yukM!mUlK>-qQe%y3H;R2}MMw z_{z=dQJvmj+p5+IOOl~^YP|EIXj)#~Oe>PHL($?rJC9ZJ3}raXrn+fYR#TZvbWGlP z@85X&Rr^rOG`{=6`-f>9OG;_-D`!%{$Bkm-z&820A=`J(b<6cP}+A;{M%>qj7B&uBikn?y7JcGeZk|3(=;QuSeIji_XgYM|Fqjn2Y1*9W z`eE2V#%Zi29cV-7{QfF+r$6w0>)-!M|3>?x_pZP4$KU2k5Inb3*ZG_;2$O|TOV~UUFB7Ge0XyH2ma9Kj(UH6c{NXyl9HK-GRLxlBp4zA5j#&r6dMKra_qf#j)Bn3 zK#7@`z%daydfp>&v8iTKD*!N>BlHN)tS+(UkHskOLd{Ky^gRun`TwL`+pZag~f@p4~!7IDa=wNVu>fOE6RxiC7{+2S(3DQJ0bh08+Ch z!gx1<%uC2a!S3I`|JAR4l>m=V?lIsrXF_9kqD2h>5txXa5m+_!Ow8<^Z=xy`Q>1Vh za@ieft*O+eX}YeG|WPX}qVL#*zo`d9zuaUp%d;y#Cs2>wYy&6Envc zonrz7vr_Wmc9(MwZ4;w+NJO;T9ma8V&V>-zIRTSe7v`gyI&h(h-g{MzK1@Zsw!MG< zzGv*ZR;?OH2!7Ew2LLnz0CsM<|L;Ve?hFb5!0v+4P*AHHq8hVjPvTICrB>fGRkJBV zaAnS^NZa-~=SAEKq#7za&pwE#DA%foamE6D-?IznSC^_SDO_wRe}LkOp*ry`=N zXGizR=*F&4ai`(!4?kQJOzzXwsjmlb6^H-3`0o&w?&uxgnM^ z5jl|x00f$I5|Pzv15LN_9UPtt`rzpvr*{YZ*Z%6);=NBu?DFlPN*)Li7*&7W;q%*$ zWzxR24FWjl+P1CZC{>Xv0JwI2sq|OhdHd#O|6^~+s_zeTqNXp!4k-suFwXmOaL#)U zU<61CG~~IWF}aiyISwIot9DBB(~I+z3O1yS6|jKTl=1fFY)+@0+f73i$vGPtfhkyw zjrV>(3{#qgEQA1NB6XSulQQQy*E-JAP*dYc@;s%vh%|($mZ%|kzYZ;mRn@~dscPrL z^008sEr#H{nVp`Vnkphj<`9ArMrx;dKGbwO?22&{;;Ly+PELvlddJ?ear6O1yRIwK zFywhC>C^pLoQp@L$cHrcO!n1qpbdYK8)g71IAQq4D-ufYuQ5`qyu-f=G=Jncp z0Oxv#Gmu>l`pwIW^FiI|rlHlt?dum$-X$Lk$vp4ZY6RdtuBw~IxijB2EA(;N-(rT2 zoDpyORnp4L!(k}X7`y0+S6%=0ryq>_xn^yw$Ds{f-xwhWFV)6zcFsHUiWSge@XY8Odyn&6m!%Vet2L`uky>kY z&X41?TJ@pvA%r40jiu>4PDyInSCEp?pzFH6ZJl_`X-YG|6dUY<_Rg<80x5FkPPTh^ z^`*0^g7@ul4Cu*6lHnjCz-8sIvqCUpbm$$|l&|W05)!SBgY+T2xxan;)~D3fSH6J# z16;it*AEnYDPw4z?*d67?=fXCLLvrYX2;GJ)_h?GY$^RiWRln*WjF1@%{7i$82~9% 
zDGryf(#Hu+#Of*R&TIR|y!ru}o*K`^RbRZun&(uckgfi>)pHm!Qqep`{~}N z`RdCaX%2O#tDXbqDeysaX~`39W*hZ;Y8rQf-HaMbp z-hR)&5~12U^xdnU_~`P<)7$HLp4aR3Y-z6JFiZ?c42mYAM9=_U8y#JK-OInbU;Qid z_8~^^##saYJqPuhngKJL)l^HVl@S>cYB3WjwbblgobphoU{bHr=|}Ipb@lW*+)icQ zIXP_fX6=S`7*5vZWZN7yJCyi%wDVMtA09pEa=v|f#;0E^Cty|yDkCF!L<2N4=QK|j zZAAdk!>@St@nk=j=Gae?2jB*vTW>dSzd!C*dg+g0eA2jMh~0a;!@F;t&u{M^9|+r1 z!=04`2pk(uqwU9c+8_Sq-}6tugrz-Up|z#VNG=!ODEoMhjh2YIyf;0TeTJu5e#@QZXdZ(2o1Mv0Fc`{hui(UxgFmg9`rXIaQF55|E;Y)I@PaD&HYu)fChOIs6b|BR3NG=_?xczc!3vq zf!_)cF=L);u0BA}PrQ6uG*zzHMk*Da1*e?j#-j`2F0^k%07!ri+_PYMu8PRaY|6|C zh(^{oy$gW>MWmEcO3FFU^PF>Tn-D@+bakF31OpMNe5-G8w>bj<5I6%dQ&TitUi=A| zs?}O+DFqROb40ZKwbtsKV`g#=k&vIg=F<{oV`fICyQE_P12eTH)XvPzq-Ka{fJ6-J z!1Q@!lL5-JJ}>{}SxzD-f~koZ(K2%$@~k5uZmzhzOCGiJ6I*h(icX*M<;8 zB&Y1XC#S^h5EX$OBNHMa8hJP8)QT+A+r}1wieTHXhhezBzCAvs5SnqEYpsmb_njFy zqTqNKhMaSZaZXc=v2VHu4<1}!Uftf@oIN-L0OuU&-Gg|Ek~7>LOY)pb%FemI?_<*v zk${PqZ#y$1)#W;tQgSYcIE^C!G(NCf>i!i#0W{~kXd2KRBeew00Tt}-x;(q(3_v|X zNlA^#6H_n+Kx5~<_e+t4p)1w)!?fKVt04y;U0bG6K-;D_!z!j=E;gkqB2UvaUfotP z6S;SGwqCEzY#OJUavCR3e0F>iLTLLnNv=(+sbk~6&CIh?t=_Xp8OK8@#mv+i zwd%}Q5mghk-C@}8_bAsKtnCt=>nbs0t}f%kTj}HG@_H(>h~kJ@gZ|Vr8YJJ%04(|JKib)gJx8E3cu8 zDea(|b52#OiMmMooelpjH|;{BEa!7Xj3HdNT^GEKBSS?l+POn`<-ff6=yrbhCq6r^ za=neq{z~=$t2_Bt^Bl5JRy!&q;GtX~PPc*XwoDh>%QU78#omL)QX% z6Cs1!;ebrjJbBNmDpINBG|kX>Qvw9^&O{Bcf+Q(fYZ@nV9D-*@j?k&C+O9|~DI)+> zA+_Vp_E6@FtE>C#qtN)Av#M&T21*QwFbqSQ=UQtBA%p+`ZQGVo=6Sj-!t0~k>%$>U zVumJp9O^g&hwRGL{x)`P(t37wHWF-4?`50|Cf{!NIYQjN{q?ucGPmo&bnW6+$~>oi z^tp$+>bgscw^!!}gKu7>$3y-4F85A>tW_#@b)KdtSGzgOBe$8x`tI%t%MNl&jZ-Ew zsaCiFr_KXIt}?Q;Af~TBcy!ut0y#i6vs&}rzQ?@kHyIQiiwFZ*m8vx~5!E^8#OgfH z8LBD-A5=l5hQ23bCO^-URteshQc_CJc|$F=mQoz~#jyhbN-A;9%kcGUk?JIi#0Q@8?{z9YkUgNjj@4g5UTq6!LJ@_otc9_O{y5 z5n}7vGwNqv$v7USQtIlYhvdeAfQLej>BcwfW+hTmhV0_iXjeDsX8p620AP{c14J<+HBdtUWKvZ_1R?|=QyliR zyRbA6Ira^xffZE#mILXxs7eEl4At!)R|T49>l&O3i-4&TA^>;;01nH z;srhMZN_r6GP6<&fWou)92a4Q0d=3DRH`BfBp6|x_qMxvaN69{*P5^VKi>MnzbG!; 
z0Gp3IuiCQ9OpRJ*H?eE9WthI(Lwtc3_@2SXBk6aqWB>p#-&|t9W*3#MYwhyE5C5IZ z{zne~?|GhU&E~DtI*Br%fcoGSp_Wor+P+6-so6xlXGSvzx6^*S9jZv{!|C>9vt2cP z6Ew^|wbc!GBvO zK>>bzQ)l>#@PSz5l6;p?>4frsE&Lm;bSA_j;*GWsdVl zUS*k>e82+ zmwuk&`s%Cr(n~n(!*x-t$SR5eM1>*L@Agu7ffx9P4k((&p&~lWuiV>~xc~roX62#^ zKp_Md*x_9UdT>-16*@HU*db|^MSVzBrB*~Fa;l2R?0iXdx!djc`;w-GY-pOMiGA1k zMV#chE)YCZ%Yk1fc?SUSOfUxk7BwDN$`PKG6yWm0|14V9wh`Gc41pm$mmcztR29G$ z*&JEqW}a!4AYxTzQ~+4g3_%2KiMa!OD~O4ADf#eRTu7fu4dF9g9a&;4h*oQT_OF7b z&ubHw+)wX4e@8DP^iWie z-}mcM%Hzk6+qPY;Hnr+uzhA9Z3$YQK5FDqJ_Tvz{_Vo1h{QUgAciz2sdeU`WDO0qt zB=9dWvPFApQIV=CF|+sH1z&4j1X7}NsTL6-$K;kUbq-0JxLcykq8H{?n3 zw2D~F$-U41!j9>iSAY1W_Bi@UZn5b~<$l|QK+IeP0chc9n~Pbs*8Qq&8W9;m)p4ptrfJ#YW=Tw>0_e>md9zwK?5D%9Y+N-L=Ufw`b8Z%C`i=pis?=HmL^S8z^}UFQ>SET? zcijU!fht09H63n-{qbh~;P~DvXAic0_wI)u4QaNn+n4$L>JkwiZBB-K7-YV8@4?j= ze&O9=hMb3W(f}tbZ}E_CF6UiCtI(dMI!lq8tJ~c$&)oJ1HS^m~OXCi6$-UALWDcbb zvgux$Hs61n>#JLJ=Wp3Gs(8W%9s5v(C{2~TPuEijYcSWZSD$+QjaSxZWRA_oT10^z z4z<{RXf~baFs4~mk5}8ajTeVeL_lDy!_m=NP|(_3lXIai4_pHQODSzvYg}(GuCGM2 z?^l2bP*s=J!~m#^XPpU=XXcy}A_9VHT@>&DY#2u8oafN5S~#R}9COZ_^(r7Y$fx%o zHle?|JioU-6%n-UE**9 zui(W)#Pf@DL3{bAGd}*>mHTjBU*!8Y;IBq3*f#wqQpdevd;9RvHM@Qrwr3}zqXGqt zU1U!{7@PyrsZP!{3z50d#%O3}S{41BIuI-*;tSrtjj!3!DDay|9Dacp_=g;NXU%uFY8Y^lPV{b7&f_DLLO7x)h1dEE0?#nb>?(lSg9<2w4bjZ?9xoaO`!OW@o&=e+|_Q8hE~U}}8vW0V2jCe`;JtsYuwNGeaUvJpU6S%5KccuKqAa0#1A1m4fM$lshIb$C zw0E96A4o+7rf*5ms6sVZQhE{ah(N0l@Dh|T z6Er|mP(jcpdS(RBQc|rI(Tgzf^C-4H52~u_BERFkM`UIBZ{f`SsUdedpcM zbzMqR(==w|QUbW$Zc~~{E=?OzMQTxn^sES=@B24D`X&G@8T1RW1g0XjiWniYb1|?d z0V5)JM-Qn2foe)gWKLS0b8(T|R&~ya$TUr+5E|b!4WKSHE4mN~6!2qM8eG(A?pDN| z?-D)}8WQJ%flro2%o7n=m6FEEu_tmjx5LHN)n>CvDVf2lZ&MMJ+P6)qnzGC}UtC-r zZTfDtWhuAA!FwNL%qfXT+qq>1VvIzz*=&{@qa{D=F4H_Bl89XG=1m{|=u6FC8_HAb zEoU?_L|`WjlLE=yP#jqN(S!ABZrIxj?K&rgm|V{F-lSKs)AZytN^ouk7z!!t_^B61;Ua)MBEW+G4Q*&!`D=)^Gy zAXTwawN?XXwB#oPi1D2smk}#HFT44e6y&$eKg($_mfufjb>> zCS)RHL(~{&Rov|MO<; z9(>;??ya~~2jJPMs%S;Wgj7Vn4aq5gUpzNBxg!G2$iNdH`?lHk`)T&wy_G4CSLpMI 
zs`ozo({KMu#M>WxonLY%kEe2;E^d)~bVu70MyIMJWfTkSK(v&qs732+dCF-nxoKJ= z3dl{{c@B+tOfV+_wVK5Pq36DBidh@k}l1dOqBjtrCtxB`1O=d@fw zGFZ3WVYe5l=5)+cD%m*)IWzL<_88St&SOcTZB(?05fF_qAh&JnLm1QSoXgX^q!@A* zMBMN9j_x!ev@ae^CnKBUX@WVAlNDaJ?M`dYgvG3`1)OF3(BFwFvGbn9vJ;QGQ}4X&L2 zo=-Nvu)^D^)(UNc>^i6diU3$DTGRDY%7@|h-FM$^A7bm;qs?a3cT>*JITIiP@7&YJ zPZ{v2U%zy878*b0ebO9#*u>3Trdq2Z=3JQzk!za9Osmx885(2Ut~Qr9*D2@qdOc1F zKpCBgs#MV;4k_R~=NPS44Z#uNLOUTs)AZvwGNXyKZ4*MMsWwflDJ?r~-*vrT-TUO$ zp##8S)x~3iBJ*l6cL{nEde+pGxPiFFed^xJ1YoOvQr@E$Y2pZGqdOK zmxhpiZ+VZg)%Z}{Y%%~*hTh?zP0&)kJeNsT{kY!W3%tM!{O-aFdf?lPIS~UB0-=#9 zLM2lK7}~}}jaoz|6UQ}I6v5aF{*dKWIMs2Hrcq#QXPOId<27%ksnR@b=9o ze{ecH-0p^H|7GvG@AeR1;034rbr9?oIAX;`Pt>+6g6J|6S>2f*D+W~o*ynh^4G zE);?a=rm8)AGqDqFKljlzJPpPc3(R^3Kw&bJeye+n~Dn3P`D_tKbU&9Rx=_(A6=Pu z9}YNsE+QdYP%Ms~r^|{jS@z%xc`bE9Ml-B1DfNhjgEEVFI9LsBvpXag``wgczgE$b zYVUdlDOC|wtQrJ>MnTo42;5PBC5j)uFP1GufiqZrO8uwRJsgI2kK22foApp+o5n{m zkKO8OqDgVglW(Rkl%Id&tv8hoY4x@HKls(=M|ij}ArMnllY|hxG1f%uM?c(M{FCQ@ z_Nw|{S^wxy$?MJ?$U^X`*+kfV?WewSuAlhg$tnz2JOB8}c$hHuKCVkjSTnmSSXf^$ z1TXLczunj$W+SN6@an_k|LV)X@Woet;7j+;V%g7D?&5b#2oC@jVH~XlRkW&9WHffn zo;(=_5vi))hgxNx5^=oV-Ii%qOHJ$7o3@RfnPUtwMydJZk*|+;>3I_Ivkw3OCPH|p zz71x0r_uw+{P6@^xGQmROKq0nU0{)z?=tAsR2KmmSTd23_|8+o%#fKGfI#mU2m(_R zl_CaJp{ivyRde1iq{Z{!yTm$ktyP~#KLG#;5D?G|0Tzmd84(H8(3Wc23IJwGcO-!! zVorInZCIFwC1vIdsAM*83T>&M6&6UUyyFthYzE*H<5W@WCfO@rkBw zFU}8$cyx56s?#v2W9PWm3V?mzuiACLy_4xbK0XG3d7k?|5>YM!2)>DH-Vl+Gp=imq zcr{f80XD~utxC?hW{I1=X__TO_&nn8oLj9L05B*B=m-%#IIpTpjmYx?3wUnE0C)MJ zcRzVnHKIgJgy3r((gH6{`fQbi`mKFyOya>PVL z41g#IMxe#CR;U>OiAa!<5kZN0;epiZ^9B2BiiCb$0l^Zue{MhUt#1N=E==OxrhrV0 zhzdrCjD9gYF|#U)Otscxfb&$aqKbE1iYz+|JM`X{G@Dt7eyU-O)tLWp|IuIlkKe5i zKKG+8%@i>T5}INuAV}CaHX+Ok{M)fhKc@$t?++1X*ZR}r&0N3)U==T~9tSPC*6`?4 zzi|6Ccz^eU_t$~OmPrAm$}~+~(-Uy(qVHUAzLc~`uT+&WkK}l@=@~r;C{d|3r%J1? 
ziyecocS_)x`>yZ0POJ{|6xgMdfanwVUul~@m7G#>(N!&D8iI3e7soX3#{)1i^G+rQ zUR!>-zdsBEH(j&tFE20A@OZOL(~JmXPWx$e!DE(t$4B0KbCeGAYPBl0=A4%}cnG1) zNmZK|ea`HCB|i*L>B1!kn2wXvIb(D|V~9TN%{bK&hld;9Ds z*6)PF`L(!GORF(W#e)+ta6FYtjzBHf6k=?-tCEMC!_6?PyEP(u@7kvCn|A-fZq=T8 z!=A&v?eRVhb&@*F#g}=WfvA+i>=`w%BO=G10CUdcVMwL)O{;|aap*XvREmiz#5TmH z0Y_2VUI$fgztRt{>l36(b+w43@Y4& z^?L0ax81LXl-b3(S~k7tH>3KgR!6y^HB%(SirVVsF;6aV$pv>;9pK3)zqlLVT=ApK z*SOa0gJydaT4qGkaZN|v@oKKZf=v@z55qJfFtMd9X}7Oh(HSF3u39`<0t8;f^=cI} z_MV(mRU}d~sZ~`85XelG>aC(PiY#VW6jYhf5W$wi#&bWDWdTZbFk_g7;Yu)92V{=3 zA_r@t0Jyp0QlEi_X7JmyLA<~VyufceUeE*IX0WdaYyj18dHf(T0<9(;>b38p3QEb` z1}c3cUB#Jmu8s3)?r*+a3CiU*vwQc^{U3k)WPSc{afPxcB6n*s$~H@ci=WO`2`khr(MQy%4#m0=@~;VP@w9 zWp{h%`VfN4BEgOAe1CLL?^F5Y<$v~#{B6ob+*HUjdC!`5X&BRC@G*G6u1-_!M+t&MTc#vFAO8vQ zB-Jce&80{Y1n}sIBK4vbB@nbC1CTo5n=68^>|Z_2~F?ni9y=ubRzf)wZqo+=O5z<9Nduo`<{_HGsSLm;4%T;O?RH z@yz0H<AE)X1R{bhiRJHwp}=b zd-v{LUtce}Pbd`t6imSY&=5^gMEbtJV^FmAZE$A8VXv5DjQx7^#v9(uE-o(Gwk5)8 zn#TRuwX0gQs7f9|8>|HEJU<$vubKIwXM^X&U>99txt(_Gc)`u6BJ z0Mg~biw)GiR_N%YL9TE9l`1`q6IT`53PO5jJV0X$w zXp33Ma+5np;MA(BdhY>Q89lOyh-q|;rniTI5n_z7@dPF!YSJ_fNZg*Be&y*~fBBmi z?&$XuRK#nuH>zCDd8k$~5F3~%6=@+R_|CfB{d($jS-*y}6j$pMC5fXFJL-?}^`!%G zzD(VNpS{J)@!j8hhM&0qfI6pbUsR{Q<|qdjq}v>9je8)Sy8i&tBg5e4Q<$s`=X;Wu2W*dzVCNw zI;8Pt9?o|+tG4T|Z(JL#R0T^VlOm{Xx_)=qiK&kvG)>i+na4DbheX*z+qf7Ghr?1& z?6dREt&g^=roDfBDpq%SBDGw~^*n^=Hk%_LJnq*lmQrdr>tseVubR|u+8p4QuLqB7 z7^k54Ve{$1JxEou^B9jq+f4Uz_M5fqlnUzL&fM73^x&ndd9PeciSgtWZjTWZ#>>7t zlDXFD>b(|z1 zc5!jFmAYPUrPS@&stZwbu__4Qom<6@8aJg`%oa17lu~q_7|cLa(>OVDecKhO^E|U- zFk)k7KWi2d^5i{nXlKo)z|10ICV*ONZCf(KTD3|>L?7I67;DNzR0-74q$PxANOML_ z^BA3{biIP|^6A;3+kW%I%lG%?;M?_smoNS2nbDVu)>v`kySbO-iWSs!g9-@@f^3Zy zbhP@;dxwL2$W3run_%co$Wgl;!8tjQPTZXi1${Lv=(?t+DL9HTnzmI5QiXJgG$?vR z1ZH9uQ<2KSkz=ifL{_ENDk6^95vvGTE{;H)OBxs@k^x#WTb>WL6cK!DtLf6y95!6V z4tm6>wsdGvhaS=3H9y12^f*yE*7x>+Y7xcil8KiepiU4$H++qMSF%WmM zSjzaORsaO#kqyANP7GWu5COn}ft8!fr$6`UKPfN&h`#)Z@KgWx@wGqeV*Zvr# 
z$1wv$9Eq{SUgm@;$D>fnKWLX<;01oy;@4Mp-~HUtf!k(=p~+a~vi(GV?~kq0p9cXf zGELHL`?>6D;t&C=KP%1edG&K|U3~D)(+|jnX*YMkD~mHr>$@S%MtnUFGwO60wz}C* zqgFZcvFW^Tx)D;C;SYTOmn4_>FQ1y?@p|+1_utx;scyTqYYr9r{vfw4dA%=RedqT5 z+tu$o$rp9KZm%wfaD0MSQ|9cLg6A=t|9cM+>YMe)|99|>zt|m~OkE$@ z+IWl8yVGz}Hm{U2JSuImVcSLT*B_3<=JIF1^wGbBuYKBI{=Sestk$*rl-#_}G=|a^ zo_FuRmGx~0n>F2H6{7%vsk!a4NAY8~U*)vAP1jKd4?NzacW-ixU20{>#er`cvp7NN zW3=6Yh;rm&;HtV8wYe|w0>5Q^yguyX(mnaGS32sjUhLj_`sRBdJ{|tx5B>1#YNt&& z@F8j3s?BLoM{Apk&DQi~5Ho4p$Y^8=MrjzYZ%1Y<22(1?+(z78KixFVYIVQsRxvh& z4gjiT5s9%?ApY^~Si03SXk8`f7dc;z5 z)vRgqcvqxS2ru{h_3F6qIxQ6dx^;Ir=v*p*#?=N)iW!oXJl9$e^F-uAaE=MAI!9;s z?_FQrHcg|d>-GBb@-pYVTCKXSV`hi&aGmB_nml8zO4J1FdETy%FG_eR?(@7>)#?gi zL`tj4-Kg^oZ6Er-_;&w&zx?4p|NFL2cB`kiha@5h9-BPs?9=(pV-h4uCQ9VCo70p^ z+s2#SK9!6yIs%ZoZ6IQ1Z=q=dJG2?WTx6%kpw5zv2rD`Y-UlB75E!7UvVm1q!iqtC zytBkWBSa!VtF=@B03=mKU}TSXHp0t77cf|~I1oY1z>L)x2uwk$sv-%BKv6PfMpZCo zLskSU>b-Z-0f1^1u;60bQpRXV4%oQ>gsLi1RO>2KAapKPbPc*Ry|a4lhlbmCkB(@- z`Op5sU;F%P^X(q;EDz5<)5##@g6~Kx3j#n@Ax~oP+fehk+?jAE84nACViJfz&A>GZ z2t^{nRV~NOS`bRxThqUFh#$W5)+=LegvuXZkkVH&5KM2>SQ zXl1+INGZ&2+wdl~W|ngfOzWn<(KOG80Mxh5LF#oL&bl?VjXOx=d{k`L>zg#DlIN1e zx$)xZqtnyYyH(q+yME5;P^S>W)7|BM8fz(iXqu*Z@9Od4CV%p5JI!?yj<0t6clYnD zPmf=k`4j%=9MApc_4A8cE)J7ZcIZ#>^d&pFf3DKDZ4=|1)1=k8-hluNk|%DtNZU>m zgmBWme9#ed-EOuqgn5{gu7TaiGLP@oS6&P0fvb;)n|5=rKa7{Rpla+x8dFZG zZ&$%Lx5Mai9On7umtL925yolLcJXLE$;D6@gt^1)5HH=bRAE)8UGv!oO zt(pL!6p22ViLlk6hI!pY%#bQH>wdU8q&%ISok7xhp2)aww*X)fr+EZWA`mUXHyr#t zRgwL|*&`XG>O#!5UJbXaW@?f$Y?7r6w->kcYIpDU_9;n>y(KAKWACN>{`v=-^&1!Q zbOP;SjE!&Fjq@u&Ce~c`hX(6Dfe#o?WjcNH;v0S*o9)>fP5&$V6fmUWsvPxw_t|J= z7kDj5;TXFUDG4D7fNLWYm}`j^yK+&PLiye&b)55b zd~r#qC;8re8utJVD*$E-Ab?O20e}ra5zr9fnHkc;NHVTM6#<)K0K!mBC63;VAt;jb zGmfDud#?ztR{LFA6rBLwa>iP`f`|d2l=5-9_qP*jwef9AMTtTPHD$HJfxpA-?F;-v zjTiL5?@*jhob54w_+NUdU;V;=w|w#+YbGyG-}JCb);l>^*(W&axSEMC9^eH%@B%OJ zUl$c@JEtMkw)WlpRk`|ld;6g#TioX~Nm=+dRA%3{ZFczf)srEQtF9?Er&=VmyP92A z5F%#sjLAfb_e=z*$4A5d5ToDS+**~H{H8T#E&)FG>CX_+TkpO5>Cb-V&G+BFx!vuG 
zY*)=r4@ZpRU(fRgpS$=A@3pU9o*jELKkQt&8C@uz0jr3x{|(P5{7fms5WsZ#lgy}A zWL84~BS1tjMPoibIolq~)AzozCGUI$bIy5HK!WzQq{@?jC?|tT9vNWP*qB~biT#Dg;3#X}HRf$h`h54v=@t`}s1@$b3HYtVbJLtcN zuYaz2xlBi!i%%XAWzCqSx$brc{xx9M+>4)NQpXb-?%gy~) z^jCiB`0Atf$Nu-%$DeC2pU`T^63q;G)4-_c!C`?7!ET)cd8*{%)D_U1ep1X`_Tr8} zGwg;U37lH%4tzGOR;#0)K}&ky@+6wblQ%Pc@d$o_7x=B>IWh3@#C!nA*N@lA#rC(^ zXsVYVeC6Nz5C6R%e&a{~`G4exTeqo|iYis{0#hlv22Poge27eDQi^CP^I<>stL@>i zE2V%*DXDMU)6>)KcDq>SE`fSxX5dnwq`V|QF7iH04^68Xn0`y?<>Qows(pM2&y53@ zM($ilnZRhV5Oj`~*!{1OFfwrPtI3i@qu)>oP|Z*c5zKU+b1K>SmLZy%NPX`2Tczfz z%Z9&{5G?ID=XSe;s^*+CJ5^FOB(7F%F|=ib!J44{h(F`9^&ND&bqLI@${+^*ID5JCtc0AflhrIK@&ddKuEZpJxh zB)St~GJHniz@j5%K+nn~@0P$brwROc0i~)G5i`j}D`v|G6qYMS=*%#s)HF4v()1k> zIOoj7A+bj^vOA~bnsZjw(`&OkYJ;oY z+BRn{B`cyO7gMX%Ic&Z2-p^U93b6~ZbFrh)kn1A(&>}FWqD2vWgcy9&1!frXFP`Jm zdiZ@0Z_Z$%BFCrAJlrN@E9#t^=UFWy(|*{kx7~xIlT-_IUCw!c8k!IZY@VAKZw~tl zbCU9m-Z^h6J-ND=Eh7^%8|WgMM}YG@uQuxfRoT#V@!XCvPQ%!CF+?|xqpCJtyWB6+ zG^yCKVVcSEaLG9j!?4}1bIyo}MrmGZ*Ze%s$)bRp(-6HG!jtP;m|VYJ4bGv>)A`eH z?)*2ymu^;{kD(PJY+GXwRmU_UDz%ZE&+0<+`V`(esW(6;SKCkfn|Hd4i>qn(+C4aJ z@0XO$-Fl>`(=GT-qx#I1#{j^4zif$S<{T4IF0+Y-*gCUP3K2zS0MIICh91pP)tanq z=Gkmn)>fqJXdP1DJ4W>W|Ur13?UGa z)VkC)5|Q`(T=Vd^8TIbag?Q9S2zU!LTV0ZgU*HA4Gk6i3_#F!KgFnn7d?R__nWsIp zFU411-Hjs2`nf8sPiQD4x7cp+0QGk%jr#&G@Et&>w?a;kkmsQJmGM6w65VD$O%pT6 z7-J0RC>2S0iZ1r+qbH9)Y`uHq{{2$Yo@%zNfJjRI1v zBI2m&_WJfv(}Qkzv_87Hy*_sM#>1E2{m>roui2NDSGLjY##T1>4u6;4`nRs(Pow-ygK0NT7zI#`GIy4J;b>in{n}q8TZm0hF1WRvf4*8?lm_S!|D2s~9g1S65Rp zCUY*%QwCX7C8S6NfU2shM23iE??R|o#Ql?{uXLaKb{5b?M^4t(1f7koj7mQ6Cfd*&E`@@(P$jawKU#r?>};hcDIKsH~Bif z+|=f{a|(ey8k+YI0u*Srj8iT6JeyQ`uw z6Ejl*Ravx>5P3--Cg+JrtCdo_wj=b2SZX5lDNXG4IX&>4_JEIB6ny+&5wVnP1`BmD zrL=^T5Ydnf3AJj;cX~uBbeH~JodZHwB)Q;X+k59l^wIrCInSk(S{L_a0)XS=JHIXf;_b{hh{C^rgr`$sp1^&*QM)#n|_~ne}~tdV1=; zzqq(qes|8bZQCg&Gn?jl-S%Mkc-qbLiDMuT!%CuCnp0AQe~rq5J9R1FkF#0&_Tn2DJPy=PSu6EH@{Oiu6IH-H%toWi0x;s8}uck|%d z5S**!wvBRL>c8>l|KfYo{4f9C|DV73wRisHXa2zd^~+C>PG7or{|L(mBFFSQFAep% 
zK-seSdGEWfs{nvpX30Y6kHXPrFs<{nwTDvA_rsgEf4b^oan02FTVc!_D}#LgT`#-TEaLcClQ3?XtcDui5cyEi-kl z2Itzu1&oc^f&)k7i6ar2D0@{$%pAN6kvNhI!3QF$xm2jG=@AhK3lcMj_3>4%!_&)8 z=jQWUZP+Kj8k@uUcpwTC++iHzD(;5yxZV8!+559#+p;S=44chrc5|BBy)H9fW}+!H z)v}7kVlTE5X;}@)AZ3%3LJme)vMk$lg#F_W`^T0)?65`q%Ml9e$KjADnW9LCTrzF; zB1-P$i=XFcaV#0c7OMch0?Q@4fcgYtJ#} z7@Pj$*hf@j=6xD>W51}I$S(R&dpAijrnB|2nJt=S0o(Sy7!Z)5IOkMBwXox}eArYI$tXeB6 zn|T%zAYm(hoM@Pi$7Fe;!P+WTl0ZgP4QWjATYJF}Fqk_F=KJoz6@X{wsxXeDloFX$ zWlFh?p~&}Ud3t_g65?#SD5?NDZ#g~^G%k=4is5Eb9ZJs3^gS`n}Ko_SZN6@V$qBLBp#hFH`?D6}?APQY&%zsE7OlFYsN1j4yfE zuSOZGd>wyaz5L-L`@)8zkD(vOaT2Pq3b3df(edK)JaS-!;CSr2oHLk}l+DS0>SK&^ z^~5<>N?~S004Z5Dgy21QW9QcE$5&5?$VI=;1CQa;pZwI@Z@;av`o2$nI(_vkU9aXR z@F$*!IyNz^9&|sQAD?`M{>5=|Z=DA266+)wocx*>r*8l$7y=R+su`dG8jt~}!j%D; z2{55E91StX%6X)cwwF@?NXd|A*`tUU!Yq3vqFG}Fge%i0g!Oj6Ji%u_@fY1fF+bdt z`)>a(+SuT}c;W^DGm404gv5Tc7QI`5mN%0=%Sy}UeXU$5?$)z#fkeF8Uc z=r8=Q>~sG;diB5C4%<>ZOLt`zGveZ;nKmP>mnoOh?}7xZ*8;^RLAb;&#Rsf0mg{U! z#=05BSLtf`%y&+@{kF*?HZ8IXF5-3qm_F)T;RRmcyB^Q0x}LAgt6!<}vESzNHy3A3 z`|v?&)+KNL)|YDf`_Bd9sug9Xcl*yql(>Oe&hCfB-vWa=6)S>MHQrnTRx1 zl~NjpK7`P=3q&%rB54+u&c!1IJgECs=2AYwAx-p~j{mmoi-_MK$uqN33aB6w96m;~ zfbUQdzKJqMfVt*)&?4WS4ZCT&GDta#2sy_Hb@T-Q)zINgX{Zqq0rPBOTuLdW0DyBY z=Uhq&A(WDGF8lpHm#MDnwrxeSXeJ;4Jx~~621(PhYugDkI!-$;nzMCZcomyInwz7!7RDN7>Q_3uWg!z_t6lFF%h$Q zA_6O*MbrSLlw{t|3gQr|#nFmfB}2)Qx0|c=di}Z2e)jzQ{L5ed^7ZxgYPG6D>~?*O zF{RWri)qY`0~ig%P*ss{JP61>9HfX4qGN9+n#oYSkNq$;%f+*ci#vDjG)+@V>ALO# zn+X5_fB;EEK~&vtx0{0&0ASHBoOAt@)QX4%UzyErH>zNOfP}=Xb~vTCzYa|i>{c0l z!>P>fG)hE_KuGMc06CyUssPiJ7tJC|nKKWZqhTD$W&#C7ib~d849q!)Xf8w|%2`B& zNDU1UtEw8uaaQl%zkff*I8BrHKBW}A&pESmV@fIK>+5UDrR%zQ(vrF|sX!5D6%i^Z zyQ&v!+|Z{;3%0Iq?HyYEuis5)*W2HJy#EWo`{DgHonMrGn^#Vs?cOuAaU21#w|==? 
zo}QjwUvB|ORlJWrctDzp%-l`RQ!3d-_Aa8B(R^CPkq@&sm721V0h)m-!A+Vs8RB8~ zA|ef1iOt)rY1jP^t$&kQ#a^X-_ z6e0G`1yi1Hfl@4`>}pTwOrT`p;Q#h7e(ftCY?_xp{a63a&pUVYe=TpHKluD-?!Hv= zXe^S%f6Oez15{kRE4nf@qx~Xzj!Wcs3 zyTqYV zUN-I1-SrWfH(j@j53Zhd!*H}%?ov;V$8ln!Ila8UF465kN^8q8b?^82gMw1o5lv6kLp^Yh}Qc4WfJiWd5F8KX^f6|_M@N;;KiH0Veox~@P z9yd)LLMSTpnz`Ta=ccilq&|#4XHaYFg&BD7OGzSxbKy zUp0zJJxJf%$@LJbiQQ`55V8+*ruqDiQ;N~dlzYbv(IY*9y^wf7|lGK1hYHE=3E!}te+}>mE1)R+fJ{iUC_qn~~4&+V(%+RJw&4qMl^5TGo~H?+E{8aJEwpL~#W_TE3Z`+(VZ<8Z|5ANcfVe&x+KcHRE|@$utl-T8P8 z<7RRY>d~rb&>TPdYbT|8^!YzOwl9V5El30S=!{eZeuHg+9pdk1ZK)^_AQ>^DA=um} zisn?cNP)nF5?zjEpf(n?lA#U}0S+v~ywO&j*#cAToY(!eLN&C@3Nr%~hcws0iv5h=7e|vtO%p+Q0u6-EZ#? zU--~>Gzdu`l&;^>FiFZK`q)cMh$*_2UJ z5UB0xyQOqOt8Lu`^KApi^%TZ0zSaD$A-n|L+dglTe>Kxq>@D-Jft97xtYHU7ZgagJ zrMEQsy*%#oGFA>bSdXcM;F@?fOwC7qAH2W|d=DcWpKZ6743{2DyRO#jmmi&9R^3DI{QP@e_;hIh;7|46r-DE*obA2-(IdXQI znduWLnx~{-9?5x!=uK2KlbU1a7>ghxvUA<8tEwu-IF2L0tfd9-Jt7?H?>fy#>Kuu< zoF_+z65~t}%x8wl0l>@?kR2kR=W6xLHvm9UE2S`dW-_3ZMRwh0w;hII7`oFtC(HHG zoXI?&DtPYMFt=`)8P3VO0EA3vv;SV#b*YraV%07e0Fb9*o(TyJjL4J?ikYdPfie*y zG5fX-RaH${3}jI^-}lK+ERWXA{_fqoNc5Gjd{snB%Clbn=;$b=*>T9BWAD9Zo{e`7 z{R*O4#QA9E5tJNj0V8Da?A-C)r53q!=g#r*v6j>i-Tr#BXzNfNe9T28=RA$0_Z|Rq znVfTk?1De!xEiSd@`rN(W~ci@&x)#wfq}8An&rc{$eR{XHl7&_GE)Eo!w^EvH4=p2 z%q(~|LqtU|Q<0R_3L_CCksqgth-QIsDMeIcteU2o^OollcGq>&G|f&>ecyL|@0`n7 zn7L^h?|sf$o;;Cl?OLK(O)w0Sk-UrE3^-5Kil)+aBFy$uTy)NF+6Vua4<7y0-@N$W z{=Nsl^MpPK`o_6|r_>M5xfr8pzPLO$GXPpF7W@6RbH{z(Z?^q<)dX^823pcI#i|BR zWP)ml<`KvWf`J(U;Jl?m1p}P}1m^qH08s5P=LsFpx3qmo2EaF5iw|8N0p_Y3%{QjW zp`XL%rVm0?LWa3N1_1%4RFH|Bcdi2G4N(l$2nm55kOM?jLsbzel7LIerLqdli{oE- zXWKUSXbf_8w`)K7)z#5w@7>?&hzY9Z*z@>tD+QPt+;mWpp{W^~Cr)EesDX?CP*DxD z5g}W}d$)kLx#IPeUPT;W8J4u_yMM^un_13TBqNz~6=N{S&P6RYj^j8EqPRYKiHMfV z)!ZXB+kB;zrfHhTGdCQRQnD70K@o;DZH8UWSxgZzrBs({724*^pI<*ajji~0(lSX- z<8<6Ejrg)$t4I|?yKI<-zG%ld4{# zC?Qr=czSuUTr?M#*L1YF!jWSz(I!?N=wf?aRI?N|^)VDrPHmXRj`rO&hB}l|N>LRv z0h8HZ2$9_4sCLd_2IpLG9s$!hUGF<0V(*a1N1~T 
zjdnW19IE`iSRa`br*$>3jG)*28^7p1|#le!%j0~9ZmW%zU#O`$vy{ z<}=&>!`najze~+){r>4o{$v9eAN7!5;03;GFmdP5hq8?#gsXSk?|VA@!F!kgF|m>d z%1}g|t5F>W2SRB(Y<5?@OvdJ%YsMVNW6`9TQwrpC`zcDLJgLx1t;yjm?!S0|Q>IbN*VNfII~YvXbXysdq(+z)ZqI-+Wu3W=KE)Xdp&{PT2zmbS$ETM1TrrXj&4nZ)=yjCufZ^lYrdR z7gY5__Xbrpgn4^p6t!_Y)cW9MPAl#{SXNssoj76$uE5V>EE@=zelv%eQ+hm7Y~2W)#8;DUhVrzX{r|&SW*P%QDho9 z_h7|v0Q;E9_Fmuxz6)_Hot|-9wU5j2%Bw%PT{f9ET=(nx^zH8bA6+j0==cA?Fm8*3 z=&CSkh^-k6Fr{uMDG$TY?>lmQc6Jt*(QwX54u^Bo+(W zVKE0nbUf?L$+07H!7BoYGMW|3BnF%o)P7=?$_A>}JCU5doZ|baAm*9v>ff-OkMR`+e|m zb7lY!L;%de4D%D})+qV*gyfua&atYN%cTpxACn>wsH&P2DFsZ;5Qve;ROh;k_tE<> zO;bOnNU`s>NJgMtx63&{eE6_!+n@WzU+BC2VzHRj@$bL?J|d3e7-KA@K=6+Ewm5F8 zb8rp;!VQt*eFOkzRs|ocnsDle>#K|1=4!oMIl!5IKt#17ayV%o5!gNNLxne@^(+cV zcrbDLunCUccBzLj9E3OaUQ^azgN|^ZD;F`aTsEZijcblzXAX$r60U^_P0i_R>T5> zL=q6fNi+A8)yt7$C>?;I+i7>U_geGA<-PYV^1uK0e)-@1T`&L0dU3a@&bKtPHT(U3 zyaKp-Y0L!RdkIX3ph z9*Ol(`o#(U_Q6zHC2m!2Zmy>C|xRZT+ z=V^1^yLS6Q1HF*4O{qp3LObm*^Se-!Gb zuB)mV_L-6mQzAgePIEC;9~`IwLff|csZ&)SBNImsQh|KJQ*CItkRX~lL7qK*qF*um|VeFu!1t3Y`l)Wkgn1hL| zpjDWtkSU6wr;42lOnK@6`5SHMX(}8=a)mk*#hySULw9jpt-4!&kD(~z$V zg;I6N)JvFaAd|>LHy#3y?T`j$B``yqFV$OvqS@Mw3pybl5 z8ueO=K7I1+;_A|?YnpmLjL$Bv&hDI4RaG_h#nt87EkE~(&&ZUf{dl?Ew2R6{*^K*k zxmY{X=H-{~o}@{>@b+636CXc3N$2~E|N4#Qby>f()PeIRb4%cVBh`SA5gO}3S|;R5 zsDVSVq$vS10U4v9AX-#wgI>S?=5dq>lvtf7wQ?I~C?Z;lnTaWpGqa+*s^A~_i9eJ- z_vxS7Ud8L!;kqlZ05n7Z0x&cq03g$UvfQK(`}7fQNHD)xGl64@(V9Ts*|T$?f?jEM z3_ca%AUNVjzaqFrD zPD|VEvb%8A&on=H8t9=Pj*5Hv-OH1!U(EieVZ&exBI;JW4*rLwe}e6kr|+%4`X8|M zc>l*1{^dH@lY4(*diG2w&7_`vlP21a!MpEO@6H!^fq$B~byt6$Qv?7gBgeS<^ygCJ z-+k+6{iPhQ+t(ky^gABD4AKEt75U7Ss^wuA{Z>m(X_|BC2@OzVVDA=}7guwwDy1|F z7>P(k`+o3&5f7z6q5~^%<99>PFVu(#@QwC3hhaY_!aMvG9fAiC=caxp!b9)?n0M|Q zj{Q)Y1c!ux3d*VlZ#Vx^S1xUYIJ${U7Zzu(6gXCXWQ z%$W+a>CkOy-g~cfFjFD}vRQErL{#+P*lP-gplUObF~>{FA=8zKV49}B@2f?-S{(sU z&N;^Dq65&_M45)0XTdkW^w0b1n3?8c2z6aoO#_4_D*!kbR1@fpxLPVkuR%Exm-d0Y1VG_AudVsN&woQiauX3j^B)laG3RWcDUC&RoSSXCkT#bPlG0|<%alv1m+0Ef2d5E0iJ+~!~ z2x$TO@iM1ifQ^vX6$GMFQd>QzW=znk)~)G))sT15;qm 
zDhAX|!!Qi1rj5(i1g2>!WQbVT^|tS?hW!L%6{3QW<2m4K7>3Ks%U90M9$!AY-ff}} z-8gh*(#cNO$DyrLzeBO;ebv-OMW%GNSVZbCcE>-r`Qa}P51wL_oZ^z2c7;@8?a0+d za4KZvwP~hl??s4+kPr-Trlt%497Y2xbN|t-1Xp!v#LmrHBIMb&P8Et-*HYni{#B2lm;4 z^GL3C5!8yq1>!Im6IUE_Net+KO3J>9S65d;TD283my`o}=iG9+9A#>nrt3N)niEZi zzQ0*}(`_b%Ll;6&H4ISKH2|opb52CEWKf~tyd%@nOu zHo`)fMY83j0*Y8US}pX04?gNs^#b3sctH<*Tw&Wjy1I|Val$>MxV!uLcOL%GEAPI& z<@#)O^z7p2eECoG;h$*Lk2U%4%~kpZUf??d);j90*rK4Ds{L}D`sJOk$3J5xRUPJc zxftX2s(btGcPqab9f5;k9LG{Z+j5AR-K1i>VYyuP`+eY$MW$4CV;`y}?}wsVg{p1a z*+5p6&d%=k*VCQF8XElhM_>Hfn_s(ob_^bu_fJ{~S0oSG=CiLo{NTwpbYG2&Dwb)v zrn}u2e?|6k^vNb&(Bp+Ap=L+dddJ$Gn`X~!G<35% z2}}t1TkL&&-bng3+7S_l=tzxi+| zf(}|`9u|qriaH(80123+m`=Te7AkSIOD1)e3izU}E8IVI^{cr9c3$P@_@VDx-=5wr zXaB-$Klx}kun=k9OFP6(?h%9NE(twC}@7|3uK1HnSx{lQ_48t&lP^rK$3}zBSVD@G< zjhUMUL1#t$%&-#9?8UDc@Q2EJe4Y&WFkR-?Ccyz_}J& z=AS(kAYf)gG*!(ifXK`M>BjiDs45Z?k(kj<8G3FSn2qCa4dv&iff(a#wM0bTc@ZfE zQSG*q!#Rgwhb}ZRv=4!ZK#U@#6e3qum6;7=+HSW^vq&jTQ=VO7rfE>srfF_Y8qHCG zH`V7u4fa`l_1(L7bIvIxDrL6WnY{!NKs1Y~6_%~;Xkbx0cHJ4I|$V|GfUmq>ZOr-StZ3vZ!9z3i}Z!;bM z;PKE&`%Bu{pLlNpaO~uE$WZhqHOqCI`WOT9e4iO|zloF9=UzX_TBoIzrS zb8iFy7#d2gg2_AAR8@0yws94lt?ufgtrm8%GHEd#hADcDoJi+O|~z07yAEG9M`nXxwMPJHg<+h8%N`9e z2A{9Bf12tuPDDsI1g+|vSwdu3XH3OF$svWJAVtlSC=@MZK!60O2mr=NT(pI{4BHIE z+N>7KZWV^1U9HZVTE~9Oc?hAZL!FZyA1yEn5nbb}ch^3~e&5xTu8xi_$03Q z**Nw9(A3d6$G}xpdG_0~nV(|pP_6jj0{NV?Ampj677h3y-M*i2A79$s{c*=_O&wu{ zK$GL0NQ<;Jt?E_aTx=9-5EdtxUM+9h#n0Zq{41k|v8>kx_htKjhH3(9BkW2UhhKi_ zUmjln^NV&3(oq(kDhm0Acc)UyY$z0CoKj&nW>1D?*z(v%C=PP+jED?irdkTjOBnQYVVbsm^}TueU*KbbkJ+Z+1#XZFb8N`=qpy5Y;krn& zF52aQcgF7Y)&1-Lr*|Iw5Sqp92dCk5>-vv+$S?2$|Nr9o*Z6Pxfr5qx&X9l=tN=i8 zuBvUf`L+xoQ-R{iO>78bn*mnC1^}}C_DAdAb?@0v0gS#4*j5|1(|cCZv2?$zmsJz* zuTB-9Pu-qIw_F8?(l!fX=X(LP;%I=WK5<$`W#_#gx}o0=pa0Bf4djdGU-^Z%-aI=y z+39G}wPz=__g7CY2hX2;`L$)Wc9r8kZ3kY1?my5k~?9Y5V~CIMhJbC%9y9fl0&fw(?xT?9p2r> zqnP7>(rCN2v@Vy~Xg@wVanpNG-)WETLOCTYp)MC!`sN2$SNRRZ#cOpNr19(c%JS~8 z#J3(ly2|+AD}Twkwom)Ji@R^V{BOYijdf#v)*i~zNs`rMUE?aSu9T``lwzcHa-_Up 
zuY7KO=g$7}y&c1j3j=vx3{y|!ocZJB!g2A; zT1o<7CIDde-p6Ump{lB?BIah%oK>f}3|-fitRVyhO}U3zl${Wn@en7#^J1p}?68y! z0JHKB55D9=t^z=?`9&1bnCGBT&WE5`(?gqq88Cs&!f-;ISK0YBkq)`WX7d}a7#0!H zjA@#!%x4;ah!_dY7KA$awTOTjm=U7Rxd8wOCUQtTp4Idi{jBv?RrWL|YceXbpN*Ob z#7xZ!J21=9wFoX*iqT^NalTR%E1FD7$)X4@1aG;dX+*TM2X|(&Cl?)tAs0n*?V_@w z_ul&$*tsG?hD1z)ZQzdij26nzVV=cU{gB zL%pn9=4c$U5e8BqovV0e#7IO8$^fNgLL{cSUaW>H%7QMiV-lOcM4o)8mndbE1~Uuf zCKH=_BmtC}LU06moE#$ncn&N=4#7-_3UG`K67}1Sb76UOcd=MpTwL@iH%&7PgLiJZ zT+G1N-u?drt^fL{iRH7K=qTd8uB%4DUaqPbcl14I{55O|?S}^$7dQ zL+bO|3ghnDEfQcuQ|BrL|8R8A77zaJ)%$;Lzdvp6H0klSA5YP5udmR%?QRSwCz7W} zn{B9@T|ZoJHlO^&C!SrN6Ug=TrEh#>zlh81oFFnfKvZHS08k=QRKSCa1|o8tZ$<

n$09 zNSY?yUfnxBJt977n#M;kiR&e>bG!X+++U4D@azE+!1{Q-9iN3I*;emEzt{Xr^0~i1 z{T^S3dv_Wi9hrJ6RhA$$IUxd@6KqP(*n)4l%v9H`I|l${05AYigPoc4XkJn_Y|Kqn zW+i8K8*esKS2qvTkgPa`#wIF#Mqi!0biF?|`xTwuawFCr-pe^aKq(_%EO4nHZMC?+ zJ_Du8{pLJht{1KIfRY0G9O!A&4X%mLE}kuqjzDp<-^42H$EgbSFz(Mz zk1sEuPSHQ@Hv8S^IA$nKY#Dgy1`(;7rarFwu^Ttj(dn_Fj^j9vqvw#ujrWngM?}r$ zDWq|7l_%z9SnRFilxnDaXz#M?W7-aF%+{PSKvQFzrL;y^(PY-r7_dq%+Tb6?rT14y z+h>2^(cgOW>Tf^7(B;~Py=b=>3k?wl2I!|~0*rV_JpR7V|`Zu=q!$`ft7P`SP zfoCyOG8Y}H70pWrdD!Z(=Q6S?l|qI{72Dx@z@fJZ005*AfrUZ>AOxfg2*AGyH!u^J zw|gcuAOkcorP1SYrpu;lr#86^$(erbdhy$;>rJw3o@?h-wiG=~OQ4Te*U`5N2Vb~v z=wbSZxB&n_#{vfI4#`U<08$ZnVI@G1Lm3Is8Dq94R(qR1>Qne#iP`6KZfOO=Z074~ zb#mH2eLQP@o|pA}&*KF>@G%8@XNI#>1pv@;Fr~BC{nMX*|0n*P^H=}X>T-ztH$b-< z-BY)(KDw3b1zzC45#Qh@56@Z0d{Z3k57g9DOchZ}ku-dJ1Gs10Nv z-7kIlm+BZN9l!J|zm!V8b9yq2-R0%w_kH3MCOYN(?CHh({d?g2qsQ;Jr_23*7uPEo z?3Dm5|MV|C9^RS$*!bWRr>n>I^j)H(e)JTsc(D`lkc7#pBdh9X#K}1`#3a~n3@s75 zlQqfx0Cl|@c2`dpL;d~jlrH|(+wXj#$-NKA;0m*=qdLDAU@{|PVDCKuh^d$Y(lGRO zIJ!Q6{MPZ0z^NnD3Mo$n-=c;7H@)=V;0-csS^=PxgdS#h0!Dg%-aP;L4;`5i&G$~d zKsDhAXsRSS31^*B9R;ck)&y`G3prDzH2{oXPz`q&o+@TBHQ6oTa?zy|i7i#HJr zO0`#ZGPyL2Xcz-=)l4cfNGO3}SyDFdfQ$f$XTc+gf(U$E?S^0A1-^|ik`U2AI93#o z!ppzy{de5|`6u6x&wlbx{&T-;MNI({F)T<#75gd1L3$Aj4xRJJY^DHkbaXU(cLe8V zXO)~28ZfD6Hye)(!(e9KduE=7eiqsjz?>5>zg>7A7zqv`rgI$!5jdAJ4;XQd8JKg7 zarVF!v_q-^0sx>M#I%R6nCW4?6gpHJXTntt1j(Eo660r~JphUb;YW7DnT3KE#dk zeU|CXZ3^?(x7+Q-#l>c`VdkUbMcdZXlmNgx9>5L5s|0%RaHKvOIRh{T@JArS*P)k0>5Hn&aWoTnin;?a8PTo94r zIv_JT=HQ7)70`@`O3HIT0W%N7kjBY-Z)U?dl~P1xHYTcKB;c_hreOl)5CaltDI(~M z3Gv|iU~^L)69E#KQAyLmedy+B50`*RRdbfP@gy(L;`92|aV)0wCC;rn_>F(5s75Si*hS78PK zSwT(QO!J{5L`4tTm))fw5Q?`Zn4Rp&$wWHH@+qUuH zON%e=2;;YAME)^^i8fVjIt^2HJ`l5*BD(SQ$FijINA-TExr<5{uy@5G&pd%qV=Tb^znboTkX~T z)=^4P6C@-??;ZO9LUoZh`rrH6`YolNhjzg{x~pN+ag6nuHBHkELqCkEBr)CZI%cd~ zTr};rpAcZCs|6&4Fv%h8VczYpmdjX20PS~E-}hHnMC3xKhGA0GZtN%!+|T1@d>Yzl{#1#x zbHgMiQdy{66gc??0^(*-yC&=+Y)A9% z`Zf6K_VSaD_*m|o*6X^l)aUAylCL*}$gC@DltIt|%;Sh+n!cR|Kn 
zHFwu}z57S)J6||G|LPNu4Z2BTn9@i%l{{!Et|A-6jEKmwh~z9P7=n-Oed#xsQig|r z7&q6PhY2~$m-&Ey**h@bBz|L_Agmo{=K0+tT>llbdCc7x}C&Qv&CR_i%QjG=_O zj6yYXr=9f(;1Ll2}3+3rL{5Q+HU1$P+u%w*h%`r#(H7!IL_HyvYeuw9ym6hhc+Hp}I54*R@! z@7`{=8^`fWU;N6+$%*&8K3cx=%1hn_gTvt<(t#7W(Hs!bjjr-gF+5nlAR5T+sYRR( zHV6@soOu>@=o}lUBV<7}0t9vrXilIlf&^$}3qTPGL`?{@5mU*05YtUdykEz0;|9AWcROs zYT>}ZthgB0r`H$nA-P3epDmVMDTo|G0HEErTP#-NFf?_Wb9VmVV5l(r0v$Y;4)e!u z#Jcm2MQ3i0=Kq7qYzm~N^Z#%rtn_dv0xBXB9b73sr0CLtTKjO)A|YZzQA05>L+99- z=Je?~{uTg06#yis$s~keO+6&*M~O^;$(vhvzEj1?pM2@9_kQ-x51w9bCXz3m!`*xL zmKB12cNg?Xt@1%DQ&mGiL`73VKwxs-d+(tXwvs951ZKyd-aLM473=t6!)M^*qBsI{3)yXoq zN-AKeHjd+Vw@Z19b-iA%-aCI>tXyq(%cG^MVi7^prjF+5*|7WI>e>5G9{={&Kdt-0 z1lTks-R^hUnX6*yuHxb-M1NiU%YO5CPk)m?3r@o_Mu4ai(@a>Z5D2T6@2pJFd#?o$ zXCEyCQO!ltbQa${uD|ci@J#jD3Bg5E^%9_awpcBPaZKG9e8t{Hjbk3CRB8=P)9i+X zi1U)=yay>=%A}x(s7gdR=bY2syJusc<|V~U$8pTL5D}6PAQ7p|)Grpvs>%dhc?BU) zi)N8hK}GaPkpP6;A*{vwIREzR>+9{1$s~EX@7n$L(PlacV{u2KQ!b%|#xI&h$;J>t zNRqPiW>(NTa1&JM>Z<}@J0ByDnV0pL8l#R#T!Rx(6Dcn)mOm7#NM$F$+MkaF7e}YK z?F2yxiX*4uQ}%tvRLZy^RYD~sAX3IMkNG>iZLe`Ps7SpZs9Klx#Ok`Q51X?TW4rwY zzWcdtIEj~Up^g-}bXVY(-`N4Zz;6m{D;-BsApt|h;Hi$;)XR(ayWa`~W=|~uoINwS zh?a4hceTI1?C`ysy)okjJ@7Gw8L9#?5*U&r8}qoo3}-qX+2!T&c)n;>$*+93<2;HB z0N>r)z8Co3hKM1eAtD;;hgFQ*jJ|J#8Xy`1m?C?2&X~~j+qD?~MhtMJxh^itROPoQ z#CKl#GdufzufOrvefR&ovY@VIVK-Xu{4kE-pEBY}vs^W+T*}3`P5po%YYN4?u^)?S zDp^F^Wm`&#A@IV3w?K?W4_>}cj^BLyt#!NHUThq3+b%N{%_Uo!AXjZAW1R>Wo>$|O z&u;#2cgoME=?6w~+vaZ9e7>Og@jAF_pf=0Omu4|+l1kcdrN5-oAIbhM>2lScO>)c> z*_EbEoX+3=!UrGx(!-OmXaWwHvoQtlf!S12tQ}0Cq5uelpo{>_P}eQK`h&|a|G&rO zXZ7ws_h5RTm#;p3^YNX1pQ8UQod0ilxIXXA4RZ$I?59DbYK%0yVWQgY7jLUkH_4ob zBGEGYf!N22EEOOD2TF@R`?#&79#luKO4@1&(Dq2;SV}UGNm4J;FyBkJr(V7d@K z=`J6`!yVV37=Cfz{La7n>c98LF8{Mn^?x&td2^{x-Q(Ccb$3;c9^9`5Qn-9`Gg+)p zJcUr%5YD&G9cz34<+>9(Dc4`e@&EHf{P_KQ|I;2{p^MZduY}&48!i~U_?pp0wu#!E zR%B4ySSg+3S+rfSkzyFb+Umus=A#I2cYA(%odlfs5`&+}jUt9rM=%jnA}9a|iptJ7 zy`TqP;GY3RTuRyd;Kd3#M1^Y|9ogHL<&DSBe&Y4J=$e37Pt(XCq_frjT<9)&E;^8@ 
zrg1b=?;Rpc)98rJY)EN;eLa5#)l6k(14IN&XXW)Qa);YcU&(XZG8hq)0uqz=(I^1w zb8j7o>LE^}nE}}I+ND|CJb&=!^S1%0f?#U1rHPoqoK8G{eJ-8f`~;gVn~O++0?&sZ z7|f66xlckwN=b-^P+}|M9jNMDI7PZ;1{@tM(M@iOs&f+{JeQ*|2fUisv$NCF)3dWPrXV7_{pQ*E_0iGMa@nesl#<#U2mN8+7_$d}S<^iq=7GqV z%jF*u2}DE=9T6e0nJGCyhS`dr2-zcn7C=BXP%&UoAVVgEDsal9Y7r^ZG?b!5>^Q7X z&xWCsoMuh_yuA_AG)*Cdxis7~&8)9a)8w36tyT`Hs;c08PGgx8862P7MZ_XHl>wN@ zF*;=LoNovjk>Mt6K*Y?-Ob$sYDiULylchz(RGAr>W*Py^7y$g7P7DG-1m1fF%$m%M zotYRR5<4QC>;84?k*FKTluIdgbaY%*Rkz<)RW*(yA}*K9aU5swsd)~l$doe?rIc0G zIm%fOV6j+ix7(RT*zIjCkU|U$yt!_^at)tei<@>Oz|qNicl~}pPHXZZgk)ey&bbgm zO6lbX4{{nsX5w8-DMkRQM1(|oxIez>X7fOPIOCWyk)qAER<}L$M5>+ceM9vvIB#+L`*SeHsX5`q6B$ooXXqUTfPdR}PT~?Q; z_37E){gpTVy}$hTzjW1it4~xX_gn+<TZv<{2WMXc2wcT<_fVldB&Z^LxSnvJ6wHAo$}D zf+DCYJFKd(OTB{i(|BZc48b!e)w#)L5$iM!F-GUSh&ty`Eu|?$cY3@YhG`h~&bd-j z-}eAyirzaPLsiv@%FHnkfhUf!>HD6%bIu`?h&Uu>LNzsY6wIva1~F~hmYf@|ud5EP z(*Dtt?Q5^L%^rXJLI2(xk9NP){x6!ZgVl+ zdH7nrD&RI)V9AA3PWIHHI+lau!F#lG!zgijbj9E)T97+`gl6!Wv~> zEs81X98iAqF#Qf$)x)D|)s@Ay{Qj~jRr(-e>iQ|N5AN;&-<#$_ z-z{kC#RzEQWRg@BazSEzAr}5EgAmjdflSQM6MJGa(_c%ge{Zt_w|GGhd`w}62w=!s zvLj|@ol?j3k)5ZE%FC@<#*vm@lGl+byr2hO;Ny$iG6n$7afLW*`RtpQDT#;-WTw4(GzstzP=_y?^zSU;A>{f6Z$>YSfVG#8#$MIqEdIh~NP! 
zgbE^%O67ddx>zmtn?6Z$?1-q_cf@sYK{S`5j)=&0-R}7KxC*h~cWv7a!;s3zY7EJeflB{-(X}H?>{aE#* zjpuikn^%uFuK%FyxBV7&6vpHFBAM*NOJ|>;CFSzO1TwM@6>*7xkb95GoGW&yD0#=| zn24IDd0IFPL-*Y}N|<&T%rErhnVjo6+<>&KzR>%a4K950-M{*< zoL^TzcwR5m>WljG14XerfA8o|e|z(Y_vpA!8_^A_0o^X&fQsb*TpT$;uUSziKxhAUZHX>^^73sj6KE$1dkX<6;H^W|~qCRWtx1G(agP zrI=+iQ_1WdA`$};DJTjuE3x~~ljjf&r~m-7`dS2Us_pZmQx0ZB05g|xvnmQ4BJ1&B zPz(B8ht@3_KYy5*V~nC|I)|km7JM)u1`)|wXWs@y^q#%vl!|~w=LC!ZOo5n~X{P=U zhAJXNlF5SS5f424P6=7Y5Y1&K`&F`JsI3J{qm zHk;-8y6yJFASLxfFCsCtRaFx@_O6CHITFccM#KnU3P-Dzhz!FppQu@+%*<`GszNN1 z#;)(CWM)~dhz^^rs*0J9E;cobFtB$lB8G=>hxyRvG0z4a^Cfo9%`QW8ijN^2{BUOf zDG`8#Ml*j90-hb&gP6Hk|hxzrG6aysU%_%!KA}Y%2%e zYtHrrxFHhQ5aDJ{&L)fu1kS^A-IIFrlUdEoSPBtwjDTDe)j)E-P5k!8fTm=Cs+ZeP z#B4~@MD@{vaVP12_J9A+_wt$g)z2?aUNdHP{ZZYt!=BXAclmRFUOkwxia`-Z0Z||# zaDJ8_5h@TN0uva5U>Yer+2>`m3$DpTyM7Mc0gOaMXeLQ{ zOhb_(B2~rXIC>yzR=eHq{n8yDAG_q9UOu~Xde+9O4pDM>`t(WFHvKSMcf0+#citbb zkI-OI)$<{A{qLo#*$y=w3v>53V-Y? z^=IxbU0t~_Y*7tRG8#Dp08*pp?c~2RP*u8hx&vbX_UJi&MgQOjkAJb-z$ShUyxsSI zyT}926#&Ip7aF(QEmc88N>&7rl$+%$`mo<@z4wQO*Gx@IDIjVf`>tCo+Pin}Ow*L} zG_P;o$GOh~0L_GubIyG~%${ZdsH&zVR_tS?s?#{-Jk{+A)P}B0<3xb5YNGRp7o3AV zUG~2E`rA*lt3P*o$K&p9dv(4`G+0$}P6m)G&tzK2v5QnD0BDR&$6lwW|W~|Y(6;pk-vWSFR#{jo1i+L=cxm?z)5C8mj|336{b z6%we8Tm(1}>t;rCz7WE9+FxjC)n#mS^K z|Ijf!>d8<2#>H>1-R}#0YZ$IJW&)<@7?sd~MX(t7e^v}|zRl1>ok2{sBqB%fVNCb! 
zr~VtWGru?Rf*$yof`~} z*@}sjx&0scn>WLfeb{+Dn?4^b6X-kmeq&~&n@#Cf#Z~@jh_xHKrh9wWET6sl=N9Mx zVZ8Xj#+?qJ)|grHP2gc)rfunwoU*Y7Kn2HYIl79yBVbh}hes!CcGzuqr6?k<7HjWe z+wGYsjnn<*nV}-XvR?j;utGPbaYH&v&hAg#vNw+w z$lx{SDUG9~Y`JJ25h5N+59ZlKar3{h`ntRGgj5Rm{rw5xuu`ZMcuVde(r4-+HNOHcpOYWry2ztdTp__V5;sX5(9 zGv5Cf_RaACsUDe(hzS~VfLu9Q&_(OpZp!iU-@hIuYPWE^7_StzxdwS ztBZ@THf~kWtMXFB6<5&IOch0v~52yF~VcapB`68OFSFyz4ru?*GEq-eeED-%s5{<386wLZt{I z67k%=#^jN-Xh6`ECesqCc&n)w5$8h|8B+oPWOB}#S(dWB?CQE+uh+}v(m9uN9)=;M zWJm;bQLG4{{$hx!EOf(fM%JP2?`mBKEKzvwN1M5 z(J=rd_KU^B5Qzd97cm`k@UE&*5|&bM?r$?3$8nCPV8)b^_r7TwB1$Q3Q%d6`A~RzI zObEQW+Eh*5u9mB#b<@lSLck1 z*|+`r`g$fOL<$iQ5fiD}JQ!x)cUv{}-q-bNxm?cIE`;!GJ6`U4*J1SG&wnzUKmOo9 z{a_qk{uG+kDU0(1JMW+_P7RS@m2ywI1!J>JP9!_W|HYH(HMjYfUV#AP?&?~^7-i@- z*H~7I7Rma4TSq_k`=?JI55u4(UtMq8K+8q&gl1qx6b#A34bAoqK)h|!Q8h5Rz4dM< z5*(hgJf|Bo9_BsIWEdhEVb-}QIp3Sl^91IQbk0IxuSlwDVgU2CpxKdWetfIxv!Sc& zs4kCFp&nj3KKjRBef-X_tWT=b2X{(z+ue18;G-s)Zf@A`V$3>p5CbbstjaV^skjiU zI+z&|fQh;!SxpoaBBSx@-3vY?JFW(B^{BfEVYl<^<#7lBmB~{P7=|HKWj3_&-gD%X zQgr0&O0A4}tYd8mvvFCBak*@#VR*W^>hiSmZnavStdF3G0|pM5$FA$ZF%Xtg+Qnkg zF1AEnGAm3s7#~~o`d?7$ow2-PJ@~5AEW}!aWUvzt<*Iqe& zNiQ#kE_WT}Tumglg?aN=Pe$S~pqo+Tm*Y7p{Nl80f{NcxUe($@BhYR<~(NPS5<3_=Vfrn>s zbsm$He(Ma$0E>#Lkcwkt3L+)NIC?FyeGYg2B06wQpyxHY`)T8{Tgcnuik15E&ppc+=0@Ezc*fph-x~3sfW`$Zv zNs6-^ol76}fM4JReiK~pwsXTZm;!)fo-<~DqwU3qt1#2869NN%=a%Nf8(O3_vM0PA z$5=EtUtICiP)zdM2CEkzcGAm*4=$w zWyuwoNVF0>VOw?WgYofc{+rLzYkM17ezwHN$NSCVY^9Ydn8!v2yV#eiK8|^;)~5w% z-0uRxAbFn#@(z1XEa;1oNioMQQk9Lkm?~(olCzIQpm`dvyZtA|0^k3Kes27XJ$~c- zNzaHds;-c~#Rcg3>-bF>_&LqbOw9n1X19&!zcQDEzTvxS2U)yUC)vuAt2d@;AM4hq z$K1VjG;n$RGxh6#U_AXyS$;;g9`}!s0)R^l#7BAP>!E~c?85OIpZvGpjO*9m|BpX4 zz4ylQx1Yx!4)ks{ywO}=kUjmClRw^{Exrc6bIg6M)$+7{004+MWf@aZ zRb*ecarVyvP`nAfERv<ow zs+uVneCWDkYO|{|0NAb1&-_^g6}@$k&p8VnIu(e(6ldljrBpR70U~0`nVE^)EMS=( z-)3$A0On8x_{OyP57+A1>}U>@L_|{rH3ICqZuVkw&UIZ^N*Twosw!q!RqdTSch>7= z^b7zwi>i8msD}}`+g~Rlx2AJI#E3V4G@8v-Y(7<{w|uh!n9Yf>AVm>O1ds^ykOr*9 
z3Yis9CD70;a+!){A|`e!pbARNa|IKDh#WJhfg0dJEbqNnEu|Cy==;9!d(Tcxz4uj9 zD?%yBxAmf`j?PY9tdI$b3{b>MQ2^x##K1H+VF7b z4xm~P95FfXi7=Jy?O=XIUWJ&>$=|V$5P7a>FG30PtTvls#>qt zs(N&EwApN)K7D$6dOBMtIp@sed26gWjbNUx`~BX`>bfqaTwY$Ivp3(CJ0~mP*1-MQ z?;Ad~m!G_R^d8?=AXNpH%%M_Y5CQYXECVx(E0wa9I=MWC^*7oDAcK@rK#b-&a@&|V-*ku0o`Z3a+?WW> zhd3u)A7(hsHdc&?W<0kI7|*sv2TLd|>6XR=00cBM1T-{55G{G$NAo zG)vk`DdM2-pQ=e4s#8w`_`mjrxBc?9*^TFfTFHeqQ@on2) zUtgb|oE#q=eeL|c2{ndbs!yMtuh(mf{MBb4WYX=h%j2}^`W})cSg{XvtBW%ktG``d z{{?=PLtMpWJq-)R-VC1G`YI6L8qR$uF?W)|ETtksGg2jh)8PYpxUOQ8zjE|+SKi~u zQV(gr%Ij$u*LCey>&@6X=Nu9!l$6b^sp__E$22)&5Os{s*ibEnJ zTCa1?rRxl!s_R%)+ucrc8OKpXd<^J#?i)c92Fp^wux%HFIPH5PyK{29@JGWm8lrQ~ zdw+3x31aA=3^4MvGJW*Ud;M|vtiSugdtdn6a)*oQ(W7UZ{im9jzK=cb_d6e&)-NP= z#*~#ZSPd|0_|@>Kc=G9d^2aS--W}e-^Dq8_f9<^e@hPO^nCb~87lf(^x!GRm?y2Nb z{Ho1^F&mQvhe)i*W*`VM&bF$6q&PpR08HUK-1h7F<)ik!`>XTo?oN@b9$$Vr$yxI& zC-41q#s|sXy1Uago{bPN+| z{HRC#0x$3zLInVgW??1(=v)~9pl`mV{RZC{Gb01MS+c+Fxc#v?FG@)jwB82@`z)^X zu(YAum9}{)zxwjiSN<^XuiMSfE%(1-Tp7#&xVjFkCDYn9sZf^T^*g6Nm#3S} za&>1+x?0mPUG8iTMrHsIfE(rLvZ2lG@IZhBC@J?{w+8;|Wx3*bT6zU@CB^EjOV0qR z-@*z1hT{G=b`0FeLTAz3^K%0L=C?a{W6NuETk$M<1$yW7FW!r=D*wU#&EHywvd`@l zn!9ap);|5xe|p_NwIBI6$}9f>7eVO0o!(X>PN<`mS-X^44Yin$W%bKl}r96rTO%tL}P$vsHa>vl&Cb`SRlT zZI3?(-CJjmzO2)x)HMv7YM`8+9=9jm*?()O@A&IS15_o)ySuA>>B9b;FtpXnvdjtZ z)RB(eez(7T(qE3pzgu4W?~9HMp1X@YCMm@OkpXDIrlLWdiUbKG#Hnve-qf%yCp?vE z6e`6$KmdhrUxLRAyuimBlh(C!!MSmoN??ptuP(pz=`{WFewsE{&ukj2SZm6r+9Wr6 zKlXjeDSAf~ZI)x_iAr&f0jQLMOjV3A#=h_I;JVL@&IRvcXcvpBs&f8t1)H6MX8(%W zuR^VWsj3#qX&3>3TSO#9CUHW6)XhFQ78Wq~P9Y;I9u&$1U~uyaINJdo1mnywM;Y58 za^QB2oi)w}vkb&}nWY=Qj@emgmUvq-kq-xaTX*)}n<5a*6v9moEeOc$NiAk`M8P3q zK#cuu-oS^9|7>eBQxSAqJ^io=0B%*HbzNUyUwiNG-aVVE;NEixVQv~j#H?~S&w#UW zys6rao&XMM1P5;5_W1%goC@7EQCT)~4oFZ{iWDtSgp9P&K;QcjHMh!xa|m|0a-bC9oduCD8uYiU=@NX`(L8JG-#c|i`%tU_mm zyP@lb$vNlHF?sKTq21;@imlUF=8gkW}a-GInFr%nuEM= z6~|Lfx6@KpH=E6F*X{TF^(RllThB@pJXVjij}82(`^!HG%Rl|ox((4t+EM_+=q#4r zgo+OalTal-w*zXb1}yj6lP`|)#`&YyKYjA*`oV7d?8(z#>8`g|+t6QLF#FxU4^>>Q 
z7Ue4K#^LyAb#!`GVG)s3gw%*xB#*h&?wjmS=GzFs=H#rI`%yJiL&VwH#Sl%=m;eX> zc|XADW#MEM(oTO$N|{wuAcLxs#R5aN8>o&3r&qv zPi~k7PusuxvtRo!-@f3NUhkZ5eGSas!6G=5Uf4|icYD_$InE;U@+E0<9Gf}}x$L_^ zCC$kzH|?g32nI<4YCLqq?uMTkf8b}sYtL2>UcbKz z05ol}dv=YhfCT^;P!4Y{q>P9N@|`sqJ}e_<^F0n~Y6VOLhdM9xLQm{Un@{%JAAgnp z&fX%3s%l9o1V?}fFb(5+u>|WS=Mcj2YW>DLZ!MbEJ1c|$0mRR{q>2*IK2 z`+mRQr&Ow{@!nruU2V5pw_1yc_dZs0`;;*wqXV$OuBeg0qHdNkTFH(%jg#gQ>x$Y% zUDt%1rDQ2EG@!fQv1UHs$K36GSzSLn|B<8js`UETyL#H*PgUF0i@J#ymzNDR)3Nu3 zaV$7p*8Lb7>cVS8eZ>#n&!_sz>mud2DnJvKW3YkqZlq>04Ary8V+gzmW7@j7P!$zL zH9}>unW$!PYNUz_74!?*8~ie9?@5|vdmWS>uYdG$uBUXjo(R!3=k|IyI@ zbN{wnU#pelT=t9jBJ%R1MGB6Yizyc2Dr6vWWR>UriwpBxLFL1kld1xOn4*E=p(61O zpL{_Ne0SjX;{E!6`R;|GAb4P~R1i2g1Ic|@k^%vM7yuan!yHQVf*W{&k2M061OIxv zoYHT0hkkeqs{sH50)pyyYIAhETtrc(xWj?bD2cMK*(J1y1@q2dV6q22Q7%4d*XOov+g0d>UZ?4@Y|PPGSI^2Md74V^SYwtn zT3$`mQyqhIZZ%aA(sci5d;j-+^?VmTcw*`*oyIs^8#|?M)$`P&Dwr4oVF3d*R$^pf zBSTO_6Vt3{iXc`h1;khYh1Hy>WzFa*`UJ@rPX~v?k9_enfBN31&h`3zdvHH3t3GvAPJmm+lTmu*<54;aeH0c01y?RF zpV%5;ce33A0Gd_+FhFfNs%%1iXX=OU;a8Tw7hQ!2)1#LVfBx)W*H=!^F(A%mJDSZ5iui6L zIskz4uC4+yl~SgRoO1D%M}Z;&1vi(En`7eTn$NFKk9Bt*bwqz8G3!WFvH}5QK!@fC zCjbU>_+qpF0{>(n^~DVZ%oR_7C1g`s3_Pcqisl;jXW^+13fi(aZ>)8jDScXh^^g zfRRW=s2MI*eF2S;@N-6y~76kGRoy|3FRP!z}f&)EX)d0DaR8$p^ zFwReBcH|s`k(n7;Da8yAE=2%rvy9!7j0nU?rkIPWW&jm1FvkSW(J*%GY};QdC2*k_ zYc9;Ca?E7G16Xn11G~AdT|^8O*%6_FF@M;;FkAHl00L;rvqw210+7H#aNs}_C>s_@ z&QU`_f@#Xm1v5i15j7+iY6B28wGq+Sluc#IB~3mw9DE211<5(5k{zLAM2}>M&a-o5 zHVnf!O-)m^ZCy%XXE|r$L(vUTauzdt@A2a~@^{%RXVLvE$&cKanrcy1y6L<`L(SPA zT;r%{kzz$9W${%+AJMrRr8@~QAUkkHE42dPflXz!=`Jryw;zUy5L1?6-}#zz zMs_41WCckAvBF&OH&r!2=NvgQGdJ3t1@a-)WTs}~bhfJ^W&<)HLLO6g!Ovy|Ap}*( zr66JxBEf+k6TJ!NU?k#dY9~$JJLS>Uo(y)I?E;GB<SGd{EaXl=pq^B7}yTNeY59Rr~ILzLVhG>NBs8PLl(u2_B8aMC_eN zPNfj2citg^Z^4n)kIv8Ue`fu@lkL-y*GPNGm~+Y9s`5vRWj4y171MQh_2B=<-k-(F zwyo!3*x!sXW>ZzG-+Q0_o^w04A}P|6)lia2DGID0QEbF95CgG{Bz{N?7zq%>FL_Av zl9xQ?B?w|5HV~z;>@=3-P_iWjq;1+1JF(N{ZBM_u)m7DO#u$H-hgoai!`3?DAwdo{piQ^pCf#gri_mpo_t(egC))UW-Iigxx!x&rJXsG_)4E4QLJtUP(zIP 
zsFZmI8ONb*!hSab;JQ89j{B4K@q14{C@z82>(E9ju$t$YY_qaw0C+P`X4+Oh*__rk+>DfT2aPwgKG;9zJd7O) zD<}Z>jc6wCiT9`q9t^YlnqmQ2n7jb+kq2i|B)Ie5G0}3HvnoU%Zx5=YBCHfYpl_51(y2 z+Efn|$0|(>vkLF;`=b9TF=+)_u$o!qn4ZwE*J@7(&@zE}_d)(t5 z_xJ=+6e2q!unCsI2Z=#GkQ_kSLmcjs}Z5fnG63O1d~$NQQ%1E522U_(MObPr$z zlQ{td!~n?75q+`lmq>0Eq=GYJ2-u=mlst4OqyXj&9)dbzQ7dx7G+*wfqlaI8<vKj`a~nJa~O5O)I|GJlqcAcDxK03xPR zRW%^%5|O`5xh@?H69@MKECb?2ZTefp`#=Uj$P}1dRkOPVW=g?aq}IR*K`);_8HV9#bF!>; zxN{hY$dYS>0B*~06&(+<^rb21E*yzoBo3JwP%wXSusON=G6Hs3ItdKGz{ONlO~k7+ zGcX5Ei0p7^^i^jEGz>@qKmb`Wy75w*dC-HKFBStL3NZn|5-TJk+wFeIfj5JtRMX6s zzT#5KlA`|d%P$kr<>j-K((&=J8HBJb@9*}EdRRdR0CNNM7oDpg{mmCUSG5v$OpF9# z;(F&D0j{N}S4N!Yc`;(+Yfg^=rR#Y|5mC!$0GO_Ia zX_}^aBBFVor!g;8#!Fz=H09&ta& zZ9+HNN`fJG>ktB&w$%1b+s4#TGuCO&Q`5FUFiX|J za-Q0@C5|bj{dU`}yOh%I^16-9(ecS_R`#=-RS}npecz-IIfzI^a8N)}RjD=S83_8O zRkY9qb*ok#%}kmm)m)j88N068&HL^4mYMPS=t}GL@p%kwvi(>=AHU>hQ@d~1jd9F* zp67Y-+jY&s9EM1Q*fecW$||)=$Gk~l*j;fcK`Vl>qaU)?%*+KWn^C|(oB%6GmhyXb z@-0+xj8T`{kSY-&(~{ZyPY%Ber&>r&0AC^mAz#9@edXfeIFE1j2E4Td*e9!hjxfp-#7HL16DAVqCD>rQ; zlqls{p>SN6CJ~|{dI+J;`~CiDj0yPo=H_O5vnBP07*6>FLfyv}nWazs|Gc`k@3ZtAwoXh78K(%)87h|9AqgJmlYh%MvJ!N5)2715nQ z%yQKL*r=g_R~@Zh>v$Bbr`3B~KP$hKIMhcM+xdB#Qp-A&J=4cDFtrU-MR9gVb3+$U zb!a@iXr&f(Fi>zcLxe~W+hEnC7yxk~3S!u)l!SS0*f#4!9^0f|u&Wpkk zK?_9*8$CLKbq}N{vz6)S>92Wz3>TlNiKyu9_)1+DxtZ_7Dev)zfmQQ&6_A3MiUTv}T1qL)*cp~ybO5Pk$u|(w7gaiCo|iA8 z%$!o<7zvSxWGOL5Trz%^Isfv^KTXp-&xqKyi9=9DDOEHRQH%;ri-_)O$N<2os;X7Y zWXU~e#u!r_iyIi)GM@$jRVz6=8r@Ajk(SHJp`YB$(2Xz#<|?it1qvc8$tkG7i?SII zNR1&N5=A6OU;uDpKyp`igC!;&zUlL0=7r&0GFbd>u9_GBtC>j=5v?Lp%p~@G(=_gm zL|wPy)Ra>CzE2@wpp+7mn;Fp|B*~o2mo%FQbjT(`BsT;=a9Tq1%-j@IojHKJxh&KN zftee+DS&~KtGTF>xGOmVgwzoQA~HA(*C}vh!s>(wpi9dD03ZoaIp^Ek+x>o2)oGMvr!Si;#@M!9-}mcvP?6jHKE!x_eojO==eBL-n+D2n zPM8)EyEuRwGY1YtWbVxDgfH?y?lul0qJl3+0l~sX;YV*m^IVz`=Xq|L#!M+N0fRY_ zr${MM(*&f{w>;Ovfn3B)KtNU9(a^;!VPGI)G_zbvF2&4GPfr10yWNV&LW(TW;p_F$ zdcD59xdbpqn5K!)#o+X;{Wt#D(J%kZymN0QHf~cC0SFH$SxLQ+f3AXTJ1}U#>Z~so5MK9dAyquCA`HuT`Ymv>_zo=&nrBDKK*} 
zi%cJ-Dpl3h5I{j4-4Xrn*>m{?F;q2C53Ys|=zVHACeXS(meyK{2q`QF!;2k3giP!J z4sL3`Cr~xZW6fqp5itaA!=%!6^lZQF%%>@Kt$+1q_rLxndh?=Zn8^o9op+IfHz{14 z99K&sLhb=+(LydgB!2|)^q&gahyZAyWM~dmMduk2StX>{zWk&<)D&u&T5y5FqUL*Mw~I8=o?n-+G118e&&2ZE8kfjP*kG<3Ryl*yIzn-3nfskyzqCZdz0qpB8& zW-UkEiio6SccH2Px;kk$ZNHmt#40dZH8C@wTvcRDT@Z6E${m01&Cf1T4(dp^-a^Ro2H%1 zm8$Kg{H6E5^!+FFwtT)6q?IXjV1)zD^SlJ9EoC`crKmQmzM;^@R@KKkq0tJt$z{B` zK^IkJRANem-~jicE0hG8IqY?{7?C*Tq`C%^ijMBY#Dw}sX(qbIcPPH? z=>c(YBy+>>pk$$Y+~XeKQa}!iK!bLS9zA-vzug)6`uOPP@rPK- z>iBGg{kmCQZm$qAH3UF?n~LT-3?fb#9B`Uv(@GS?Dk3rwB8$3Oc7>cvYC;8@$I*Rk znr3kuv`rs5KtLjo=m59~(nAQvFg5j*LkHb6^{lx}n9ED=4#%r6|3}Z?{PCCH`rhfb z(&e=#hR7c`(l&B50fh?cjP78B;0~SNX)lSL&~UN(ajIx&)c}kLC?r;bz3NIMtA+{{ zW5KS(m8D?Nm(&2%PH)2PP4FO&hBtBe%FXcuuHz9GbClKTwCc*$EtabgPyY$;8Vf@Z zBqK#vL2RQI94~WP*|b9uTC%VJ9L<+04#MyAZEg8naY5nyH%rl7OcJ%LRZ2Kw$n!J$0IUqQ+IE=Gd(Z1u^$|SkKS? z!k_%hQf8?+HSJuhAq6w5)eM%3L#gKO=;+|%G!>~Y8c>)6Gb1y(6H$zDiGO*Kxs?fE?V_ z0Sqlg&b7E2@F96%2{i`}heRPF|7L0`vsW+!1`fzf2v$m_s$JI}rlanRz{_n0Sjg`B zib+ik3C(;NK)NIP(pIy)_l0#}A-4S^YIhDTBIYSixu7{MBh*r2-%itnh^zIHnW4ea z<~W433~J2;0nyZnd`Y*U-9GwjW%-nUM2F$=c zs;Y>Xu_6Kzr@-6+fPydk(EtvPgk|5^;>iFc2UA28vk)kR1n6c)Zh(e}Vc8dV6%NZS z*xjYhOCiB>@GO!2DJ9jqwEJ$>>t(o%h$7;5sz9_ws}8h`3z0*hrF+0}SvF$=XI^Bj z%Z^yyOGK0i9b79QF;XBDx1yz%vV2^fGa}YnfvN2~EudD%VFIwDqa#MEB}bx)4pu@4 zgfUmKYK~OPlxqfnzVG|KC!$&+3Ct3asDLG_(JL7n#0uyxqL~^EyPB{*Iyw1IpIra^W%|o+ zy42zP;^OA|df4tRZ?|2)7S&S9@$vCA=BM!d{N%KYLETL(1}25&+*!_dS93j7TogA% zq=?{$j!-jqGf^xGB5J?}0ze_wguJwOh{$2CN4hB43~3=49B{Gmn7NskvSmtv8ZApP zITg?8bYAuk1ve}@;ls*-)V?0 zi0FhI7yuN5a}aeCtwzHy}I6PHs`57>DHr`+LwOaKS>XNHT{`q&1W!hINexe%uhfBOlUDl0WX)* zZ?yqOMQa_zgvF~_t97Wikzf_l_I8`iNNpN3sW&l&#H5S)hK}3Kt7k9m=22@+O?!O2 zxxT&O5WIui-M}2oeAV{|f(}hg>t@9UR%$}dCMm@2?gllk*Xy=zce}Fu8k#a-&RI~K z5W8+AQfe;!W}T5HGXXU)t!fC2boTH;h@>h_YRjS41^U%yH{V{5;rRF~e{KDne{25S zEp{s~O>3;B)*M1$baXX1w4;exTsq;woFS3pv>$J_y%hk%;0EaEVCb%3APFNdr~;6J z8bU()?V_ZQfQd}C+QIAvhf1N3eR7AzvAx`q7GGiD5Q4R5`vN}Rnt=*6oOq!Zt4ZY= 
zTmOgur~3K7jPWIs8kzup{~+;cqKk>l36PP(;-5Txzx=e9>>l6#__n797B?{>D!>an z)^~hm#69kDk3UKTa0d{#fB`tTyMiNPhpqBlwV7i*(XaOY>f@7tHnfjc<>|@xS0CUn z-{_NOvz8G&K!0&GE3vWbkf$N5Dw2mVWF5vQ>wbmj=iSljEAM{e*^{TAf9tcS$H!lJ z`#m;lLeqtoktJ)HcG7hk3L>d`&7&Kpl$xd!5iubmVs=uk)x(iNC44!bhD5zblqT!=<0ommVYlPz(U@B5?%(6af*J znZ$B~!@GkJ5raTy(3BBG&~UMXcz|7bt509qda{uZ}Klt>QZbE*tesg^_e%hzh zJ-!2Ro5qbaXx)I6yY&zImmlSKf4ci~bIy%|g9$k*Q~)9G#Ee}GR%8<)hyj{9@}2Yj z4-PXk3a%cR)NGdQ(>83M{gJ{d08Y!3GZ>iJFbpxq zwry3_(3uY*KT8>`yBDbjx>Uk42KZ=JsVjoH2?bszb>?mkZikljT-Du8s=G%H#Bi8S zav%qEQxi1OL*X!!9kK-gpyHj78BN7<$>z=}A`_T1QP=ksY!Qhcw5MjvfO~NgxI6h_ z@C^Va;_l46EGEFAR~G#+4J`so=KxcPF%p5?MX0+wfWFY^n2D;ID7*+$xNBknQ89H# za1(O?YTAC)c4MB+09<$baTvyzUV5oNY8_6@83x;UQF&nF+z~n2BWs zf8bvi!-wh8>g=wn1yq-07gci)P5{V(qa!L3Ac;B=0W(K-4zSFN5wYoE$&q11Hv$1y zcMC8wGa{&(yCPCdiHN{SM5bvlgM|#}x(*ft3TsNKX_`YXv^z5m+qrGqZq)<8FbpZB zwrz{bl7?$$XiMna3+vh8M@LZATI-TKkwRc_1h>093s`PyOaP8xV2Jo44arItFpm_+ zdA}@7zgaWKtiwWGn2EcZforYRz+tX6#nuB3yS=FQ{aVBU(9Ci!F(zhyVV7aIrKmZE zW)aaZJb{^s6huU@wu$TY=``ji&z`;3KJcT}pS`fZ_iI9DamahMCRNwu0Kf>ukQ}Rd zbyX>3dAuB^VSv&^kMYxYG$<-vqO7?fjcM?EM4M66!4IjbvS^r z2d6w6Gn*=+s^qVJ>B~3QABK#s!|QMTU~s%0p8egoDXqSE3|^@fcPkZPXbC*AVPpX` zM*t-MR#cmR(g?87WylVR*hA6~(Hta(5Q}=j0C+6e1Rt5%Op&41I?r{k98zxvh&b(s zzVExHU3GnGo2%QKtJ@o97WCC>6^Pb-e{yoNUaw>1I%jl9A%J-bF|0SSX~$`rYoVA} zrOX)_6r7Pb-oLL>5HSz{ z(YG?kQ#mxM+IVAezumnwzprKY`uSUJn;zdh!>-%y_wzWNoSYoPI)f$gl&mtnGfw*cVr}cWxhy)lIn>Kp(T+0$F z)o8!XQ{1z}pZ@3ZgTHa}#Sg-do}P`ZI=H%$l&Y+B(B4M@K;T+c$(odS+R63?$9+s_ z1bVRV5C9n=Fa)s*HZTT&sG%W3uo0#4^t*27C8lh-JHN=yTbfiq77m%M00$A0f*Dk` zogbC7?r_T6VU-?CyX|e1aKfP=c{sZ`y?N*F_M5Q(zW*tWPgwO^j;H!ALoc|!-gQNz zqk#wF+2s=x*WBZ~8{hWyfSNZ=0yhy+K!SVgz&-A9k53q+Zf1rgU`TApX68WPvw0ET zHx+iYsWhI>`3BG4JKyZjf9x2_@q2&g;^x0hao4O?sPZbPk?jyij8!m3W>&Bv!g{J@ z$~o5xgaz#Jv!_4ueLrw^@$l;A`uWW@6M;J-#@L8ROd;p$j*(LlDLJz@DMbeXMI;Cq zkiZopgeEjaDew165CYGWFmu;)v29{KlJ43y+f-)Fs_ez( z%AS8Pe*X`j!_D=(AA~$nJOyi5CxGT5qqY;-+6276laqi6O4Ku-QbUv}vQAOnc2RgP^(i)5|b;X%TG;)Mh7U 
zLYp+tAvKi(e$1jFrzjpACF*Q(W86panmV+*I?GQ%n$$(5MgeJMR>@Fs7MYDhSABOefscobk>I38u!DfC(s>nyuU@>;|~XV zI>v6LPXybS=iT?`FAW?V$KU}`D-{LrG8=JCED=aVkz1w-0ZoYAJpk%5lKz9kL4lAx z1cquBLmaO^EMNP@zxuEIIj?gOYr1~Q!W?3Z9MA$wHqC|55Rqd5LUm6mm`RK=rIe-K z{c$rp$oH0kEHfVH0gj8BKE;?)0t#YUq_XfbuwJCoFAT4jx4V=Qz#%hs$uAEns0k4r zQUnOW(GlDQ!Ocum79Us$0gwQojx)y)n;2qrLM@{1F*dIGpcRy*Kfn$FLkY~xts*hH z2Bd%lVDmhN5X?7CZaYqA~FvnAg|YJL_0k?uIBE(^!FYgulu$s#gPb1nVA_e1P(z&5iA8lrg@%A zDa_n7T?{OTedNpK@dY#CE{E-~Fb4;nCLtnoRkg)zL4g7jAs{c2BTSIa z?%K|lSeaStEK-8)h!BttBYP$TGBc~PtiI~PyTCF)U$6V6Qo7b`CQZ{25rD0`J~Ro; zt4dhr>h8<`a(4oEz@>U&fjJ_%xh>iF%TW>IH&3FErb#RX2Eo)1HVphH)H22&&pN5iyC>Qc4OTrGQR4O{R*DrOW`tj45DjHfC09%{ix(mI?UH z%?%%HHeun_7LH_^CT1STBBcZlCnrbi^-+~8MSS<|4^CH`AAfmu^pCeg@UHBeZHClI zRv`?;h!{{BKsV3M?X(MY`;(`%PxgO#ultAZ4u9L;`xpMqpV8g35;F9wT+29)7pG^N z)h18#nCDH)mzURd&Tl^YehSgF8i1<4n5rWaMFMkCbs(fbAs7HSx`8;TX{`zXOH$23 z76xW;uX4xJq{PgN18FI|KqA6`wA|noT+!6XYR;(%ecME&c^E%<@5B0Z|Fc(Tzb@V7 z_~b{PzVoL(`}*H|5`WP?vw8U<*KtNtiVde=tR`iHbbWKzFD^^28YNl=IV;D6vCKU({wR51YBzsvA$nB zP_8AZwcG#$CL{HP+^7EG#iMnb0tFL6cdI6r$>FHm+>ZOU?U(qhs-;bB&Y1!?fuZV$ z@4N?*=PH}c=J|GCGhny*>hle>?fNJqb}ZUXHyg{lL$Nx7FW!dbf`;E}M1fH(QoJLo z!N;5R{Pj(UpSgHNz=3GL-^ZqvQiojHw$*XGI64V!2kHT+YrC7KyYZ`4*Sh#ozj^oF z_gDSq(y$ds_(!5{#&m+ zQq@}3z*p<_@v3JeE#FGP`4WBjxa zz3*yxl{`o)9yvN1g$N2jFDj<)agTp`eB09lh?r7(@!+w%cyArJ$35=xNy3nT5dhql z0SDni7_bL8GP2MLHoeS7Gl_KDxVE>geYkz)ugOdQ;_CgsaeDbvZ*1x;r}d^kZfV0R|&O0w+f^R|iFeK;SK<6q;m_0Lc)LBQU4f zQb^+FZ66=LA}QI`bDU<3UBRkeT{MdOG>xT>5{|mfR0`ZaA0bAy_~Db%wt$oK5g|o2 zqU50o*dc<|83Fxnfbv@i=EeE%zoRIOsc7l2GxY3E)FL=j3pRiAt#@L7x8G9@{X5C* zJJqcn@`9J-XfwALp&f^}EPB}GErPD3!JP<=+<*wc4B2k|+0=zoJDzZFAKIf}L%jLj zd(ZB1k3S#+*RJLYn-$jUK>xw!&%Qc6`LUSAUlN`XIGO$;x_wSmR0)*6;( zT_mZc)Y`Qz0+^Y|>~s(esu-X%bBGa%-Mt#l^E6GcfKvD75S$%-;YLlBn8mFP!CgVk zK^;`hL>MvZUEMMwy2COtH#5e=&j3KY=yJgULSPLEiHSH+5_K{K1Y|IPYKjgH0AP_rbzO{U!~l0mIR4_WTQ1pP$ZUvT7+MLI!}UE(Y#~$Oz2Lo7Jj_m{ygdb`Zs@;?ldYL=}nY zV(eIE>3WCQHs!L+^PFNAISj*)b8+OGY};M^?8V8ON6&ua_U7#IJ4Nbvao%*tIW^EU 
z%t&Z0p%Mx;r>pTn_u$p=Z`l2jhlV5esKC~J^#_4 z5&4y|+A*I)gVU}WC1wM|QU#5}A}`(n26*8+Txtgp-TjjxUBC6P&^aLlrlszkQ~|*O zSys* zon)%n3OltidB1w^_WF?a7C5%eZX66DG-({i5F#KmM?|bei)E{Faei9YA|kibF6X?x z*?F-pT<^Doi@)~rE7#+-ZO7eyD%HT-W(`eP)e`nrZt(n~?EXhbKllGAe|j&nUawV^ zeYqoxnW;-y_l6FUnHk41j3a@1z@SM92ngI+4!NThtV2SGO@E|VOH%Xzc}x&~|8f6| zo9XhEgNQ!vIQZ}warY)$?06jeCqI8;H@8F9bS!Q!Jvy1=HbRdWL?n+}vDNS0*L)u?AF3@DRV2os^r7Vf9`|yg)igu(!5*Y`o&}5u&~*(FwlN_xRQKq0o*DRP zy>1%b+SbGwDR4xAT8o)sfHpMSalao%U;;HF>YC1jt81-Q10*BWisq3x=Q`Oo=j(m| z&^{zf-aVsN!s>WOr`ftL|7ryesdgS_PapT&h@v3?DxxYOSwM^lQqp)rO$#YHxK^!I zp?XjvZlk!?VokuKlcE*(TdGslA+#7#ppO3ZJt$1`%qPvzJ(_QCns`NHURj)3ri4*S zFehOIE_DX;5CFq6hd<0rUM%5@sAvEHw2v|bhQL^hSd6PF;v{7SAfW5H&ViO+004$1 zEMalI|9)HJn;|535cVY&V3{JSxg$72Q2*#JA7(@DYcZvufdMg?n<$W5&F07pID$R5zMMqEhGoP{ms?%{QbZ1!(U*~>+9>jeIQlGac-!A zmeeGdVhABccPb^9S|J2t1_fp=rR=AD2w}Ba0RSS_yJ~?Y9A=r`0zkbZII5{uPfpAn zQhd?b`_bvSy9+@Ikwd^`kOu%vOKuMln4>KuY777%D&`&m5UHqu6A>Z@N2;nt>bNh% zH1_LN42{$Zr8Zs`Rr%r@Ac7^JXP71ipn$Zn8D=@>vLBF%Ltu=EPQ*myP6QYz0z9X1m?)$1x(9$?@4~2$XXU zDG*^PC4?qMCZNTiVTg37Cnk})q$I@{%v`jXONh;##meutwz(T2s#Z}!MCO(c6$Kh% z<3s@Dj_Lp$2sr{-P;v(+Aa)~F7e#Xm_35EX0Tj#{sF9QdD7QqUx>yfjVNF!El)7w? zB@{xcE>TEr+r^j=G3P8IFPMw~HqRpl?wS;v1cd4afbQ^7YWKysnJN0RT^NYSpsH#q z1tWj-e(_Oi*-W(7K!k(=x%k3DX^Ig5Qc5YMuJ7FtoNjm9&1M4tQYvwz5Nj@Gu43*m z>l`F7Q>1|CG4ed=5|FiQjl~~eW_K0H7(!ga+*P%0+vVfhE`>;Hbr|xL^M0IOf9-SI z{SMBL{`fI{{r#!Qw}oebUULK_i?JbWNFrk5D2CU*-gNpim#b%&Pygas_p2NJ$Gb;= z;}_rmpZ)pIcQGv!TQkeGM&|W;{d7By1SvMy_hvTk_Y5U=y;dW_C4$P4Lt-OzBs5n? 
zFc39W6LS$$bs}Ou9Q<@(06|TVXn9dp-PQcSlcBr0nu>_HC;&tbAq08;wg2#)>z}#A z|KhfJ7GFu5^U&G(Mf*l~(#XxW$;Y)~^%A-;?sL;1DWV1w15ikg%$ezUj3(-4Vy27HmN+o>tM!ZkwKk;2X{t_`RzbGoAf_osPO(hWFzt8a&~!QH z4CZay0)VQHyM1*tv!ia6b56*sqm7v{L#_rFkIeT`>L6zDH6MT z-}KvQ=MF?P?}zI$RP-jMvvp4nC$S4f+8V;cSEPZ}^b|rUB{%62*);&k;ru;tJYZ_( zFWBX85kzsM26TLKd)e1rjJxyAtNT27;wE()^Ld^HV0Cgd4dX?(GF6@C7!#PM(43u| z9$gC10GLzx(+8JdaIGGgF>MUAKsMdVJJk>Qe_&wNz70Dcs)f0DzcdO1u3& zgm89tw(eSS7xlwsP^?K|-LLADSE-%*RqPUS&VZ0vY( zss+R7U;qv%h&6_Jn0Y(ZD%No7SNyEBxxDC@`E4LTaa$^pk?EsP0Ury?!|ZbVz(s11 zV%xTx-LD_{O^s)hFs2YgwSXmC=yX!dw?l+x_l1AC*q}*!HPn2SV$0-j60` z0Afmng!k3N_xJ?xZBGv*ka2qhcf|wlzl$F89{0G%J^nx-H_(HM2M@(scTegC01!eC z84&6c@_-#!VE}YALn!6s=o|o^o^3{34cpuG)h~Q;{)eAizyGkcORc@}tZ7evq~)v+ z*D+p|$&-HHXTSgb%Xhx=;n%8Q*R&0UqobqC+nX=E@>(Px^M3B>`E0dzG|9@nN}ZIr zX)nk9W(`LteN(%epo2)@XomwD(|cTpazIx0su&DRe<(x$gdP&$!m54qyV7NOcBTtj4Np}0M_5( z8!$LT0CCGaTS0*JqPql|!;6Cj0G3RkLw3dQe{lWIdsaL&8XqLdyeP;Vewz#7#TzRH zL?k;{Adm?FKwOd9+G_g&J$psxuk24>YuH|S@*nO`et4`I0CaoZ09S4~y5M){@ZcVQ zRA39h03b~j0DzE_53q;Vf(kwOdb?#PQsR*VYZ%+1(G+z0N;yj6$!SLr{dO`-*nA?( zG2}-;H85HjyINTFZXF z=~tC_nC4m~tC~Y==I+BdB6;@JTsk{4J5V7kztSUKIQxS_X7Sj-7o%XnTX)G#4 zX}tzQ)xqlsQfEa~4IxlqHC2UzBv}H52?n0r=j|k;COGf46f0wb89n-L{L zRCj_RRUIr6gR9gT)gmY%IRFtWc+OeP0cb9DCXLKB%WgNWSF5?y5V37z;^wfTOlAfK z9GHk?iD^>PQngfC0#)2olf=!@+?_%oPg!!nwDdp_5fG`mIRXVlL`t=m#J%`LF?e+c zF~HTjo%VxC?fbs(+fwtY+YIEO0Tmh0)YVE6tzyU_q^6b`m!^kWJ%F(mD>1G_#Le6g zff&ivz&*ABz)b|1ORj<3&?1L=Gj|BK ze)Y1+@$R zC6)5QyWd!^jvhRC@Wtb!T<2-7DRP^dIkqa=S`!P^NB98lA2%%}Cma^S#~f;q~&;tMty-Xbo#JAByy$G!NL@hkds>QcCT5 zHBD2VhNI2K6uQ)QNH;e((MCY>yzGv6cyaX7@$ezL%wtgtUpo1LUkyLDLOH4A5!3WOM>A6K%yK0VtYM^%!k+%Kh1N6jqmC(RaRDPu_?ZuSmWW z67o<%Tc_w$-R9^{+N!0ZY8;wI2u&d=5kN&Pq--D07ewB4?*z0 zyW8~U*q`&NrG0K--K?98_3=oUNB6%QMy07mWqwnzt#W)80d0VFE)%Kh& zSRbeqN|j_JAZS^Gm)h`zthM9R9{K320oE9Lw+Uo4@2gWvaY=1Clt3Xf$>^H z+`RgE+YfU2Sae#G5S}PARnTz`DMf~$>fe3l%8y1%fAmH7*?I^Eoyo?5LHfJ7wM z9AoUe4jn~gsl-hwfm#ThQW6ma;MkZ#459CP0LY~%I1t`!Zw`^`4B|SK84+WQs_3Ai 
z0tz{2cW;`eX`1WZK5`;r6&;2_O78lWnMI`JywoHvF)Yj+iLHvd%~Me~ieb)K$4T4_ zlsQF0F%``^jZh|Vjg(;NqFr1INC$hAyP=bzf|gRGpgS^i;IQnqMa;hZ(6%iitLiw8 z8H>Be$j8TH!D`Sn{fp#; zMQeZPbd|c4C>~mg?^+o}Bym`b3nH?#_yU0HGC4gox1&d2K5QXw@x|Kv3t2rP60uVt zCniQ`^ys$uK@=U_od5t0$s_Ty^h9I;c2@&{h;SEYv<#{MfgpHTsAx0?M1Ap@6oQHX zK-YC`+ve@ zBj=i!6D5&Z%xbMh*2K6hiB_5Cxz?JR#LTr;j66(Z6GIH4nz|YyrV!kf%th)!Fps1T z4uoLtN`wI5U`T*~wMvXEBI7tN1;reKs)|-nTbhbRq%O_aDQ-@VWq*B%>H3I|Qx2PR zp$pG;vwge~T5Dzwsq@$Hn#8W0aJK5h{*Go?xj(vEmkCUY9_un&nt8H6E41xCh zIWPlS(=_umHC@xMj$}U#!!S)#pD={rsN@vT0U6MiJr79pRG1ZjN+}CHu(()a6U~@az7N2$f%Qu;KH(<+&;OlY`Ule=d*zL| zRIMTynwdF$M~4D^QgBBHSS}EV9uXlhJMONZrw;ml#Sw<_W;Zq;&Y@kY%wCy5x1f33 zg);F`$?Ib$#I|Xgrm-q*+k<)2H;*nJOtoTS2H0Kg+rC?J9I4_@QYT-{u~J->T=dUDeBmWRyi)~b6~6juD4!rB#o+J9{J<~Zf|*FW+Y9zEh+ zU`9kYgQX{e@Y|>;!UZfe*{qEF0OM;%#}DHAmJHS=sN6)$(|OCu?YLX_shucX4S9O@ zJYwVx$W)Fu=QrE!PNy-C)it)U>C)})#uWiAgk)xssA(E^U+RU?Nl4nZKR-Vgk;}`= zd7fj8&z?MeeRaHQ8g-kd0g%08h#WROt5%s)O5jRj4>l*u4T~6vAf-^oTuNEU{-)nt z-CoZ_-fUJ{wMgyL5qNp};blE~$iuJwbJOl0@K46|Yg60uJlAx}KIELUdm>v3pylKvEez}{YUfOp2f$Qm^=BEL)TOSe54PM*s`iUHkPqt}&W~YzJ zG{xbGhod34E0jVF<<(y^gT8p`^`m@;)4eB+vIB@Tx}(fziEB zX~=U(v8tF8rw|f)mB=Z!ZAt+E<~#}xSUEtE(%k50KkWYb?dCzdK2GD;QIuDaU{VlD zEj1@0MSdz10?FDUl(GYpPhCoqO&7{`q@FGLLhCoClA`2_9sFrQX z*#~#9MP%(&mTlt8Ko4O_>aVrJ(%QoOBB$ukUJ#h>vO^CSHxX59nx@vuNLA{78jp{U zRaH?D7!GrPMnnLDyMY_R(gwR^JfH)B!IBvd4g^e~4n%;42<~v;1`H4o038Sooe={ZGzUU-2J}El+(fI0i7q7#xCHMwK#VbjutYN2}O z2EH_EELF*Ym;x_^je={b?w(@96sP^x-3gG1F|wGuTi{0AfE*Cii`jvts8UN#FN!)u z#Fp85jAa_v{i<)fo9*o|jtZ`2M#STz3)$64rq7&rr_Z1FZ?#u8ujl9g>Oc3xzxm-7 zzw#XFxVM)1GaSzcd#nbj54S^VIQp2chuhsaY|WoPd-n3nk5;SI9%Lb|ma~tURV#<) z=;)|9I-=>w%UH^s8bpQ!!2r?0T=5W;Q~&_@&>gY_i^UkX+wH>tEr0W{XWegauT|@6 zwIcLV3K1bf&N)Cgibh79#&wte{eS%4Pi;4kUh3B!5D_n8aRhQ-j@3{5`u(jAge63a zz)n{`S}g5%?3GzS3T>SE%0 zJLijwi;3jv&GX&u7Ln&tPMFQW3`)_=@YN6A`|>+q-La+W4K=5$;~)FXk0qR#nP_}k zJD;wf?6%%@>i}f_EB?kmO5cm^=Dg9kKD!v^J~c#ip!2{H!S6cizb$}vWE2f|F>E^= z&yUh7a5_F6_-R(NDpbp9fAsM9+;R2za`!=g+k74de{#D|J)_ePePM&geEH6GwMqdQ 
znNzH_wy6VvVHjRMJzH}~97J`VCs1fpclGpY376*9`?gIF9yFmfvpUyEQC-|I#Sn>s zLff`UL`>&eu$bhM6jTKQ?Z%yIO(}H(!!&A+g zN7H82ZA*@ffmx+!b&H*M8%*o@h(pP=-=Sn%M)S7xxP83+B_TzvtY(2MQJ9}5y}G!1 ze_yWCDoH$%OL z>ieI~KfQ`ylRSRogDu3lfAH#c=GbV8w(0w(O-JEqdNvQkP{EFlk3|F!YOQkxClV1x zXkyxxU^XX>GmRfkZ*1#(eD$8%iFR+*-A`9}T+*9z@`c(r)k3tFLib^&HKVSded)Ci z{=s>De|Pc6p1kxI+udb1{*+9+E&u6Z{Y8&mO`Huwk*4WmXhYLNB2hD)+Yl?HWh$qR zs*;*6H60=j(-=cd+Yg5AGvIa_$KyADC}9mZ@A8w~9OyRrNd;PUezAckw_x9ms+JeU z#oy!!Y@=Lb$&{KfyLfo=#eehWv;Wy1#^=(I1NOrpy@?(y3|nC8$k z?g_9WN6*n*32!{Tobc}tFMlOJ`Z_&2J-*TXskweq;-|SGE*(j6@SEFV+)W#s+Gf4! z`kQfVI52a~B}AdNQ@1KI%`+0TZC7h?0Ch{DL4pH4km8GO+rIC|T9F6?EBG)^Q=S#P zs?4`Lcb}%I>$-+oRmo%Cjk|SwL@9Jl3jq6JOd<4bms0W&+;uKsu3fAIw8E6 z8-lwoB4Yr;-^Qe>x`CsCt1GCRI*6MBF)TwmJgguslRHy$Lkh@@%O6~V!4F;w1u$l2 z;H9$CRLnrtb?E^HFta7vKBW|6ETv5IoFX%G&Se=FE{o?c3LA))O$~P(iMy+tyOXJz z1PH2XszewNm}#+bkfVbDfZ>52h>X>(RH;(it|1~bH?yj;Y~;lV0_KuSjL}?|vgk!M zZ$N^g;;2H1>Lv!HC~knm-JWeULtG;R5?RMLwrTAe+5ULdsi8(T>7X%~% zaB~M$U%Vdurd+f&%{4=2$1N30#U}%nr1PEY=U>{!~{9Oe^Uc55s0B)$@2m}B)B>-v~^O6D& z;rttN^@!D^__hbx(YjT}-fDy2jYebYhI<$ymrTEFw&?CjZ;JEuBpWV5Zw?E!13EA5=sfCk|pmu@*ClA4CSi6eR1-r z-ZWF$XPw$MymPsGXDc^ZuczXorz>VWx}9xuBBK7o+s~{LMk2-7G`UQOE>a4hs4R))jHDB_OTW>#k`T7z_hNVU&f70wpV$%;7cv1LjxI&q5HNZuYQkuV+#@N*T0-5|?H*)cq8KQkIiUm>tcfrn zRrQJ;9IfN#0j70U&}g9o+kmQ$YKVkL9_?f1f|>^h4`F5)N1boG&pC(J^Dj@AKb->R z_R+~Jt>xYIv+UgSE3fVU;{Tqo8{hkN{NRbT=<#SQ3ZfkD>48rZRx%PqBn2;S5r7Z_ zviN;Y>^=UVxTgp1agTf4;}gb5)f}`4GnapiIRt|)nS+%WJVdmB6*b59^|Al#Wmr9) z{bcnMZ-y_&@fVu%G;eROHg7&{|Cvkve(KH~N2iI0>Q=x9mTiptKtazh=I)iReD$UC z<+~8IXG-(TjM~=p^l!fY z-d}J1OjT#rRs33a#i^@LSO3g6ZvU0?xi<(A%V;&nc2x|`;o|tzNrzz&u_86kl~Zi` zj+v)h4?5%ZrR6H2H3UW!aV41}%lMA?;D7kxEY-|zY z$~_=wt{>Fn?+N+qAM`)cRcMFbaKupORk_D^AR;J2VV?{;bVAgLd%)U- z{G`5=8+{D>m-kO=7%H`0=&Mbi$mbRqdR!6fKvq1K;q5mc_#2NN$dti6rL^1LwsBo+ z&80keR0VySCM55=rfpjQRMlb{0K`O0YOMs=#)Ql8+H{yBpPuW$2W0M*?onkI%2mP=3wfatYWBzFVJB{XRvIo#bW zquG*)PehKkXl30!M0R%v6E)BSOF?|NnjZ$@s(vU}L;$F@g8MSjK0ZEvkxc{u=%fIM 
z;I2zdye}N8BLLV(W^ZQV6v<6Z-KtSF58#!S1{iVzLIg*3M|VX-M{-05GB-0rcL!UN zg6Pn4Y}IX9>r$8a9byhd?v7?As>}=kMdd(yG>1eWKwJ(0M1;jRu+%=|Qpe-VN|>40 z;nO)q4#7;9y5eOAO-m&*>yml|h|4#TTB`xLyJ}t9ahFN`Hyex*4v|)Fi0Gz7U}jdu z5Tr`YnK-rxK!^~D(FjYnT<39~O3BNrE&HXExs4H@AqrOZcX7Z>Lz>nC6R!M7efJU<)eDc1r3Vs&ZbynO!T;l(3#$T{bE zh^2@KN(tPG3L*I2Vd;d-NEGVQDNTWyyRHkVSypfy_sc!^MeNm`J4ehA3`;5IngI|o zrGVwqn2t^#@y^=e`cbf!dA&IcIKLp<5SFvW-JSS8EaLaW(%Zc_X_g!?bOQj*6_GKR zp?BSCaDH-g`Ofv*ubncY>~nz{I;!eLl%^7Et;2r5S*^!9tT#O~H*LG$?>~6_xN8~! z+wb?Q&AJUSQVhf!AZYs=7_Tp{$J^cd?3Gn)dJ2=?4pLVS&Tek6dYrz@Z~iCmJ#R7o z`0D&i-H+t(V72PVz^zUSjqV-Ru#l!;PKa>#uWy%pp*WRUEN6t156fHI>mkSopDkDU zre2&i$0zZ+@~^*hbv2J+y*WBRQ{uL%B}d z&YBIdR0XMh-*ZUwRHT$uw+@LB9l*va>#px(zwS5BZ!hy)F$FUl$I+@uH5I8WlgWp- z&y*~s6eG{M77 zfyE=MC9Rs7DfH3itLEsHDa+~OUxWFHt}cM1mM4t0C7wU+z{s@rxWj{#%tPrnlX z)E2(K-|(>VCZ!(pBh1dXEOtckdfi`7VFpz!yZD`lsikpfaIjLC5vSrQtjqqpt_4}KU zQ^?@yLHP}yr&ljt)$nHEOY<*H`*d@)8_18+*}Bw=^YeY8=lg9k#B?&~(XE%{9aBvR zjfG}E9}hqK5`E+5;lH&$d)>N=u~Z7wgHFSAd2@YsaoTi!i)ac%Gchw#aKPBE{C1KN zHccyl6)nH}>t&wEE4p;*K^O>RKj=Ksw~OZLj{tEm6o@*XMqMSV`?%X~%KP^8KMt?` z%LrOQyzN>XvdBoUeARsu zD7hd21xB}V9G`D@W-!ax(qa#neZ-2=}yJpG~E)SGUq~sDWyOdLQwTmiinxX5~yM} zF~=Aq0J!^;4ptl7JW^-ihcBovG+Vhy%xgF+iHo<@I}~8h5mf;9g5vhTVV=kV2y1xD;R@aaGCcT@%M?k4!*Fl$a^CQC&(Y z%h|UK%FRsG=egvZ(dmWbAte_vL@cFD^RQm81L6|de{pfK$$7uuyRWX>>uGqhd)%Fe zXFqy0{_S7?*?9y1?ytS}>b`x&Prpt#lTLMV(p!zsuKILxe4H4sZm+mGev+D-X?$Jw zZS%%{*oF``C&yP;&-df-{Q2`~n%w>Q^UGl@o7lB&d;PrXJaNN<#-L_Khz^8~PQif! 
zlZez>Ypv=ga!7eXgm0cx3;SYLYefe`1tKJf6kV#jwJ~MHIt&-*M>(cn_@MsGvny^| zf^TMLAtE|2$IiE46YaMe+!ScJXxv>^fB=BN+Yl?t1P12ra08on%Gm?EZqn{%J5lXU zA2wZzLgeE6Y0{;N!(f{5{QO)I_S3#g-Rbd39w##dsjq9I}e z0Czvc3VplmdXGTBhUFw|Q*3kfofTpU0SnJZ@Gi5Rj?vtpZTrufaf7(G=QrI}Ew( zo3!T0DMrx>pv-Zc_Ck(EVm8ixjqm-5{+B-(k3TG@$17}7yA;tw-#aK;MpbWGnkST* zC&WY!C}_a7xTZjei3Ly#fq|jd8k=a&ls5gz`P6qiE1~2SvTm=rD%MI4>|EUp0qA3g zPz4lsnE?U&R!RzwX1#qr+m*KQVx8(3oYtj{(VlMU8~KYD<72D2rqt&fK@k^j&bICe zz8hWI-wIgn6no7fkX9jtz`w;4d5?Si0dY?c+~XeixW^wZzS$hB?0`hhURVnmHpzqYpD{YEHoFIiK zum0tFdGdYucP{9|Yugklh{7SRQ3e^FeE8o`Y8t&Q!{rd_OigPFaBmjA$0v^pi4jJoqcYCaS8g7b53k}QlbdtXTcNhI zzDf~0nPuwvlOp({ZZMD&?XRx%E5GuS7x)w3^JeiNkj1okZTij~m^mc`fGKB3NFln` zd7eoFAXn3ADz(;C--jmVX&$CMGpk$IwTKuJo10i2%50#{eze}?oO8`^S8M=k(|+he zd(dmU17VTs?jcYNj0j~GGjBs&(6;H>>lzAy7G?T44#Pl1 z4YHXA@YE(JtVOHpZk`cT9Rr#M7j-uSfTn327!a5lDTRfe|H zGpW^OQcM(xLQ1Z6N+B#OON5b;LKBJOIp5seT-WQsOo#!=-Cb1C-JRi^546haTss;e2<;C0GAcWvZcIVb}2Sft^$3>BkbVm=s647I>hYTEZ4NJ7X zxq}OWIEv{a<>tf07TFx!fe4WTP*?~61S1!)3IL#1Ol#FDqTq<+<}oyCYO3x=1PjN* z%sJ;pOHV{g!-1$0(J)V{N`Cm}fB;0~?gq;+8WF(24GxljQGb4Yv)hlk)~@Sbdib#G z8&lIN?#_e@*>i|CcSmHGq6+Tl;tqt!Vynw^J=m({x>j=JPOy z5LRs`+24Bf#?ABRm)Gy4ru-W}`6tW#^Z(+X{`~*-m&?EX3twCJ56V_%8aCRj))(jJPoF;RQqwezOQnVo!*r>DXl-dsSB<%|8>vbp=>s=n%760GhcUBW*e)@L;BqU%r(^Bek zA)4nInHUiX&(AKNUtd+NQyIEdOXi?vATnvQ>J7PCUgG{RG9tt#gv4fa(yvrq%yO+w zj8qjupJzpf$N>>mbsl#yks29LNFB$R#}ZORVuz3lE6B0LUA14G^yPQ|`hV?n`0*dI)U`Xf`R4P!F>T(@Ov2=}! 
z=6SBtgDSYOyE6iz3xNYUeB8$!6mmd}=!|F~Z?V5>l63 zxTgp1agTf4|Tea&xBvNmw zJ>G1Rhj7wf-aM%ZajvDvqVn4f)9TWXHgdd<{XC5#9J^$z%jr^^c?I1F6Zh#+6W6@S z)qe5W2RCKAqL`FckY4@Vn;z`zAAHcAw?2$+ND+bs7m_L*=&bKrpX_0nJ6Zg$=pD=s3h_PFlX)> zK?kW3=+oZQf28!h%o3;~CY8WM#7(Q% znCIQJo#*-GM=zZmALm-OyK&WaecyvgttA&h#HvNL8hdIRGswB-TvI}FN-3>YD*#By z!&pQl#+bUc)>?9g7|XB^A%TZO1`*;s&snm854#bE{c5#pVsiIIw!a+*0BD*xOyf7c z_Vp(6dcA(|;6WX8Od*B@v0pY__+^Ibzqe6j)WqJiWMH z`A4o?ON!1iL0)iZvUOYX>yZx&bBHRkno?Q_45=EM1VE~XUShyIkHSI{IH19yi+D+3 zJER(c8k>uon29^PD}bSyxB`HyE(`$?fTJU*0S5*kLk9rHB>>5tu@;%~T&fZgr({bS zQYmm(x%|;?-tB(3>iQTH0H`W4yE_t^!Lm}S0stvQ4v{GoT@r_Y2;ISfmTff;`(d1S z!*H|RF9YjWUw(N>>~VKcQ&k6X1O<0@_5(jus+3aQ5Ze$!(h#K9#r3i*l8A_@*TT#p z5HQy&PFSn#E-y>dg~XJiBTOYT024{c4sq$GUaSK#HYwG4E~m%1D578N`lV=@0#}i` zuceecP9k!&*&w1$bYo8gVM9m1md78>;~QtG0s9~SnQs5nUw>zAcOv^uH)G*Cj3dRq z-B%HVoPu>YvmTu^C%xyK#pV3`{OQ%RaU7>%TsJFyE=TK4+qNgC4`e?BK#Z~Po18O7 z6n8W=AjF7w2h370OhiD;cQICnHV&etPkSM>mKRk@&U4c=i0H0FrW!cl{BZ zzx8yOH%;vU71CU0?5iY=wwp_4z@}}@j6-;SeHEKfw7zulsMfOGZlB1LNK6DG<=OKm z>jy8F-F7aSBiCBD`{DWZ%^R=0(a9|0E18tm|<^<^^iBKo7-tbY#d!6e`o?aSl>W+IX#3D~^*c#Q@ z8U}7z@ciVZmwkWL<_}NX+|&llCFj-grV8BNYym>xRA3U3*ft&&zSg-ea*p$^3OxynDKV`q!-CqBD z5C2*%g^8dc{r|J~r!l)_*?AbYhQ0S0-r<|>P`8GzuBz_pW)C7Mk|IUYwq#hAVFiw2 zCs5?X0&D>Zk{^)~z=?qX3Niqe6DNja1d#(n31TNuV#|nOBcZL4mMDoL*&Mpr?C$EC z>yF>}PUoDx_Zsr!y;WV5ScwZ(tXQT zA~|vZ3gAYr3>qM40dhse%vKNrL!?j*(!2pv&1XJX7ZPDGaPR_5f=s~d1j2ykkDd+| z(158yVx62?88>n9g%_6HN*2tEAk%uf;>BXj0_KsI+U8WJF;$#4iwP=lO|7BFryBPk zAKd-q zA%;?}xQP>LYn@X59+UFPz=su<_b2&UYbm8~7uc}JX3PPRwcJnZZ@nF+@jk zmWWy`PM8|irm3UkHK_+m+|YKqj4`pzE0o7Fo{fhc;1?G*4)U&c0hNIZWqiWs{c*t{ zL`J7BZtv~V5rDN67BoKF+TU4o+jy#qkN`D12dmDX&T@-Em>G$eN-d@O6W#bX*9XD# zr**&kC@{w<^B=mg{i~+|0+VtJHW54I$8!A%o6d4?t4%mK88>$pH@-T&B@MjpHvzs! 
znFN4+?)Gt=Zrt(=4llN+?D~5ToBsKbFT~BjDgwo*T16CNW8+j0mtUO9rPkc0IOUyK zt)>uI4LFu*tT8r`+kgzsXYOQhJBBEDer2RTi0aZ1naJgQ^7^0o0lzl$*;V(rW zc0|R8t^;b)8MV z0Zh#i)%($+M=O#Ni&h~bj>*)EC{PF?nFF!zOE`oM0JYYAX(Y(FAJ`$65Q2*D)%N!T zkBF+aFAnXC9Gg6LsrjJmi-?-0skt~lR727#9Eh3scd}10?|l>iU_UP*>{~*7EExdQ zEymc-XSG(e3hze<=D43MB63jHfTYFMGxo{0CcJzp45#Bb?#F*$s)@uR5*XD$5gkdv z(e~d%s@neIA+VXbY6$ef>^mVKI-!gDo@LLeX__%l`vDPCX5K$Gj=1;Y-tBgKPYWWd z0#fs$?@UERn1YDKrd`D4o2T#kvb)N*TGd&5Fzl|7>JnySyQDd6Lo*u(hU-{2D<58+ z$@P=NCd(v6w!2O1jk-bB}R9UwaP#Ro#nXNrmd!B0HqU zFxHZ~_G&C4R7T20n-q|JH&m*WV~MrSm&@vYx*o1FtSWCc38hLx1c4|J5+I|a0|ER) z$cR4{6zqPyn{H2jjx#7lNfH*)uJR|NYk@x^Ok)sGV2s6Sa3f|y)I6nW*M!>(nJ!Ap z4IU7-2giM!mF-ywK^e13gPcS;#nZA&77yl&n#A&iRo0S?UHiWYCixSHBn5zwTK z9gZ2W?9xq;%Z0QSrU99-oq@X!BMEX7g@m50=RX)Yr!4rnc`2p5TNja(tjZI1+-W%M zBpYZOI2cK_LhMbNH#|*~luAVV)=Y3GqDMWpE<<=F`rjJ43mOI00Dvk?`_28+2Q~`i z#aR)^DWd)8eDjODXVEVo?Kg{zx?j~kNeJlSAnQjg~H zSbA=T@a+6AzkN5TI}58Demb{@kHV`z`O43~`Ccg1dBP9}t4(d3GSZW^4t$7w2k)EU zX{~kOGjDIS{k+4iB2F3G2XcJ*ja$d7Y-ySxujMcvr34qleF!}B+19q497Z`wi|2p# z`OCL{^X}$$g>D!I2i^Nmiik+*x^A@|nWCE^Vwai_!m!;nO;c6ewe6azPUARo2#72q z93M8QJ=E1T2Y^5%iOEx+w-@WW{mLJ+0v20YUtp1t6aY9LLTIuDbVaRU+{xvG7C|Wq zk=f94)Ut;7ctnJPXpV9E?7?Kxwv~3P#A})sxQWJ_x${n$;_*o?WSE;&9(MzNGVCJd8(1a zVsX&MwA<}s;(eZXyWJ8|;MDhht@ZBiqU`d?!Etbk9GHn1rZOJ13soP-DaP3My_w}a zP1BItZqJ%Gvp^hU+7q{#Ii-}4`?d=ql$!V4$!v6Xb_PW5cyx3$YucKNOCKL#)=3swX_I9$11PCajLHI)mts4ahE?V?^8P>RoxL8kU2GKp%wwk?mmtq10rIrd0#1MV*613 zf@0G&(=_er^NN{>LI{ZL?l_^UPSXTxQ`79tq7jjiyu^JHY8*#q3`Eu4OwFu`>7kvq zdnqM^5MvaP%``O%P1iQ=>P}`X0jz;qM|oHe000$NxBYDpK`G!+E22(TgH-0l;@R!R z8)jDDukpCq=x%0#%z)Hz66F{~WEh6+cFn2TKeRD215VQf=tLB~K}1C`vue6;UjTOq zF{HRB#g|$YylvZ@^S)%Vud;-Y_T{1&<7#t}$Ei>8=x`ZA7=}p==JVydWtg|!K{>v; zy!Myhc;n}O>u-$I8bv?89u_xZq|$b&Oq2rT)wXVaDqa0Ex)@4bjdhZ7y&C`|BHWm+ z6U0(wg7KwSo>^VoIo+*$q^(Q`2M7CCd)9VM(*QupKGhuC;6axh=GE(S>8z&#e6@hf^qGZQ9mQoBYwoOV203?@^A8sz=G$DDL8Upkx({6ZX zeEsGAH`l{qwE;~-TDnw?gqTCMn+L}yhez8oWs?}AR+&Uvz@}{nFT*e#a2F}W*(_D9 z0WWu(rf=&w)hUM%xJ@yn6k{o+*3z~u0PJ=sbzvxzT0 
zqOS-#0U`$0n)h3ns`^8(gGZTnBcRT85Rd?Y!_h3*x4-k+>hxE+7O6fz?m1k$w>=Bn zb#0H%zx=14`S#CQ|Lo=U&t^Kop8J}{VYN^*94s79FTcEaaN*~_Ha4=%&lqhdiVmF2 z32P=rh*2nqCrk8|P}K;Wcb{FBQhxRB%De`4^Cv3-R3{8=!~mqu>Hy?Tm*#9`Q*){H z;cjAPV2+4R^^vas;>lvy$G>uC1-k3JtExckLC~J8ad>PnCsd~h0Hu_Z#ccWN?7#TV zn}m->hl&aY4n$%K0Eld&iG%QyRR;Sg^rDiwH|}o+1;@Q%m?A!$^uVz9x&qrrCQJ%& zdUa7Bj34)%|E@y0V9^**(u?n$-Ctb|g(*@6d$N|Z?>Bz(`R49+_ubRAYanwEdp$%3 z`-GIk9ubHLCjr|+v54;F)O+eK?o$1k! z9EULf?!5~yf;}2$HA_W}D&P~h4(ub#?7*`imsaW!Vq7~^iei7`HGlMjsVdk?aK)x@dR*9a^IP|S>)G4??stFe|5$i-u=-Z9qZ=>2uvpCAK07aU*8(f5 zL#d=KfP6~#L1BrLQ?zR37S4v}Z@h7OU(43G{kU)KcNaQ@NRmGL#_1awtxm>d!0B=A z`$Us)`!&2(&S{y?CYV~>d?$A7593VyP;CGnUwL42_15*@`bGTW%`a}H$n8!0EFvnd zg!)<)FcVP~-{i}o=;5`O-nsLew{O2EI;j^YFmPlpB5W{U%n*bK$mw#+Db8}PKoW^b z!9`O_p-Bb0%d4w*&hJN{rYV|La+!5~3WSK~o3p)sQQ)*)vD9oPW;TfI^~yyggwVIM zeZG6v&LYgSnoQKhq^bt)02}oxyP`5|wkB8M_*$9r<@q_0cU`BbrPlRI)>lbOO)2#u z5z)g0jmQV|aKcdGs)--~7x&&Nbr^4eI2A7;ggqr~Z?fv{kW$dT)|zt$0BGBN<#?aT zHDP&QdmR8`j7(JWh)p8NWqX?IUNcS(4-e85 zty9T9TRMfhJqya%5}+z%*)#N=hyn#huZCc?=5ZB5K&k33B74o5NPNC}Ukbi>8Ikrj%fm2ix7*{zftN@`vfpc&AtfU6qO#j!j1XCB-RILqgO!w!ZR1GInl2hzNL@L=!9OS9P|WxZbS8x$gS6xiIe)O`U7)XdDA z@X!JQVKeNIXnruCrfDz0HI8GRrulsC?&LI^&5BN^_wHXiIU?A_7>8lJyjXqqmCuEy zT^ug19z5u$;SYT2g$U)Zf9s9E^P6vNzw(D@v+H7t9k#3`?k+D^6=Yj3|M8>svWqtA z8@FE{cD0$u1yk9r4(12UVXE8U!?e5DUOw2CI=W566*?f|$;rubx!k`vy3MH}cM-ti zV-?9|;NU>prD`4WTE`zcI(l!@{^?)1_nl_(*15+B4Adn$Y_!u2Oc_Ts{h3=QWlXyq{K@LZ@L0Wg^m0AZ^zvt4fAC!e z5A1Hga~Y#B21j%QQ$*f7+7rhM2wjhqFq=-<*?3kQD&`RkVr&vjTy{lO5nYv+p3H2T z#6(<2LJxptl&kesSiHFzyUn`9sPn@m?$)?SUE zFYYfEi)Rk5)hrkjgAaMSyu7@&IGA@W5mkn9RoQvp&b7ajTC0@8%-0W(1Iy{zJL_@t z#@ly~PELlMynS|o0L%qiH;!K=;Y;^Lxhn|a@+fQYC!x?g|s`DapK zY{%UI#39D%ye@Tm#F_ahAfy0+OU|Kc3Q@ocdV0k0yn&Q4k1{TrPV<^@{DWWk(zmbMWUH`-STW|cge$OyY7w??DwaE6y;U9mEzS#t>GYcW+EW$>gMnx?B;QHQD2-83SR zIi6o!F%m~o)2bGUfzjN}p(`a-ES3(+MJVe4W551Z#T(GyDg{Kc1E*4beNpqo-6F@t zgj%OcsXcjyr&7-EV-x)`vHOXFh_IPyRaGWZLwAHHa^C;ZF_ZiEkA7yc`r3T@n^)cM zzv!MdlYkyTwT$ip#QP)w=0F+0j)GQI_>_7%m;o~FjZ(Eri)^J5k$y_&{iCp#2y1|~ 
z)EWVioYBqjG41?U-1owj`=l^7P{10i0h1X$Y!}#n?t}8cWBOj*^=IGczWnQ(+3D9- zfAS}fuhTB3!%My{!>NQ9cjIQP>R2&`ZZTI#n_-p9BwE3(F45e&cq>VV6EvJ&} zRKeYfh7{(DSs-pW?A1|JRjVix8232?&(OZ838#u-oiPDTsJ*aO4UPOAd|z(6-d3*35DS z7Z-nD&mAa)5IF+7As7P@g73Q~9LdTwJ`{l=+WQWJBRGw_ZPT@?1rggWI@;d75ji>& zhtT!CO3Bj>Nr@0Wng+LE%7J@#aigkG%$XyHNEF;WXW6UG2Mk)Ymc5u#XacyJt21E? zgyFwvoD{!>b3UP@EK-rZKZT)EsKpv=JzEaK743)6|dx05x;bsG67I z^1ZtkFmH#c`Jdgcf5i`*scTrDKkS+S5sh6@NwTWUh!er+4(Vs7_TIL{Njo#pI*%9^^n^0J>ji(VdWyxN>M?lu}J{pJpan z2I;7c{SujC+o;R5Q8#G16p0&Z+&#_~Qu4UlSjpl(x)<)(ei(S2cWdaoTCQS(C|E10 zVc#zrZdaQ%3Up2H?$fk&)!DM|`sHpI@7rbH%{S*~H?Lg}7!KwK6To&FXI-^ zm)(?OjIr;B&2~EsO-zB~E(#ob>YD0mm6NB?Rq#mIz&WK9i}QGwP*;50IhQ z0Wp}A%36B%?d&tdtAD)w`Zu<6qXJEGHBRE`+Rh^M^RfN>g@?O+(`~hQPJ##dYJL5B zq|!;_dF=;HY`)Tt|6f|&FYS!b1A-uUa0f;;3as{MtoH9Ws#&52i?KSZqZ)=#ti&fQ z{uj?65CT(-s_F_D_P*;AxFY}(AtJc#9g-fEHiudoCfA3vvsaR-QlGSOcw9iF3P=uQ z8bVN!2oM13BYs~4H+OSKCSU?&M^JPAgpAu1gNhpw?9H>#5x_v{d*uOm*mP_FY6ghT zLBo^v8h^i`h(KJ_CdnR1$vm!VB^g3ZSvyCEX(TNGk!mdjLg1%Tt@$)=8vEFP_OpKXwO_jb1}%2~jaOedxh6VY znJ7^(v#KJ(sJ?=NO`RrD zCtT$rq&B2@xmh2~=JUfjDk4Tvufav8szO9yj_80^MPhuYFe75BZU*}_^wqEpZR5oh z$wM^aB07~?w3JfZJq9Y{G*n~o6vDI{t7!^paj;ZXMlw|`B8m1II{<))F*W9pOXVgq zHs)E)t3!6L4ly+pqPtg-y-V$WUaFg`A|epEdsRgUa2Sil$fT#x5ZQoFz+J{{} zx;GkDaJ0QYazjB>Qfka#oXWw$98*|rw>T9WuFatyb9BWn8 z5CWT)BBd02Us}pl%@INbz+zd(0VFaZaDtGKqo~FhoKQsoIW@6JO>wTyqVAlWxEhXm z$~mXN4wTh`dF*;t8&$PdR~OVQwMaF0BC1t$E=@ZR9Mz|y0)Yg4S4T0m0!@fVi{;*s zpwf-V&>%Y(4JTN?v;He61MQn$;s{YW_z~t>qqC8n=R#o#etYbN(|AV?sfx& zxY?|BJ94uSLd_EqtyZhU>n9Q9+O1>n<}$8%{lHgS3yXL1W#6qa)1SLo{#W(*tFguI zN^sG@wpJ?+MvNSQ6k|oo@_{Kg8o>8@wD@Bx51?aps~{f8%!Ck$1>jLdDn2fJl=lE4 zpffl)x{(2ylM3NH@OU(vb}w!6`OWms?xMI$M&5xCL~=jdZG9qamh%`xGn*~?T-AZp zz|>;XCV*T^+qTto$oca0%H5}_P}{9mS9)>&;t3hM(IPs+nENtCqcJ^MX`_msn5-Ja zkTn~jN0usxzw^AiC2=GpL`2>@HZpQEV~o=d$0?dI_StN5v!o$3Y9eKH_XaVh6aZ0l z6*Gf#qSW`zVe6_|vs}`HU)_fO*&n@D$94U24Di&YN>iHxG^%sfk@v!B)E=aX7oY+>OKDGoWbA;~0p^ zA+-I~IJ|!6y)loA`EkchL%d8guWIg6D*~e<8KRn18)3Tj?DGeU!_8`2YU#TLqUT~_ 
zcYC938NQQ`kq&hue^YyrZ&=!CWgVkaY zDryx`_D9Y81wkN80jtXP?34jXB!&36-uLe}0x%IURJ06|(G3v&VO;=%!TTa}2n1lN z;AjT;Db-y5C?pIZ3Wfj_LxZ3$9I3@8tQqk84T8X!h@wM6bs})Ufc%7!9v&YILkM2g z)R|M_o?X%S5hVdX@euJgq=>{$1d)uv146r6&4t#LrN7v)k zb{Hq9YLi^ty^DDO6Eo-Vc=joAiWC#Z;2}T)CsSi$d`#BI9~VAK+tf^2TaG&cwthUc zpvoK8%FsVFK~*%v;KU9Q*gmE406QTNIupf)+TejPRxkL3X^(!tVPJ%jh5I<$>WD^e zO!COX@KMj3@&=4ldT>B5>O>M97Q`PRV)>ZZH~9k7%^$9{>8JSSuYGNLZWjA)d$pUQ zrjX3Fh^RYZNHIoi2(*@IsJopEN>a6u5d{>VHpOYOT!w(ek;d%~Dr6hMjn$@MXli1P zKxkOOEvt;<$c-u%6zF3cDDI|FRI?N$XTS)d!xqIFXozTj+(McR&6jMTI( zBpYO#i&U*r&COIRQ>uWGUCm1=1hP0h;1E)Tq2|q)(P1&0Wy=++gYUfuloS9wxMMP6 zH5PTnV8o*ukee4XQx$hl6y7(=btW)(bu};|asbsLo-rl^1BZyw+!Rz@Ma59WiE0U|mu5Uti5B8ss| zDbe1;H^dNv8B{eczY=dTN@Svq^FH*v1gj-r^FK0hBPMo>kS|&9rGc(TqfZCaU8! z?MpjmHhC4FibkFuNz2bq9Yn&Koo^y6^#P#`^ZV5hzpnkV3C5%k)lEsaxs`xA^12= zP8EYj#Kod>_lt{*-Bb^jN8Hk|*%(Sv64hEubwxx2E4k#U3^8{zs$!yEKHD@C$f z435l9=G*Pg-H9kRX~-*6sT79YZr=5Wi(~O=wXJb+NU1vt(j6V9I)tR_?IrK-|BwEu z-}Bm^{eQmsPP4fA;@BNDDYgu8eQ%ZGH&*;J*PBhgJNndOo*&KSpw)q@`*s)|Yb|=Y zT9cR5_WeN|hC!qt`Zx{4xQd~1cdxx#z4zU-!<)B0_v$P4a5)}sAKW|r>dhP5tC#){ z-l^R~a%vw_oQaNviM=6XS7OFM zNiq_DGWPwCQq!pDm3=qDnnHo_DhtLU5LD9w%(4iw*nrsYsajcjaKr zK)hrh@Coy*WF!|t@RS)|4`Eo9v? zHSHqhp-kg8P~UOKXw!D3)9!dV%O=CH?HG@pw&U)#-3ridUAsYIWtfq+B7#y*l9G&D`cRK}eH$a!7Ip;i|clX|VN3IQR zK$&XOG@N)hZWoKA>$^8zJASEchyx9)bG|waM+c8DpBB&|C>fdoxdSUCap8v|1Z~yyI&feHP6IxfDw)Me{-EZB4Kgz4$IDYr99j*V<%pnab zyZ`!~_FuMfT@GK=z!2uzY?}e{SAW{x{pGs4YE_BIfzWyH1^uMu6(ch!qBCo?=7KRhwH$IZOq zMKtuhi>QcLH8I+J-p3d>8#AM(jXCEknK+78*HBdVlm%uqli6%W1Y&i{qq?WI;RH#O zh*eW}4}k+Qk|VIYw>9OlOd*z^{NwQamui-#IjXV|i$F|)y>GQ?V(J5>DVOHzZVRdKz_UA5N`4@HvH)Yz| z{5rD+gPEhQAR!?WfxAp)FOrVtF|aDvB8bSX?$(>lZp}zd(|}K79zz$UOj@N20hzf? 
zi9?z4JjJY{W)!ek-CXS&P7c^a9BTmrXE#t$BSK~(AOhZ7vj##(E~PLBHAykKSAaku zWg2qMvQLg%sWQ3yVsVU!Dl%>cG{1H{@49BnbvF)Eo&Y?AHh>fYXFf93rh65?JmXn!em*o!{zwTW^q`|XdJ(SZR2Pyj*)6RqYZ zipWFB9D|COS|FZw!*0DfJHK~+e*XNkFLW_P_Rw}EPg<(fF*Kb>8OM<$`@Y|r$jseM zRnVN!-4v}Lf0KnDuvQc}RpZ(+_V$NCW z!^sLrfpAZIF?TJt?;{V40f}8rm9&~D0fPB{9?{H1+9}3jSb~W{FS5=CMf;*y_6|V}q-EPE6u~Kx}mhI%l6Aa@R(oBX)v>bNx0GL8t z?bfh^$RSz1rNbbvC#8EW<#M^YXX7e|r|M zah?~*RUCANi_xxt194PGRb@clm$u9SApG`K&fWdv;w70!Co)I#=&tHy5(WJeat`{a z>?k_88N{4^{=;S-EefRjeXWAwnEU&?E857=WN2;5#eMWst z`edbzS_c+$1+9W$0l>2a;*ZLE5EX~Luv<_>b*PAh5Nh>01p-BO6ZS-A%~(JowvBQS zQ>Lh@l>k%hkOK#)V=WcjnW$~%U7F|5eM#QByUEVv+j41`k~8#h&`0vJ9ack#>K@vL znOkn#*j-YXytYkWq|O!xC(Fa6goi0@H=Esh-gck8am(G;({S(NY-X{a%>uz{nD!EO zd&id)qDv(LAjgRJ&reTYxG4nNaS#y&eP|8SQVXEkG#Ja2Nvz>8I@~z9wme?uJid5z zbn)O@uburA94CP|H*S{Q03qt*;a8eFQJ@e^1ga4tI2$58Vm6xsX3If^tbv-V?uNL4 zZXU6#1}V`yyQ6VeXV-JosR{Gu?3*?GyV88VPlEYyIzDdl+x_Z49e@4v&%rND?GK;z z$KmLQ=C@vX=4*fE;`9IQa`X3;v6xOoy5IOLK@IqXk@A7f!JGiejobntR114u*(V8F zc3o;gK}D5db9qUomZ$K(mg5I-LU*@E%-;rspdJiR-P9P=rL=7RgylUR7uZS)?ztxO z5sVy?GnvC9j+-Kyd2vVL&_*V*Dr)da$)6)6KoCM8@)#Y706%EO`_Nvc3LGOb01}b$ zlf{<(z7G>t1OX;#>ehD9B)6(soS!U za!OTd@rjr%`43uFJa$}nxsoYWNM<4!scJ#|bXJc)3L|7qiog&+G&3Q!Y4lysi;w%( zej6N3T>;>?rBM)><`ZgUGJs0in@Oj2+|`5aSY{{E+wm8#jW7Pw-`f57cg9P+yy7q) zxx*G-g!`YT--wo|=FxMnZ7us&v&al8Zc1G{1Hl**qL1ScLqM{wZ^vnvEssuDXTaFa zmXl`i$jn-$X;Yc9y9JJj;N~DzMLKPd911>H?&XBL zD3o^3=yfkBiWt}_GN4*9XM8ffH~!HPNL4F$F>j`9s9Y^-Md2r`Tj=`@OIk{%D3cD# z4hfwVlNZ8vxjXOmnAxHEB6m?;QRBQQE531)6`b(W}zJ}^y27Z-j;B3 z?7q*Onrd~;rk2N{i_3#K*udLy0zmgbKuo;e4yCvo^i8AMcL_R!4aAkQ>N{1w;fj^5;+wsTJ#ydkqZoCtyQ+$UDtJ(==^NGn9Z08 zm|~2viK<#kF|#s_mLc@XYR$t=v4R;9nNSRoh|qxz&CJyi-QAcOfQV>c=p_pPP$#R^ zR1MQ!Pv6Z^1&m0l?$+5dv~d<7kp_xEGPG?I0-6~BnHV^Q5NfG=O&NDKcXHg93P9F@ zh*F3#rb6PDb2hDg(-07YqY649Vq{7ohCo+WTd=6$V35ac;O>jBe*S#AbNjtF&+p$p z?7NmAxs_pu4iOrL1i~^IG6iZ0JFB}=H0-?%oB-U#-GCgJn<_3+Re%TtY?`JW6`)E) zqE$4CYSTu`QtCL36%j))0N7gz?(Gs9WI{AEuMjac+yWsH$!M`-j!~tUV&pDzw;FcS 
zwsz5D8%TnJW>-dZC+fPIbA?c)mf|4<0FX&ZtEu8%gU~>9uWJMffX+xlNT?NDz%jZ8 zk0AsD2NlA&|7gO1u|?#u3VQ(Mc{c-b8C6E9svro=(LaN`synogwuPcrqIO3%!k3#{PVAL zLs%~U%+J5^mvT6d&n=tTuU3$0m`i*G=A1SqjS>1%jMHUn2V_EG4#mKnqfrzEp-O-T zdxtmh_bUS**SXs~3)hU32O*@Y86h|AqJBcQ!H>$u5Ez0Jm?N19Q3Q@+>XU)B)}`%@ z-RbE|&o9rWEqYV6PPZ>k*VAsO)BNb>{IIW8i7Xag3zlv3AjciZa5G0fYOT5Yq- zDFg)Ssc*3Rjj#RY^Ea-WhDa`EKYj4t&z)Vb@iS`HJYjJKk^nKNZDgoHJgJE}ntjB% zFFOJ<5^`uDwuWpJu<7LTZbAVNSM1Xq4d8N{M0WwXb^s{g)OH34Qi$Uy+h8<~11O}f z#efCX2-dNSR<`1YeOM3Y5tnhQQ>T{0!O1nGczUr4ZI=T1SdWg5yVx(+Yum^L0`M~##qPh<(o5{%@R`s#cEn@ zMM|`Vg~!82R|IbprIg13MhVOiXbi;l(emz7phA1I0&RXQ=!GATJUcdS3KY91y zpUclR&)@lrx5AxEOYeR0f3Z3G(RlF}j^FvuX4BcWyLIo`|1sJB{iOWz39XN)!+ZbE zu^*@Yt8bnB?}oG6*YEvr?){7ZZ$%U-%xIvF?5@={aD1fu{CR85YPaVNSNg;uUBH(UrW*i=EovBnLhz0{TGK64~rx;wH zuyW>O8&48%Y}##@6Gf<0ici`+ve#w+3j~TPB0#KwpcZ5Nqh0AwRpM>NCt zjRpgQ?pqvzlLH7=fa>n_$@q4U4F+Bz1tgwi!OP2BDoKm&lV&CK{RRP|MRX=q05FN* z0jWG<&&?+WX5b#!q99kQ0lZZ%K%W$Ak;gU;r9z0{fgz1X2_u&RpHji`zYkhv@Mu_3 zTS6(=M1wib9-A2UVdN=HgVb!?P)PmZiJjbn z&(Kcmyhu6SU0(k^Kfn5Yf2@bQ?Q^%5_owxx`@vS*V#-iE#+Xoy_^|Cvurq_M-KBOc zBPbu<*j=9YZ71L=01({Oy@up%lK?F>93z&g>ZAdJLqOh^^?3EdPt3a$suw#PHk&lJ z^(tz|4h>qggy4t*5GR_c=cjh zeC6LRWj2SmCOpz2GQR|wC7AEz5aa9M)jb3XW=>Ds3-EYgW)>1pKrtaBnHIwq7xV+R z@AK3VLFOuMfYTwjI*)XZWe0qCwLbsW zgL^;u>dU&)*ml6a&bz?8*-eq5i){_K8ZNK458Ox4G>wx1VDR;7Ah6|pp>-@EVif{! 
z%ha02Ce&OJaTo>wXk$8`HUhb6c^V6dHXI0SwYi8KHloZ7ZVcX}l(nwQX5KG)3l~>s zP2UV-p7Jp3dh@Y~u_~0!de*eFt`{jOrD@tVO+z%Vwp%uF8mESn6xpq|CnqOclLvS1 zHeIW#oPwFLYES4cS!x|C5cadVxDC_Tqz-Z=hauAhQ}OB^s6nt;Y$`G(4uRQKz>EOh zJO#)iDJ9^DOe$gkP19WIlp+VjJnd3SsqePqaJIfYvfQN9Ejt4-b8%~C{Zw+Xy4&O~ zCPF77RdEEPh{S+kpjtBkq?FKM%DL%do~np4b8MS3?g~~Xo5wCPPs7*%x3L3&7??17 z4PDc0#z~Q4*NeH>WSgs3o`2=dH{V{3yW1Cct{ordkwa`1$=MkzJ7X70L$#`wbJd#g zfY}ghsiT7iis0xLu2$!}VE_gZsVb=ns%q6Nm)JHbcGFZsNHXbmH-LF05|I!B#ZacP zNnNf50TFAhl^l$0JFV1nj4{_8VT+(rB$Fd* z-P@mg<<$-7dKl7W9;Pckp8d@A<1gms-+5zw+m6yOoG+faIll9YoB3G}#}}6?p=Xze zhw0$z@;vKAmUA&+@}|VQmw0@9JU`53y}G>EoN{ufm+#_V`sMqt zl^cu2jc57V1)Y})%~IEH=sKJheq_$ zYY3@T>^*D1tBISQ%qkN`163>rO2CO*)yG7~4KXM+p43?#f=5EE#sp86$j09X`yB+G z9KisM(ScBXN|r&oQs;)lRE9*($;jKby~y;GYnFPHs%S7o-^)-1F=ySTnQJenW2 zvz}{Rou6&HIY`Dx6KKeTxiuWF9UhHS2_Z;X^|Qm#azQ;fK6s`KC&$OX^wzJ;7R#IA z=C`(E8O>mvdoX+G{_=5;)YvB#FIuGzd-!`)>>=nMY%d>xlzuj^6_1r3 zJooDD^Sk}xYH{{A3&H$Kw+no)KA0b#%y@P@_sePX;PU*%8_%VA=)KIBvGF)DEE2WW z>|nPEJJ^g}b1n5=#`|7-ti0WBmz)1ljyDt{Na%0DW2jfp5iG!^+0nVWf27= zHAh}pmbz#{c))BNvDW$g4h^=R;im(htVmwm8fW3 zAmox)S0VEJ`RlTZdk1uCkqBbA^lxs5@wdnIb2>~-;GWG(n7bS1R z1DZ3qgS&&~h0RWgGPV%nlXL-0i3bjxQX0XE=g9eFyWq#nYGb9Mx zOkI#CZ-9JB_@)S~x98!oNx~-#$qLrp;hG_&u;y^L9sc|%578OHp z#5tF#4ulvvv?(}Z&1JjIv)N3Y)x20Xv%qO6IRx76b}h%0Qa_t*rs192@5R1#_i-Fe z^kQ|DQaZVQ4HE~9?mp$Q=JELO$jwwOH4W@r+lwH22*J&EyBz?yTQ(_E?whVwm0V*= z9q4Wvb1lqWNYTWGF^dT8Mnh<0=ZI4&3J4(@crB9h$ZdS)`iL=vqx{DW))$(LsR-5!=)-W6dS!+&4`KfdeLL zYt<~7k;XDw2+X{jMk?g)S*Cq8e;h|s?YhpvnPY5H&Y37M@lbNECA4i-8P!fN&W4g- zy7|(4)^&{PrXps>h;59!-3}2`N-@SfP17``l)A2~)42CJcvvj}*n3BN6|o8x%(DkB z3dV*GN@nm_{onuqfB;EEK~&(#2=MUw5ByzOTTj^T2>LPQ{lP13P=tq@2| z3{==-aX?T&CJM|J34ws^eB4&BiVB42Ah}>gAY=q2m^2s65yEye*hVVzb6A%B+}T@DR4#3ro7fESvp1LjwA!D0>-<|cU&Pp6 zZKj{-GuN8Cp6%Sm-RZ@>gX5dDcI#+P{X!0*93L*?@r_%4wf^=mfBT|L?eg#tLs@NK zKehkj_48le9mRRy_D8d2Tp)(s^*8I)mfhh-cXD3HBLO?@R^emDA8Ys$SmEu@q#D?Q+nxX@hPc6)q$e06nYW?k2nQiw4Ejb)co>f64KW$)2-`|h3U#!b3*?b_T?G;h!D 
zKi_s=OySG%^!5C5(v&{oURHB27-id>?7aKT-M>lWx6k6Az}@zvKK}t57ZJhzgXcdu zV&A!wiJ1WfHl#Wg<#E2S`@?7R#p+AVoq2k&wzlBJKAXyTA+ne?bv26i-*|idE3e&| zwK&4fFWwj$oac&wn(*LkxP5beZJFBDX51>CZPs11dwH|n3@2T8z|FzY^0@%3rn`G} zuITDcNHH}AZVJ;dmO36C9|xnRX+j8yXy9TpmRzI~(LANyZr7!5wOYOOnU}KEd#~S3 zv6&q;*YsScm1?MnO<6CSc3@BTWU=NQKXB_XKn#Ec#%>4WcMg8(fA!4l%SyK8fiY9uHrLq{M5g7`ObfQ>+Ao<=}-QdU;M(K{?g_zKDYksAw78cy}$7G z^Z&I@+uL9LH;1F^@W!t!tu5q}tPil&0Oz=y#pMr}mEmIph1nLy#nO*1{@Po&zS6AT zZDSmuZS@mVzI}>M9G;d3p5iH<;we7mKqq%+0uWI&G66yZLTb=rh#jc(%oLMe`^=gT zeE#C?-P!Gn?<_8Uyt?Y8Xb2*LdD;sVM^7C+eBbc4>T!RMp9692PR_#^kC?FN{0gi5R#dh8HV5G z^ZmnW!DW#bu72~4<5!N>Q%9k;fYZ#?8r)8)K5MVP)BLz@ZBc!s`J#Z(4;GvLDLx%2 zrHq=|z=si)`DoGKx;Ecz%KdS2pdqg)2Ef8W&9DT&eR}s|cg4XJEcU%kzFMt-9Er>< zS819kBC(14#-hEFKDakcV`jBfRrTJ}yw@=o(W#b1oKi|5_HFmz?5vo^F0FQ}hMS|K zV-eeyZRnb+V%f|r5UXR@FLZ?FK1|~{jxokM834#?-F36bVYS_;2%00}GA^#JuGV#E zQ(~r4vee?D;syYX2X4|>3o}o(kV@BeZf1;HUERzLfZV;-s>RXJ%)miR0f2;v$Q)zp zky0+w^c^!xodng~JVWHYj5ANQRFT-E%d4v%!)!KVX0dv>Je(cQSL-WrQ@32E&BdCT zQjCOLYt1=RU{&3xuvJw=_7`db>rsk_ujd>IQy-O)8pflgZUx|ug+FYKRaCRrd-_$5k*V@$ONjj zN)agl#vBzPYaa5@b)A_7qCkKSZA_84Xq9OyrM68!O;fFvA`a8IPu3Tyh}d=AI1Z{h zO%pJfS;CH($7$M)!#)cciHcNo2)mu2ugdt|=G@)q7pE!RipXF}K>H09Bg7a*L{$+{ z%>cm6L<9h8RdEX(R8>?0L{)8KaBxOu3Ia&#jvkOE6ICUrK;#I>4}0ALF~>Mn^IQ>; znSs!%T#Q5FuzwdKaW%=VLa@cE<`oJ9C2&9>U|?vw=6txk$ZJo~^-bhh$P8U2Pg+73 zL+o@E1Z*wXkg@8~(Naqlkzp*0#iE-Z25tc$1>UY#1pe%^&(@;1@7;g-<(Dt--rh{> z=4g3UcXT?vQtX-2dvCWd9US(!xYnnuqo!Fiu{I@yy!16;x^Z-9p4x8L)vr8z+-rJs zT>tCeEqCfGFuz6+Hg|UQv}+d&Umm`6#6&<)gO80NIg67i!6VWIKZ>Ooy~Sq2Qj4p! 
z6)}vKSso;O79;~@U=UX(Ziq-E!;_VE{Mhj@>4}a8jxfd8R1anVq>StkRUBX(F4T{r zSvw2Ec(C(D0#<^UcsB|WJ@@?cP2ZfJpR1~XkJC0a-FDcH;}~OFbbZrx<5W#|Ip^ha z*&i$}wri@T>AJRUy9rX4wo~mwIN7tLH2u)s|NP$e4U_p#A0E60r*A#?FSU@%{L6h8=dG55ABXq;;^BC8Z+7dq z;9u&F9xQj?v^Rh5=H_%f`Nww8yi9j*hq5ZfA$6V6ST@8Ennli63HpEZoBsg>RcKO( z6y)HC;xH=CSSE?N^DyJb5>p(dZjhm1>DY2(zh9 zwbsox97ud(ka=ny?cJXoGlHgvYLt1TxAOy*5g)C@1LJ_JvB`unsc!#76O$r5yAfb zB_ae!9LNC?RTa!*j7`(**E)B4vEG)?Z_r;do6RHiC0^lcZ2#1t&7hh0dq)>_R|N(P>~*40e4NWHqcy7KY*@d+@1 z`;;>v8l$K(bCJq1G%+DU8Aq#?nXQQi2DVQX1b_+vPF`w(y~6;Inq{dm1YnF1O`u3+ z4jL5PYb`NGwQ6RQ6r^q{bv^E!92ruG!>}cy#lbRkakX2&edn#+I4oxK#o}O;;qKYJ zzMUN{54DOrA_*cRdak+Fhdu;g=FB0rVhZ3XMiDc!w(T$`6_cD*HE{rdu@*C94(7rk z0QgvTh{%Yw*4Xt7_(Ly${(QK+zdgSguG)Ebe0=!e{^e%38}n4^q}AKDO&oW_cHC`Y zpXx#!$K6y*N-YtkS(j2%@<_NJCBzs-WGbT>G_e7nrfI4wpa6`!T+|&AODW=7#wn$c zQlKURK(3|My4&qS2+QSi61m!4j9PD9zmd0tpucnfy{kMdXYD&$aH(7+V}k*iq*!~o-3lI#qH7czrDWxn=jU9`{C}YZMNe9E(dO2 zJ^I2{hvR0}K#9x68}GoM{;kcox*vM(xL%JFt806thUgMvlg5n&77RjhLU(p>0Vv@k zj?%x4J^dm4y{v|PUc0f;G8UxDJU1o`BjcQLnzoCstwn30)X$6QM#q%kY1Mv@4~0** z=RhDhIY^+kGHNwKB_Q|4lpJ}v$d<~u0@4BF<hAB}zJ0abP~hYaqM^UOfY{8!jzd|)Z9ViD#fMLN!exY{ zIK$X3uD-4HYPj|Ln4>#>(4Z^x!G@cEbC{bPY{$uy>^Ktz85%GF)D6{jb$FshFDW2jfp5p%x{9zbj z=;rPq(Gq04@{XAUa;f;KwySe>aUzN?# zFb?aha0u4VdD<)PArT`71)bDNtq}>$0c=lc+^5KqeN)RZK&Rz$*l4fL$Uv%@xlyz( zPjNS{y%eH3a!tVvtqVP7ra@IisY@b9V+Joa-xec1gqGuFkIqPPWC4%0Zz`wq}TwXBCG1m(h@?gG^-84=8(Sb<0xV&uJ zHl-BXMpTDsQ%dH5yWLL8(4@`~R8>_&in&hr?%jLl*_&p43L%B`khULqEEyte7G&Cu z!#Iw>M1Tj!M-LvHUT%hzqKSqSQkM`B)%L9tXJ==ZSL?p-uU)(5K0QltQAJ`EAAd&+zbFE_tVP8BzL^D&ZP1As@tB5$7 zDxj+aIwIDpxpQan!jayCU?WSNwTGb73yuEKSC_Ari*Jl~ z|H8dnfBxq24CZ8ZJaUmpJRJKL-N)vvatVYb?hs)W{( zfomCcN=Jkq2*5;K$surz#3GMM5B^?S=fn0!9@4Jb0(OKvc^jpoOXU$dnSsZq$)#ke+i_@8J6|k{>$F*SU5rT8^!D955{b#@?h#4JT*yOQPn#a_ z9zWNzPxz!Ii60r%1au>JIRAB(i|vh{LDLw_ocC=3ACAZ!fz%BF5%h;~(cXLY{0uxI8AX{Tuzoo6w2L*2%9c{Dq@yM5-}%h26=u@*TD<&Xs@ z6gP-thfSw_7s5BsZZEDMAE!f~%A`3r^5V14zI=1~we9+szWwzd`Ro@?ck5q%{aZi! 
zrLVmBr7wQ#H+}=N1O-=%F=9X?Sl0oWvefDZgcKNwVvH%oKJw}P2cRb4v)OFkxA*Sd z>$)!VH&0i$+wj-*`oA1)E8|%Ugt6b7jXgPVe5g;qKa2pgVNi-t&pWzW&3=mfusK|B zPCfMA9{)tMc_RTVwmQvn)U%s-B$7{`{k@+#{l)hdKla+}U%Phsf9cMC?%CTnF24LH z?%(_o-PBCl6~x$TGui}{6Q`h0`R@Nv@SuZQ0O~id7h0sU4o$n`6d#wV#Zx@R--D;+ zfv0$ir+A7_Irc|T`cU+aU;rrLK)}e{su4h6Z841Nt1O*jcUZVl^(6VQg~~^HHiNaN z<$5Py{H;^^9R2*Y*+${o#`6+m({!=tX)ITps}O0qTvn4ZW>s}3R981dOpyV@MVS^U zLBb8X2QF9;BeURa9s|I%t6F$5>oD7ZqqB098;RjKc4ZR;i4eJ<6pC!sl%r=h}Ad=in%km9nv_QKJv`&+)GB*QRqI#OF} zRsyf%)TDVtN*IG<({u;Rqp7SeFIO?NDrhDlQgmi!BC6Gp$<1SoV5F*2s;awt2w^XY zlXF(WK-@HenGvbfX`J#@r5RLE2Pr{HlhT}70F(EK0Xx6fsHZ`jQm>FXzh9W`$F~%5UAht5H7B@*DQV7#DAtD8jF&cQy zxrjbw!B3MCVBp!ovgy)h*j%kH0Tt1&9bZ=wL^PLS9E{!29c-Eg5ow!#IiCRTKoP$K zP%SE=P1CrOnV1U!O?9Fek?7v(X;Ha*|IR@_KRj5tyPBwK*~@Q zRW)h|$uBpTIX3S8+%vak&1~FkuPqKE2QYVJbpsBO*<7jCX__iP-?YqZW;y4*Miacx zVMp{3C=y3vtZHUf4X_A80;a$r1amY1G#^VYrR@C-0uuL0#6)2?Ooq@!tV3b&L|k2q zS9Mjh494cAOwiH@xk7Pw00J;Ejob(nb1WtAr=yE0grHUX*mof&mId~Qea{S2DH6-= zv)c_fP19X$E`^BO&~)u~*hOkjP8Rp>Jpk9|pMS1Q!|LL}v|C+{?;S30c@|m?U4m_~ zeleT1?a$!+r=a~?_tsy(k6%BGB>a`*`Go0a2Y>b4{^MVJ_lMizkNv?HzrI-qTE5yh zeHpVR18!j?w+z9k39JcrR+Ye+IWRghsFk5o`kh&ve}8YGeN^`0vi(pw^{BGARDP!0 z%(Hww&)11MWpOfFEE}5HJM3owDsd|TT^v2Lkjogcz#^>Gi?KN6CUYTr&*#r--UZ1u zq`&Kv`!$CE@k3OXeTa(=UMHUhgRmWHK*Sj%VlETtYTAn3Ka=f)i@x8?T7?|CqwTup zP-n+ro@;I=yOFNl(g-x2qpxe2y6za)=RwCA9J5BjyDvVU)+L`_*5y2=j_%)k?+b@F z{@|B?jU8dZJe#%3brLJq%n{k*% z0S~E(2wDY1nWL0^e0V^Bo7GxGo`3%Ni>u2A7Z-QmzP$PJbK(XH970xYAT>he32XBq z2M1?B15hO7D523ttv{}*I^27({M^fzf9ZZYQZvd|)AFZweo`Gl&Pk znz?ztRY44o?nwO{v5;lr)&{;;1Q!4w>^x-%;TjeU(9X~z?p zWOr5LvW9*vcrk97bTMx$?s~sIT>~yapOZo#rOYl()R1n9-ZG+pEj6gDr49J9=EVnb}5@&gGHX34b;zb&f&>A-aN%q z{DYuvI?1Ed+^BOKW{cK51p(o>w@crrP*ZBXqM5JHSKF%%C2L|D#z_S^(stN9b9fyY zifRa9{}Q=c6N9zwUXLZjh(x8HkrYKWq&mwsBwLE37)g`_en{Rd2Zn{@ zDGi(`h@Hd<5Wq2lG!FcbAdv-0mSstHBvB11u`N=?YHxq~-K=hQUo*zY!@6~=LRASY zid5C5_V48!Y^=TZT62AC&Toz}{{R2)`0V}a{`-#~9sOanuqU#{%$9RZr9T|Hj%J#6e2)EBq8(PMkVvJ~L;+PJ(_CZyxLCz)SDdqq`C6xrZ 
z2)E5#ff1oV&YC5b{WM%&Uo?xQs8;hj)M2yPC#+Yrt0iZGVtT3g~pd~ck2Vu^_ z7#x!M)a#!pvAp^4P2*i2CT}e>rxcZtND~i=91ADmU_;Z+V~m^4rfI74^Yff@KOPRW zZ>y$h8nC9Hra{W#audhotY0seP-s6Mo?l-~&mP^sb4p}WiO%@lVK0SIakX0Q_Sami zG3I2ou6#K3{o&AAYa>sn&590K__qDOu}|;s8N# z^kJ|KGkfm}voI5Z$kF=G7EYXVE)k1S$i-NL2y9Zx32;g&#&}#R&N+g_%#|h)DOpPq zYwb~4A|eimiBc(1it!dmSQQlx8haj#ssf6nc0#+ zfESMjSO4}qqnqTj*0tmA(!KZj_32psz|ZcdH>q(i-LId-nOF1>h|6cWCnP&5eQM`Q-S2Yo}%!p71ge{u}Mn zf9=iu_u_K<==!03{*~(0|MW$8E1kIciaA3Xm+egAP1YyCacBEYotR^900AhbjEn?c z(J&k}G5|1Fon#su73q37DfO7qaP}l8IB#w4?iPkF~!N)DvA^WmskOYp)>o((;iRbv-8>WG0Yf? z7Dg9TQ|x(m8$_gO&JKH@Dy~hgn$<;ie`)-Tn|($a8PiiYS{#Sh`@fBc5AL_NU;4|^ zf!HYNr{DSftJjx5{F(3n9k0Cc{?+rh-hIzGcX@sN=-v0f#?t4}wE=Pj6cV-~^)UkH zsP_diNX|NshKg8{#bS2Y?Tw>{4`06CZnj-t0DtszpP5cxf25xZT-w<>`CS|&Nc>*& zfH8W!88iTd!T#x;KXvEuv$E#E1o4o#!=ao%{Y&ZO`}_KJ&dG)_ zyNH|>ENZn)uw-}VRq$k)vW=&07efl!l(|!^vCl%6&Hf-Sg)U=GQ-Sq5ma(R;0(Aku^zHW!q3Q zcWz&@u&96`vX&4^DGZ8;zs0Eo-{QVR5E;U{e_RZ^(#%!N9JZ4skYo)RolNbEDV_`X zqWgsYJO8^k-0nSAQv&#M9wUbrew)lL%w!ZXb=5h ztgRc@)@_j-`@Wi0(=>_5G)<^3mF%2zF0e>S>Dcx&#*9->L?w%e5PdyamxX~<)f6=> zj*Rn848HgL$=DA$=jC!9$8kIM>)E2N`~b?vr&1~#h$v3k+f)jRNR|`P(Du1 zI)f_hY`)p;`eER}fxJ~PiU#O7Q9hr~oilyk$DH>2ebpEyo54iMF;5Id5CxbV3CJ)F z!!U^GY&H|%X`CMIURW~Sb~o$W$fcS!2{p&@*n5a2XWvs-=d-q?>>B5qz!XZ?|q7~l%j&eG2Ry`+X6y&N)|iUFU6>H8bO!A?hW*|NawasSVBf z`C05^-1W{PqLC#N!IUE_#?;3Wk!@YKN|-n?vv&=t3n7(sDO3fb%(LPI&`-3 zuBnb!s8q`3<@MG6I!T^Ne)Qynx~j$@-afm%YS%!JV;RR7WAwffW@BxP$r*d^-o5R1 zJ6|`K&z_GlVO{$wlwqPOGi%+3X^IJpa}skh2GRJ;r4%Njqkbi&WUckyBO(z_)1)GZ zR7!DVkTj0bC=wA_RJ3TE1uF^MWL}Z#cc;akGjgle*7>gFv{5n&RKAO+?$gBOEdjpDKRQJ~3tMT3K^q+pfpPrRB zR^2=P;%g)RVmG`yG_jpGQELTn{b5MPeE0T1-;*eQBiBJyWsJ!w8So5NkqHU`0E8H` z7V@nRbzRMKtcLJ-kV^u1c5zX!=2THC*?Uh)0C3de)OCI6J8O*%{>8<`m%seQH{Se= zV^B24m|+}m-@E(Z&I3*2sEw$kgQxi~mxr=`;LX8aUVq$&EQ(pCszvM3EO*=v0yYr2 z`L8jkp~Q?=H`F9^v%x^sEfqSry?B zYMOS^@6&81-4Lr;)65-@ncR7OzJ3ws(Vfmol7^ z8voOmZ|#4vyMKP~$zjv&IsmwP``(P)=byZ_OJlQ~&jIID(Jn!ZL9}XW^fs50xd1>N 
zDi2h~*et5;_0>@__~oyDC9{GxzN#X1+spTdm;P`&_lGQtyeR~e{hkYvMFkZ_1SM2M zU>*3Nff(_saxWbYSN4^wSO3J_U;IBkZXZ6r`@_x9ug&w8KX`KTZ>`V9X;&+?)>`TA!yPSV%cKIL1pRkxsQzQD}*{_~N$@_a>yL12L_`Dk5 znf%H9^p*+XAL(bX#LtB%QTdB-hOsBR3VGx*X{bR8Dv^R11OOvIfMSp(3I+sg0gw68 z1)z{6NeeA(G$T0%g)$Kl2e4t3OY4?xH$uCAx=az zaHh(ol;&!^P^jlm_m9TX-?@J_rqLR>eRjLs4T9lC>&^)2C0neCXpu7MrSlxO@$bN3_A}ZoE#y#`(%M%1jhBr^Y)XwB)U| zB+$%TL{w+;&>3&<++JQ^Uvl#6^RxZXzxV$8{Wug6?>#w)j8N3iX4_%d^}}L54|TYT zn~Sttb-Z$|1G_nNr?*ZdrIY3I#jx3@GR4wbna|hMRGT;<3ySg0tRS<&TsRgXH9|D) z53||afFZTsIU+HtDURYp7C})9RQo!XI2?LbK}c$Bae)g|f?!pwI;O`LA9Q`6xG=Mu zyIc@h=9L>V_i3CCgE7dgO*`KYBdd%iUvIC+aWIHHMDo_4vDSkb6)8utzCsZ#rM0cH zDi|4aFfJtlG6unP@`=+2aC`RB`T4V}r;iU8MriGux~c}p9E-QUGVPRl19WS7ip))8 z9N}WIK*YmAH@jUOnvCe3yWT&}D!%xt#y)i z8Bi>lB3LmonFyDgGJfUVcORbKT3gP(<*bysYU;~=za6@i zQWgB#){{iG!c+RpgO}QNCMjP}IkY}>BF6m3<@D{vo3~(ubh*#N{A1qEsZyU-FCCF~9&eyMu*@5-w`ks9aXVf3|-52;w55@iKr#WV78g&M$*I%tX@YG)R+8k~LXl-Nf9>qNYW-o@JCXCn z{Qj)Us89D-<8GYK=b|KHNJ*pax0_qHZuOjVH&m{MV0U@4)>^b+>bc<8W}ir5RSAu? 
zF1bdkrFr$B~SN1>-O)SJ^zQZ$0Nqm)#_6h@CAd1L-*!>@rSzkk4W4n zI$QJ?!@VD_r>hK$r$7AXo|UmKZe61IN_qI9;V=9u|NbZVhn}7;;QUXVz5Rcw<|~nf zIMcrPCAMimVLXhtp8peeJB$zHXw_7X;0cwyX@N?pEn zn#{rByc22b*{cuskDvM>im(6^eUHhA{)ybQ2X46Gh8u49b+N)BI$-9ab~IqBC6;y* z0Z0sKBd(WsbLdN@3DgVyu3pM&>O99_6*S39+p$Qys9G@iZD9fF0Bx5#5xTnJqGaiQ;|BfQ}PrE@;J|5~@Lz zaLB@H1Oj0+YDkT-YNm~q22ECqI+oU4*5Nu(n3Y@!gZiYlfFot*$T9%TzS~Reh8up< zNF};R@@vq zGOh_tN~vsf-FQPT=WHzYQw9)2wlJ$IIUMo`MS=}4((b%Qp@M_RgQV;hfFYQYlFs+8OyZa9b$=f+e9v|st;3qKJ0xJ4&wk* zv`Ef4MAJ5PBfg)uRTZWwS?e<^qKs)Ivgs(nFuseUtLk}GIp@$oKMa-kb*K)9L$D4J zHdohUnSeOdez!Y#Zx>B#tXI{T6R`lA%g4{Fx(%VUizPN;NYOh}N_xJzs%EQ)_e`kk zZa-mxI{2J(#!R4nNm973Dg?7w__Om{nd81cOk=dx)@@V!rklFNa=p9U_gyM6rIeY8 zj)C-u#>7&}#HuPqGf0!FX_}h03L&_vszP;${dT*}DbAbvbhXyP`+aw^Se>tL870bU zos$|PL@r#}YBig4PReQ(LkQk`1VaFNbGoVUDv)OFyheAYQ zBdDlOv0m9GTwgW~ad09o6$40Mk-XFMIS4(l;XqrKptbO56@+5UX>|C8n!7QF-5x_JJxWx#k zD`SkR<4mH;<)fGM-9Ep5ukg)N>K{#NE1`&f6iD#V;ZjusEQU$Ir<82A1eFP3=UqB^ zuzYqo)yM*6{+01UjL7PI9*l3>Qp$ep&(hX)=wM9^WxG*|;;E#Jd$AB$zHJ7u+lfV1UXaCJR(+6+Q&)N@O04lNoXFXo3 z02xW}+j{C0`S2q{QB>ia(-hMbDepJDcDQb8bhFE0$fdVFmbRi{@CmWa@p|@|C)uls z8Z3RCWBR(amWYfohAO@P#*_X1(9CSp&=>#e*vxA0CryToQPXuD4&9=vXth`^XKc^s z6y7|4@a*|jX1&~RD40^%TC0T-k%%f+dGDREsyZtnHwd+v`h0GdYwsTIo(&W^w68ad z4=N)l)u9v+1-9R&M)^fa?!s?nfByboe3EN zZn)uw8-86_jerQbOvX*dSoCP>I19g;|L7a|i}!}%(7_JFVFNFoyd!ViKe-1@Gb2Dw ztcFCepL!&yfRm_*6FsP{&`*AMg%{Ugl#aLIN0Um`V`jo{^<9;-$|S|B!Edh^|>GU%*B5HAAOtevj3Ny zk(#odZBp;3o42#1E<=2=Kyog}$N?gYh{}MfIVNYUIkw)I z5m5?TYrPF!*9BKqKIEK3sMRY8)y5KmC5kaxGPbg@WP?^jDj$f5nVs>W_99YJcNq6+ zoKBXj#e6XggE!Tf#{HqI>)KjdIS+=S@TNbA$o67z#@0R?OgV1$UDsdl_nmJ;0tRD=3@8@WR7x)F>pC!Rce}-YETt%mcg{M;Dn*Nc zblX80424umhGM-{qlc+;jg=%Jcpz0^LoQBZDFsxZWE5b7%3N}pv}}f5!!(=C`o1Ft zYn?Tg&~X9p{mivkXx7?3rbFM=j#xvC5deg_chepWV86`e=Oqybk_w=rWh8 zY7Q~>T<(n5|7d^ff8}pIYVOp2-^!TtZ3IE>f6dlH5d^?|Q~mv;QXK$z=+wAAXKTHK zSx!P#1D5J^un3q{rLd%8fUHqN!Jv!NV;woAf9L-8C-2A?p7(#_>G(Hy)HmyBLzAN~ zN!?5-%nV3MQZGug5Cu!tIcr5IAyFbxlnlbhmxDxtfO0cI3UKsjBK&5Hrw>0q1^|R( 
zc@$JpkN!3mEhAGYcIqbQP}rd3dil%o6Q8if%XKVc9fEVRhy1}V|JX7J;q&z3%kRH^ z>+YSba&mG~8~3$~7cXAC2u;}U_tx6g@-(=LEX3|bl;I!Ao6r5ruca&69e~WD)+itE z_JEQo1^eXZhwDc?|HN)_dO9A242yxxbga6m0sx)Ry)hOh0cnNv>)0PC>&+4ORMc6a8zlC7LS8kQb=gdcrjGi+e~aCh&I z$k0Q$^Qq+*uMZypO$3!>8OsipjQ$T?oVxV36x zj6|f&M1Ts%eq>$7f_56oiwO?I*!A+vHAIl(B$c-)Y>&S|iKPBci-OQNDz+A&SRfIMKz}ybARWDM(Hb- zxf>1T_xIsJdDQ&avqgPJM>|#%M6mhG_V<{0 z_0!>BP4fD~^v;**M|t+_0)Ai|uU^x?*RK0q)_r*CN*1eYYkdAS@9w6@dBZ;?wUukxa%h5oF?#o^@Zr)shj2e{D(L1{nYf* z3v<4$&u3k4ofx7`%aY!uuP*ekKP(YCpsGJjj>=IP=U(w z1e#c85HG}6-l<_ufYv!{a7b001IIP>nGJzzoOJ zkq^E*3Zu zCFRv@ISpgicOf`q%rIn7b#*hv9H)sy+xg5H14^z6h!|rkr5Ir<#Ug0o&3;#y!I`Jm z*X=B*YANNwg@}qsx9M`u?Q*%Q3S){*U0-hd?YRMxt>_%jJ?)fh7cQ0141I z;|S3ZaACX0@`K*q^c(vcAB124|nkCPwI?NhpLxwWso(Qbwf!h z6{BLLlpIq^CEs0~kZ4J4t8#`5LxH8hkc(WgrX?ZGzIJ0b{sGRtNh zuBXx0Rn<20<-$0Bv~3y3F<3YDlf~hS<+hq^rmm@)t`seMppWVl`+@lW-`nYM|K!d2_NzVA zs_Yyxa|o65rb2sqdh71${KdsJfpNy)IlaBx4s+K$+B{n?PncCw=EBoBh7iE0wRVa_ zKNMax*7A7`Ufq)(Omp`7OSdKBPmOh;y5v07wgIqG7zywRn237OWI%5#?|#z;>CA+3E%MB5Jf>00tQeq2CBJj zR<54KM_>Ndk7pr60FZ%Ml||H2vzU($et^5{-StzO_O&GfGzA=5$q^Aj*$^e*KJ`wq zFv7Pni_=A2#%#b814Y$FuBvJ@=Hpdx|9HLH8*aGah8w`T7^qPfMFuQe8QQI! 
zSMyD|^>g$mAJTt*U*0)8TkdMYtNG?CKC+#$_4>5l?MM-t&i;lC@8FFk;H3LsT>S8P$0RS=^L;?X+!)z=yI+!w| zGT1qyMel1eG3PS*T^dmMki6r|na>#)dpCJ=jvg>@6ys@+^JZ%9x*>WwDEdLG3Z`1Y z0XkfLx7XJVH~eN&7$^tVcpKKOJ3zzcSAz5 zL_&s?lnjun5`2^>N(2TKv@n4!rKn;Z8k2KMNeQjB!eESXQE3hVXy(08Sj?QX9K*1N;l6VVzV4MRe)&SlmY{iZlgDN(h$H9OmPheO}-u@U0^ zFlC4mfJA_~Fw0f98>XDbyw3+xm{oNhJmr{68DiXg@E-U|ov{LosyS_f7DiIx{A_o1 zdvQ9SFC`XbZcG4y(`xoirvi3NdUdrwS(#Z|r(v|-Z4U=y%!{kb&Gqi|^t2S&9`;1E zu2-d$?e%6>g{rEG0I47sGL%xfyu6N?UGUvB&d-)=>1w+P);f>YU`o?4jRHjWDv8?7 z*tu zgb0QRz>7#MxgW-17=sIlNEEu^peTHNJm5%W7`Cd4mD!I6H&Gxf%%Prjd0bX=0nL*4 z-C?ua-MMq;Fzm!=gi^Q^6d_5PCmwSw2Fw%_h{;nbr8sMa5Wqw&MHty&Dz0)(-NeIC zg8PMczk(o!l~PPonpKOoYRSv)YG;HLiH#Z4=qzo!!};7o9slB|E{DIfJAGkSFWTE-HkN<|8n)tA2=HySUd6V@pbpw z$;1Eft^C1QrD?1*BS0}TL{stPMRsOEA{aY2?!vSw5XCk~jgyhkAR>Vg{itD~gdZm_ z<9`N_f`A@Jk`Ymn0PrXW`DPd)6jDHe9AeCzvx-oV`l&I-Agz}t%h}u+I~=+}6ev`Z77i*0GGHd+0Fm9t zy`4j$&7=g&>Esh?eb`@3X|NilUSOPzW+W9o`g$Aw_IceRfFOuytfg`kCP4wL-7V{% zQ0lvW#m&9R@WOtvk**W01D?%iznosyyRR8?k=Yw>45pL>)H-Vbm{}2kFoaV(4l&hL zd$OC{m&*BP3BZ#|2IDNjFm^T6x6f}|tfaU*ckWD5zWV5c&wb`|-~XC>_WUtTd2w>0 zss<_N?5$IT{eJJgubO&4j;onN&?lFdg8t%qw>I{r*Y16x`H>g)bY(r}A(M{I&nKg3 zznFt#aDKytL3fh;g?T_?LGdh ztNi(w#{K*DHdubw{qPXZDf8s#6RPw0$^Pe`_oLqa)7oFnt)^-|!c@f}qQ|el>>JU* z4Zn#bGX(%*5#}N&N?bQ(xu!?o+4F?;f?bjE8jCRoOJZS|wTtc9nR8B`UA2m)#jZb) zl&P{+`qs~5w$T6@RL8=g3W6X=1UKeFH{5W;4LAJugUVn*3$hgSU}y8i6r&u{Zg@+i zq}ADb>Az+5=N9X~JzZ`}Zymbr)#O|WAsC}-N}dd->e8*R@BS;U4rOzJ+skU~cXf** zzvl9J+$F0j{1)#folOZvA(Eu*{MOATplW#PH}8)4;1btd&t9!|Z$AI)dGkxZB-uuN zJfyidtt~#T8oU$ccAa-L@_=J={i1@!j1E8YTJ@tZz2@h)u|NEw%P%&M{}#&l+WKc3 zyST8Q{K^}@?>fJ}Ph&{$8%eB2!FiRa+<0#iGi$O22yFrZh(s={Q4tCRxs^k)VQ-c_ z`dxTP!&Ie>wc?<__#(zk^E#y<)IqFV7c()bNxO#hB0?fWANx98tLEM8p{1H30=Vz? 
z61(As-!yF1k`_!Osj#)(<}$yynAd5aiulorA7L`)q%u#J%?VXlIAtyZ45$QTtu>Z1 znKY!zI{>Q;7EK7XwYH=Yj>>&uW>#VA$T{bnW9yD?)W?FmAwtpN(5+_c`EqGp==%`IU6z;DJAEu2AZnEB4eE9?QGG` zwuij{^uv^5L2ImcB0@yYIVHp*brVV))k0m(L1c5-3+Ir~Rl%ZXW=qbI8Hc^0wlcM# zY=%uvQ{6OUj{9LGFgAFn2!={2DJ2CoWD9B+hxcAQbprFIHlXX!sG+IE>$LY|Xw)jy z!Pg?-EXtH(;-1R5J#f~hT^7!967MZ*7S*mFUGQYxtnn8Y7pQb`xL(fYilUZEDIyX= zmIRHC#L*ZR+9sv4UM`&) z%%mP|RomeEI0(SJZDVGeQz<#7QB?`8F@{BIV{*>mz+0D6iZPZ_talIEbLCvvUR}If zNWt38bSPFQN%Q$U=iD6zR}2~pT;enxY^r=1cr3Yi>v$4YnNmEYL%}Rgr<@WOYYciK zv{#!A8tbe{Ig1D)I^(lsYn_Oo0qcE^V=7}?&w>qvbSP6%nfWRz$CB%k7H*BLJzVbQ zi#D?CrwI*a$(n=ufi~n%lTp)`<&Ss-S&s3Y0@FR_V8g{*Y)9Y zsO+D53I5>e`hW8C(}m8gA1}xD6ymqc_ccBo<3Lpvkv>WktW+#gyk)Af}P z`YAtk5FYoDk#4*ANxb^q>-?Do+F4yw07Z5yBUL-~De6x>orc-0Hh#un#Ad@rN1F>3 zds;5X0w^IOvI^1& zJU=<@cl+m0o{K=ab7#@a2+=xQe1#Ey&|iJYwYggH=0ayCo0-q~yM3tssVG8IMvwe_ z{S)SJCEZidY(mSSf*g?r2t?4xQ9bZ&vlaxw5IW~v@DvMkR%2NGsL{tuxOg|)Rek6D zY;|?gOhhG4jpxGl?DFgf{Bl)U?!bAnWb`PRa?TM^j8R0G8DazjAx9TJJ-4*K?sjhN zbWrre6zkv%Ck67}Z+Crzjl>+ey#3zWpM32TM&Z_c0q)^->J-T#rCfl8kCHIKV7#|F z^;esI;z+OEUEW(5)b{;x@zedx%v?dI!d3nN;=sR~DKL#@he&~k<0D_oBoTvb( zv5Xl6jRGh!Vyz)}9ng&GQ^OM`E{=GAorMkHF`hwHv#p@jf0eT-rpd#z)WV_78on&`5>o&cZ5cq_i`K6p4wjI5as#gXnXWb>tR<7*R z>Qi=kKF@EL^mbjgrE<}Ba(H|8^#6YS;E&f=FOF~UrbBSUZz4sKL350=0`kciL{*62 z?(t|yghh^%K4^%r$gn?@U-+ronvu*%u!@I=p}R zwQP(%{nRTyZ8Gq)zw_DQA6$Ow-};C1Pb6OZVGzxL-lJqI(^#?rb>z_@*?mKbhk0e#Bc*@(BYDFO-#ZNb*czA_RlCRw8cweG#V7-=&MF=cnWybu7VF!nGw+yp z$$Rg;7lVKb4Dg}98F^#LnUbLtA!7-xsxqi>K|}#PCKJ@5^4DKnx@zjNANp=w%xy`z?|TOud5cx&tD5L-YBZnU8<{aM%sT8f)WFrv2b+Um4fBQ2E(3=0o2(@T=A8 zAdtqqI;l&h`s7v@5BYF4#t{vGe(3TRg&p~lSwv7#K$)2oJQ@*EOPjHK+HXZm0Ub0J z=EKm>Bmhuk*>w9=GgFo8-LbduRwp@s%7n_Tx7tcO;`snW6`-W6i45>O)p-DLnW3_B! 
zvBsPBWEHI2_uc0G$C)`=)6Rye6@enKD5w(d`Yv&F-blhEg}f8d zI875zIVO<;&Wq@j6DlAm6)?uMZEIF22vKB+Q;e|=RpUZVX;V67LCva@mbzPGf^rw>d1^MN>6n zKQikyO~vGX8YWFzfwFXQBx9U6Q=eTGhzzHQXq++1VvxKgMaL@MS}Qe-iL5#bn9DLW z-89x{wXn4|=G6CxaV*m`&1W-_Vl9CIMaW##P*NsqbI!H%i`l&Ex>?&U+F7vSAN<_U ze(rOhbET}7bC0GBBU)p77}qa-=GKFMasG?{(e_Dk%>zF|I_d`D*YpUos;G+UF&C8K zLzA(~ls=iB{K(9{b2WU`-U+ki_vN?#+pmW2zugS6yE^pkV!d3i#t}GnB~POt8>XU& zl%15)&gQlIfqM8EiAJ4OYlO&TYs7fNu7z}66>$*MID-~}8Dzv%DX1s_!iQxwMMOaP z&{gN3dsI~r4Iqdrel%7BN=Ac%{#BC2Wxf^#l>Xs}jB zmIhj{EbIyE(_8(;8|&#yE-VctV*#9rCGHN}W_EgVYvF2tx?Fi6EP}>}qhZ@`_uINnxg2|INLi%{W;DP8 z00ejxb10$nuGkr3Zqt*Op8oA|_B&p@{KrNYR2*@xQyB!hQX>zxCP$ulV-q7%Bm0oX zQtGXqE&Q_+tiFn`bS9j}=}!Hktj+1w+rM=3N8cU4@AdLm=9{lnnV-!6I|uWH+v6A9 z?%nMh^*6uAZzv>gxZ$4;j~*1VkO*)oEL)yBxG?Rv|CM9oA61pbDv|^i{eL^`YUvEQ@}r>vkzXp8}X59jy3ZtC${M-I)QJ%h%* z`s6?V><9ngOXk(dW8VMF#4jB#4z4n-)@isZGqX6qLr{O|a0%nInw=R z!=ow4kG^*H=|#|P`=zer%~P;-Ft7;Ahj-@J(~N?#eD&hWv5auuYD}Mc@Bj7rSN;c6 zwZf#FCj+d)Nwh3mZ>_ODThE997Jzz$hT8M--3a?@a~kD@v)oD_ys`fW4?|al-LJ%# z|319Z2iF+N2@9y1oS#Zj+jWv;zsOJ2=FRzix*C_BaCY{*`kfHix~eG$;+y@z4Zp1j z&Z@}{0kNG$(!cEY$1es;#T%7cX2D#wqQ) zgAHC({j3rxW6H=TSl@NsbUh(z)mGimd+Puw=R8eQ(=^`uQc7(cB4)@2Y2O`o;~^m% zZ*>$z?E8McoE;k*v!)*VE~_MEAhf}vC@3%H&Dnf;nDO@VstNOx_1UZA>q& zt`G5$Rh;+JG=XSks+gt}z*!nfA^;2(s^rY7cx%24-iO*>9dinjM0HTX_u&iu~f4;GjIE$i)k@$31DW!%!Dpx1yTe+jnj14_lU;( z&@5Nh7yw8q34(JW74ExEYoApP!=6!hhplfz8+>d1?e)npjngzaZ(S1vbkLj}c)JY(;y6vyw%<%SWn?bEAmp7T0Dx-N#u&M9-;V%bgULBJRTamf5De4E%!{hA z-dM06tuyw?#q;HI?)oGmSUYq!Ig|_`gmE1ALtn2~E_fpHt~!jvyl!0ZhIPN+t6)jF zlLIO_7k>Q5fA9I7TREoP#nmu&XX}&av|Pw`HD5W>KmEfG|Ki{1p3YuUtpHwp%k#!C zMgTxWYb`3OYDzih{HNs$Klwv1ulXV`ugG*`LA@o;c7OegU-9Uy*oSCqR}DkT3WC1x zo9y?1xE9BlsH!knb9=^*!9Z@6J@SYysYQ;g2Qp_)nY3g~QQ0vOQ8*gA6Dbk^0SJEB zypS7np>KgG5CDK416=`CzhQ!phzei;lR=9_EZK?!4O|eJTOa%6eY?XSejxwVdAYbY zSTE$F-`_dE9W5xSuj{pUL!1KH6b~nhWvJ&0IHovFBO-QPcNn)THzxauwv4@*twMwQ zePiJ_;z0ksMC}Be3HhXhL&Mi`nt?k+@<;@VRvFN+>e0#xl>xu?mQs`zm>fB0@&tt? 
z7fGBP=(0U;&R?6L1G}%cPklG~c3r5eYIC7?vP9#n++}c(3jqQVG4oODa~!%)Db=+U zbX%x{*+sNs0)5QL14RNGX)2%t}?yS zEoPfE~{yD^oSJeUt1eP?QU>ae{#WmmZ@kjBBK_jS~vwG*q1Y;*tK-3dz z;oNO2PB`?#6s=AsbDab%3|)`JhRAhOcK%yF|F9Xj>+;26?o1N#3<4yUpnROJ05|1; z8*aGawulT7UBCfA%#C>GeNp-L(_Gu56tT3iXFRLLbh&004pl z$Vbv~1OWN^fEXzN3W)$JC;}NqQXunfgoNDF&_Ki!95bzTGcQK6U+9*PB@( zzP}Rt`2QPebf@Kk|tm`Nd!Tg|wXCKAoSQ-`yNi|LAM3UC<=sX?SQ`%lf5* z{%ECnzx%02&vxSlLn(c@HKTj$*))vBl+(I?(QjgHcV*`*7h>Ax@@)UKe*SFt>L-fC z+ZWb2z77tXH+XeEq$0R?ITc#jD$mC&KZ%SN1MC~LFl)wo(S-aghc8Z_c(Z%5cB>D@ zb!V_CX;S36YCL&W zws1A2@#5;m&{xxx-F_TJFXF+L^2XhlpItoOl~NEaC>pXX1xnX-z6!V2i_K<}O15=i zft^9le@*6X26 z8ssn>F0Wp!R;yG}5oT3DcNCVuaj=|$vzQRj5->z5b#OMgoHJQ_kZHQ;nyRT0`f;qC z6V+iF!=jZOhoR%>ZdYfwni&>ILp+%AVt2{RCfV6+aXD@d>2Uw-?(^wtxj1|N{P}+D zPfk`*x$8UO0&Orezw5eg+$(8HlW3`JH4a0ubk@ud+g+G74C2UiI>kBpGG#H&mU*)3 z<~>Kk+5_z;m>43Iq|vc+#pMH^`@2cE(^Sy<5L!+=j$L1-Y(y&~hzf>5G7A9}082zj zF~+8u9Yq6W%!VirsRJd3)#B7Dauy=HFS&AfJ?#%ANi6erHujSr5K7J~J(RNBY?rfk zJzq4w!k9)bnNp5%5-6o?ueYes4bPV+YX+VAu7aSdo2M_1ej27}4qR5I)T>3fb-GJw z?dyN{cQ^m;&%X1{{NWoA$vIAF2$M;P@sR)(`s4V{VqU*r@>4$f<=#F3_XFn1AA8xi zlRo~^FKn9T)mL9_nx-km7?V=^9reA#;Q#=xts^m29usIO%XpA%Qtbo9N#_+Q8CG1D z6f9__nAxI5bdgnjIRWX9qYzdRz=8n*AUSSm1;1%mgNS%@QT{px5EK*?5QIK75Hbw8 zo*@^<$rJ`*L?c81IDj-_qhx0d6j1^K)Dn$Qy*QaRo9AuwL+PTeAMWDdSI)B8UcY!c zo3~Zl8dn>tku52NMl1~y`&hULk=ahugfY~Mn(ogi^-0f4h1FfNd0uLdNzo94s;W7T z*aIM`d`wgzF`OvvUi$o>d+$g8$397P8P%-24R>1r0281qp0x#EhJ~wR~n)SRI+x@$odo=SmPR?IioNf;CW+TzSa}ql2w=bS1 z9joQ(%&xDmF9+oBf9>I!&UU)*bqr-*D;v$f|Ngq0z40GhzwtGG>D-(9<-*rq%)Bai zm?TvO$+29Rew>whFu&d-d_+sM&H%7ckjN!6EoQh}%6`M8_s=^6WW;82Y1YcDO&>(} z`s?Fej*HB=poE3p_} z2yN5lQed$>M^lWy*G-oXx_@+j{rOz2XE7(g*x(V-c%&3J2ARJu5iAN-=98Y^A4%0I_JbdVHPb02}McaLt!7N5~AGnNPZ(IsVW|OffmV>$ZeVyQ^RI5d<&3Kqjis;ELz41lJgOoJtZ!VEBZL_`B%!~j5c z5J74sEy{?-dMW~0jd!VghPkY(5U0U83jifxE>ooz{xQYO_pH&@&GzDk8*cdcLx>;A z73u4z+M7%8KPya`->Rg`yKO|7&!5@<`Uzj!)vS4zyPX6Zn?{(b%-(ZjipuN|myALhOtsJK6?J7di#sl9`@W_G z6S+6;-)mmH_4%i7-JYFfvBNOUmNOT8UAZ`pjj3Zv%v=(OCY-L%EJ5WR6t;df_Fddh 
z)$-w98_oxY$>tZ!w|^F4z#)%uV;!c&XP?gRp32ikxI>eZ)a8jkTq3p};iYwbHjz_4 z!>13rFDVYUa3@bM4;9ih&X!$Cx37k~2d(?`z&Uqo{;MzhujGlp6#vM;%S6)e_l<_C zxkpe40OUA_dX$ILH|g?kxZ&djQ;Jwc6QV+~;;l1onZ4bceRZ*eb`~!$&1munk;0YL zL%3a4Uj^Cetd3ZFn5=~t!$^e@(0hM$@+m22EkICFaKRe@Ak18lg{&iMODTwmh$^B8 z#-K4qRI8?rIprMD5D^(;j4=#441+KS=PemjGRB}mLaPcX6;)i!W_4BNoIw;3xnyw8 zco#JEeP4#DYHD>f4P8M*mbPw6DF(?or>aALAewvcE8!Sp=8~q^kHb|t40))kYCn`T zF{mSJRmv-8x2#A&TsL!HRZlmUSviiWZmUwn`wA6@Zj58fIZNRpguecj4`96XXa91zHAV2y8FQ^CT=&KNzH7UG18CcW>v%kKTX$Xmzsc$Kmqwa^B9)&&~y;A3G%_ z0uf0W$RKxz*0@-58hc}mI@kC8UUNWsIbV1z(p9t;>uPBbH{C4-0nLPR-bRb^&l3<1WJE9Z@Kj@5lAvdO{>1`JY^ zVx4Q6hM99rz=FoMi`i_w++XywSyNTjZol7bx8!^^pQp^m8D5iQ9(q5fX-MIO#nG(m@*kFQ6A^j=JMHR&UPZsz;Y@10P<9Sq_18vj)!h1 zSS{K`JLh2_066yA0Z~NA5C9Y;5J6M|IhJwRgote=^(&WuAIkobSuKqOh<(a!TfcMh z{9c} z7w1iI-c%M1Ldxo~Ry3^TJ2xCx#iEc2&?D)9PTR zVNYP{^?i%JQ_>*?7-oHvk+0W!UH^FPK6PLJip|4`d45$^3+@l6KZw3sUi?+@Zz4at zz5B18-TuigHP)ZL{>%TjNB&cXwuvyltUGVvIK8j2cG%R)6$+ zu{QuW?SUI^xZ$@0)~JYx0)YAGN+AmLJ$mN-ZN?&PCx>$vdbi*%pU0m%8Q-(cp4E3; zD>|fZLw-?LDBUo+`bQ;LSS z`;5ZVnACf-a-6A-b~+v;0ucfIaH~WB2yi3Ax#44o5mZ$$#UXMcP_&KAgQhq8az(3_ zXCVHG~n^HdB^1+8G1rSS4 zTnv~GZQuFBRKv`v6am6iA~PE?$Mk}J8cQiaV6Amk@Wz6u2u$NBBK7*Ta1oZo1-~9V zd-OxkAaNREiYcXj8q`us5}e~wN-1sKOv5k|Da({%$vKAvhH}m^M&}$^qpE7Lt!8!G zfUra_38$QBG01W7`KV$=KXs48o^~`zU!Jn`dpa~0=PCb>Rgt7?8j|?SS%La8slAX zu1)c}??sERJqvhihj9=E=fWg#=;PUZJ#S}c%e6`>Q=H9acWV@p&Cm;y6iz8IXbp8$ zuZCfWW3tZGZDoup%(-OX;>cJs^Z8tR-RydD)&!q(uIsw4Y5e!$&2rtxQ4owVsU%i5WDKDSFWPyD={jFqK#Q)F0!pP6 z0j`=_M7nWM7A7u;jGxUG zub>)B#VtM+NoZi4lO5OXhTlyT+TU zstRH%5uqUJ$YYscWoH&i)iSU_L?Q*SNJB~h#JPCuoO7w{lQJ^^i}j|Il4C5Th!^iF z$%SfLQxgwko+8J>n2LxRbitwp=dHyi=L|M@)%`4`}cqP?XMKyLHlweFAb0X?KkA* zrls-le)6$kx;XGSzI5xfLCt8M%A#%Q{QTTG$2kK4GY`Xnh%VH}Z{;yJ+Zf}$H^y{$ zig~KhihzmmfDGr1@#@cf`urbi|6XTq*}A3fkoOUcGY;UUZ1fxF!^nZ7v;q+TFf*#) z*FA*+mqJnCsx41<{cKv@vGdvNV2(3PB=V68_V~g-E}uHB zUVEW5eB#gM#jEp+FF`I;=RZAZ*AO@AK|zcKJ?NzFBlEnKT_{ z?E-YHT-CWZO>1psTv?2ehuB52HA+$O##w7w1>i#!v8@_q(K2$$fS!n`p2_m=b6Gd` 
zcdgA2Npb_>TQHV)ezC5mRapFla|-z|?U$#k>a-b#$WitB!nf9Vx9u+DsH?O4aO$q` zHAFBQlL838AT3kVVI9u6(CR$5s`eHVk3))?l(z|AyOJsO+`jP0BBSyYcfv&=35r=4dR#*IJAx=M*wgZ zr1025FiSbVQ?6ccyf(&IrU|Vlfsd|J->ag5ZyfFK_6xY-h8u49rv8Bu#L;aBQ3nI7U>2N)@)5&bDGfCN=gGw8QX>=Sf| z=u{D-IM-tuyX(dE&)8a{ot3bpLszS!I>Trf&zFmfgu;3@U(M_4>hi_))uyT{3+}=4 z><~v6%!|v5pLtQ9U(C+VvzgKEY8wYF13bL5ynlOrd%YevW!r5({mPr~T|W8ZyI&jQ zP>@%v)voJ%CCKl-d){Bo*7-Snpi%rgR4>5JE{{_;C2`Tmbf>URC% z1)7S>>go&g?&t3F2UPDrt>@?`MILu(l1jzmtG;MdPQ`L+W%3AR-sWHqr5mEiY&P0D zW0+YogA!v=Jbqv3qgz)+l$)Z(4IfWP$cv&fcw3CM49r->dUsM9jFrRN0j2COpFX)9 za)JS?>wWuDcvul*-cAIG#S^$=L>5{$Plr^L+8IaIZTi&@JIC8ku^5;+?XF_xTjZe}g8W#*Eyh#0`gDd(K0 zm@AE3B4zZXsxeMe8W&+P#v#VBNER?-imI^yex>``{nPW+d=VveX=t@pOK8o0?6-&L z9nPGKEXhS1W8e3z*^;+pfXN`4DoklSOnaI9{B)h691aI(D{Bm}y5KZHnTnE-AeRCj z05GMLrl_hz9FB$Zx~_pRGsj`H)~1vs7ge2N1Vm;Aw5_YHwIpD?MdK79DmRuw(`ez5 zGqY5l5QxA?VekDg3^DU^wOTEgDd%{7?V7r6TUA{ymm!3vX#k*Y=ka9jp1mK_BD%%Q z?7J-c%Uzo<|LpJdVOEbx&JnBI?gcEax=R=3)w}1rF(yU3geu^;*|-o$5|NcdK|BWYqnVR+6;mt<FAu|4w=Z4W(dCMCKMybck=Q&?|ICyPp@7be5p2L}4 zeQ3W0ADi#w4L96y!?#1f&hae%;nnx%8vM_Sj&g-%WbgIhmTq(Phr{ka_VH^Sx82iq zsn;j>x;hdLjyp_y?%S3dT@pOd&*Sy2Cnx`nDKu5?;Oin~LHgzn0sjA8Jqh5$5CRK3 z01^a70>F=MvW$Lf+d($b64)swQVms?VcQ+no9zg3v?gi>C?0LkIHXvS%$#eTtCx#) zJu5MF`<`<~mIt?k9TY`JfUjJH7j&my#<#xqD?jkL)8!eByRpVrRnfp^yWeg4({}EV z?w#L0q@kBMN$S(o=V|R1wvL~8?e?Q*hZoz+`t(ffKBmE8y|S1mKTL2?7*JgN!oB{N z?fWn5y_Y-x;6lP(?B8e}ZOS`OF1N^)cSFbG{hCYMXdi;F4kZ~nJ3m{`x7+^hXPbku z^(mFnY%bmB)at=~zh)ykcFxWy7g{JKL96(^xLLX3hVK-n3OPHO0@i3NvQC*DN3-z3 zpA_yYY^kJT=TBqqy^Y}BJv3w%59gH|zdBBrLCn%ueJK+sRTU}D8{`iY5dpw^@4ZJv zIl37TRaJ$ES`Yb)o`KnwuA!8 zDU~s)YSjc|j7OKakawhlh~P|Su6#9)qcH|m4~K)b77^>V5l}%$Ij53~u&S0)K$M8K zyX%Artnq}PA&aJ*gxq`k4S=lIYY7%2b17Xv7J)Etm6`Sb$KIdD+O}rrdDv*aW;UzY z?dfh;x2jmIVo$1}8q1PQLQ>)&5*){I0?0rR13@|<35?{IlOQmX{7PU1i4g}#fWVd^ zDUo7ZiXthZM2U$+$|OaySdFT9>$a!e&FW_JHDiqYSmzdtR9!|_Aysvy{owpqXP>?I znQN^%=lZ_!j`6-wa&DW3JOO}nWTH7GVz7pA5x2LSuReYI)b*$BYTONndE&Zw`|*3V z)(7YJQFP~Rci49!uAA;`z1-byZg;ndX_|)f^Yca5BO(wIv!v_{+kT0N4ls?=e!m~b 
zF|;u;+%_UFcjhO+Ns7`6jd0wyA zG4|zIDZ*Hatk3#3 z#HWv+R5S|Syf>>=#YBn7L{rWt;=LEq7^7?Xn63#3E_iSZMEhY75zmZ>6-3d5b#(Ie zr|%XO=Um_SjSqd_nkpAJjt2&4+YTK&;yIU^^9VBJao(ieJUZ`3na4CP*NY*Ko&}^7 zn~SJ|1V`(pR|NzkPh{wv55WP6Wx;|{>MV09sYZu}s!C?4C}Pot5MVAdIu_GmnvQ~* ziIAF!FcLbhs@{7dV8#$SL@cEkA(7kfM{KMir>5S!wrhuZVu!U>@5ofiv54)5!!V|% z=@2o5aD9Dy7>A~5Zf=#6gDNzS2wFy{B}P+d-fcsJOo%R*5!6@=uX-*T~U>B{mLoMN8e)wfZ%)xp_Fns z901@rzHr>?mr{=U0Z|<)`|1H)kh1Q(uS|Tn_sUz_nb9u2d4Zuo_G-jel9}Uo`j+|6 z6a+>HW@-k23cw6#gjE4vCRYKx^sXcTM5)o~R5SZ7ojiCduix|UpTv_o)nb)fZ^LNl ze&~(Qe)jcG5fA;5Xr@AuE&TfP29 zLl>hO5zGkEv_L)-{bXqk)L1sUdm} z>zsDstRLQr^+nJ7(CIMd2aUdK7kGNU_KW@g`6#*O23_b|H=+{J@uCxz<6{62k%-12 znD<3cRn>yvDM)$9E6Hb{K1bEKVQrg=MK7SqyrI{*C}`32s8eQk}O-rD9blFKMh)_>*w!)@K( zsOuq|&VNzftM9-1ugiMZ?MHYSM*JHT0{U=t;3x(F02m@7U=KhazR859WPo6X9}!Iq zcPgaCrVMj}T8Mx#p!HE2Tt2$!h&@-WsVKW5Mzx+SwN_m*fAD;K zcY1ww@1fs+`6jR4d3XHur(?VDna9dCb>O;(FQkdU^{j^qczB^W}h7!TGM!%jQ>OxopSJ?N#5-4-|q$b?)F6 zC)1q$3fZ)9D9(GU1`23^W~z5G+Z~@!lo_-F6cZG3>Vh$2L9^PP^!1Qq@&$;_E~b&e zH-347-TJLf+O+5QI%r~SWuJz*S+)RVWECX@hMc8pt@WiaPz7HC-z*WDnV2G&8XU(0 zh^W>I03v2)#Kgcyi*_?}E|BAzOD;u37!X5j+P2mzB1E79YJx_=MagA192V=9_g+N6 zvGb0I5)>wfqNee=<4D~=I)825QnWt>Yfgud;>H0LZLRP|)B zeEa#c5W?yD#4oyvNPRPu{3~DkN=AJD$!^^)7nf&gnxYTa<9LDd$TCUewHoz2vN5ons z#yFR3W+E!4-n*1)nv#fYZ?;o#!!)|EHZ-dx##n0&F7$n$QUU=NotYh`eXZ3YF;Z0# z5e~j-TQdYhG&3^iT?j28mQoHnE2s+&JdR~bX>x#Q(1(?gwcdN@$9c9|Jv;9s5pDLn zl%_#T2w}HnGYhNUo3-8%K;vEOT%qiZP3tQ5B0@y$*`&_7sA}iihnM%S_O~Ju+a`qQ z*>!4a*z}83i%6}d%yo{gb-_n$cjMTyZ$flPq~HKmjMTV^$NYv69J2?2V;Q}VO&go0 zX|z^Ga&5FUm6Y2>w_2~~aoX;mquJ@{xrlieTHohf(Nn)#j^lXP?(W~ezgn!H69=!Scbad#A=?i`B||KaW!>C9~_g&N(+t(@`lr3Ij)Tp<{ry0YRO*SlA{N$fJ2W zUmr@d4C!z+)nqQV&M&1BNr|o4UD@cj4Kt$;8;1Z@?dbMzs9+|)X(SRq+&GY6=W#a< zid66YAfzwBFxO(PZ}#JGO7UO&!+(rNN!8l*$shcIADz)$8LF1X#Wcf57Qxq)#qLNK&QXcH~pI2R?9() zqO$RHv!NzDJbU$o^zEnHtMCIi?M3y$2P%7rtB`69zCrFcwOHfXWh7`rBywrk$bREd zAb1k|*z$l%1o8^Ucw!^ttdR2Ls;{^xAcVz%I0zM+0$60ESuuJS9asfaRH!9;a?=j1 z)b6zO%c*;?_~1(?zxwareBn>;*MHC+owB~;W!2^>%zdTK2U4*aY6GYQ#%%41;@aI@ 
zwV@AO-*wB^$6xxr*Z*DzkE3-@;sb3z|NGwlcmMYNf9v%6Z+=#P`to!wIi%d-L1!`&EI- z5kfv}0e}d=oT~7*YxxHNHk~ywu|uATC>;jAv9-?sInPnlTw~N$(w8JPaePj!NomWwF)SU)p8p5hdF}{ zZV?~<+OIsldcmFF4Lh+K0J_*H&MtK8$YmTHGj+>NlCF963~sayhI4k3UFe}%dETdh z?L3NQB{0D97esI}!f+eJ`Jy zxBuW9`#jxD``^R$PsUe2o3mSXb9E~ruVi3?FK0H@J|UCucii!t@S=rKfvKv~K>>^$ zDq7JIE09y7!bpqfin z_3XU&U{+N{#4*22L{v32WKt0^6;tONp_-ug(adToARto6v1e9S6~I7M%*+|8SS=+J z5dk<3&bUrx$m4z^ZX8D=3Qa&FmEwIUTJoF}AvDo> zY6K13Zo8?aymxz@ri`k2oGw;p+uiQPix+L*Eo%F4aoL#PY<9Q%%|1_=3Y(&L<2ZJq zA)+DA4TRRm%ab!fBBJGT2>>^nTl`Qh-qYBsd$_oFb@c*@z{Tm;pFYk|&ll&*G3puQ5 zh>FP3AP7imQK_aNXh`HjZ~^_v>8g}cL>6u5kmi&^^qQ*HS_Q;ctBQtes+AiLnandNC_W<$#*7ZuOgcI{yr_q%=D^rU8@%dVfKm}hK4E_Im3s=ywR zMXexkTy%!I-|y#AmR*0C4nd6o=8~mm*L6x(pek7F7Ly!`**DOJ@Uz{vLG@S3R`t|)s=iFpkg&=jD^JHej{nsvN7r>lLolrSD zRV@wPxeNW1^D=MX<$BQI*x`$6pm0)mesN>YLbf?PmLex(=4>ua|D~@JF?|SUb)(Lfc4iSrDoxue?+jBBh9G z)JDpcwj)}_hD-&`f7}(cbHoB9RkY6Jw26M%6l=oc4FCxen7LxasD{Z|R6qk`FW3N@ zs)#B>QfgzEl@`^!wlbZ=GKK^tD?RK7HrZ zkB94fbvGhz```VU=YQY%GF<0ZufB}0p6|k0*k2LB;CV60=(cO{AMXYF4smSqI4X`r z|*rm zRnsrV-zIs$l+iP)PS-C0AhJVZ6{C-k_s}Sn+;QUwr)jWZh`ud#(lj~OK;c6!?5eQ2 zCR!Hz^cnI+5He+T-hh+dT1b^q6{!axhhesJkXJBn{QyBJ4 zCvI`^{=+}*4zF#)PbNI1?az1Ry}&oHKYRW@Q9Yb=XY*o8Zaww*=!5^$X8Q;BpZb&Z z?dC2&jBtBtif@DlfXM!Xzg8*KxUS)}hEoYk@GBs9vVZz*k2BDe&>;BY`Z8eJtPllwCRGj#To?XbknCv@WnG<9?#a^Wegew{gT2?xMhFIqXaz2|iB@!e8u;a)E9(U2-?Ms^Ztu7UG6+-EGDmA*u#iK7%9^1C_gixj$7}sk_P~6%3!$~s^f8>7u{Ay&z zLQNf|b52!{;{pKma!LTsE_p;IP|l$1-(`#jz4bM8jc>8VQ`3m68Y$5mHLXgtZ!B zPL-LDjz*1d8XwwK=-b6G3}Z>T)LIfrL}$S{COUP!0&IukB00u!nr0A=&MVtARxLnH zmTXe`zRjtI5aucE$J{ZYVjo)yPI7+z{Ndv#PlPQ=Em^$b<>KVw#e?hJ=Db-AH`_y* zCQ|J^7W3qw@^Rxm=X5w6+;Wu!ZmzEzbWp@>?3YW3eKxt>*UF{J^h$5A)*&A_E`39Fu2UKjam9>|hiRN8?WW=O_SPYEu@A&2 z{c_exR5$zGIF4p$2qFT6`~848HcbEkcI<-_sqA>komW2WHb96~L&L6VLkI*&O`CHr zDHD+%Tg%M^tk$AcymJoe<)}R2@odRCGaw=k!!V3vyXZ^FCCv&3h~qpkvvXlr<^?f; zdDCizf{(~U%ENAEhZNd5=NZHY?%UQmcfGmxqRI_9u2O3)MYCcsraG0Ba^+I7c<)d8 zuBYBv$T_dq>yBia1~N7QEkMj_g`HEYQmYR^QEIKgbb5M{4x@-@sU~U&Ldpo_oR|?E 
z9h|N4L0s|9c|#)J&XXdT2@!N{8-bo(Z$zYDv`c4 zHnvzm_t3uD=oo4dB=E%{kg7ARcOSFAt(e(2C%Op06pxK2-?Accd6TecotyS$Hn!DzXo0rB1_?@$%`^;>!P>QJ znzk=qzyG!GbGzNm72UcYylP)_j+^SJGgd#Iw6c_FDFC&Sha7@W+0Qw&PMyk- zoR9i3;~BCQ#|{G_5E{6B;^7uYDV?xE5D=r8@Hn2xh7&~apq@udS_W^yOXm%1%Hz@( zcbS_N$l+5z^>4M)FFpUUe+S3+{Pfhpp1ac|d4v1?G7HRDCdba9y0EO>!W022rcLsx zN$X8`QR&e&{Bfs08^s4s<1Rn+zx=fQd~`3|;t#y@$`9_l#^^Qa&R!h{sDo$m5?13c ziGJyP|HW|s4_-g|6JdTb?yf)|c^VEag-?jH7JN&i_p$lIdv9i9mNNje82W%s)jV(E zd5u{p1}!z`TuoC-q!!qP5X3%VJ~Q7X3>`xir{DldO~;hR>zVhzP2NJl?6Opc&^iPF zHO->Vc`NB#zwMiq{erlNJy#uT3dZ=>yIHLFe&!b1HMI+Z3Uycan;ByQZf$~pL>Tz4 z%_zQ;2kyAzj!!&>ziuPMfOmE%|CDitgLeQK0KAZ~A>1}2w2v+}^nzX-q@;_?l zo6FtLow=VqcTbS$=5XNK0G{`;cchowpKgEk>(5Vqd_Ma^d+!0w6Yh=rwU#SYCf7Qh z9m%b0cJ5>`Z=3i3+AB}~;`0Odbgj?p8mZbwJ#k~vuq!`22jZ_n?_GdZ~)2j6e&z|1%|OCcHQR1G(J1DMybpp zDO)`Zc^-D>ZTIGReCyp`8Km~LFZNDXW6?_W9W(@`S|p({q(9Q<>@eX zpQx~r-* zH{DxG)d?QYg`-kfc79!tI*_L+f0+Xf$rUcG-l zO~Z?;{eJdyPNi5Nb_rb5-OhWBNW=Vq+Ihb_Y1?EXYASWvb!YujhC$7)haDT7@}jqx zJAXCpYprMnn8tZZnw9W&Jov?030)fn44uEZx#>?|roR*_crs9Bhgd zz-m!ib^UbM*EIC&#no^#j^p|I%;JZxB?zEasvJi?W=g{}N<;u~K2DkqHRaq=d?VL@)%6UcMvtm_G_%G2FbGB|G+gW!y$ zic=nPm>d=c*S1SZW+tok2_5E|vu^-EfuK~|OxyM8su3qc(wwSfW8R5+@4fdWXEUP; zRH1EK0EjM%79V&R_AM^7%)ar0>O&x+Aq~ZOf6|ERi+R_#(R_Guy9pRSb#{sD$6Vfc z61vtFK`qrz%=J{$&!Qhm^=>S2GjKH0-I24sU; z^uXIQ6?Q{R6H>^GRB9=;1JBGbNj6~vXb=i|=Z<g8SuuL}OHL%e8esm#|mn~P@s`+xLD7g(-fEjDg$ zri*)Sr+exzmUyzBw=$Oq{nuf+8-5@R^JiA?-Cs=qrS!K$`QcwZ{Lxc47ZJ0`9IaBy z+>dT*?33}mC{=Ayq}Dqew>IawdPX_N#zKXiPvz-J(omL`YSf za~?e+a1-75H6K1#Uwh^9=l(;rZ(w5WUAMBvhUfOHvHt?`w%re3@oyZ6p6B_|Psv>)+hiT?ZqG~CXrWr?w0HZ<@OnM< z4?U%I@t1J;BCLMNef|KxKtaFo>i4aDWX#x9scq3j$XF0vL(Zvl0AM(q^-RIf??D5j zU?&{hsnpTjaikp-fDu8B6%fGKj=p4s?l|{AIIaFkz+aryCSw0~9mI(oHm32j%(6=Z8=Kx4-A{e|%G)e9FH1c>L5C zKl`tC>c0BmUxJ}M*LUU-{dQJQw+&oQtJJ{PAN}NKfAQz;{oMa}|L9-2eeEY2av`PM zokZLgk>)#hKBECwt7ONzzY@9v00#8!3kb+Tv4CdJE)iq5oW~tlisYdx18W9=7(AF6 zd#YBM(X?t72d1D!3%QRSRc&UD7yv{>0gXu+#ms48_EG_nK~1#=$Dm?V0q7`H0{|l( 
zC)><>#$y&PqM|oMQw1R*pcWxgnIpL&fn)43Cv+q@d#CK$B4%~+)u6WZ+rMc7C?d>^ zXsCuCzOeXlvq?UJm`@bQ-t0zm=$c-rJ|0jXBiS!>q>P*bbHk(6P-d~o5rR533a z?-E_T$74zm8^o+t3BWO_0T~b?!*U#-a1$>+iwa@9X%Djw2fyy!3?c;^Uqwunl{`2U zEB?E&V0E{*zvGTO?zrQ3gh$UF6UO4uI~)B{!<*ao(dP7DJbVAIet!Cy`dXTA8ymd5 zSL-5gd~5jrayz7(=bPOhSX|t<1⁢L|>lI;e3Y=YRLv+l;-Wo; z_Ap@a`hnY6++E#!`{$mwZ^&nU-{#Z*a`cV6xhb7*@jUGJ;y}B)skXQpK7&91sCj!X z+0#WeRG?hTIF7?~KwyV;w&cC9r5u~hN+|;7W5}8-xSVsI#t@t*dUE~l)y=h0``R1* zYSCJ>a@M;n;@ArbrfJleDC95K}~&rWx2f0IkS|fr$-7rKGgz`drJH zlkXZyv#9ufQM4Fwa0~`@oY&NL-TK+~w$ACaUn04~Ziq2HI(t|}hBPC)ah{s4Z)415 zCd9)ybx3Q6w_Dh^?eG1e_Jgne?SJ@1ql-U2SzNYtZg{UJq`Vp)ha{d+oFM=bq9Fj_ zai>QNa_8Q4$0v_2?Vb6oDxhY?048<^&*a#VvSn`~pvKMQ&vQ#p4)3jqaVShN*A#=# zX+X<}4Ir4B3LIO8O(mP4^I+zYLffK2%2iB>nTlGq5FHwrY2#z>qjPT5!OA?$GYB*> z2I9uC2g(iSDuiyXWj7qM)XluFB!Zxz&bi=ysTI^Xcyesjrg0YHRo4T+VH%TEW2~UW zj+jh9Osm5|@>kydhHDxZo1F4%%QNRr>QIdEX16I&kzB6h)924L2>^^K%^*nGF`ivq z06y@J5#{{k^u_IF+8>&>_l<93yF6K!VQLsBDY?|X z>ldy+lyr}~!{+vbw3D`yL1>%4U(VxUPKj10=l$xv-SzX$jg9FC?%(UB{MrZah7gFr zdv0SmS*^!;>bq`p*qdTkwB&KBx$Qe2X>-`ELWf2AlSQ-a_v3y)>=aCibga&~A*&0E zigG*b&B1!vAM&iFE*2+k6Cd4wAkKD@ymNiqI;69Hd2@49r7YH~^?FG}!!*qEoEVsy zfpaNU3IGt1fms8JhALHS)oNy<1c=P6s!|m{G)6fl-yUm5$G&1T08ujp08g%3K4v1S z8Ud<6H6x;Fnrh0->|^Yh87X^zu{gULc7{g8X_~vXU7eha^OS2~2J}sI>H|9QgwSLaQ#286CX8+L{I*q@!Y zdAE9nZtk!7AbzEX%{Fd`+9zX5g;fq)QVfTY>`l>Mz=P-T1zd+w1z4Tz+N3x6gj~y^9~2{nAgjn5uw7j*f(xId~>-2xiBL zLPIjpYAR$(hy)79a(eiCuirj- z`{Ylg-B*h1IwM));_%B;{6cjya2w$>>YClYU5tPC{+Ivl@dy9=-fe6aiAxLEPy6wc z;VlHmQ`#7n+0E5o?X*nlKeG1CfNExD%w+s>(&gm<)5pOGs|u(gA~ORMI3~wwpAD3W z5x@xe7<$YWrOCB8=L~VqNeO+^IJPPiD>{H|Hjxg@4$KqEb5fFL~j!-rOc?O0GfM#9WI_Jj2U`#Grni4oN zqLecL60xc&pfXlfC$@@5@*s?>^1S*og$V`8U-s-T3&=D&=_pe*KynFt6o z!p-jHu+!zL^~k}k=4nQ2blWx)Zwn%o0mTGMb9T5>LA1nSUkaMmx@i%21bT*p+s_l|hl&&(lDlUZiw z5GeHRbQnme=~}X>ntD_y2nD5@@voaF9Mz7a^+zrR0LXxdj*X3u1A05;y#N@YL#L(Y zTmuD7_3Yv_Ha^WI4I`rqKBDrfJ0X=oJR4lxzjw9UT;E=`i$10F;Ow%b41^Sv7}D^a<**;KpZVsS$}d-t=Wt^vgJC0R4?^!!J!=4e`P 
z<_gh|C&2rp^VRdnuMYQeskhQyrZNTRW9uYgNm;dOselT|j9kQ24B45YDxhP7M8sxT zITEs(O<4^SrYq0s(ekv3>D~RwRvsMW?5(pOE%z^*_Pnp;4>wcEgKHr~N1k*h@=VSU zL{wFQkcgPTq-xEiK!|G9Bm-3fZvY5JW=2SW$J@(P>dS&JfW9;pL=|Sg!79SDV)l&OqUVfSb#JB&$SAX%}my5Hn{jUG*;qv>u z;@R`BzkE9o6hMy;2$he`Kff76ASfA{7V^w~PwmpGH4naa{$G6al|SZ>Dn$t3;xRu7s=4Smp-QmkT-S?r{_=|4uySIK-?>(vae}8!T)oeI{5%PDHIQAR$ ztlx;JLk6Hy4XQdVswVy;zyF(#b7qDLrV2+VnU862K#rwUBXCF6tyB}S%4`5qnU2HU z256!ch$cjFt}s_L>)X?dbKiD1H#gnQJfV(&4sof7h6N!31gmX`Aue>-L@+iRrbI4; zq6`otd*)CqRh!8%d8hf?Ki?|sNkz2^IY#7{%bbsCuJxTl#WMNs{K0Vj!ZEq?2RB#O zOV;VhW8vuAvu;s%xwQLl%8Ms_^FqBM0Gb&A5uqaxRn;W5&I9jnIsnlbO)9X93t?Bv z3^}n9vms&yLuQcg#@FXg9=PL}u z+SvEKO1H1V7w2@lRi7&lv+n8Nc+lw8=l=&+uR^ogT+yW4bt=P~^%vjlenATC zM@hVl%+s6#b2ZJiq%wO7F~sAbV03NQFAj$*)#|+u(Pf$Ip`7$x0Gki{uwL{l7tA$C zAH4IyDxBOqef6zxyi-EzA1&f4nu&8b409>T%+j_Xk$@Gcc*vQvw2@`SVplxHt)p1io~SL;;l@M8Dy z^#`w9-ruF&?YJd^mv+@$PSCZk#I8+c{pqvjU&wFk)i2WajpYZwwv89#*}Zrc;NoOt zuyGqER6rrCgaE*VC^d+10pg4GomB#NL}m#J1MoOnIalWQP&a_D>We^I!4Dp zP_?!ii&R4(;;NEsF*6hGx@OUJyWPQnN0P%d#6{zqI8U=k)to&zG(yEZmn0Pc7*PqQ zRH}-YR@EXEnIspf;@b`l=RC0wMp)mug8c1!YavHT`ONGS>RR?e%t=PJA0Y zr<4{=lq_ef^}1iRN@t6c=j9p*rZkQ7!N-7zb1FJ#5xE_=Q<~U29|ItyTCCXmWZAYY zGcUV7s5g~4jUIh8@{ZJWp67tvug}MMPSbSENj~=e?{+&RilL>IyV70OtOXdzk!K_k zVc47rwd z(=$f}nTOc~0BssZ0z|Y_OT!&Hl(~pibL70A%IuWPY#c{s?)&~|2+qu?ddyTXGv|(0 zAP4~D6o62b*{gBG{(OCEqSxbXMceHT9(lFs&o3X`zkHBWX`R~+ht05meD!=Y9@gu1 zL_S}yB$v*&o*fxD=Rv^{^-UWYP-^rc5TYkL~_w8h?vLeD9;!& zG8vGVRu!%l0KE4h1OOz}&)xy_SnY(oescI)x2H#R@> z!Savzx{)eKfI3$J_X&w64(2@!ijDC_^}*~#qeGB>4$6Q6$bdv}lsvzg4$XM%lt=i` zjm=s0Vq6r+GLbP66H;YSAtR&UlTq6RUI&`14D6G^v+ZF~YwNbkzF$7HX(aEQ_oP-c z0stR_rwtTA!4T094UCQ@c!OEBYn!Sz=IjlYCLuqw&9ANci$oCvJ++fB&EIo-`h}Bw zZ`%DgnlwdklTBNO&@R^u2%KH0b2bDq1w%9YP#f@Q3aTcR%_##36fgiIBLpA>Ks5mT z(7T31U}ho$(c>-gD5F9y0HntoJtG>SDgzOiq6w%XAp)2%S|AiP)GSbl7rqpRk{7Sk zuDSnn|6c#Ke>eB9J^h}4^KkY~&``dDY-ZyyTZMi8-&zDckcAefRf15bl95ez`~(T!5f zOn_#lqV~;JfRA~cbIy!NOa=(Yd4#Ge>X=m(0aPmhfCEt@grG)dWX5JGrR|fa#J-eL 
zk&s*jiUwpQgP?G)oH10t@T>c&N*zYiO6oi}MkSXDllh_|Hl4H>W3#RGBh+m|Qz@#z z%m}QiL}dDjnI32=?4LY~=ci4GM5LZ9z+A2G)>A1w-q!u|ED!kAH*@K5TU&I92w-M% ze6q-dgnhxJHNXFEXoGN_wMym1r|u2LAhSKYHAz-#G%zs0?^1dAP9C`9jyvwS<9CEu z9CASuAT8zuok16Dk;?9D{H1Bk&*=-D{Mt$Mr;Ai5%_TtQ9yWEo+>f{M>7RS$`v2MD zNL!XR)b65NB*u0Ya(dI;__yNcZl^`+*6)Pxo14!s>=#$*KU>RT%!FrWa)r(NzpxJf z$9Et8>02D=owrVPyJ%i~ul>TZ(v1YrWrJ6)I7JTtwPph$$BG~ZCIV({6C9Fr?sT=T z1|r44LWs52)uP92wba0jDPP}Q(GXF)ElxN2_T;kb&z%EpsGsWysK-K;R;x$r;;NyI z!#v4YPnzJpA2vIxda_uaoj>3VbwAsbqo=s&Qk|B4r&ZKwNE4(=jjL7m)7ZssJ5Pk{ z+in`BVYlb1jUq#cF*0D+wnH6=D1@d~Rl`7LAf)}1cW>Xm{}^02J-Jw|+i-aQl@I=^ z^q-sjA3BIq^udw4SjY{Ch*lvrB4RXSQ@X2h-0_LT*n=p5uo;-5Rs)GTd&v!mgILKc zA(;XwqG)KR9MkYZsZr-trV8W&2Iq1$F;xX-V01`?ibNdHGqR$gDjBE&8qQUeSfnts z_r98n8U!!Z%+NUpjJ4{VGpITa^E|h)Lp22zFdtD7Wq=R!44D}bkF)4fE0__ADo7T|<=9_HW&l7&h-gNr2&TslHHRc%h=@j7ty-j!{dU1VqvRXeMcf)39s*Lo? z{RgYXvJrGyQZxWqc71T3nI#v;eAsW>^_l=oYRyopH~(Q1M5Ph@*i?6POtgYpu>X?m8(&YZa9u0;RCjM6&Gc|MGV^#Y;E6 z&rlq#vwo)vV*h+WMgVXuvjTv9GnoLteYa*oj3}9cD|0*yY1QJ%diR;%^%b0d;OT#~ zD9@helV5mpT`MR`8I|Nj29&F1Fq$M0Sp>MQ4G zXH9%jZn_BfAOH2M`>)RK4X?>lwwlbh-|V~pE(h}=yA~u#wI64ka;N9 zBaRQ?sat&Jx=3uuA(d&)Ih%+(_V6Mq@k;@+Z6qHJ#CV=(?iWKUd7hSyJJH?x`n`M8 zS6*FR`>c@j#QurAVGqBLFJFh{s)g;9b{FYpF+D@k?9LN)4#|6;XCOpmcE{9#BMJrr z21jX-nGh6#jTq$U$q4`qCTe=jrDx{>Kvk<$t8$c8N-54zL=_Z{V`Xsc7Le=-%|MLR zj)6vkP@N02Rtw@rm25?Ff1wZ;@BXK+zW5KGzVgSOecyj~JpZxN{af8;vn?t2fj`2n zT|p3j^S})dROlG<40=)rcJ2n7P}5ms-@vEcezmy$E9yVnEHBEX-Z>ub_4>8# z;l*O}|Jb>4h|MX+9PdxpPwVn~w_o_L(E;mttL0Ek#^$U`(|7Q!wqpV`f`I+jB~e5? 
z1ZL-&TnS(Yxq<{};UnY$B8P}-W^l9;*_ z{|^HwFdqj{9Uw8)QmEE0gkj1hIC9?dj8?NZh{0Qn_g@{lQ@rSd)BW3TIR@8tQ!UOT zDEqi7U6>=m>t~K^{>nS12k{9`Oarv43lNlBdPF;1+e|}u~ z_b>l*r_-Y!e)4~{9X35f*W_tF`TAdv`>(#U`q!r2_O*BTegiPVn-DH|$iWf83A1@`;l(b;8t(JZFv z#pbFt-$w^?HE_i296Euv=}|F@KHF|1vehz{d7e|d=ughi<~fHho}4eQhui(Mzdmd# zgY%)NwXtan&qf##{rGr)^>{D0L;A5FcI~;8A1&9NqnZ%&zkhon>Vtr-o9EjhfITDCj&K>&<4~dmzkG z*}yr6-crf)JX;kIV6ncxXqr$BlG;#HHCV=Gu2M>Bn$X7Jn0?nsE~I=o@m4529r(c5JMZTxQYzei(*fu~=lBnixAz*@|T& zhosmyEeZ4#a<+B9xI8`W+fLPdhKtL~XNS!&PpjBgQYUiKHJWQrzK;zGh{EPDfX58= zX7j?e@zL1{IqrO{wazIK9h)%?2to*+-FO%dhrxUAn|O0`^WgMRo>NJ)I17&ZuGeaU za64=O*@ZCAIlZ_R)Z6{mHxUh|c?vNG$HO>gsUniOh=?Nwatc;UMgwLxIT~viI)Y=1 ztE!oW5Xi`?sTDKyJ|2Z*F*Gr#)qzuxS{2a1P#F}#3;;lnw#EvkO3Y?h)k>`ZK#q}6 zz_xjE;FH-jj2(u1=NC=CFo0-IRnu&$6_^mwJ4Xtnwi^!296}J#=tDp^&-2;IsfbL6 zA*YNDoObIxW~;y}mum{nw{5FvV@_g1-kGSFk#jDDTx+fsh{Oy;(7+KBAQIGC0RRz? zZp>s1Owbf6wom#w3gg&=7S^ z)W!n<8O+n%cb%A_K@;LU&kru|jfY_x4-To68r!zytg57?DoLq)>~0oCRYMRF5K#dv z#hEfYGxO1tLjV&g0)UQ>eo#p4j+ZCVC{;v`&C8kWjNt$M0>3z4e0o*mm@IbtvX2zL z)3?BXE;u@Sz=zdN{P2q()y-Nh1gh-V=Vv`%?4LXt=JV4xU!hzJ+;1Ok$9KE3 z`QF3NeR=&5NF5%dXg?vvGk_YRDY=rJjO3#5O1PO8MBa!U=9g7xRYg)!GiEr(rkK$& z2^LYnBqz#%)zBwwVj(aER7CcG%;Zo_(u@@LWt#mW%H&R0D|__fs;sx~ec$6hJIcQ_ zp8a@|K}|)(%-B0q5mh8JAwzWJe255aW{#PlWGk!wWHtmahv8y<`Y7H%bahx7$b<+loyRJeSq3w( zszQMLGWr4m?bt6=TH{UJC9J#)#)A|1_WOifJ{0ii1C(fa8( zj}8C;2^vxZu2q~%aE=y^_O|}i@Uy4S-VNKo+GzZRAOD}MR-f9B^+z86-(~i@#-BIt zYG->{EZA>{LzLQ`of(es0(~xIc{HJ2O7>a)#ntEZ z_qOt2gO{zpuFEXOJ-c}roALP%`2YBq+Mn1w#^qTNcVb}zo`9b=j@e)C^2TaW%ib|F zrzxu-6zUeSZ(0y_NXX6tZuhljShsE9P>dY#{>4_3c5TlbZ|BX7vuo_(!;|R4c$l20 z;QO%_kz*Or^3lXVm6^>+7hDA_^L&_7V2^~)(+&^86w5T9p04|Txm+w)O;^S_5sDNA zaX=tgs_yGBO*gMMXUH^+!^2l!0p||WIA6V3o-QXCuE(40FdpV?faJB50;n}HuFv-S z7mbtmZWN;@PuFF9lFNF@uLj`xOXKw~Jz4y&@xkw2@V(Ko_iaE2u2@N6m8v>afKSL& z{2h0E3#iA4ZL9=BfQACB;Mue>Fq07qRDmj>)!3lBIE3$A=VuEVfjLmtqGnEV(NeY4 zYGh`$C_pKta@STBtwyS5mQpS$BcgTeA_Mtcvx$NjJH|=~AS$I4GYjnCXi~4>z1Lca z%|z7HH$J$q2#xn~%t@#!!c=pX8riV{88NXO_n;y~&P*kjL6QQv;MLR-_RD_NELtCH 
zU^N{N6Z8#qEfvwWTyF;ibp(z8th6pnIajHn4Xf2kQ!OcJ5&$};xf)rA@sP8m+BKn# zP0M5iWfnu2r^&NhE|;}d24v@|%Cz52BXwu@i<)B!!7XBYu{zo8hPCVZ)Aeex49!cE zq7WC`aoFE%+YnDyizmCAo87j{xdG>mziON^aw$r%8|7()m|B*D#E3+N$DrWhA(LEDdq-uvA=PNg=!fms|vx9&sN zMt0u&Jk2>JFe|A7LL1sCWde2_oO1-ARHcg43g(W!!-k}Qs8*|}YNdLt5WW0IgIe`C z5hA7vcC0d2Qv_rHK^4?v2QC@|0HP5A0KD|~XD3p$Ru0}dK1_p0wRu{HzFT&s$aH-p zwW11w)hbGwq)=gpl#+KY`hW%^a=Kbi z<5-HEoSdl$1BMWCE;$!4Whn-()(U1IN&to^`Z6B^R8!8WmMY4@$2PhUoO6aiM;CKM zQx(u+8T2KFa7-LMgJmiDfB5R07NL~eHgrxp!|!CP>_3Nmv;2nuFSWIgARq%mlLj$0 zBA8zwx;Av7Uw^IplWE?6=IP&*n;iZVZBCF|d`0Z> z1>DN+>hgTa%P#hD75x5PJiU1RVg1Hc`L*}kPjNmtp&I+GUTTAd-c+D1Ucnh2QPY3Kmk%QC#z za>~%rlsiShQVU&ff3?---sul~@X|}jh=_?4O;wN(6_{DUjzXP^fU1c2h$n5Btu4e> z!k(7zQG3mgLQ@9p-aY-%x%onSv5b+ia85Ig&8(U*hgbsuyC4*&TW!jeqlO8N2!EhE@@n@i}jNW?O)f^&mP`;eD>bYjBosIsqgEvzjLYw zv_}l*o|@IL6ITH?xw}8)ThrP9>i=2)_)lnB)9u@}>wq16=gii=HA(w81cDze*QtO< zk1ixpRgK6=wDwxgvMo0ahR>B-E;^?DuMy6XAmX!_vvzjL$q8+v_J ze&*tXpKZ=wNo`r%d(k+j4|WBY{$v$t<=wPC|CQbMy_J4Eo;ke!dWW_0!s%F>)A!-W z4?8+_x1;~QAb&sXS8Sv9(T8QaIsoi`_xjJ=p3)?-qYXD_)p8x-Ic{(g_6<1<&Nn`w z!8pty;4QK@UM>QAXpu0U#y-`9kT}4uTikB9P%Q6Dq(&2Nw!0MPum}$>&x6P7-Aq0j zYRwtRmt2XM2umqaDFH{<(m9)|c|RP+ z)uMm(@?n;Iv)ik}1d01*u~=+A7=dXx>@}CA3;Sm;w(gLMpbt)PO2sKQO+%nHa;h7I zuFmKemccxAEvM}G{o6Fx!<*+PVJX)y{-*3csQ%x;wRKA@0ZhkjIpZwM{`Q4y_AqZaZ!~ zIc7g=MYYH=0JRi{G?pv?SXIo>lSjem!#vLw&3OlibIz(234Pma$3sM?4oj7mK)`CP z!?=0v^5X1rHKy_EFrtDLnM!VhM@G*skCQMg+9sG!CSbK+bW(+h+O|DR2d{2O=wFgAbNhfZh(Wi z=)*YA<1l(dEpyW}=-r&tS-bEt+z$Jx7Gs+ODrde6+v-ub8v0H|6@#UEq&m}Gw0GS!8DcU^GFRgD81?=eZ50SYEChB}}&V5*{S1rXD zY8CM+C6jSO&O{Hpoz~hn5v7W%n3OulCV-jq)cH1YG&RlQ(dSYEMkb2X zD4L;lU6*q{#tf)xDTSFGQTZ?$fzf?a9zbLh0D+^|6A_Km#LVH|YwFz1VOMKSnxF2k z#p$N(x~4l_tmaVIU#^ z115WEFoOS|7w~s);f;I#p28D#%aW}P_{s2+`NV?;24;YEw6{H{ zK?W$-embS3^LlBT-+Ap%37r05{HaGP{+V~j|LFRA-uACA&(7lfTx(Hx%4(WN<>khv zH`nuz^!ML?`?;*+JBN6=y_xT~(Rm1~^xl_F>(BqgPyc|QrV}#$*jD7*cix-YJ;T5GNKrOi4Ju^}>|A+rzc!V-K^L6B;tLakQim?WqO0@jd!SOXS%r00uc6!h`HDIB%PBv#wv6{AW6GH}$$iTx{N- 
z_0N3I^eoo2tG%V&)!eivL+T>s=cC^5Mt^U0I&W@qet!D+SH{)nJr4rC@OQX2ivQT- zJ4}?Xcd$!X0iz%5REV3{nSA@GUPCyJ!D*@UJgW#Il6N21Ow6-8P8<>uFezBA)v7cn z=aH$*6EK^KDtI5NnG!3KB2;AZ&My~5>a+d6{b0ur+a@f7?`?+68JgBF`o_&29U9mX zsUpt`-jj(QnAbk~>5zs^Rh1 zaK)iRVKxQ9LSD6PT!sAq|H1#Zb0-h{_Fx}s7wwES0|o_^LYzDG;Ax%%Zg{%;nmg_F)`7@W$qxL?yFdjBo` zU8Z5xoDGnPp!nq`uE|c%7rQWzhw0?id7pJOxX>+d+#cN2XusTIDqXB}n>L+JX@<$A z*z^ixj~trb1+QcFif(SQWbUB{%W1liOd+^oJ`ZtWVPMx|>j#W89EKTVlo+3GuG+rg zIxo*$;w{n z&zkLv>w*o{*?}X++yH`75v(QnKGf{d4wHZ9gZ=4}PX*!dcKF(#8-DP=e$87C&0>8o z39nWcCGa?0H~Wp604U*0J8UpA1930o>4?beew{2& znnxXcbnP;vdPo?p+G*^1Zb}wVG;>gVbehssYE3DbB9SBH%k^oUN@Enz3rjBb!NsL# zcm3ijdA^llO-|LxSVxfwsv2zO`8>8Ket~Uxf4F|{>RCj-*RS{EwpvAn8VuDZNk!Dh zAf}tc;A>{UfDSXZZ6BHz)TUuFGi*Z^ne*(uN5(KZC{Rji+jdT+&LUoX>x;p-yM;my zLM}Fqsj9S#{@_bV!-z6lMa%4XF4?0+hGSZQnW;c7834SGr6xlSA$Z4Hs(0v}OG%_w zj1d8}N>x))5e0M}5q&&X0f?E|0Vs&(Q305MRFwdaE%`~439721AR8hg7~(O?0$5E| zxBJ^tQb6CgD=8*L(fj+$)d0gh=cVg2gDAg$z4=soI?uIw?37~bB=A(n1uYzhA|^6- zO$TZs!Uiq+RPsR%z$BUkOO;s>0I|%HlWsSAskLoebUcqUIc~bPTl6-S&peQaVJt=?Q;4b!N#w%p$woJAW;>YL@W?UnA?HxboZ*X+`6 zwOrOY_5FeXUGNPeiZqTDWV^ksec!j;Vzr$1hbDMof*IQ2z4z$CY>4E^h|Q^Lt_D;K zSO_76a12;Bvw5Clj3!d%9AgB4i6ebD&llE+iRW_*LLw@ETe*gET z=g;etC*i#0C*=5(l2lz{>r!&;%~P%ikyY?5>AR=T-+KJ|AMI;0-x(xz(aPSz1?u7D zPvUQQibj}BhyfH?48$^3jaK#}R!bzOz1!OAU45-vcK!6T2H9N6MBW#!VwuTb$9fIj zn~}=aqb3D426miFE{0x&MUBugJEc-eY8OFyaYG!}Jph=ueN=!GX0A%+X}@^Yc9pQS zG>P-`=&U4R7GNH6_PHN{rYxzPJ*hRtH_SLgZ38k~7|kh=ICJ#W##@YiLJqer~=epX6N ze{Z=iI&T{FC9q>CBJLv;3nw8TARoX(1|W>l;G?tw0B8Yip67W606Jz?g{7Jj5qx;$-z<|_O%{4YbTXq^Q8FY?DCPhZP+rFN8kr#& zBI$=Ol>Mf$kKdN_#4w1<0KgW&0B{BaZvhZS5cCl09x8xiNLzY;yZN3UprM95HeEAF zW!GSx35uXYfMfz5gD3z>wa9IK{N?_}Cn{M=KstC~;8I7V@5WO6cjGaA5;@*Oztcx? 
z#~pXv@rmI$pi6cP@B#q6fa~ke{XUR`&C2z`F#pi&!{3?lwI?&Qt4o+4Z~S<=;O|Yp z_p zL9m*rf-0oBs2CB6i0}Jx94WXqHrwl)_x4YQVayVn(~B7WnCEG;9S^&1xo{p->*d9{ zk>y&3c~aHN?2tfhC=z`Tg8ek+oPhw8WKLo&p(A8+1fb|5F--dr1Ef-xXR8mMKQS}+ z;_2fj?~BOg!_zLbAH4r$*&SYc{dMP@l=|qwqafWTKfm6+ST@UxyE^amA^auxS zmI1qPzu*E*8?6Hsg)mbAOkSiCJ@ie<+W~M9?T0$fw zRLPP*?1RQ*vLFB$fe|nP0GR=psA5q>Q&qK@n_wTRVa&d5TQjSrl$@FQ*iub^FT1c6 zdKVm0MJuJKnnUo2#)yuQ{=e+~S+8wbnjMCH&1}|Md+*cTHrni~A+yM0u}F%POiC~X zT5f2QpgZWnpdlKtp8Oa5Vgr7&A8f!+1}p)TVFR{ISfWXZtRhQni>#hinU#?l8O`la zxA$u1oL}RIb?(hzDWi%J%&dqkpMiTX&N=t&z1LoQt=Wukd}FwQhcz7$y^Co#FcF!E zmbSXv7r-17QFOv_91}+XUTQhId;DVmWS-}4yRjzCU2CgTJ>DFts*Ab1x;HBZ3g*FM zq72131X7%*qN>ZXNNbE(R1q=fOc+E&)!ki0#x|EyJmPk<2{~O%hhqT9$=? zym|_;%yUX9<}4y*UL4%;My?)0AR-1lJ~|T6sgCnJccF_Zr<|p_S?xAaA;gFwaBJ;` zoi8P&6k}3Vj*N(ADq`*qFD<7C*#Op3F~^ispS!p!>)nAF5m8M5Rn-we-4xUT z926ZK&=6K{0cIu#GgH;p8iq(jh>ok%5TL4-QV?+}l?jKGLkMN5%yS4i=5VMba_o*e zqR?6?0wGXDn#qzl0vKUYv9VH)h1$BkPa)6Ca{t~v1fCEt4(IC1z^cU!9WC}<>bq%w zxVX4*$CQ%Z91@t>rr)l5TOx`vs##Mpv)!=mHu>V>B7`ut`Tp^}-RaS8cMJ-xwdWV- zMnEZ!`)dyA_51gV$|B7ZZ;p>d3=ypeNQ23IN=UtaxLn@=2RFqAP$Ipohr)Fo-iC z4_#0tGXfw7Uza$54j73q4u^}jY3xcADto*+omQZ*}aqn?Kfm{X+i47&kkw z@(GXUvjRIf(%P>E$N?>fV8?G9y0?$@LK8-qN4nF=!PpG0KTh6plg+-;whrJ8TXSB5 zrq)W6i^qO;&Tn7B-O>wiK7-LjhIUide z#j6Y-$uwfXmYC+k)rO*NfSItHuXulj^OzABcs7vTZuj~FdiNb8a`#uM80!;KA~bM? 
zTFmsZ;HEOjk0FY7w0I6z7fCK@F2-i#W z<-W+>!K{Ea^9@8|x~%i8SDnH!rwXu`*!9$|3poWQjA5uD?P^1vF2`Ds$TZsZ#~;lc zy2yy8b1M{yCr871n+6dubHcTfgoxl^4%2vrObG)SkwF8jA!!VqmjAkh8WD+OQ8g7J zGYpQN1B#-%;`bXlUJNU2H%pP7e(UL% z|C(R^WPa!8Q41!YTjzOq7U#G1=k{{?lzuwmMR3aA+yp2I_9gW#(R=A*2lY=Z}(I!%zOnuV8=i^x{dM`l;dT4_-e!xvt5`Sj<$#(21Fmop2HD zx&#WIVx-;GxTmoH%Io)^?*H!lA0F}7Ue~EvEEQP}K&K}q?oxK%99tRNbPcTp@Pu4s zAtG=x1V^@;wtn|+004Aj2Qpuyn8{q?&6@A!-jHx4I&4(i&9+DyqIbD2%{rL9@3FH;Huz*Zq1yxea)uCi`l zza`e-V1$gs#9%(;A#&nAm|5z(m4gT&SZ$F*a7-K+srqVL;H(a=#sMR`gPW~f7(2%p zBeSX+qXJQcwO}#yJvLA`HFbxoJ}r8dwj6U0L`Y?sp&=vnec$&zG)<7rylFKucSk{P z;beQ984qQ=I*ipcq{tYUoE@BR_T0J#J zro-Gw$b8Z2hN>Dc5RsebuG6BR)>_-|_j%ZW0XmYCh!QXc25ExEiMv1;mz>aT9#!EoX9!nF0UEaF$Qgbhzd!lgh;(Q!O0O<3urNFV{7*ty`ZCZ<3XtL7lttkL7Gk}K> z9z1wp;Nv)6U0s=3*LBRi-ENsVP#h0)mj@Hw4x5Mf9%k-$!zLo^m&3)lpQSXB)aL-c zTpdO!FPx6L!Nsk~-R&`_fJk(+h}!5Fi9<(hwca8uVycMAQ>Bgs9d?KLVtT1l~SakAFv!qB~+&UprV(xZc#EG^$3L~5n&I%flm}o7B17lUA zKw@rH3~r?HL_~J!(xksLSp2?Sx*obP;CKl_B>iLzq)XOIW5+x22LL<0GJ~v zD69!6$YSJnxe0ln!c32@@BHi$ZQ@k-Lb!n}S0<|ug@OMP;-l!s&SKfS` zL;zr|oda3|s($dRcMc~%@hkWLm-oK&H-0?+gLmqkllm*;=D+dc?B71S`m+Q7KL`GM z+rwLrmaEr~jwtUJnV9e3rUp`qbV?i0NROUB{Eh$hqv4A^Y)SM2uD7)Lp-OSIjPCj^%ieBLARkcTq1Y19-4P%Kk5uecC0sX|*+)ty+RIOw4+1F$w3 z#}No2GLe#bX|+r`m*w=%(eAi2tLV+dW~oc9>b43z5peYmP1U4SC5b7=O$GpNrOQ1e znD&S9YHZ`Y9X7jRE9PYy$LrY)5~uCaDFLRT_X3ZeKB}|kJYLt*ogO_MuQt5#$WxsU za~q;#6!pWnPxExp@$ldX9$%(f%iXi?xw;fT-JT8GO;_N9`GSae6+nR^IuIfvyKN%5 zXd6RF$7*)2lbZg_>t|0s_=i{LZ|m--uIEYha_h%v0Z=M)# z5izB-a>}*V%{HiN*9}LT6EnNMxD*2u-Gm-v2tbRpDyrBBz|~sHwrUO?h=>8qtdvq^ zA)=LljvND%5_Mh2&43vrtwWQ7kK^8}jHT6iA0RN0w$32FxV%`UYOS@t>q3kexH&9KXE<9ukUgub*^+=#*|F(Z1}13~0weM|{L zTM9a2jDbNNoE%KG)TUCUMT|#~hvpclwl=j{SYWPFTa9Ss(n<<3rLas>4!P@wGB1j% zvk-&3YPB4rnWK}7SE+s9AMJK)Ri@TDj$>eMv#Dx~8Gz7GLhDZmMp84&v4Tj=G^Awym|+8aZ@Bztj?ADs2%FW+rfok$|ANxU78@L^Mr{ zh{X^PvDQVTB7?gRL&wZ!QcBA?1K|EJ9_@A=$2Qe%*Q4S*kKg|7ckZ0+8v0lki&(v# zZBFj)PWjlYXyi~zdGzQ>aJ>7~)7@?Zq0X{cX|tF~1j5tPQw&^6;p!n!jIp*h&odFl 
zoFGs?3}so`dUAja!4OvAaIF>H8PV2WhY--+=8A!h7?^;`%-A3sl64`Bfg%~e_H<{{ zapM2;cW86}3!MqI$taLAgK`dDZ)@JaPheY3MP72Dj)=|%@(C9fL5kKH-QCG4#I6Uc z3KrT->h6lGUmci^Nz;GWY(d>$C+rMxf2)wlY=6fa$D4^ zN5r_v@oY&ay1mo+H2T?f{Ue9@1oh`4OtE$i=2>PQ`u^~3y>C#?C*>6RG_^w`VC#FV z4OBfbh7i_LCC3Pa&H;$k&29C!L^Ck(l?YQ<)!EiuK!d8ja!g-GbpRkR*G8cO00uN> zB0>jq7x(Q^Y)+m|yL`+EyqZ5d&-+J`7ziXmB_*N|n5Ws_{lLjyQJib7_lCt*A*P7n zE?7!z;&Zie$z4|0YE8-31w5F$LEmgY1m8WV@yO=qHiu#tdya?{kRpO2s5_S_>Ks2l z>*Ceq^3gT|B615QK0KNJ%LG>+9{>4r=j-j+&ppQ14`2Nc`saUR-hN^0FLe9Xx8SdS z&8}Y$FSeT@Q$Om{&ZAfp3t8HJKXklrQm-|(Gl-u){twCL%NdlVBxzL&W{joflvE z_s@Q74u?kqn-0n(kYYFQ4f@t~ofMf54r;kWiayol=;+7|4~Ijol_+U#YE3nZxdH0l z+;P+O8yYCY^TVYoEOWaar|x*`AR-8Ez{JcetzDM_3ae6O(FPnSL?@_IDTj%~wq0(J zxa3Zw9%R~k4W&wnG4>vgk5A{N&f{`?e5|!a=2GOH$B)sKnRmzACgu6GH}Ga!T~Q3H z7-*)nJ2?xQc4Qa*`X}%G=i^uJ3ctQToO3Sa>T*dnaf*(nR@4v~^49xZlGa)i z^*{lLXDNt`c@qw!f+|x(vS1;^z^vMi=ITc1;Z?~g+{k_x8Rh*u)?|U4X(+qKHO4@d`(eS;cwQ4g{A~M6g z?Kx7)nGj!D4_R+9n=sG1U`*u{V=(XiV1OxOFdj67^Mn_k3< zG%zQ|h^%1Cyo8jFc1JzLtwk2IE$_l+yX+5To@82i^Rr=z>Eau^HiEOj%4b{0EVhAsty2zO$CZA zwHUZIamQg80Ki2Pha$~X(J6!wIEbj4ap1sYs;d_e`AgLsLf{Z%T>XgD%-q(iLfsMU zhNM9QKm!L?M?$8+f!B5rGX;n55jyDbG9%C2%mM`fG_$4DY>@-1nP|J#y(Y%MO|_~F zF*387Ol>~OyB6>;FTM8eRKZ^Bcg-xvba{CxoHm|#i90vA&vqQDLWPMjhD5PgqAc0@6ARaWOsZwO)N0br?1Fo7-L*qXt?O`V`A}(50Uip4zoId@;O} zPnt3RsB}rT);u7A17biz2Q+8Eg{QZk{1+Er`b%dWHfMF|psJ;h(|5e2p#gvZoZQGD zsO>>DFYI}N+2-eDlOi34*JZcuw&nt-S{1`le>5`Rd)nJCU+MDR9%l; zM9>>E`%7^>nj4X;&P%Z_^dZsy3a+opI60KOacbmD>Y!31wHy+nVHrz?kECtgoE$_P zW5{NywN$Fn4CG`A!HZ{i1G-TjxSAA{%t>LHpb$lJ)`-%1+YWxkKee3yy=-$IK;r%8ja3NSbU0w@ zf2h~36T17l@^MEsCFXAEp&nk``8U$x;P-sqJj_qOIT?7UpOvratL8#v1iV?xF>}s2 z#RLGL=60hJ0Du~uXZ58{PhPltEG=|dvQdsw>S|gjsNhTl=3-q;^U?*TT^>%453|vz zt{>}d78GRLgLvDI^RD9G8?a?O%Cbv*tv=Ur@H303nu{oW$9H4G_V3^$x8E-Ml^h z#9z(d_`jy}->h(tTh)}U#PM)F3>|Z@Cbeiz5skL}27%n{CeaZLz_!}~)nkgyRNQ)q zN8K>jC3PKM88nEoPpL~OEoClEBX)p}h>3$cI+&wnN{BG~s%AA`j2+If-}QN$x+&7= zqc!75%&cyeQ|{u>oz1O|`{~|;2M81byNMO8M&medwt2a}LY%{`!;u;^@I&VBo9ek%!amX|Js{1E7?;F6I8lssc$l0J*1#h+0Ke 
zxd{_GAP_3tIM^CMGw3K0D47u|fFU3t{iy7jZ}A5Qs_t$ah#ApAo1-(qk3my~vP16y zr=#iK&pw5(e`o*nKFjgdKe;>o;t@~Ob)K}DA!G$z*N4nZOz0S6baz!jgp`B3n>x4$ z=G^5{CT2#5c^naujR;t(qDoU0)9ulYh~~1mI}qAxBNU=5RFkHPOq61x8yo#q>uqMW z)_I<{u@ezQTul{H3SG)E#zUEpj*hhGkT%l+r>UxGpSubkV+3;o^d@y)PL57P2#YL$ z7Gw06rHG|2k1ijTUdeqL+l^T>lYmjmeIyYn%VK6Z=fK#^q!mp#&y}h72vt?kI5GmH zhK+;3!tR5HqtFd8Bhb_exI>j?nz_$34D&QY)1lAoB(3cF!2wEXoER9u&_$U87&24j zlu`u4BI9A4Mcvg>2mpjmtw@X+0cx!xrc#gz7z2j3loUI1YNeO~IJMH2spWnUk=7Ie zmStHPe-T+faSX^50KiOD6%lo*EA8U$;)X9Rt()F>9f$}rg^%zmdk8bX@t^z6m+hbO0--Ij<_7ghE8`ug2(fBWp@bo=JprcIits*0>Z z7aQLIP*pKAMlt|16Vu=>syXMSR#nY?=k8Nkn7L0~w;3#lJPe|>l!XAnwdfoKkvMEN zcR2mlwfygXduwN>M{<>PQtE24jF33YO+(}x=kr_quz>~;j8TCBfWb)#PyjyZ=AA%V zP%&T@!w|qLvtxDdGo~}YuF1E|jKTqV7vRa&!7WazIZ>fVCV}O% zR>A%&p9N+lGy^n8P@~|83CI2J{_vasR8Nnt9{jiH>O9(#LN9m#|Nbr;1g-$K8ZjY~ z0|Bx_mrj>{VsTw6jiWa8-X-^T8V~hP413%4?{$v*spZ;+rM~^m|M==_|EtU4ZrDHH zPdK04JK_Qwa_&LVU0Z8X8%9)jKms!&T@Qeq?WRs43JgFu4Syk+H3SM2P;KqX5)m^Z zxG~;AZ5E7}i31`!0nutZws8Zxy3|MS46Vk{NeC_hfXzHO^%lXbVFN<$fIYw}g2wMT z3jsjnK(2=7!30&&BSRu=;zUTq;A#edj9q5V*E44T@ZK9|A%H?@yZ?K~m%sh~YyZRX^bZ}iaXJ0tY|s7tosIWK z-I%Tr-+Jx(`5VjMSq7Zj^X8l$5B33A*&Vu7;s+9y8!(!U@sOkZcxHkf^CL&<5F2?-`ewbe9vj}>^vTEzT zKSZwa3i5DfckfWj*dPH7o2)IXyJ=%YLINiY1%U^m!@`VE#;w|98i2dEnFIwnA`=TioIn|C%racqt+n z%nxzi_Ft6480DR=OG%CJVx%|w*lz=xZ;3CMm(oILAo(W17V$>zX8?#8LBjO(aQsK~ z!JoOid>4%~Zmy@RZ8|eWS0MJ2v%HKocOfu#UAN{X=A2Usxg!ew_BfMz&e^2a(%9iR z^%wgWqiAxPTibM-KIXtFr4CI442OCJ07%5h9zt-AK&CpYtw~N~9ss+TV+bNLbhtUr zD&BR289>0?r{@>vTKwLL=V8J0g}X;juYdjeap+Hf;zhURpmKf{hJlB{VlYC{O6JkDB7~gWjS&$&M-pfO zMaay75y_kc*gQ0sMdp+kJR>6+I{`R=<83zaEq;GNredJPfjgoG3g`xG%~-|)Gj<(f zC`SGuq&_@&rwucnOnUkN{^?nIv6VCjE3xR}H-Wbi0pc3ZD2^T@BDyFZDHY)zn+? 
zs%?ECGpkklKDxWsM$D1e&D>B$cH2##vy_REm?<)QOweJD$;__Hm5KH_=NuIrNIjdn zw`GhW$3D10Ybwj)2El+*l2%J8fX2)r+}Ig2Sic#h^x9vm4*S=@MKsi7`$<-So0E9wKWAbX*XFEYqJG287RD}GXbuJ0e5dACK_X0 zOAe~y?nD3|*)20OkUAQ2BmhKKX^0*ZE1F6vv~WfWOh~eF1ccJ`Xt%2{JbLyN<{1r_ zEZY#~aazq&+EPN$s=nE5wlNkdMI?nlDHSOk0#kHVL~2r6YcVHPou(<}u-hCpk+njQ zbIv)B^F%~R;i%j0(k4^b_Cr5(p}+s#M;~02Lju(>rIm z%N?*B<^{@bvq{@cmy?+xQ<0W)J{^W-oHm=yX1CQ=%d#*t0Mz~da5#v|$=RKpI{|S8 z0KmY^jL2@Xepm)hF@zMUOU#@%n-H0iu&GxGK!)m(YMpkQW1!4rW1s%_|M>FJ@U@39 z6Oly_sjdzQ{bphnyq(#8zhDSP4C~_1*vvkKQqwV@w*(rT5R5D#2#W%e(??to(biX# z*3xb>Jp6Juhk6iU?mZ)FN}YOZ^8zU=$%msr=p3$pWT=?P5or_+p=(buzkPM| zM~;^lFKpPzb7B|AkMfwm1GJjIgAiB>7@b$mA9lYQF!f!r#>u~n<eu&W;8=UU=#*fi^1a3fCC`_t>s@LPZ@_}=r@~L#&R_c zN1=ZE{t%^;E5n#G`IF|mP%n?lf7aun!B1@CRjB|xTaD%F#r)bAuiyIj z$EwTOm%n!S=la^e_x@{t1+Gi$jSX|_0oxDtvULPPgtfO09Dx8)+{)x|{k+5zGvoTi zD3OEz;BLl`qpd%VIV_??}vynk+Fg~Vk!+8 z1>DTdkpLLOHUxEWX~>+n_kmL^SFO#h``92H;^CkTpMEe5+0!Z5Y{g2xbG#Xr!(NvI z_|1!0RRsi=iZ0;jo&cHA11K8l_p*`vrbzw)Wdd&cO(xpo`?b3DmLJvzfgJv#!*$K3rbq98rwayAS~cF?Z?6DPPZ%g;G3>EtUvDu>oGS z8o()FVd}Lbnp;&-4H20ToZL)Ri%Os6Qe%}EVE25y{x6?g{P-JheU%R1eDaUg z?co>x+{O9f5HS-cWGsp#%1~XZ*2DvW)>>uebPpMPLK~br2wOL^F0l)d?G(My|S4gAjuWENw~jg<*d+9G{>5+2!gj zIsAh=@+&=vh1ci!V%=TG7+=QHnrh>Ktu<|{>M9_Tnbc}#T5HWI^?iSGad>=G^8j}y$PBHM?{Vsh&fWM(ujxwt2+Wy0975!(pnn^-X0%Gsn^%n zt8f0DyQlMY1Asa&M4LY45O|*F5CS50DM@YfJQHv!1pv4UZw}Wirwt6>10}#%M0?K1~w<^nHJlhw924M4bS$Q*>ZT@8KK)ccGihG-2sd zZ&fyqfnqIU;KWSM)=W$}1_+_Gl2R)35|DBVT8gR0v~zJ02{Bxc2R9<%E_UK=bxy#Q zrcWv7ywrja)@}(i5D|5o$9dQdDMWIvW(2}r*LDJeRJBi#>idQ)grKl%eb0CNKsHqYqS`sTZ)K`h^-X>uG1`my3->V+xYCf zby-^zk)xxd@i4o4pF4MCB#c4IvecR*ha5S$12of~f;+|-XDIvqKBd(6eTdX<$}p>s zG8_0j&%51@%?Q1VF;kij6t|cn5p`ypu1CaqS#nN+6LuYW zou_g`^AY*VytK9o9@TBz_uD=LKtgLMO0*ns@4_dfVr@`d{y zY(t*30AY^2iP)em>c)&%7~;SBpmd8*4TBq_LIVN-0tG~K|HRz`91tXd0fL(V5U%X- zT8%@nS2Q3R5{498)yf??ka<&SlkJyDv_BaxAKw3R;h->!;8>so{81rds%ch1jD!eo zg*rtGa z)HRzJ{(q{o(SX?!Goh zukk#9sH*c)?ebaJ=vKxIyRnN{Sa8naq3QlJxVr#S%T6$&A3mdtXUYmWM+5UV zbL*A=^?8%}sgwY`ad6fYV;Ghw} 
z7|Al6fU1GAul_R#z>e(ja?|IK?dTe>&X(tQ`a6$);y(!8n-4$yo7+BOpS9UR3L0L$ zxZVui$!@!p>8OtYK}0xoKpn^0XIl=LB-OfTz{@8W!^xfJ<*Ma&n$y*Ib+Wy~ZJ3{2 z4#zuCxtb3tbn4`eNhvB!7X_f?%|?nGqHQRaR@=CEl}(?IcPB%ipI%&sVKAl-o<9~5 zB!2(BNBi-xdvIs0vswv2M}2>NePCuuos39vs45{uz)(d^fx!ddxRln;Pj*MlJng-; z8Ul4is+$4!iJR6s+jbz2#XLrafm5j?Ix2Zpt${4mqK3h9$RY(`i4;x8D}nuFhYud* zNxpFJcz5_`7w_)j=#M^=dvjT)R=uYw}V1jwP!S95bvf?&l$ z1P)epQl~f!LzwE*{b&?Vw-IElhZbm{&Y=Mi8ljWB8-bw#Ie$Lo7Pt5j0Xhf+Ij}(q z5D}t+A{0bM&)%XnBUK9M2|=?1EC3K*E}0Doq9_WU!r1rSi~c9Saej2?(VZ{YuRX}m ztAZKk%>b;e5TLhZWQ*fcRudReWPp^31G^d#n@h}Hid#{r%_yMAq{1nMr7k&VL(?X6 zSz_coY;D%6Vu;iyL_`Jzo9he!?!jGih=;>rv)T1sdh+ai$Tt=N+DMd zcT0xlnH&!D!!z7K6gWLBIaRAHV4Mo9B;f zJs6fSmsrBQV?JSDn501Bm^+?Y1*rh7VoXK~#E>Bff|@t*km&WzDVX2swufTUq|Bps z`J0Ciea6fAP{jx_A(%@)WC|EDSf*8t?v6|GTCi3KGJ--NmPJWI=y=#9b*Z!G7@4t5 zb5j8d*3=9;4%b%)S2bt_)EGBMJCKGk06D5LscIuc>O!#^10k53hY(UqGeFfjgs`C_ zv@kDA*L9e9bVD8y(kRU8owpf(|*(iUV?P+(XMe^r|kjJkxegsy+)%aQj1 zp~EiqscEaFbb+~#bnobJxVm%ai{|58XO1FPuI7f=0mZKE)$HhatKj>qYXk4P6hjb+ z3HeUHIZG$YvK*GVxjRHCdeRL6?D2~ymV>+RHb>DyLh7lv+Pb6Rwc}%)mgV}YCDeUu zO;bw6S@hJwQp9{ykN+-oOy4%+VPjM@P5>YPCkX&pNBwl?tl1R-|!K(Sme z^*EiVfz}$0^!uzIeixDny#kOz2e2+O1rUH40LURJz^iUYry#&>PQ$Yg;!mBy8*ub) zsYfyD?)2nOy!FM`LjTTE9zFh*p)F5d|4Sr7Xy88@yg_nwiVk3C0GzAlrw{4!@pRc#u*LL;t!FPXtzWXPyhc6$cUn{Ly zH&pd#(8*=fdqN8$gb;ukIT#TDHMitW4ysDvB9cM?MF1F=5fPD4RNb9IV&h!lK+~K32zxVW) z+QYwAzWk?fr#6>(sN8qc-Lvl5ax^Xr)p28+!w`dimSw z@%zV{^Di#{@sK}!!vE7tyQ}O$y}~YN;P|@!ixW9-oPC_xWzxk4qc}uNbJbP5J?ut0;l1t>p~>STzvH6byZ>LxuGp2NjqO zlvs6Ory4>m>Wm2K2Ur{U#PTN8QYQd#TIp|WV894B-EH7-<4boFjT6@H7Glb#001#k z^R_zk5C#H(x;}s#xW`~NudI47r!uPPLLbWmFT@P%$Y=u-fGJpZun*ts5RqAkn;9b^ zI;a|CFko0?Ui8(V0RStZe`AjH^5k^t3%iB7SMTHQ&i+xGo;=TA>05ny2=?+32monw zJk19yjR?rxO&E{}iZ3%Pt(jfF-8GZ)mm#vvFmn`q8pf*ni9s3Dn6z~({4yLl&k$XgtzV;J&@%a zkNv#92diCk|J%eu+^FH@?w`_ zw{w6?0W(i&lNo6#6&W4Ywh-ktrUF^fZ<`8k@i{<6B$rMuUxWT~xPSHF-+FvFi{-bD z=@&J*S{$!YeHyx+^(aWwXd>!{AtX>+=8EKqAk|hy`1$#HjNLrX$0zrKsWgs}Q7MFA 
zXfZ}Ht#zrXsT;>chDDmYkby{bQA8#ZGb1AAXlBc@48vfpl~U$;Zmlg#MdTRc)hO;u z7_Lr^j>|ar|V90C(D0e}+%uOhgY#tTr3FvoR&zAVd)dNc%PHfbRQ5m}Z+ z^=3zErb-S7$edydfgNCJMWng&`qIpdc+)JEQo@8Gkh_Y&TKqu7mrsZ*Lm(nZ>yFi0 zYpdwB)v9JlzW&0?4ue-C`S+xDL_`qD8?0Fcpzbc#tm<0(Z>_mwpcpwJd5n<*N%b`` z(P1^QlCav!iLLz#t5(3ZIAGv@vm0tAVjat3R>?zf-0qGxb(yUcv5w;q)9J8v_v>+f zt$Xm+TW|f^x4to23q+=tj6;Rczz(%b{aX*-+IIcD(>wd?{hgE3VHnCdRhf^r$0kA? z7HN08+^3hAaM-CH%V$a1|}RC2;CXh8Ua(N>}MOeTE1JJ{qnp$-tw+fy3*Tu>azt`wbKoIz%J3;=7gO= zXZXY3r86I8OTIg*XJ0>h|CeSu<#3mm>wd?}l1`V$Zx8hC&7Xe%`0GWX|Iybvup3AN zbO!?cNa#TezcaOm2P$%|wiXdQej!1~qUrCX0i))TIc(dh0FAVxol3`-;0B z?Cmgr6QbeCpSm-A(`CF)_`=)3ErAtl|H7-U4#0uB;@UbdtlVn8&6U>+EuQj{+Ha=E$J zhK>(Ur#i)|KCgf?$Y&i_2?p>9d)u>_Vd&K9u0r{N&V`B!zWLH4{tvC|7?@|#@)Xfm;GyB{4;j{i}m>x zg}ykNmkr1dDv1Rh01?gI6#&p32!Di}+&(IV|DlTFUqw%Ux`%T>1vDl%H7%q_%m56Z z&=0nnBhn@^A(?2c#Sl~Y*zs__xNuVhZ~z715Cu&{5`KSv{U#~oK}UR;fK%051&2p4em~MTE_@9x7HD9snKy5c8t_i9GqM? zfrtVU0YGg{-ORuMk8^6KfZTN<#@G*=kkfuV$kokMvMzU5hG<>u5TQ93po#;n$!XrC zbzKJztttY$xriZpnU_`tnM`iNE*-GTJqGI2U1&jR9(aX49QdI%_}(7 zTA9=9uRkPe1~(4!x$lVRFdu5IjMV2&1*;l@60HO@nAu#U<80;t5My4cT1*V03f0u8d8X-V-i#mnU*=@$oS&BXvfIWzO>AR(dv&J2M`)euSb#*YLMO;uIlP*X|) zNkP#(5+k{bA`>!EU{_En=&SK{Kr{eQ@f*J>P^8fJc^T`v^Q6G2H^#g~ob{pQiLCv%(M{=%C?w7#pGz4I9big&!B1aa>MBrTamwmgA!_GEu=JCU? zUi_zTPQUpCck?(&6F}qaZQlEA!?Rz)*v)rNF{V(9H^0h9W<&ND9b6%876#>+c#C|! 
zd+)_{qlI|O@h$X1GqRbSzA1ATJ3u9=rEUb^b`EGzx?3` z|2ZORxTsE>IHuj*^XL1^Z~x+*Fa6K2pUI*BiFv_T#=x7>7B$OWXa2$LAKAbWf$-H# z3IJ&Eqf(~*Vf*BKuLkzAvi0c<(iwblXbO=qGX*eP8y-I4GZ{xW^+ncO%n=w3&Fy2J zhgwY`AW;JV#L#C0sPp{0yBDlBS+Cyj59CbVoI^Dwn9MRzPd%HKkmJ3(%Upbapy>eQ zfU1aK$c9S@12~y$%64*wM<=!{^TqSHd^ToQzdLT%F24sD>8AMP20&=;peikmV%P7h zEQ={aKu=@}Sm4%E@)oza#UE%OA_3YuB;I)ceZH#SWGR9QQft*|s-d&iXlW_z%q)di zN~udZKH9q3ix)5W?%i7Jswzq$5@V@)*f;Yz51X^i2?|_aUg=`H-DbDjF4J;(eKFch zk;EFgxf`Qn6?1P+#R?)Sc!)9O-0I@w#K05~klmR=jFAum@mTjNwiz-JonKx&d;Z*+ z+;N)As%f33Y1J{crbJ}2nlceO5)vS8HX9LjvgUbeA?B{@Pm+G~>EGzm+unaYQk*F^WTTD6kPnl}A~UMCxzv-BEi%QNs+76RT~9!ch;vzj3NyE*h+7DO z2r#l26m86Q6W3;|5sj%TDW$l|72Qk_G3N-8V~nK~GfOG0QMVq4B9|PZ@RdMsWx;-96|_;v@FYRyLB@Rj7}nRH4}uCI}sU% zjjHaaQB;!&28A z=W^dCX9CgIntL>>;7?6p~Ow3fOV^{~uDs5R75lJbjs+j;#jM3Z>*-TrT z0X#%v=9GddnaE9KAKnBV1Ke~B5CL-_qE&WkW&wkHAxGdFOC>Ec~GyKfm)<*j-=2az&&TFszT8+R&&`ik#IYOR}Lx7%#Z@V z3k6OoEv=$kAG5S#wK^&GedwZUARiLK1c3}OOBO#y5jNOKpY!XeQ6cmK@=!r|O_dhmMsP9oY$s7{99r2)q!90~aA z^SfVy5AW)4{7U?5c=}@umoHk^Q!#fia{vP*W^!1=-svR}&~F^lO+nY>Q}`&aitx%- zXq|c2+N!FW8KJ46f@2rM<`~QxnYOWabXPYmpf~(AG7v@4Ce}QNLFh8pie}UrVptW& zgzoC#uB$9wucn+1yEoB1PR(|C2yihS-umEg-7Wv*g6@3y;q>MgH|N_Sx8?fjMB(*# z6gv3%qZi|c%by$Xo^~g@s+qglu+yoAd5?ISQta4M*c{qCwJe)APV86SX#cmi{M2*3 zcaBfrI7umYK@n_hFgqtFjyf4(=ugK*Fa*ZA)K1^}^V9HJ_uZfSwWsgi({I1^_Rl`q z{!BXme{8Ov)x+}>qPYe+Ki}30aoagi*wS2pe?YvMJ0UrK&(#N4JD%GNGXVIP(^6k? 
z<)5NF(4+`wN+Ezzt>Wf@VDL%Hu1bWwu5Faum$J638j!P+VK{4X~;0MVCOsz1NyQrBcWiL05EgCL#}J;%1--7CH5q zn7b-)TORmPLLUZ0M1w@B7Oa=Z~Jh*zI=P z-4of|{u{_5c7~zY~|a%n2HpH%U*+vXAxdX1SaXru9hMz5WWZmf2C=@s-C5 z5n&)A5AMsdoSfb}KR-9~X`0kYC&3=E66dKw3>~R8>^mdK0uk4kVQ?wiw^s{q$?y{ck+^$MxKPBj5dXdb3b` zU^79}5Zs=6&vNo*boYk6YN-4$2YNfSyI2=0eIfTR*Fe0*{Ghm;GFJiArB5@4C z^`GpG@IH}pZSh`>w$Y%bH?lT$`|^5luTKYaMG)OmlopU1IFImc9MVuDRSh{%4w zzrc&*wRUv8Sd{ zH(z=83;$tSzD37R#s^sU?63Ya4VYcqu5vp!Y-T0nA8Lm=Qa0386pkuGgK z^~v;#Hh01pP3A64UMWOW06;@`a6vF6M?h)vketkpI@}+?|h~F;?uU( z;HPiwrlpl~y)SS#?QC;(s;A4L$@EKi_m8RlbdCQGZ#lNc(Y4=*oFz^%1a0B`@Zmf@ zhod)gNYi0g#_f@Pt9_{t!yhsOX`R`kxF}J;z^*{n1ujJapc-yM^5t;dsJs6s;NhS3 z%U_-z{rzv<{o1*XZ`eCe?){lGJn-?mDF|cl$P+BlCgwXMEdj?OCjVa{&8gqCsebwDDu)}=m+41{OWUQ3}Dbf z0F%k85F(Kr=GdKl-v^%5r6D&CRvSHj4Cm*e+i^^v&#UvcJn(zKn!xu~{6ysFz=WAA zU{s5EQ3>{-lAIa4`PTH{7Pq*?A5h@;j2y{8feB*}1^%4*swE|pC6g|gtX&PT4{SNa zi`D`Lf7u#bN+HVI?RHs~8#iWvfMg10Mic?SV+10aW;f*Hu)TNGZCnlqJ=8)T zerdl{DbdTtevbRFW$$7?n)?KZ7K<3^^}daB6Dt&WQ!(~7^z-TVa5p?Z>rS8fW|ICz z{o+(5S|kI@63AU9nXl1%WFq89gceu~7?kyPn!Uy60l`r`fFnQ))**C3B>7>-;VS5} zlh+@|e~*{5d-LB5{2bz4mr2A{-+y;kP*){})F+4b{Ng-z8JsZ?MI7f6G3K0^d07?! zcxfdM%RIRmgS%>Nbxq)NG-ldvH|7H1Dwa}0#99}M!J3p()*-Hx!i<5qX1xO;6FfZn zVnhCp?*7xOug!8t`#0iit#tP|@dLR?rh*w3Q})R2E-Dg;4BfyTK}kx+Yhh(fLyGDb zi~8>5sve&WSI2{G<5A?{>TuD>l`ry@ z->n?S}A4*L>vt*6?kieX+?6C)=^QR=_Pq0F&xXVQ<@Aex>HT|u0YN&P=OzF^fdex$uUx;W zzUmBH18i4|AwmLzm%Uhkf!DxW_cb{7#=w33-*sYD#SlUyRj(=lAzAD!_RGl$O5s|f z2sJez0W!z9I}Mw`V{%S4v(0m`DG@7x0~i1i0x&_45Ha<&+fm$X%Eh!SIx}2;*RC$! 
zf^S}&f9pxx9!HqR^Ubb1jyvs-0v-ZE8Q(K~lQW;%uMUsCc|N^6KKKj!yFaztZQ!mh z$Cqg-2vWIo&)s}kPwtTAetz1HfA;;8FSoS4Q72fYpvG2kNpp5J&%ay z)?T&?m|FxQ{87sG_<@%Me)lK;bT8-5t&~zq6mSe>*pP!AO8$h5GCHU=6*LvJn?!&{ zowS>(LQFPePziDYjy~|bUXNj@A`LS54_4V&`qz;uX|t&JVm2z zzr)bZFDC~$9D+6^A~$vf2Q#ZI4vdb0+;wi#Uh5pf_S1i&KTO<~2YwH{nj2o#lUFg$ zB~|4}23a72TjYYIX16Ew7Pq*?A5^Sa#H-%G5u6AE8PexXB-d*c?1PP0+|L)^{L;7o zBcIO4wCVd^iZm4`oR+zi`B=A77Yac{0&$mejPdHQKU|IT%-vZKt>$uaav}uHp*ipO z<=P|$e0XvvG1O(o80J!jVTdt~;}~PS|JuV42mlyRssso*Z^LGFituJ_bxXW{b{#n| z;4)W;JTB#Ons|tHuCL~u6k^VKx7(FcmZ_>(tJ-BGWbulw0H~^KHs(4#g%H+e=Wf#h z(cy3qk<1jBqI*3YI>MuFc>nxG#O#C)kh&aWBtTbFcL%-M`7jXzMpKQ1VVaiF3Rvz! zY=;@6@fZ}CjVO9xF4Yf3HbX{-d9FFk(4Y@lnz>`%Vs$!*G2o&Wjn}b*Ktmw)C!s!>Fv_x7M}+UQY*y5933N|nl>;*mi>7b z!`abpJp1bVkGjL<#oPL;TarSE0fx04`5wrpT zAyBg&b>_qV?8l!+_~~~q-nr7NR%RubmRV}cA@+Trb6#4FAtEUt&SgR1 zRdfytB7y-9hl3fIK?)(~9AiLtMvNRV00C50soK`e=52QK)+!@*T}MVsvszj(XlfcQ zbR4?~L`~FO&?5k>dBtTaE-ev<5CD;x4b4G`0*7E|<`tRMMbyyL%$zW$oKh;K!ApUv zs*2Rr&89UGF=l@GbIN)3wP!OUMgU;A(IF)cO&!qIO0AoNAqPh0NI)?In7SZ=Hp3>nVYrt45Qn49uFY*Z z-#>i#MxB>?!^ug&>bie^`S$U>?Pl{dNKrUb* zG9udIArLc%*eQTCWpZ-^D6N#EF3-+?)p5EaMG?FG**(XlHQtHe3l3yGFF--)Hj-4vNv0Gd&FR#Xnix(#s z|HmKu(tE%8#{Zx|;=t}sfjI_>!4cHK-T75p72Nz;!->~=000;Qy$onkH!BSQZi0&e z*3dFDBTC3fj9^TTrU2^TXs^F*(+HOrFim9cDM9Wu#@uBd`qBWA1sZdw03k3L7?>j% z00Jqpap$Rn`rMiZRU8+3_UQQf9~{f!IDPx+K_A{vhYR+_G><9YJzPBNzm%5oL{m!M zVpkpd{;GB5zdA4PyZ=viSNEpVlfyO^$^l~-hO_O-`*+`%m*uWrWBb;Nvp?PM9^Scn zcgiQZos*IyyZIXB?0{}TaOt`vnu7`9d{wTFdOYFdOO0LJWBYK@`1p%|em?plp8U8y z{&mVdw?!z$$lKhPxM^-FCeZc+xegt{+yN0&006II?w_+djS4goh zFoP^)nNRM82lt>Zu1}$TGLxJfSjM@GBbjF)L}Z}+yF|5%y93fqYzVj+qJiMY><3pN z;7eoP_NjvhURo&s&L0llW{ES2JGlpNHYf%5d&G)jNC1f7fo~F@d<=aOPv(@Ube*a| zZCHFS+s=#8vF)1oDSDBH$ z5tK^PB_B*Wem>;qZ$H>Ss&DSgkI!WTnPYO6f@)K{mPl>8^Aymj6A2*ZcH2B~i_Zf{ zy1*l_lP6Vdcr6SNxhrOn(n6f~7stEj@Rv82Z-=M<3B_GX5dc-hz*7#)WM<6d6_IXw zfW~pW|KKzw(CVcWMC|+ic(=Q{x@xV(7zudM#z;WGNcZpE?eeC~OAFFuu0^)nEfLka ztgJL3gb?Pr%=4U53W57Rmjxa=jN7mO-0si5J087;ztZ!?+i8?mqfUV5irYEw0=GDd 
zP@az4ZeI-{hM&(xb zyRzh}lKy|}{aK7|S(e|2tzqwdhHr@Bj(Ib)GOMztu4b{jNj9651&O3ZnBt2J`#}Z_ z>4AQ+0f7(%3zlAN>xr`A7e5&gU`v8v8ImOfo(D?|OpA{m(&$*Rt?U)Z#S0i2MWfx*E{QRfg$h=4{uXG0>o zIU-U@3E+7rV0OAdVhW}vVrGU65DDf;0x(1*0OW`uvun5k5fM7Mdu$>i?#DfVMUHAR z-%z@)X&A;a6HyA?RMfIewHW)NTbMf$%}J16@^ZOA^Zj-gBX(^I<~ZfU<2%D<699{r z!+w2u_ii0Z*Y>~iwQrtXoHtFknq(&#+w|2he=TNBg!`~}!e!eX_WgsSBM$if)tQSC zk!sGhWYPV0bGTmi%Y_Y*2v*BJBqRa=xY}*^`>|(Q9UizEGBr)ZF)UU~W^R{@lPB+A zZ8z)10ml$iGBa}lu1qmk^_-a*kYkJhrd0$X#Acq%a8=EdN;}2$9}JHk9zQxf`A3~i z&dokD`3~Tx+cf5UBAWPYT+BHDc357qx)2w*}60Ckzmc8KWUwvL=dRP)`@^}=WQtQtd3E;+Q@mhD_}NVJb3Fr`GxU2Z-4!Nfy-5qI&X1}1ersK6xc?& zRs{R=CV`ow-*^q_Y)OP@;AR%!xoO+;w3usQGD%T0#rYf&Fc2U*mKrQ9>|O*mA|hf? zcE>@c8e*BGj}2o8X;h-@wZQ<_5y%cs;v|j+thC6R(?Rv<>>u{;|2OyhH&@NkhlAcf z{z8`;T#n?(V9O;K;Iy~1F~8a#X+6ocb4c3dj?%NL&C-d(@=)AoXK&d`Su= z#$Y_zq(^sfxNK?QWf-0U7pt6X8RHtJlUfKWHbJRm4WCRY^14zNLu^|Bs6JVpE6Shu zJLD&WA1ZVIko5VVoIEMEn%EezVye_I8-4sfOh811;21)5cQHlvE(G}zdP6n@0CG|n zGvk1UVD3N4Ys!^;aoT4hHycZEqG)5yow8NO2=WdTRMp^(3{vdYq{F) zWq<>D)W^POtL`3`9J6F~DVu43&=3<4@9S816=u&03i5>|FMaXR_uoI+ZB2VDWvoRxG-?o1%voa!?!rb@rWB6B z%Gq?q%t2c-DFg)Qx)iS94nqX6iIrOblO~wtiJ7uo5sQgNX7Ef+KaL|9wnXaAF)8BN zRMLA_FTZ5d;UK5?ba0)4a`!5J@oTkT^Y#oqYsigp(nyYqB&ZHX zZnwA8pErIyC6psX1qq-8(SQgh0(OAwm@@+S=zRoe&)-%*jBo`=V6!CI7#enf0@+gl zY4pPOeAslm`~3%tUwzzv?VtY6zr9PTHlb;>6}3{FI7AxasMUL3)Li_NjRgJM{=mHLE!YP?0 zlbB7K#3>@Up<1E@%)!x-fN8tiiCfooIzJ}jsum&{RF3414pfSz&>-S<6Bo=BOgKL9`q!`Vi9T#rcDyJ9(JMrR&;B z>vFl=U2Rqe>xC!<9!oAkM)Q=G50G9uI9xW%vvD$vuP*MTd;K^Li^C%a4M@kU11**U zF7AZ9-A4!wx1ntyuxN#(jx-go6D~D^kii#4dpMq`GW2?E^hm$^o$;vM9O_GN|8K*t9<0AS-hG58?=9f`a{cv% z9s22;AMnwezx1yjx*ZE)2$NQ*IpMyJUM_YVC=^h?Y$rdtq~E~%zcl>bXv;u*YQwhd z+SKjG+V~WQ;C&f}ycF0@Vb?dY+1wvL{Dp7-M}PlU{uMP`BpIBdY(qL?DGE!VS_I9E zy%0BQIV4bT$O`~C03jh_m@^gt-4$^Gaz@m8T1DD>4PzjdOccU(UI99|f&)PyFQudq ztQrs^qDbKoDD<%yb5I=v0yMqee{~r5;qv={t<%yWQyZaQk7eji&Ub)2+57&?PjlTi z_xi7H<X{HWR49G5J142kADzLv#b3*56j2H~mf)*@N(f!Rjfi~k0GvChk2N+TGqVZ@A~!#0Qzo8gDFUZ(VI55&BfIf1m2h2_btW#a9ENs 
zmNzkWlkYK}%DAHTZr`o<m1#kOrzC^{#y+Pv&PFpcYS`+4BzkT%aIc zKdeo-*Bdu|%QZ@FY+?g9a!*JfZyM|-=5GFYwr^&}dw{j0efx0JAHd~*)m4A+@-N)A z{>;8Z0w`!esb6-7$Gd9Vd;v6taeRxw7avg{Xu$kh;0#faHsqCiNFs)3PXKmm$WJr~IsXm)&RLJ|W) zGH_QlR3i)=D3aT?l&o#r*}TBSLJTT}BE=?9ATV?_#sCaC&w3z7gxPei)+}N~#*9dF zuo(oRloFv=mHDI)V}R=+X6U*GfWaLZU8qhKA_t1IsE#73>X;WzT%PK3LjRM5IMT#9T@UVZAyqRZ)#G`aFX{ z5h2u4$LT6XV&oWO2*H}vFBelT2*AvaKt$6pGLZpHwL}DHxk^c>!Ezb9mtI}|uReI> z=%gA9$bI$`A_u!Y;eFmj2Xn_r(j@G<5EJLo9r5=GU0;-i-ttMY7`A%!X!z2v($#O2 zP1)?$6K}49?LYWCt992lRTCWUu0raCHed9Wzx{u?dilS*ZC6!x9mfXyi@e@XAM}9~ zS#=_$5Ktov9BKQ|;T!?F7=v*x62r0!!o;s?gW0j41Wgvfs5zoUgUgH>Hldg?@eq~eNv_CJuf2goLu!Yz-@cr z6XwPD!`$GyzOSg<%%{NfV#;lk;Feo%x#bf_d>~$-S8KwAFAeb_4&N)g3SA@B!O>L@ zR*P3&eOStPadq`z@#u0ogCY^B)^f4i#1zrLqRhbz zhH*605W=hoU_b|`B49`u+yM+#RZP{50;@V8a!A8It7;idR7+K4A`C5J9gDlC6cL%o zLkJEU0wtbvm7!Jza|2MdS_Q}e==t5m0gwopQB_qPk;KHSo?q#ri=j!6)`yYu=JfpT z;n8xr_8@iAFTC_h-!Bs~I>4;^;E-zprZ69D-0cUqT(bo4+9Ct_=3^5CFgKLkh%hHL zQg;OS$QEecvCUPmBHB?mooW-Wmip|q_HmY1cEcjbX-~&sO&&=%2SvjrQGd9O7b;i< z1zivg9RSGeCsl@V%g-5v3Z5JpQIbePFI^gzt7vb$mF~V5U;pCO-+O6%Ke;2)Ldzym zq-Gj-=yG?lcG7h zS=>j5FG2utFa~tytTIg5?|=QW_kZTTe6 z?ow1$DG~sQI-#i>SOwt7EpucBBC^1-4-mXscBE2@0!N6|Eya*HB7!@pn`r^$T=LxD zOd&GiG)?XfM#9pzEk#yUQ_-s8nnG~2d9foR3Jd_X)*^Y)wUHPRm^?&c#1JVEY1VlS zM?`l8Fd`-fHJGN7oK#&zgENI7>f!?;t7;Vi1wv*FW@cmnfJ`@jZ1V|(69*=#Hq{|9 zD}kXgbIpaBfzS=rRYc}+h`#NwwwqlZgN8ZHB5^>bzHRoRn_+*rIOs!aC|zD&c8di7 zFk(p2)tE?CFLoC(##df{MW~o8k=m+Xjt1&BLToly_-MJTvJtp*eDF6e4*wsQ z-2qD{npEArA_j2uTfyq*Pec$=1+9s^iHrfAfC7NbAsnCg1^$c^r>n9%***I2AAR`$ zS!^E{KLWgHCZdsEfN-9EGGB37k7l&ebE1WsT^@n%nZ!%dy%x@};7 zlXx5hxtkc6qnd&mxCVmMHC2}j{PM}kyLkE56}`-P+fsc^|FhTfCD*e_0Q$o)TuRLs=F}(-@eg5X?|4Q@+YmH`Du52U0l>K28u~lQI!m=8pof7 zoBwTl;8Wob7~y$t41M#zZhS&N?~A?VmRoN5N#sU<_i=|$6=~8Y$feO@40mz;%3}9{ zjyj3gEHc%;X&&4^S}a;b2spg^;bX-;5xIEInHjsL{nE={czSv=>?edysx@tFT3ht( z_VTJtos31qVnhIlF~-;}799W~IuVI00#gW)qbcn3K15c8Nfgx(xfGkGVaj4SyPZ;O z5;~|V060Z=C*oSGNNJkL%rS;CQ~)5r$iW>{)f7Z5b%e->=mus6=tO|%rBp;tF)A6D 
z?S`#mS5C^~^6ZT_UOu>YXRKwr-S&&rgtY9t6vCo!6`cVc6rARkwu#JjrJH-!td*G0 zLGxdl834IgcQ*rOb_a)oWJH7r?jKp|&9^^z9tUc=fLOOucc+%?_KlahO?m76lihIo z?0XvSuhJjep6`e0NjN${yGx4>@<63BC`E8&5aZ$yZttf*U;Je5ph(fpjKBjDQm8;& zFvi0>-J-{R_b1+2{qp7J_g>lm{Udvan0nXZI3;MCU~F$b&8P5{59@=oyT7=I*m1oF z(j=ka0xqab$Zl-H00!ug3IxS)2M-LaSe<;TOtxq%-pLBzTK>h!le$tl@A)dmN5U`F zG6V=D5_||cn!9l@B>4D2^W^YiZ}M8gkF@Rju6n?D;aA_bKlJ(?f4KSZ{da$-N$}z6 z)q}e)_uaub4a=SvEltB!ttX_>$0y!ATp$0bliks9_76HL#GcyaV%eIHIcGB>jMFqU zv9C3oC|his)=b!-XIbY?=rF^J@A0fr_e{ zLI6iIF_U>@U8E|ayUz<_tyx4u2uLo)Ftmm&rteEWDF(RppFEmZ)CNKA`Ju0_B@2uM^-Ax|S`=wcu) zpbu6n8)}NNU#;sfwQXB#H8*Aq7~_K5wym{XTwJ(O+xA^*?tJ+xhezva*sc4{-6La6 z+_h;M3z#P6YI@lB?j8}rylvZ<5>}mtF~;a>+pA3o$rWPX%M2TkYb3jy49RF zn+-7#QQs{@pv^CA3MFd%RX4QYYEK#l;+#LNZ+L~JD93H{N&Zmx0-s|!qc|C zbr|10ojUg>=grbDlOm?Olc1XwF=kGwZQFi!W-q05%!dj4xICSt+y2f=n-9Hz#qa;} zReHJX&-DB^sNFXYGtdDA71=a-BAS>)DDHtABfBMpkE@3^cUC4cj#dKk(faVtYnR|q zCm&9D+ynM)D4_>PkPtqZVB$G;9svQI5P-%6C;&RWxY2)V8gej$rOyr#y}$3aB^{#n{Z>NCZvOAk97!WbR0C zV~&`*RTLwIrY&E4JRZQuOVTt927>VDG#XA0oBb9bf|F#w3q@s;*b`p)%?9gHP_gOL`| z{^`EEL&LAVcJQ{W-+z4Z+uwOt;&Jb=FMa#qQMTrEC%4@4 zsi1*P0E==g$fn>^O@3A77(FZ-$lRQyuYb@zT`!4ELm8)%i@U*<~fqen) zzRq`7cUx!^L<=B}fMCwW*cK!a8>(cu>u z3+tKn%i;K-IXO*gi~`O|jkzcp`^QfHzR_1I`0PLn01WtGkNbYH*J8Z<_I~}^<>jxY z{`Z>UZhGa;mtHwOT#~~(?+$gK)q`)Gm1idn?z*ox=9Qb~;F09!CCNS4+@I!Ya^@;K zHS4-#Q=IY^G0if}6nVW~48u@UTrHN1rsWv6+wC;&IfhzgHw-aG4iEx`5P+#nRYW*a zN1=@^A%;Lr69t?CCQp{lz+BlF7!f?E7Ig*W)nYL_=Kz4aFZ+Jc^;cI{4ysl`Nvq6? 
zRB$Jvxx~*%W=4oafr3$sU~Zxh@h%wMs)4=qvqEQV`no1ui$8ARs|77BunKG z$k5%xd^dr4c@|aVz;m0y-RHjb{Cr6klS_ZCOoq?q5o3&IR&^SN-NC`}YPERs z;fMF{-n%@nr_W9nUDtM9t@Y{Cr-696>f6-joa;C>A;cJ?f{3b_x_Oo0K!8%}>fmrT zEEp0t+l%9SjxS75Bk1;c6R#g>A7S9QH}uthut2+B>?qirIhHN+9nU# zn2>oKr^s<{gA=xkMHkcg9?woM9xWflc>mfz_kJm_T>K}m@4tV!T%DgzdWBb1s?(i# zceJvR)F}=NZT#e`zxRLq-qj_1{ZFrgUs`=B@mW=7;25!*xM2)gSI=^OV|jS@7?#bl z?PAxAlX$MBPYpObWKjlhx|oawBCr!}pVXXFlbFDm%!u5~aNae$ySbppn`-8brt^6w z>2+0SnRpP|hxMoo{e!OV+{Hba$<24&*hS1|fq7|VKy*NJB6Ks+YGw{ZfQ)3Gvs@@@-z2crep|u5eUUpYh~t?xD9Z%^CBJl zxVZYAes|g&{emr!6HVZn@=_Tb^^X&-n4yM^s$+zJ@LU9G?8m<=Jm++IA-v5t|gD+Oq3ofUB!v+{4A` zzB%aI))*l$9v-bfxVT~nyR+ShT5@fAJ~}=y9|HzaUoX~Z&de$HB2ra}NTkdQzPaR5 z%#0|Er40L0t6~f??%b8ajU0(nv#46&g^f!!W280)@!a z<&=e$l>-BisB%nzU}iVZ$NbAauQ%E3EHpEVUYR4J0*;7tZHoc)#fT@wYndtlxaX)V zBv#ISgS-7^JLE5a>1*fDe&-AKU(1Dm^R2&i;G5%ze|*~4&FOA&Cn2K+ls&~^LNyEM zk;r})wq>{c&Kbzvf}Fj%zy~MM(Eg9Ns3GL zvJmZiu@s@#+HIBkOb7d`P769ZB2aRUrUIzw;ve&9K!66W;LnS*h6VsAuAQaaae#X% z?tguA@$|{FesNs3V{G}&w}0XJyZvdYgPFK95yTiAmfOoQHKA#_>r>OjX@aI{+;TSV|=#ih(Go8h{gV znF5kcT;LSAEwp-DgxC?Jrk zyBi|%?9q=%6x`9=VvIp-n8sX76Wh@B%)FTfmOSlq-B0h854+d;TZe2lSm=O>0*4PM2M7T|49twICI(JF6SbS43L35F-Ksx1_16!ded*8E5AMA7{l7%z>hRv; zLiBul6`A&NH5QxpJ6&8m(yq0iaf5m#m0{U}m*eRS2<} zb4TGOxaB6YMFr5@UB|&vW7ID%e|Na^3yF1aH2|a3Yx^OG*-i-D8PMi6;rY(|dfZl( zkeis00syF}*6dncwWZ_pv&VTq(f%wF9TPe&?T8L^E01ubzX~OpDrhV|rE)Pg8hF}o{LwNQZ^6-De!&CT#_HqQE z=8jGdga`;|@RPeu{2Aq=5%cGhH=mce(~DYxPvtj7Q_(u?xfD|+Go4T6;b+0-_O?Cn z3G-r9xBp;9@r{=P0Eo!*x6tkW@s?X|x#bVw$6p*xDnYGPh4QpdIRiJLTd#xduX4_L z{qloSrrmD0xio?WT3z?6RoBAUoSk0|xirX$+cHduxH?FS6-|4LoLcSz#V*n`6$TUl ztx`$`u$Y*b0Z_pW9Ehvb?KHR%Xq__ym=NIFoOJ$LL<})9vx(1cq!k>I04+~r(==^r z+yRk9AX4%m0HA7fVI6~+$t-G`j~=sw1QNJ=N-Z;n5OU5#p42K+45@u}eLpP_9j0OA zYUX*H^W|>4-S77MeJRDUWx&8es<#~Bc{TPXgLFRR+{jT8e<+6l0mK~u8n~V+keei) z`FWkqyXT)1=}61AT?AwFs%?eJ_z&NGua$f{K7IYsOCLV@P`2;m!D~KU9z6Jh4Wkkm zGPg^l6smxUe|**REkC{d&>`gWzMLPx&^(Ysh+>LGk&F?Ild5@DzYMG$=fmg+@nEo{ 
z8fh1g?cu9S>Q)jsXBl>dr&gDY?ldZ+LnmtH6=@%-VW)ruPH05H2muiSsDc`(LItSg zM97J$WL%bU0oxHjB;++TF{9KS#N-f+n84VCq*ir+@M9dQ=LP86pV=H80NjEkY8X>M zyXQ7Fci(Bg+1Pj_)A!D<7 z0#t-aidm&OTEK`DMP%3wC5zeh%9q-nz!*3NjxoAjFWhFXg1~@`2mw*t!5soK2q0lf z3CXF66ro$BtIZ^gVAtboz<_{`-ZqJu$*+Sk=ZIZIWa0?!b53&;RYX;psMZ=|?3xGw zwN`jOE2bh8xNTd+InTzn+ijkv^WOB@C%cc{izQ zO5yVCe0{WDu2=oC6D@%l9Ab<`s))p{?fc$CDmibryF84sZ5B-e14~2>5;(>fV>FTZ z(VO=BcDcy4P7ZCL6rvYt4_B#cv(%?27x!OTe(Av%|MFG*w|`p;wy966?L`1o0wD-W zb3@1w{PuYFc@r&65j_SVW&#h4#p3g!pWDLWB%K9c6a3eQM~4WE?iK`*?vfbNozfxQ z;221UFaZgrTVe=ED9w4(#3fzj9UQ4lT}qP{ry$s(PqagLd_Ae^M8 z$;^djC`>kw5q~R~JOkUaLHx|_8E53ggwmyl$?eiDpBM>JpmDV_i~V<$*uTFCZ{?nW ze#K-MxS7a^i8*JzXYXtbi&J_(yX?RCbauST=x%h)a>ToLGlgkb^QGwvC$(-IF@@M} zrb$E@Ps~<40g7rewz8h#g{2KnCHCZs@1Ws^|Jet$_=*cJsO{mE$=nO7ciDLipP_E3 z-#ZuM{BzL{9pghA22V-`lw|fqnU_XKqw&8 z*1`(>_ubIV#)O-qq{P7XL|HKm#4EAc}9f_46 zJy86wRe(Dn7yyu!^m&1Ak7eQbS_<+<_IW^jC{!v;r#e+4->;tW>n9-w`bvuL%ZD0g zDz6hnDX(?9LmNLYHh5OQ)mu7pgnzp6Z!`92!pgT1i5ZtqR5j)fMQIfvP<#hsQJ@Q$ zK1|`g1*D>{lP|S#jFY8$sXIW(O#uLm70?<(XI)E4sNlqz8JywFFXi|9bI`JG1tz=Y z;`Y#L^@iy(EohZwzjDJ{RZKkv_!@eIoqfr@Wz`?oL8NdDs++}}$nAl2^Ne_yoq3M= zhd~}+pvBTAeRrwkg`BG7CfLz`3}J1nub*P9ZRl8t@TfPkx(CkB-SM6Qb(;a38+ngR9ZMhMkypd()eL`{T!OJ|9P~*C< zeFBkA>AI>-sWsmw>JQsIKipi8{Cm0=_1|9|my1CT&U*e7;Nz4B*s^jUGfIK?`V+E4 zm>G-aw3*${Q$aPB<<#zetFys_9MKoy@G)JV$H(esNTczT(PPFjuf?-AWEK6jNdiTu zl@6p_GIm(?@cqzRyZx}UhpoB6iKoMBhNmkC{7rAH(E@p@L>LdTMqP}gj>zfw{J@Lj zsE*9;-!%^#?ffF*4x+9LwXU|@{Ko_>PMH!F7BOA2@AIwDQ80ZCJluUuj|6`Olt`{-M%Vh&l8fV&-St|2|10 zZxuf@v}NK#@w!+Yo=!2l?Xzs}a`b6EK2z?vq%f$72{*S9&o))oFh>YrUnLvEg9+#! 
z@9rnylD#w53c#^22%=U?>Bu>*5c>x!g-i~Ah(S!!S86@cqUQMM)zeA3l7yz^8=avS z!syx%;TN87Wqb1YF8fBGy$So9qjmMQ%Q>Hlu7Zn6BJ&#>#=Sc3)c;PWAb?e{v^Bof z4DYwOSGGLRAx4x(>VF%4aN>T`F=ffw6aODH(8vfNF~2e31vX$h+a_)+lt;T{lXt6d zcznIBg)2eHk%3|P__JHvoaLOm$$NUjc*ym?4mrq%xt}4=Br%V$A5b32Z{)JJl79HqD&8pmri2Dg2&%IJ2osu{lofi z!OBus&og7J0y1=src~XGcdw((C3Am8 z`^H*R=wx zVAJ);yiAB(as>p`2-_gyPx*3wOqUcyw06D#|#xURg`TvP&FwLBz-@emQd7yYOhvs-&a@LfsbAhTTR z^^c%=nY~nRaMLTN2jTmQVT^l=`%a<_gppyn@aDDVl9>(>Vm`c{zL(0t;0q9n0x7o- zSRKt2Hm{4NJ3)(zqp&~APT4z20RT-|XgxNDXth>z<#G!}zMwiTX^qid z=uMf}xi6#3P6&1Ko&2+nSr;Sm`9{_;Jt(9>O2Ws1QZF}Epoti0G<-DFKrTnDjy|Vx zs|tV74C^?JEC~bpsPCDi<5TXSjXr`fVgzRY*z*BnVtQ7g($5+{rWA8Z(W?5on8mDf z%4lK4tOX*%AOgDiN?J&aq~_4ScAN3dY_*(4X=1f;_WGSAM+f%U*`^(2Lx1>b zRF)R=W>f(-Bb!f)8Q|gVh zsZ?M$E59sdgqG=j(65j;Y(b5iq+#n_|Dajjr51neHXWOq6H@+> zhTjJVi=<#BFsUd|v&YA}8}lZR2729#Sia8dwhVaM{TMWccDq}>?=LZCpC;*_xJ=Vl z%hUOAvsEpgG&TO)JXh)u)a@m^U3Tk-xWY{V^pt*B1UBN(#Sy$zkqaZfnDy zM~cws^PLfg!FgQGi1z*6jteP!pexWy&{lr_0Tr-V^GHVOh7{-e_{RcQdj=+`-_T1W z{w;vaz>8^(EOh*JSegKZ!YA1Ys|n>@TwP`^9XzLB)FnKL$1o=x%l@aT^;6L#qB^gu zE|Ph%%hRPsR7+_7%2&foasrIvv3xko_wX3mVO^iM4gGbUEhoMBZCryX=_DL~W2Ynh zy$B7HFv8ORy>(Qn#==QA5FzkUh6N8T0RZW~b-c0GWqytGGEp+6@zUH_93Cbw>y|C% z$E)=Hv0;wlsxMJgd!|eQ>;w`|g6aM`^q|L5-huWvcMTTMFvw*x3&4>xPGn+-!AJBPg;Pj_ClC23IM zmZ@81o?IgaiW0dx8&jN%las$nUb*>z!3I4nt?j^r~6PjAu( zHxQ%yq$>^UcMQ>GXDBrC{OtB?Z&g;*%9U_~!@R+=Fs(jAlhF+ggM)=yV zPW{ZzRHAloL@hDkgW)x z5K^g_LS(`4hMeoB#dk6BfJxiMZ>Nxof@viBQv{0+$e;vHVA8uB$gxe#&iR?-lk<7O z7r&s0AILHt++Uh?hIRy2jTQ`!rO<;YD3yG=Etd~9MuS_HJd;;WRGhLiY;R~Bp$1H{ zzMAN5dMF455Gczat*JQThk|H_6@N~0Vo>Se9$4j;*o-USZh$~snFs~k$d|gXUz*hW zk?!$fXg{~&j@09z8jkqMO-6Q(_=JFLmbj+mZ~%4>^L+O5&t~{Moi9#r`Fd1VZ0sPF ztue0wxXCPvILhgunI~!!jud6&i9ZO0oACfUo4-tpY0b%9cjm861gFO|?g_<*ySj50 zTT|uB&VYwVY(cUhLlGl`ltcTPre@iTQe`$n5XLsyg8gThe|qX*#IP@%?b8Y$ooASHS|Mbu3YpPIGeQ)4IdVJ(zc=L%Qy_r+whsv3hDOxNpO|?40ME zD*ag=_oX2hdv6wuLM~RHeC(croBmNrxO-03cUF7A>El$Kyq=DD!Rwo}k59p)76IG4 zv~)G>o22S(8*RGI@OTCF4v6UC2QA0qVjk!!Omd4IMm_vF 
zia4H4nU*Ab{+^q#MVTa-d1K=R7OfpH0`Z{Bt%sOA^ahTzQv3**l%_V`DVe=ymrh0G zRQ2UOE=R>OwcFeClmwMFa4OHbCEYVfO7&O1zzV-##?JV^_e^4{$XVq1DWZDnz7P zH=i(-tY1w%yjd&1UhL0gvOt%kv@J!JKPZzycW%jOSPHT-8sS&wS-MOo#GSv7sdvQ^ zztn>_yF1xm_bY|ZCmJF@sDg278^zW*)nXL_H*m4wj2xGNz0gK(?$ z=KW}|4RX!Lfq><`yoZnDgW42BC{un`G_S@4nfy<%+10+?vg$BrEz%sgT4|2L{z*>< zK#rDz#(Mg8${dH3H6vG>r9emdvaa?DJhtREl_=|zQ^Ai^-SqEH1sP}{6qXhI&t%`s zmW8E4*vnG^OT*luSRGaToIZGu6-odgd&%Z(($YLn!q0*}1S3t_-cbfJVX_(NQF5UC zhn-R@>Og9|cJGS1t+KP7jtBmk2*tfjUpNAlAoy7c|0S-8?xzFmHKJ6lcBFWz&lasJ z8dY}Eg2Oi|JEYa6ac(8jdh`7X5Lm0!OKHyOk*g$_Wr}+u{~-%5DQ<1k)!BLRVO_3c zkQ7drS=)}Ztk_MO?Bg20HgwchL4@(f5Agl1FOs0CvHr0BPACZV0S2b#i=RvBJkxy@ z6N(`oDHZmT1)zXVqAdWaE`IkNjIAZjxJ`7F^F(PE5`0?sq+-@u`)O>WX6y2e#N!B3 z=JBqhQ?g#`yufB_LWti-^-EcX|3tA@pB^WGNOOsROo6x$j&4aGCpOyihI6Ft=x zp*-}|*gCS5f+Gf&j6FWo$85`ns`_fFbT1;?dNx6v%Ue;ijK#voZ2CGBJ03tBY74D% zey#Y{?wR`*T80kD;02p++to~^0qCVQ0W&cMD^QOuo}Z_=I2q;tD--0Qd=NVd;sVGL zQu&SZRCBS!sFI=x0_RR`YV@#V;$PFvh(ini&6G5Yk0&>~q#(>RIFV)Z%}J3z=SZ78 z^z)KNeQ4%725IG*-7cJtgj=2N2|MPb z^nYb$RNiVXdM7$V`$iON$w&pkde_cVi^D%R4PWCZ+iCF>DxPrNseElqb?K@zEN8DXH=!zyfo z>Tid0!H(}^!PoO%PWNZ;=X1w`a0v3nY^Hy`D?WFa?Wax1Y&j7!dRlowWRJ`i zROB=(Xq^?#>||z^l;ac<@6^@&`?JA*i6zhVSPeOAkCU3g74O5|8st$y`j%mX2Jjhm znudmd!@#gtL;LO^dl3T`01LlmyI<}2w7krG!`wDOX`pxhN1(^gr>1pNuX`wWfoMea z8L~*eB&1%B{|kYe5}9kdGGqT^&obg!JRcc@Df_<1S8S!49kH1XS;58nvBt!*+Y*7~Y5% zV-v;C;QF^!aenH9LOEXu>DPG;fof7M73+SuMm+=%v@criR~AU98QQKj83u#JN1AmC z+6nAoF=g?evv201oruM$VX`WW;SUTV+JdqCBxlZbxXi?b${G8YD2s{77Yy(BR^CQ} ztZX_=C2-WfFp9WI?>?N9eb zOt1dA>9Dj+eP;gj57uV6Ha3u`ixbp~e2|Uoy(W>JlL1rWyVfl(IiSK5 zPC#hpfDl&KJ{~ixf`TXxq-L$qlx?S-X5ea)p#^e-CHCm;VL_Y6a`bHTLXEt2StUmZ zRL=5G2<6I}0gG=pX^qu8rJiH(0XNNtU-U2l7Tfb?@P{T59h}={G3nvd=|Qz>c@6ZKEb(7o_-osWx+UdseJ}%SJU_`NU3Os( zS85DB139BUT_`RvkTBOi_gFYmrLGKJHoM}Rz6MBr+p-^zIz?^~RIE?di)*N1z{$iC z6C3{3*ik{LLIJ4ML!8krB|via)6&I8m8ygO{o(*wW;N|q-L$Nki(CHb$Ma+Pg?c=B zn|dyWDZHwB+ER^?rs?K|LsUe^yNLj?rWLrG6>@mtAw9jDE3$xi2dR~r6@ebaBRQ+? 
zLZO4L7y$M@S?v1d<8%z-X%kzk3UmM+pO$yESxUBs=bs(L4R6sHM;6V}br`r;cVAzX z`q?M4b~F;Ofcm;v9R5f)%l8TH_7rY#SyS@w+U^JV^%WeUcm}$e-zQ?`d7N4P>!EcW zY%Tn=r`IkUPfq9f_^}aZhGC2TyN1k~ z<_3wi$-T^NP5Z4TttY&^gAj`2hpkdZOTtw(UdFuK2~>G;kapwRV{TOOArGNA@9yso z$?cQN%=ntme$T&29aLJhuQ0cu-Nn@~0GS7Lv_uSaCV0rT7v& z+kPdrd(|&=wZC6+v`}}o)cge~i1|I0qp7vWK=)S{9kB4BcoUs3%J*06-BsEhk*WWi z3(lEfZ7gs(z|jeaUb9DsQq+DHkLWd(5n`GM1>~NA2ujyFP{oM$#E*2u)vPC*BkuZPSp0-zzUl$&i>2|*+|4-;RK70)FS2ZU~rQd5^l5H z0K@w?>B!jtIc8v9OINFy7xEhD>~?B8KT$^TREoS45>okP<4I19Eq4GYaFOdG%bXQb z_o9MCT5Y{^<|)8#ca3ky9nlpOY%uq}mWqc`i4RH6G>O@H9$qNe*F1b`B3l1J*e*A$ zy8sB?C@U!2v#P{pa&sIqh#7j0VUNvJ%H@x4<%uba0`YJ__zK#Gj=0!2i1pT#kKD_6 z=@GWk=jT3qJDr1qQTE_#IXZS06P`4)q`9dwsMxQJVO7(vecircz7CC@O%08w9t~W8 z{-wl}+fwPekM(FA2B3LZ*D#c^7T^oPns2@p3n~oSRokEP2yFb6Oz6kIQGVC)#`usI z9=#k$!eAbUQ{JsZ@ZJ#qiv}R|lr51mI5c?=gzBRYQ^d@!G%Wyt&Ka9? zsbtxE#1(%|mz52HLKPU$n6!Tw=)c|$s=OV^kz@t|fv+_te%uLZBq_uJ$DD_Uez{Px zh-lpQ*gkI$w0MID9RI-MKYPh^R+sXLdmhYF_&Ij&2P+T_4ecdB;y?a~2#HSq%8@fe z7ur_0I4JHUsWVFu%ToYI%7H`KwwP^ckz4M(d=1FeHLjp|vCYlS5@u}B%Z2*IR2=_Q z_M&?w@goarW^P_FGpIPzOJ(aEV(oQ;2uOIGUhsG%dg{tMN>aQ$JFS_lvIsb=I}SQV zH9Ok1S=qBwjg4D1nZc6P$q)-LCxvAGLJUC;LQVmNaBSjV>f}(H3H_SbLs*5~9}8P1 zgJG5elL{LGjb(15^4VgO@@Ft&3=S{UD9c+L7?|SM-GpDhV^saH{jJMvpM%(uWxqCd-1@-`*Af- z5jrVk&Q}|7j8Svn@|4o@(8QA;_^Kk}!87H-xxxQ-DQ9+$Z0JX(sp}QuVe`xn>;Ol!L7MjO?0*@%Hkk6gk&#m6q;A|$ z_+swsGb%#hs#fqrfUxf?A6(@76ngB0he|-3(|kFy%__R;%b~=#Zv=#&(5c5fZ$S!_ zS?u32aq`E)q#JLp)*~!sL0*ESvbGSgf#BC1F?iVlb9T^}$5Q>`;F;rcahYFe8qv82 zoog%dtfPa62sTmkP;GVzLIdIc+Mka9ZuLS?&dG%mDu9v!x2T8-m?Fmr zyb~LF2<2;z2qIkM0AKR1&U5)3qFqsTf_sOFW-*vi-l5)F(qO&OE$8wY0cT3 z5+{=Ap!@U}ggfy+$s#)lJBa8--3iI;MP3z@83Z30NiqLngRcBFPy-4W+rg|x0~Cz@ab}c zx!~}!-x+tmd|5fGt)eiaYF5Bg*bVRC;$XL$cZ-44xt<+{x6MHr1Yxf^WjV=xIa;ST}F#S=4De8B3sT|;F&x-JzORVTgead|cX+g2AI;cp3eZ!9~@5&+@GT4liPR1pQ~)!@i~8 zYmzqP5xkRcsF2*5@4&aUchftgxeMD3nl<4==HduUY!=CQhjLIY$OQQA;kxfQ+Qft< zTm7TsgW`5;t5w3nP!^uxCgX~PXzN)ACyWUV7dB5)B-#(Tr4?mEBEtC!x`+J`hda0X 
zJxVJ*o)H2g&aY$X7+{+(htDKC4|R?>NQCWu{}Rv1s_;9zX}dRI7a<+7e6XB4Q!K=SFn#P?+ON^h6v^z=6aknnK1e{x> z9aE!M!2g`GMHEXqHwMo=t|1{_4AehnD8#62WJKa=Rn*DL=&oh3V zRwd%Q)ML%DFh9or!Ju2r#4OLXH?`%t*>H^03R_BkGBL?_B7c%3DpqXW8bBA>ma3~N zCrLTk7L!|tE=Rn7&}7YiBIG-_qI6*MK6#nS&uj=CC6%62Lh3^At01l}e0QIFeiCM` z#=^m!&kKa~Kbt29k~^DW_SOru311kem~VC%gRJ?eE5A7GD(?P!ohRb+bjcdRalX*| zl!u8^t=lu`csK1^U%8e4&sH4|`9* zYFv08sWJE5D*QcXF%>H-mZrVo3sHWcPDPzQ#(P0ZZ4k&?ud0byXF`a%$SYlc3T?ZS zi-o39`31WTwvWE;TPt}Mj-R&7ECxzeHcB5*rjq?R$sbNEg5~%Hf<6R4;`a!*WW}_~ znwpe^^l)S$AFR64ZB%zZ1kAk&y5H-F*zCBIucuOQR&b?_7qYOR=b2weUh}>eB`|%@ z6drnR*Z~0G0RTzF-&?F^r9-UjeeCCBwI=RMGnk)FL{9Hsa|5U6p-ObgB5dRa< zikx?;3f_DiPcHUYDYhea^tJmtaSU7CeURjMQRLPlYG;CtLH|M5jaa3W5p6m z&Tts5i)g>^MJ%R=XGwhi2QHP-qNv0pwO#0Ytt}qnUZW0OoW%g^CRY@RQHncF4S<4m zlrhL_G_C?@`^yvCSam8Wb;v(|I+41h5$MD!gcCAa#pvL^}j*hUc-J=Jj+k%DCAe7`} zfidu=htlGK)$)G+>FCWJ($3C?rw5vivI7P>8GJIxCT{0r#4z4Il`Sh@j4)(A z4fU)}E&twq)q6dS@C`XTm?^pHV0k<>32}iV-PIq)J3fj&Hz?khz~|GLnA`Gxb-PL4 zcGMOG7T0)S9I4ZtZKm^GjLIh0G4l+li{T8}%)|fnJcXRP$a6`ufVr7N4*+&vcrXs1 zT*EI-DV_6uIIDYO&NCXOjR#Oc=)abQ4jJtblz!xz1|vEOk=NH8ozWa6&s#MsrxN_wH z0qi`3a-ZCjb3_Uph-GCD!ud4Eja#`5fHf(D;+#>4tNWy?wo$om)vzoo`r0ks3Ko{y z-OdSzOXt1R2e<)O>!8SmzM_GD z+VTXS%Zw%tcD%X%xQ^SVbu4I_cwX0f84ZgPj5oAtf9#*TqP2{(47gpI3chx23;IwZ zdg`!Hx`pnmn82}Q_3?ei)2t_v&9mC&!T>YIW|g305tcW3@8c2SPNFsdi`6Mue#qbe*lg(S^&I*RGCdVe{%XVz?hd8*txvD;=`NF$fs zuk|BAXXqHZF=ugd8CTGd_7$q z1FdoEU6njzSLVZtpfNnodbb9%N;B6t7TuT4ZVL}$DCELm3J17Oz|P(^6E<0@{JfoE z$Ga`)7Cy6GFr8G_TSLFUQw*_APJsV4BlC>%Qa|Ie9q*$Op0b z*s1v;-*sZ$Ect`xO10;JxQV44bRU2-5{m^LAL!Arz?JG#X~PsB$z0t;i;n%%kXM9q zNMoVCjD-a=PE%W2zOhBBQ0DT`MkJ2rTwk@6Yg_-xFYO5NkP@;EL85r~meU-=jf`15 zH0*g~ERJR-l~J?_oc}87&QSSh>>&d+HgBO1(E?7?Oo+_q!4%c6*B(3P7dv8)H<8Qt zCm|iNG^y-vDCB1xu=BdBHoYKVCTe2){Mt%qR5MJKvkn(^b|AHV{}%AlL_g*4 zr9L%PgnvrQMtbQ59C5c{ra+p{thVh~t~Ezc(WThV%wDoe?N%srXE%cU%1i#P?oX=+q1|L;kpX zfiGRaROOewB`|JncS|}7#tV~N`p-TQ$bHaccTY4sx2mNvoV?QXBXA_=W2IP?vSOYP 
zF%~OOV|zK|@ZgQog4sXC{xa~pZV6|*WQx_k(k^u=tq_~3`L1>in{W|y7#~w=Sy?w;+8|aa#*gUK48<;UddHEM zz5uW`4n*i&mMjy~N_@L{237toin-ReA}m)ibQHA#jRyg^4bX_y)w}>@Qwp91`DL6~ zU;#>igEs&xYeImPiK}!HUz?wpMe#il&+5C|$e(|;pUuBPI(O&hCLXRXrF3gPTJnza zjLHOlthe}?!)>JX^GLG`@UzT)r>0m;vRO&95X!>zRcqaA#$&NB32m-pI2#wYTl&Xv zD!u)duQb%zo^$*@(p$FmYL3t6do!0YIR~oEFW~rTp7(L>Ta5!}&x<3(0Wu+ zsDBh)yEKWV5*EpDuoytrH-`~766H|n{~m?Uka`pfuyO@dtK!=(@QC`0nO*579H^)_ zPkoFWTii|O)WRTU@_tP|4p&?}#0C+B-L-mYg~G~)I)*%RflRXUp?n?{Q|y1S{rR4i z%0UuJ!Nfu@6?IkZVLbdGK*5;dm^lNoK6XWeR&mdLvBB*berP*dhQf0u7WSx6Lh!Ex z5L*H%i$I6>IG{aPvVtge2!zRu1;ILLJ$9)e+}Lxr$KN4F=W7C@0R%AwezQcg(Ad6L zs!-IB&+k%sZKP8dLk9Jv^R7q{2gR3}z|;xbB?#fZz}mHsb*jK&b+efy%{(`FcLcc2 z@fQyjjfIuu=Z*8&XTZY+1Eps(Gz52@GTQ`%`?b0T=t33Dk>AR{!PW}lM54&kx&B+; zpv_l%iiB~pj0o|^E6KLOp{K=HmaGM+gGgN0t8hfH-~C1B)ZNMMM6a;g#>vTvwwXMx zd5k~YO(Bk-dp;hoMGkB6z?-#okFdT|tketRMff3(VqxVb4+A5krMdZOfw}FgcJ228 zbv1UXEWfKHljnC+ooI3yB9ULNZC|9-RLMx(_Y@-Af~W5-M~_;jmQDMkTup6uyvMZP zTHyk$umH0dp}U8!^I9*bnNsIPH0)+XZtCl2+4&+qo(to822?&K%l@b{hi?LsMwWit zuU0*d$MQaW4tP2V@%Mkyf=#)Y%0x6Kaz-XLe3vAM<~{9L+x&KZUT+Fcyw1A09M-v> zAJ81h{UTXUn0GYlR=x6J`(#f}wd#-e58lg=YtgoH4u^mH-&&eY9_Kt3k-Wz@j|Ut5 zQ$pO$mn^!Z;Q>Cxgi0QT*%iIZ3~3?W|EMs`3F}SXd1ADqV;A(Er9;_?v3;599PKsx zKv2N@h2NMX0Sl1fUw|)e+;BDCe7hJV@zK?9$B2&&)owht=GY#3QoOZ0uE_2i^F`kL zZ)pQqzv*kuUkkhgPAR|Cn<@j($@3d*_Vv@u^`j6_ePs0X7#u7HRdoUlb%yvV6#}{y zpD+Y`Qh&Lax@C5nEeBnRGCVK&{kXmS=KdSxumGoWZCs&b6F-SuTDH1KKl;6fSKMnm z@Ws3zst=gf@tQT)x>@IZDDMsL9Fa=+XW7R6`04=Ov7DdCY$EnJxfcD(r_6=dd1$Jk zgdJf(dfB1K?DXCGpvQz4p}kb+J44)MAkc>L-j)AS=6_aW!9i~t<}Y@myF6BnLk4SK z>uT>YV&@l48u=|6|5w$JP8|9W8df-=r50^PSaY`{>IDE`;*!5oI-O+Va5vnG{XGRM zR@Ns&B$fKn*Be#Q(Fd)^T1sYB=1Tt&6Z69pcdswe%dy$h-$@JE%nCU+eYza)cy&%Q zh<1!XWsZqrQ(tDksP~yCjcLJ<773A&2 zKs@)J$YI$oApO0``;V+WZ5b1BL!!^OnyQ7LWO34q6#zaT`IwTz)JkWJb~XrWkRm{S zX43L-D6uhaN2p81f|s9faH^Y+aIzO2KUE1TuH31l5HDIV-(-EbV&9{@)dMY$=){SR zZfzP6FQ*n9ZDq+H86^UWwvn~uw?FL6btf7DVj;$+ydj)v8hRO{5p7st9MRg42 z1)3_pgO0JCufw}-QohmgcA>?oimW?juk`oUclbwpNKIC99|caM1}O!%nn)32ILI%QU?J3+`q3I! 
zKnj;wnuyOz1sgLfQY;-d=d9zJq*0f3P}*Ex4&VAYAP%GSXzmnuIR1nEB*x29Gn>Lj zT6WOa7ehppagB8Z-(K_?HFr5i&DcTEt~5-AVToQ{``qXN;7~_8#;hHkNOkl)7Iq|O z9sgUQD#hW~#q7mz)r9pRR;g;jI)5SY3!~Eq%#l>A0J8J1-E!%_P;`WxJRfU&YzT`D z4o7xV>K3*Lk-e~npi;0w-UL4X{0k=%p4*R_D+D9VOhDvM-rl6X3VGGi_c~KeN(IhG z7CzkR&x-y`sb1EuJ~X(eZLYRXp8xj}gK$50`ob-G*hN%oc_?C~;_NAQyuWg9-_PG3 zzCtX0Dcq;bs+<0u5morOa)prOKJAFWSqb*__1)alVJN8zx_y;*x51Is88LZ%t;x99 z8iZV1WBCL#7Y0psF#z#8rC<~+;6eBK=RajqY>J)p2{NF~_4GyFv^F!-CgR2Q$7KV2 zU#R@vN9rhDHt)4e{r)4yXgA!PXBcD|&DGiM2k-mt?qTNtmXi0eTQqaoNk&&V@Pmnq z=UBw_V7G48u2%?Y?%JPr1a(r`bSxUac)=C351{m^+wz|C@$-)jt9aa_HT0uz20mMo zdAE#$4O!Ck9zkq~r$4mI_uZ>KA^u`0me9IZ5WmdC>9LvgLuNAGzq)Noj})%}-|vYo z3{i?udOjUTy?~F~k7_ej!M84LA;%r1k-jtn3tM}Fj0+kiRRfI3*_0ZEhhZ;B9MmIc zqtZp@Og)m5V$N%5hBvrSBySWEY>fv#T*4_5FQCcK%E}OkR5D4sh_@?~?=;f@IfP_< z4UC(>j^VEKEy>`o4`?A?P*+rmo0!HPOsOX!g>ZcJg`clAnP2jY#A~ainnCo~OS%qi zl76e7uQC_}udHT<#2=p5PO!$;JLXdUbDJ1~lNQMKJ&S?Qnzwx>h(6lt@#*1Te{_^-g0Lk`5I{qy=rc^->yS9b0@Xh=vP=HrSx}Di6l;j0gv18#{u8K|1!V=|w z{j0bVwR4`oIHj}@bDigHdla6!x}}uyKkhJ1e9r8qNw-xMk= zw(%*$FqlY~-;ix*WIcxhf}2Fs6}fnXc^Hl;Ks>wD=S+VOoCUABwxo#wLo(epR7r5= z-*97cD_rIu-=N005G_W6--Ze(=TxNqPEkm^kDt3BlO#>3e-Tr`k6_Qn=4J_5{9cEW z1t|$;rXO4qm~1738u1GYYWMGScXPxEm#2@IjL^;}FC7FPm-d(O3sCw?L+Y!tMjIK` zT}R-@=C=OrUf=71Kg$~t4)@b)eMxb{5IzmBxvBlp=<^zF2w5E9dpZt=;YrEoOU~Ri zjD)gw6VSBef(J$NYiIK7*4>iQ4KqAypsg|}*+;X~+8$t3;px=RQ*-E#iw3wzs;frc zzXA}C7e~iFPWPIf4+nc z=}fkGWvj}v%H$wG$qESDM^&y%_8@7Iz5~GIDchA@BkdZ~)mOGx3F3pqZ13zvaWNSz z?jKaG9IZ$&&Tx67gINLiKg)Kapvi|)GZ7J;R#~y7iy5hww>_t$!P8Rh?&e9m19h)< z7;q87+rMbS^+xJk=DTf}i_7+eG@Y8};oim|!b%3IM#$48yum0spi`ehmO0XQJnJ1P zY3WQQM%{3=X@%kW6e=QWd{us{U=^=1!A*gv-i-Z*6L#WRCyzN;e_*3bB%RUmvK!ix zQU6?kF9VtGLx1kO=*-p+uXu$e&%5A6mG2tGA5~91?SPRWVbOKpJI-kK=S);{r>gaF ze8RxT?T?HASd*@oVS)2rz#6YOL91; zd{wrF>s7EjhIAt_JvyyJbmX0Phc(0^qn}<#;Qk7>$$VT|-KBIrKZ8jvMCF(+3u!EA z+6}PB(CMnWsW0CT%rtivhWmL1U+*pH9=hl4-glf>T`nrRTQ=X;ly44R9+bzr&j=fx zQTdud9KL~Lr>;z+AJUeM>C;n_x3Qk=TqEbSvBNTSfB=H9&#Tt{G{BDTy`m`cuQQ9* 
zm%p>{vxRW*_KAv&W~$o05(S^hFid|;>LDpV966r5+vLep53cgykI`yMUR}h?;4h{~ z8V=>pM;Dhyms-RE$d341++zTMe9^>z0}TOu8l=GCM+>^yoYB#Gy>e0bX8*+yHJzd) zBQJV``T-Y{&oLb_U0*icWM0P3q$1NrBwB^UaShnFg_%Eim)>}+q!QmXCpjakUS_P);>Hl;k6xi4vUzqMxC1;EoNKO?w zz-GhDmrge=vozTi-(ElsgPFQrEdJaVJqe&~u>1WW0vb9obcB|!7#vGMKU<7MOCxVHyg zuZ}7fCFDb%i$xkFcVhFTYvFAdZYaou_j(%#&(DIzfKNDJ|6-$RknwlnTc6e` znbv9gi92FhDKQi|=_;)qbZC;^PGLwrd~U2R?afy3wYx;24OIiNIHc=r zP&aL|Z|9ktgVDE~+WUe9fSLX3Hh9;ZKt{4PbU38%h>vihz-5?j9JL&aMkNAp#W3UP z?0;c^Bb3Ie7~S&}K?VxS_SnRW6%>j>TL4*@Ahq7bpf1K zP+-(V@YgCq9g54#m9tr6sM)7wCG4z={RKmyHrcR!hq9=%S@ z+<(C7rR(gUQ)x3h@P%)Bi|4hkuCDG^wFPgULWj*G)x& zxEnlbCm2d1-UeG8@3{jX8z3@ISjLqfZ)Y3#-V#hMxj<^C6E{ zXO>bSmx8=G59p`eS7dw?m<1woG3Lt|dvn#Rc4ZFlBd(m9uFhpd)<1^EPV9$zdw-Jlac_?jfJNm zj*_E(JHH9TsGzD?+N32w8)FX{%H#Isr?yX*e@-$ES1ekAsjh@=j?wT|LO$p zf5>tfjE{Lw2u2yOhiZ6@#tVvRm{6?*f~fr#d@vpJ!->r6-jfRvJI z4IzXO)D;n@u@szOSQMzxFoX3-14cdO~|`MtI6-TeB$e|Ge_ zu3eRDpUK-#g}1(`3Fp4`J3YxBQQRKAj^QJrp{Mn+#|%UyL`MgAL^PcDq)$JWe;iN@ znZPME-6Xo*jZlO|AMa&*+8%h!NFd2|A#%Zc%XA@|dw+M@+v9V9qsM(}Pw^B_@u376 z{sA9?4>s_UoBY14>9pIPJkV)`z5P^eeR#B7E}Px3o(Vfh**4f_|r)6uINY4acxj4f885JDs( zBnJh!Y>wJo8+7*&F1_N37!gGz=j!fV*QJz7DIzi}_B|)nKJua!I^i(mnucBFuMf+F!Q5CcIlc(T-6w9R%t*WGsvZcW+wZ) zg8={{Aus^g!$MUIVLs=}(g`^;5zZcr^e|;GQ6v&oHFbS>rRT(*mcW)y5%&P--P~P$R^}V)6BFzl=;NB}N*C z$=z40Rn9r}@nXF;)4gSX?fP{q+1;IkA{m3W0U(I`Hs`Hmb?hlRc(uA6GgUUNA;r)( ztFSUy*YhoP&4vOCJk!MWbf=H|S9urrf8;-0{+WNdUfUkj{d*@j?E*ZWhC_nV`2f^a zN?g#Cko_HOw}!5bj`LC@){*SrL+FwAk;a0;ru@*0|ZB_iyN>jC5>1&cCv@< zDm9U&t5?dGzP-Ac+VJcKH;e5X`e(0h{^Nf4x*lAuQz1YT4*`M0khk0<>X#x@VmMkX z!Q;{4%}~_JVC@5ouNf&vhJ78R>uO)bqD1ztxA=e3z0rDx>z+%?npk{qWMD=Q#L{ZE^bf- zTq_$z9)aWR0Q}B+B_aX^_)f9V`N3LEc_SV5r`l427H!JeTv5i1_lE` zaK99}B?EGTrvgL&SVTuqpSHVhv81ZwdfOwhyFJzl{nPfqV+Lzc^{(aYicA3MJOiYt zppOQh;XAeG9``9f#Zx@R6NWizyi>jZ(ud%2pV&Vo$RF-8faW5H8vsC8_a>H2XH$m5 zvth{{wFlWwz4pKqo1`VT82Z?aRo>d1_x;hG`?rg3yRONT5CsH1I$U9G)Jg~efJ7`$ zJ9k_xRwri{2JQ%si~s?_0BR}7Z%1RoU~pNr(OOjmn1R{Nz))3cE^&67be|^)a?XhE 
z2oVSX5s(>}nFF~ik-0h{FrdvOAu~V(2P6z3yi<@A9u}J(Ns&IZDtIT|X5L^h&0924 zW-?+xBL#D?ULHUyVrZV1F&!K%XX>G_zmxe%Ze z5(WYg(fJ;Hhb|KXT+Vbq%6D^FZ0-|qBvo~nwplR6CWc)eLf;n9PkhjjjaP~JgA6!f901ND77j$R#*V48ADscMIz^wYa() z%6yEjpM{ZAbXA6;!q z%|p$LK55As;BarhZI(AUcJ3nUvDfMV?|H}1ilhmn-h&_Ar7SZ9J?$ihaH zSvnZ$Q2yBJ=(oB*@t?JyJoAtAx_fSYIyPgO7TcB@EkRwN2Q#)ccu-ss>>l?w<|*C- zC`9(6Xow14xm0#d;1fSBBmQ9^02%@~IfBn5buvA@uKus-f?e{mIGdGq?e z{vfmm(|VoadU;dvkC^RNyY=;FU)+d?Md+LUV1Hk75tj~m9LIB>7A+E#fX0BOO5(t6 zTnvW^&Z4|E_%%NK;f=0_&{|rq9(*ZvM+XLz^ICQvuU|N8{}lRS62nW6E+-=50A|3% z4jjWJRbMY#9f(-WBT%T>xVn3V>U%FD4Z+`w?JruB-A%6EAXXnj0 zjvdhkSJ)gahi@;x{O|EAzp(z0&UVwmHJxg*D-l zG&JkG{iCx~Pw^B_@f7bB{s%e)kNb@NX>qBK`EWUC4o2YaLkn(;0j{@xhtsfYaAi`a zeeqGm*=o+-Y`MR64bxOi@%iVjN2=zNAs|5rq1FmOrA(rA7$)=C zCl|!U9RgB{4Cciy3o)}G=DdFZfRqxztU66h5u*V>tyM&3PlAV|12n%dDyo=2REn6X zK&c5TZ+}qp^XK5tjE@RH^M+wwH(yFiKeR!9_yB9omlu!%fdh~`)~Y-U4qZ+tI}rgg zBDz|jNYN2c1jamu)hfndGQUSaw1 z=%w~(Cgb6jg6DI4s0VWSMsiS`%3j;vSRGWCos3Sn?DwCEp+~Nk0X(JT>TRnn#2c%< z2b=rx!Pn#2*P;IbbHeBnDuB~iAw=0*NPH$9KL~leY%x8>Q#{3E#XphEOQt2u^8Cw9 zJsHF}4mG4?f!LcSAuuADRdWd#nkI52M5uCUa$btmS`k^K#29CN1`)|Q&(T`tZ3T&$ zpy&V&j)Xw?sGSM$u+DBKQfFg%k+N8Hh9SmFZ(fl)SD66kLI&VFWEtk?%)g3C69N#5 zYE@Bj017FZDbuVkG51~p5a)KTs)i8M#cUpQQ)W`a+1@4u;!FE_Lv%oG+DH|1&fs7Q zh(Hul3I^2-RF!~o&b!@inx;ISuYGy@{;sjj6&_CRpQOEOEt_HAHnHW@P#D%1hX=dur`J1++qp=Jr3$`qXxN9IG^oGbp6V;_5b1U)nBPifB(k6MC-4&yAiKAcKe&d_Ri@K z#m)bZdBw~sGH%E1`ho&?ebOfQINZ5^w&FeE!NAht4Juc_$r{nK1yW%^lfVt5}6c98Hh|6lcgtGIMQT-@ z>;3_m87~LGX4jm{e4(msmuCG^Bmkcsbo6rYSG{XeY!|7Shv&>eEnFNfU-bULc-$;5 z?k_iA%jx<3y&sy=3uu7A0O|-x1cZXlK14Bx>&@;9O*yLvA2Vvk!%xMtJKyoKKI93_ zjroU@4G(_+x_iHR{nh_qJLJuafBxbJ{*Beim%7sFo(-8EUEz238>a5$W2e77K_#6L|Fu^jv%)o?IeG{IAu@9$26HKu zv7r=eWdJCpAU~8Ra(n2OJdx0%pRl9*+jHF3Q15i5T~g7Z4kzQqwW~*7^-PB6S9>t*s#x4x#U|~$?am)GNWe`U z?)t^ce(({E`-2Y=a<4&G&4d+p-gW!*T(1vq_Ydw#n;!SA{S;5}6aWAQ@2c!BR$+i< z`|a#ibK0lv*c>i%q{fM;l#IYJafl5w#l&I)4ys~qXs8NvPn1&>Gp&W0kqIHrVg*{O z>f7$_WB#e02+l``n6w47}J)_14~@Ub25tvMl@d 
zVtYPm?H3$4e&Q3Kh=x|`qwF>e1GZ@>JKLP5+i-Svv2%@mf3jQm3kE_lu5UsT8PCtwP^Yl!0klX3u>{0{ z!*X>ol|=}ScrtD89pBHJU224Ha%*gx)X$y2-s6sZkZKu`oQ3qTcMM2r5U%*{U8 zC}4nqFb``Gxv>+tv&JzF4(3^P|D&(J@NXZK|Ncks{-tK5w_fi7q9&AD}Vj=-p_se=1)8#IL{4WcLyRBxn%P^Bme<` zyEg$4Llp)x;$XRk_1#tey{6k;@DNvfrn~mYbFDE)wcV&K!gSbnQ_(T&$uKSY{df=v z&=u5)JSc*gI=VYbE%0bg7jd32d!#;esWpn1GsKA4G>xhfQ`5xNUayvSyNzjuEBkA^ zXeAgeFMNviCe`EpumA5hKmPATr?lsgZ`g#VqA#;-=;Xj^=KFFqchdX3`_&CI3!g(x-R~Fb4o)j%|a8FoBio zYEO(z!DB)RhQO|{USFtcH8qpaFxAI-Rv|bWX$22Z9hzt{c`&qp8a>eO044x16EG)m zMglN!*C%9%&pafzXaT8lSI^on_p5B*_uJZ)SOJnUfEzeNaFp^64F+(+S$z8sp$!+D z11Oji_5_G1f`*oqKAd5ftN{?iiWpi=ZmA-62GjSO0~w~M!4*OXp=b(e2OKRbKi2s4 zhYlxh6cY8BO{ouoaC8slNhcd6gG?&K%Yv0lCg~*YsPBB!cHMevBju2HgiU1*Ac`NX z&artHU<7~(<-qKC(O*okH|0O`?SJ>d$!#P7uLD_MX)AlIK8;$Z+#Q62RoGpe23h>% z5B$j&KJuym;IIEXFTVINrGh5dh5y074KE zb)Xay$Ep=s6&(>NV4jLr2q6=ZsLo~s%%o7ni~C$UAtC}ob1^eV%qqs{?iH+-;%bPf zhG08p43S)o8C7(<8B*$+E|#gNDl!w1AuzB5L|_LL6<1+KW}eSE2Llo*5^9c|<`UT6 zs)GO`(k|LDX9sEf#;YS)h?HW*)RaO~UbhYEBnS%C6F*d}24FC(?(UR=xH+vWiy%Y*pxujcI=lGcf8oaO{Nf>fDL2>CZY?pASl4mW zG!LH~5pfy?(Gi(3B7#%{hc@=LR(4|q0F|5%4-ZA86y5DMWR2V8#)M`zAuNbW+w|i& zGCP7WgVh=WFP80gw>C%|FvjSn%_55CzKyPyvzl3?7$Omvn>(nxLnb1UsW5YsqM4bC zh|Duv1du{>cXvTlL_~I1siq+^6M?FGCJJK3!FS_yaB#pJcDo&hFytHyFmu;+W>RYn zk>-+C#0Y9lU(k>!h?G6*?y}0eW#W3Vm0`WWvw%jPd z81Q7;t(w)uDk96UV6n2@hPLVYv>V418UZO-piE&Z-D)AI&0Z4~E-p58*}Z;p=hH8K z)V*%Y78?5DAN+xDe)Ve?+lyb|(x`W927X z6vCMhNvafLT`koh4%;g=ZZLou1q4GkKqgX--n{3!#}f{R&^kjQ=*6ic@Wz^2tHtll z>(gb!*8l-PA6{OJa;~(8RvHjme}nt>?8bj>@y7q_+1vk@;mWhe&-^p#;6UVd_t(>)m2=EefRNfYbLtagceI0%kpp^R`IRlQ&h>D z&BvdMpG6cHZ#~z^=I&404)P$>XDz!=o zVbQbdh9t5WjD-n}q;lcDi7}0NqJaVu5|&azWJ1itu3vOnr>1Eb=h?Wdu1#uOeVeUY z;NF!UQO_^d2M0$8g_+cSa_hSxjMgkUo$aP^cR0OV7=JWsB!R=pY@>fodp4&q{%!PmFe=*)Yo|>@OTTONZd(WO9el~pkPqwG`3*PO* zQ3>6)S=F@H@6NBh^-KM0|6i-;C(@jL;=i4betPfho59CwV{cSq+_u+Q^J;fS$`z>u zocdL_=ay~)W34!SPjjI6g z)*G)kOi)c`8DWP@=RiUyM56DNWu6%zL8A}=7wSMt;EKi&03a9u2{Zu#2n5Wmu%S82 
zd&`sBMFK=;R|QiAARq_l57u4ru8`p7I@Hn0oty&ZIuOHqy$(-0Dwv=Gxw@LUkvbX> ze^{?WfgB>MNKp@*D3YUzRrc@E`sfM44l*N#l*Teqz)1@Pch?W==l*UmwCW<&1eB>M zT2Y)E;PPZ&hX_y@ql*RX#ECJYwU{v+oQ}CAC;-^4s!ib21l)eWKKFo|ky1iU*pA_# z-5r1Y&fnY{zVYTTnpgMQ^-Yty&2Gq(#n9~0B3x}vGPOr{@7#Ogr59vD|Jm>S?d4(* zoJ4Gzrko2AHci9Kc~Vh#3)@ZJZgRg^fydZ%P)%j_Swd!FP7hty0-}SF6CeZxXJTZZ zMg>F%SDghVjEE7M0I2Bh4vv>T1ptIbYM|imj0^xKfQSTC5FMG6&CG$cx*DsI8-cUA zqoO`ks{q1{DKk0|m?^LqbJxTHLUf{r8xsZRw&{vgb7+;OBF+{yAt4oZ!6@t&Q@c=C zH;7G-h-FX&@E8pM(3}_%k@-^iPHmo|aEr5VGdO}%;Lgm<%}o7r+8_`T6>|qu0GO$_ z&JKh$x|+#DSs}b6<|0g8xSCeeMFtNn4y!IFI-us(!Qx7@AC)6?A@gQ!LtmVq#%MB0{)S z3jv!MPyAu^{_zHa~DWMj``r2d!+u z9O+5#55EgMnxg}X15tD{bOu*td4Jnr8mcwUP|jGI@gAfSAy1+0CZJ1a6Po46(9zw! zEqyO3U(;J3srPFsRUv37@GwecVqyRwB!vp%BJOEfw-X`}M&w{7ZUG69n3qfK{XczRUl$OUh=+JOHdYF5^yimB9IN^w{PF;`(A+A zc`L57wuU07ZRU-8ha;fPj&hi7eP2 zbjmtijFN4T60-WrUV7nU(|S5Oyy_kKh<#etLEVWYgwUjeez`h`p{?R3B@CDs-ke|&8B zKKr$QYv}fKST1}y^#3o;o;r8-~Pzn=WjP*^qYAjce&lW_OEX9o4}z-X0$7j z7hP9OBF6^ZE!KkWrqx}M9oS=&mcw`&u@Hrj3Mzx|ntHsHHy7BX<&lU43M28EDNE{kOBn31~PQ984B(l>;V@w-ZwHDW;X-g*1JvIi@Qf8a?~9 zc>L>Ehw1S6*TOjM;;oYJZ_dsK2Z^z&og8mt*?tbraQI?9{o6Ke&OsjtHsJ*dM zY+&d(qv^EOi=X~C<;9;nID4fpLus!RY;rzNo?G1n6EcSo189JfbW7j()%eoS-Msyu zADsV#ZVcx~FWvv6|JH8#nX9{RAaE=7wo19{QRkEuV5As9j}`S|1v0LE%<3!-7{9;D#KJ{`SeU!BfH-d}C*KhVZ|wU`mO3WX zoP}JtSX3v40A@PxjlX}8saR456c#bZz_CN2O7Y2_j?2d8v8zQ`DyM>EY>RA_P$q~A z1gptBSwh=i)-#D8EcQh8DO=0#Kys1RHRtEgUzs-dZMkZi;T{O&KeD_LDzVgr#=!CK7#pp=oB&6xvKs{(a9tu0Wuw-A@AYNoRr zScFJKHhW}&RS-pWLqv0QXLPXHqZj}j%>j&^h$xbonwc9udV-=kxg`=ZG*?!4pWQE= zK?#5%lL9z^37I6LXc>yJ8JOXve5Zk8KTVGM?x4w(g9*a^&DwGF4RvWg`QZH&F zW@c{GJH!hRbR<#?IQKs;2bkYyR`uh8P~j$I96Xt$5C=jR$?zea3a01=;v`7O;OZ(+ z3ucIdjRKfgGj?DDleaxjK6DT)z&hL1XUu?EDI+RF^S&NeZPgA{W7zXhqb<@$%kEyJ zm4|b~OQAMIG@MlnY_%SI_hx(cYjSe_;C$6C#(lJclg{RM2{3?QH8W&SOJ)u0)03KN z*L9#46ht&e627dPXQ|^ZyCD&Y2)9(R4l-_K6QUQJ0%;}hB84!Q8&n-d1Xa4W9l{6* z9^6dSwRk3{5Q4K}Vl&HAE?#F78h1oiL^M{TSx?p++|lbhY8l9LX=B!#mP`60gQJ@( z8j2`0TQEof2{58$5s<(EfQ%4`02zR^R(C`|!9qw#tg~N_a|j$l*lo7Nxa=tOhNv9H 
zVucj0@9%FmnzEC37BZqX0>hXGcb|s5?EA%HF>H5(j!o033o--4dVLYbM1fPs zvEQ3CpRdo{U=h3Js=Iz=@0)MjJ~(_~U56o$FB~3ihKqi06%{wzlNbXBt+I2%?QV-q zP^ywOYl>q;#1_=hFa(R9I1bzOG)-;Wwy9k(L%@*1kcjf{DP*sT3bWLP!So{k8sAs2Zwq@mP^vNh_4l zw@>C?ybdBekQx?e!Bo*+Vd<&!7p|Ln8)x;1cmCMmXkJ05hD zcNe~m-mS8h6p}~nx^B5#UR+#=l#8)g2#!>1-Q}`sn*)IRoAqLK08TIG;iUaE>@~T? zZU0m2)rq7`G=}lSrQS2)Bd)sf}?oP&% zLU7aCbWJ>4y!QXR^ZS1nSGUJZmG@=Xb`Je!e{SqdeZDi&sXDRyQ20*hH~wP03uTDNHDZB%|@L|M8=H zze;cYCx_laduz4*O8Z-X@&3V&j?e!jq)_(v)b>y<7+9{wfi{DoCGR%P9<}KxR<7mZ zbf?Y3`xXt~xm17G@>L;Z5e2>SizO_F)ikUXekB?y2n1{ek2qog?@n*ls( z&meLj0<;g<+#Qk=Sb>290D)4iRH!k3OrN{UKun3f3>!ykA`^p(bGwu(xn?$)g|@=% zU&Y^>DTmc2SAj ziotUu?{TQ=Nk>waEINgT4uwr>&_8^RA=X4sb;VAWW=#|Lu&#Y z$DjbN7}A8>q!d`m`-K!a8>1r#v{blJt(D*3^3Zz)b)mB%7}VMsg2d~&U2P0*H4EfG z6sED}T3bnNlRD&EhgWaB@WK~<|I1C+T-|S8e)X$}`O3ioOsu&y0ER)`+fEc0!Ss*q? zF*ox|6}ef+KaxAmvYE4*4FCXmW3$`9Y)ubr5HKVRAOe7Z0SE!WE{!W4+YpGwod6k$ z0JN%A>6^CJyxVRM4-bHRx81Gw_sCRB1)u9r?v7@L7#L|4I?w8$iK4j$^S}{IIRtlC zb?}FxpYBAIWcDOLcSi{DP|7qKnxPn~g;}$ekqA(j+!bK9MIbW)KyYfz*jy9*|#U)AqG`@#k)RZqctqN8wTO^6yI5LH0rkN_Y{rQzH{mkzI59b=@}Vs_nT4b2kN)Qpi35eN;4 zh{PVx@ag*v9|DIZ!IGq9S2lD8EnrW0TlC$~I0ZDT17HdxbQ2iMZ@6^<`jy$5pNqISc1?mEBJ6m|rh z-4~09z_ts^z4oA%mp8QQ&RV$QQ*J1)+qO*@$VsQ_T4G8~6N`})Yg35aq<~GdVb~!c zh=LO{xrZv1h$A)1%Aiqo*`#FH5{WUp_bCaL*$Pr+j_O>ihF}M=vpHlHBQ;_q@&MSS z;MGNH5^pHYy@6UQ1wur;jOqXg^QZtSUd{{v5QRsg%!Zf!Q3M1t5qCEe@7u1W)Fo;- zr4S%BB0_*wQHzQxVPGJapz7}4HxYnLjST~^*}T)Ks}&ZNe%Un)5K}>)w#u;Tn*QYE zj5$V%{Xt(!0RS^w;t~|7?otZN)Eq1i2vH?(by9&^_1cx|ZrG<4NS)SWqZHaO)uLl< z+cpwU)9(JAy9t?#)^P%}!)_^KM#N=mZ$G%d+}}G`E!P(pIp-pEu~~jZYz+Q>*F7SW z5dpCuy}!ON)bxsbU^lZ-VvhkYk{5 zRmt32qc*mQIH`4j0@$a$XRq#`t}jkb&+;_Z40wnar|UDStNzW-sIQ8fo8#yen0zXdyL#ttZ-9=aU&b5CH4E zpbyLhsGNwX-Mhlf7+VmjX3E|)!I1$lacG*ZR+Umqk%&ytFfxFF08I{HBpyPTtAw++ z4Kq*Elv3)%V{BDeL|Sfl+kr#y0=x9!>xVyYC^t+IZtLi&=0+Nlg4_}qi#(NX z80>6afAlX*ANy}kjU3(iyZaYk-}Eo+uKp|}`sVR%lmV_hSH*rWP&N)`B^-Wq@ejI- zzu9hH_pzpDC1R{M zC-~Mcl;yJze)KOKz5ch-`D;h7{LS;D|B$5%xraie#NDFyeQw#QtHW7evZ|Yop#_N{ 
zZ~F+-rPmrb65>OaXR+_TpJhcPMkkh@DwFo^LT;5mob|4EgXSjDp65W&M_S}s00^9_ zzqjikaJfIaB+0qU>|*^s(@owhq9AjqQ>74*19KaF0n*pUB!Uz%6Hi zwa92Fg^ij9y8Y0W*g?n+!U??!xnV%FkwcGd`hPsq3kj8aQIxbp~NOj4>qvMIww1s2ZY~%?1Kh%#a*#)*W(m2XHh%o>d~4 zXH#NTH&+LDGgr5{9IpVgos5~Ag9G3!J~6MQ+(Aqo005BzK+Tb`g7Dkb+F4f!H7wbb=?qg5prCSoEjQbk1V(uNqoks&B5Wl;kKkhW=<8*wUP#6i`o0st^FK#*FU zL43BEd(<8PGr&itIQYmt+QGr3y5lTjez`(NH+SSPJF3mZsJV$^&JOOx7;t{i3g8B2 z_Q{(s!fu?ZJkY$Kx}$FIUezDD@v+y}=NI>Hi^#KAt~|Fo+6@^e3adVPIy`{u>+X0s zv+<&{`wPF3NAAN)cT)xmDMk(eK+KD_-IcLSx)}#AqSyNi?cxic21Y)9RjYU{X39u} z=tyQ1@X0Qhyc;;Odv=|`Yhw56juuk$Va@v}oDvhE)CwXIArgXTlaWl2IiM;kp|cq{ z(1&zAKn{#(Zp6?=GzTL?)A##6UvR;((bSQQnGt}A00_y!od6xwRNdXw=OF6t%+bMB%}nQh0ughF!<3o1);iY8 z%$GxH?lG~u03eV7Ah`Lg?S_pRn9uK(iQUXZ)m20l5ji%_L`251NN$$>!Ih(wQmqw? z+&zS_SS)O6qzt0&wGe{@AIG6aP~)@BdK!yyM1Z}v+go+2W7G5%G*{o{Vj&o7A)lng z(5&lpK8?p^_h8tu+wNkx-tA%Ej9PKrO(wVR-90)ws;bO9lmJNHhmLfF(226h|8j? zVZ7+_ro<*?D=M>MLSO)|D$@sT-TQun2vGnbuDYg7TXr?Kz^?CenaoT{91txgoz2d) zpnF=HPIrRTJu{4`CtnSR*YZa{GTeDK;Bf8gP|hwk7j-vn!7Y$lxL(C`*`<`kZOFyl z%^{EHlMUidjb1xAh|IgPF{;NqU)PJc=#QFs)-`-`PA9*9aN`GMxfm-$V5QP_Z7xC_ z8e0$`5tynKbq!1*CPKtGZA1|~vPmdKL?j+<^`-0W?-8Q7tC^}2g|6)oQM@4H{{EsA zsdY57S~bRqoB(AQrnO`fGo7~tD70>x#z~2Y8C56IA|_3HsAk}fAvAqQMB8x~HftnY z#yv+y#u%9sZ~#@m3)k=UpIXw+5IF2e(@yJBX#j7@tE)lS;BryUr{iP2{`rfa{GX@O z(`h(<^Z9=nKDk#)X$M)!y>htBIa7a?^693E@@hfjr2fsTf2RKI=MO^GL5Hvp*Pd1P zu9)juhr7}&BLdZPXB5lDBSbO!fBO2j{vOE5>5G3h?tR{{ z3&+2c>N;QnAAE9B!W!8cDrntN(>^n;YCm33$j5bRx4H^(x>WH41t3?Ljf)-L?R%XF z)f7E|3xbk~dx?Sa_gNeWw30e8S|lgQ5tKqQ6odB|2Q*#M8fM#6eCT3LAFS)i5Zr3p zB~%ruF*GU@Sqde3f7(?lm659bQm$$s^F1NnsZ{k2Sd?pwbuQwbMC~_?s85Ysk11QY$3!tOj5>=Jo|il>ub-}_{y7i(lUH5Y~ev| zn{S)GXZlghKO7hfsD}& zosiMN+}#i_$?||0;-$x?s`_P-905S#vIpP*!C6)37r!jJ0}zpc;;eMf0RYT|nE@?? 
zFq?L&&O3)o4}hQ+V{~;9K?jD*L4-`3Ife20JtP7~xHlA)9!bO<$jOl`aj28OaMtd9 zb@xMGy_4El>VQG#-6DD=_4$3xBmjIkBvA?79Ul^4;h|Q>CG+*+oWa~g0RRF7xD*%n zS#`+ge}1SL`cPVAMmqj_x2-o;d)Tl@tzWtGOyEsOqK|FUzq#AXV>lz9E#rmyHoRxE$=lQxq%@7GK5GWI-&st1{Z}q2&bdHxcA%m=?Bn6I>63k zvOId(g?VNc=1>2_84sOMtY6*v<)3(coX}!ri)Km;Pj=jUH<*UP&;^sqBHF`FbHTUs3C;agE!pr`I~oi9NYB%F5Jm`Y;CVRyZ7tA z*X@4Y)70Q{(v;vF=C41(9B0f-M4E7I$|wE3UrN?3y~bY0+gR&nsuV(~H4{j?2rv4#_V3Ui`St^(%CsEvIe3Uj%&e*< z*BB!r&YBOgO>#-;cV-5lQVJrb#NXklta`Z{B81tPp6F5he1uDb6M#puuLz8Y=Cjz^ z$SR=aTj%>v-q6xIU1&58hURe*EFRy}y0u z{w@3MA1wEA(Z}tCu>&0nc|;x!fobpVfBplf`6Js@Y$Sfd%i`}5PVI}Y1=woZfl7sd zfz>9WC&fJQ-QE|pXw}_4B#n)^gfcXir)DqaDrAuwBdnf3y|-LN`Pt*)ai8)ZDhhgp zJ>Rz>0A2Xe|dWNBk|Q|f99>z#_KYqov5V+p`&781{G5Qm_4)@nPiv> zFURluk77^CAAHtpk!mtjK1s_wF(%z z;XHest;9I0Dk7$4-U67J2G6R>j6{@DBEo5!a?Z_)V;E~S0B>4WtuaP7#%cV_?OzhI zHbQG#w`7Vvv?Y$q4FZ6Ph-X@U)YFQ`6VPqtM7y$- ztnqDe7;}G=PwidGsqPMxYO$AIUzAcVj)eoDSA376I!`)Y*zcZi!cQDeF#rq}n`D65 zcj-Nf?YKK5T|Sz3eTW7hydtWn%F@QxvSyHdrYqk{Kk;p27{BY&-;FKsz0x9LW-~)4 zGXrFb!I_xjFrG1Dn0X_n?mm0=nSYPg=Uxa8zxwTOy=jwh00%J652Zx|kON`?V4hVH zfB}eTF}}xbp~$=-c6UacSGVq7pAfCyh#HZSwP7u3BQ{8A8>?G}CtFnHy&~;)O)mW~ z1qN1AvI<7V0Y99}WpQDdl4d6ennR!vY%vkqhqD~;Zs;P?w2N_UnzkD)0V$TC{{5CF z<7VBC!_s}K0~iK`M*3p;y{5?z8VWNS_t}Ig=5<|v{aTsM(iKR1`)99~{ZAV2^Dg7C zpaEIR2Wu1%APNZ&PDrND-8=66O&r!3`mmN5P|cEqj~RCPO1Jm?;_AJNd+W1f(y8$l z3?XpMqqBF}S|C*`RS^NJ1v6%EQfHRhX$USyOj&I&#wiz9Kzu0RGi&`D06-#iWUHvN zwhjWJg1LhNz+8esa*lxkNOjhhKtxA!cYRnAikCtfZ)eF%Lni`97qI{a0OA_RRVw*q zzroCie7-Hs44HMD+@0Ksp=laKETtS$07gcVQV1N8bI!@VV=mR5m;$5)-)nQS|(`0!p98=o?xDLA5+b4@h z*u<#kQsMm0Q7en)*04=Y(>YU>Y($}H4XM_;4ABw5ZN!YaTR>bL`>GAvazenOI#kdc zi2=yW=tZ8~) z2LW|gH7$3pl#_ia#ybcB;;Cn7)^sw0t<6Nz?R;lfL;ixN(}>8~U|I_wd18Xd|ATQrj{ZsZ(ei zDDDu~yQQqMO&`uE|L&-$&A@4E?F3|Ai!n?ImR6qdeUOTiVsNX2uZcpUfTK#Q+a`+z ziev>L=-9BQXp8X)f4}b$0ZPjzmGLZhY1NM-38NMIUX`ET1vM5BVs(NM$OOP0N!_eT z-EDvlH!r)5_Uumz#>F7gR0>yz+@lW9&?`$CkQzz}hS73f5XUU;71hlQ-2rXUv<`t) zU@Io2O)(g_!eJx|Df{X|8yHs!`^DybQNJGd4whRwP(#ZNd!;op5cP!ODvVA9Sya?R 
z2*@B{jG|_0=2p!%{rY}ic2^F1?$S{5KyGGj&Wxxr&VoYj5MxY)9@wPrb4BFQ%2W#x zGe-bI#xgZ1D1z>;V@AZLY0R8S0gRl`Om^!K!r`K~Haa7CtOmrO;{d3^F~ztBYwClf z-70aMN{vEbIh-vsYk?k0M>bNz)3>JX`3rjH=)ty~O2I@DY&-PLX@p?BvL>`rwY1DPM54ls9b|olFB6r8npQ}rZkgJ6OtdzcDG!U60et(Gz#i*^te#kL6 zO&CmzN}!zlz12U(iOJF2)!ZGKoX`RI113`EsKyFIVS^YjSxwuxoFe(-p+lL}SreFG zRul#!R5yl5v*Df13o$?@4CLP~~+kYl7^?kpvK&^a=SK-z$Qdfg-4S2uAXmXPXB9liz+Fn(wdl5THWkk6r5zKK<;k z{O0R2WvARA7y4ka$lq`LnLq#2fB$d%Lzzmma8OdW78ipL@PuCnRuxWNE$isb@tx-C z?jxJ=kf{`wd1Ub}ws!as9uI{(R^c=4MrpMU#oC-lK$jkR1i1}A(}?ZHpo z|2tR4ZxvZ-^O2-EG)cj$%E^PXy6s-P`P|7fx4!oE^H-;`diGkkSZ(u;681+6WN0Wc z^Ei&X-8j?miKs~(7^HYefAo(I1kJtMYIx<}jUq0fEA`(&0IBP4%kc?cUgg{jbat8pH zM1G=3tg5xvAOHX%1psi@wr|?D6_MhV5VceSp4n9nL@}tBT-|{=vgo8$Qs~b<_xT_E z*pL3k-+c9Ab56X9Eov3;C}r%LcD4&P6L=^Mq^h3qkq-e8G16?_>p0ItTy`%WEewir zj0|8^vWg?Js@G!8tSqC3hQCE0W6mXtNkS~ z)mlZWnZ+1iT&!fd)ZO`4?!EEy2wndyEk7~VkDPW06evWD0b}A2W8XJTlf$BFSoNJ% zxz***fBMER|JLmaXuRknMj7k&6!R1rnWEW;)u9$hCQJu^;?I8i@BG4VI+N|hq5~(x z4`<$Q%nL*CY#M{(1fD|t#7}+nSH5xgYZqsL38pG6KoBh$@#C^Ad%wXY6D1AmhVk|{ zP-}ki^U<~9`|CMmT#7S01Op0;T7)sy?6{nt;O>vy*H!f#R-eBA_v7~b;%;0X$O3hZ zR~sI6iaFTYtgoCt7IYsu&!^+EJk*vkA3vh zKl;Oe>>vEUU*qImbCeoCJzH4Yo?`4s!9xphlk@dsj54g0W+&Z&NYOPCrv4E zKr=yt5CWpB)`+dDipbmZ1ONckwLA}j0f3o_T(+W#gS(4X0EjU%Bg~Gp}=w zON<;ssI>+RB9il1Yh~t?F7uA-C1=YXPIWUQGw&o*NY}3RE%m?u<>S}xJ^1`*KK8@^ zt$*p)|F2&SAuN~6uItJ;4a309O-jR9tNQ7Bd-wkNy7FX;!cx*7^!@gHeQ|M-x~`}kEc@e-imN0(K6!9{=k6yy{_%|ddtd)L z0kGpIUi#<*XjPDT+U#nG-h^6fEoHafPz0swzRB%XO@WcPy5B$dOeC^(e zXi|KSn#)f*T=eFTedL>`KU~@;f8q!J-rxIDuG1oM#`m}GfwocH z2Wuh-Q4s=nWL4!Awn4gET5m6YDHk@1BXfd)F=cZn0jkr>U%#BnCvZ8=E46-X0w6Ef$O2hMwIsLjCPKpFX*M&{=wJ z`8zibe{h*!&Hs(R@Qc6l@}gT7TB#~)PGGP+k}_#6k%5`VQXR1Em$2R_m1tB0>ZWSG zTQ)g-8$UYTdEupxcnYWM^D*aup=;YFCbePay8w`gm^lELS>H6wyql&xRpw}pyWN;` zv8}}zQ%cOZ-EIMFwOWY?b)C7WY6w(o4T1Of_jAsSyX~fm(A8^KQllZ-Fifhx8$bDl z=l^Wa-Hp}0mH|V%q|L@J%)98z7JO}F2kq6@f8pbQ^5?!ded1eR{oPKqG${x=XieBw zR1D+cv*GN{>i%!Z;q^`bLO&LE6VwPMwkXKhC;?)Ph-^M6H4fTE%3;CN1RT(cYosNaT|cxkpUT*f#zQ^Z126>{a?i+_w5fp 
zn{)n^Z=U43O_81y&(!Y~e(3o_INbZ?-+LWoY@n$s>KJ>d`rh(jg((t;%SmfTL`6{W z;2$i)VHCA!lD&&*Q+6+0z51g+_2S?8#cx>n?lhoxW4V8?_Jb8Y1vU|Mm|Fo9I7@D* z1vN7xKxPI)aZxwM?@c|*8AR;3_2K2;L^W{@V2#JQ8W99F7|x2A0RUW~LaY0a6odb1 z@pf})md^wL1O#}muk)Bt0i2tFmVD}oGZt^LYNQV-YyREPL1uLT@(2bj9^IX((1&!n zEClY<7(24WNPSp_=r$CTzZgEjksyJrZGhToO;*@)YygCj|)27G=o8Z@DeiuB5x zuSPkzdgPY$rO&?f@~f}bTy!_JElggSxn}8_WM;!K5ulq8M_1eL_SUCo+ua1*F$c@) zVIrnBuo;zXwW_KjP_Dx4<1lcFF>tLyF^WN86O&xCsHK!b2qG#XVbPeosR9K=1`u~( z4ghZM2&h_#C`2On5WpyvQYeDpq~P7EC5CA|1gQW(0JENtF*2fAZMr1@m|3lv5E+<( z9o#JgvjG*Ufq+v`kz7mgfL`)AAg9%$E4a3CNFD(zQ(P?iVJfx67+9n*C2C?7an)JQ zsEE{B-M!(|G|epT5dxVjF^dQ}a^Px3Ra0ugaU65bwTVeZE?qhum^l)pWM0G^R0Way zz6}u-E1(`WUE4P_RsxJ1UfrHl^=`3Tm%Pgt<4^*5pW9o$YY{ zdKXiq(?ZATbd{m=<*m|QE4YBgVr1RsR2U(1(bl7@6sFz-rvuiB)q+#6axrBz_E9u611(H z5H4_ZYU=%qy1U(jE#S11$=h9)&R4`IxpcASU5s%V`nmUN=57>HDA{&{UYzetE_HYg z_xFjR7AmDQeR8ja=qgtauB^{DY8Dxq&~$wuj4@a#(l!AWX*&*;NXeZUvT2nrq`@fP=z>)b7ay01yz2 z-|zeX1Bc)(5mbf93ShN{kcu-s>0)}LIHeKUm??%bY+Uk!RDg_Fh=7B+5l8}SwSHLh z-MhgQg`;6&^C5Fir|h-al;VUUou4Ke~FbuXVfK#x^4Q zFkM9G+J5!Buc|la&nl)HyNX~#&0`>@*vtk52}6tl zRVEiz%#ul|rd0Gs^4F=oI_{o_asjY(PQk&-uJS6x6Tn6l1EWfCoih8Z9<^xC3VT5xF{G9hk&HD*1TIT+TFB>UAdZ6B;U(= z^XTB&<+HCJzxpHjm9vZAy#Lal9RPbQ+Q30;XZ_2* z`}u!6=%U!L*bdX;Y9I=`^Szvxal3`vw%Es)eq_4wL*uvp>T>$}`q_Wu^zesyZ{6wP z7QQVM_fxyK{e64+FWK2^xcZnLyd<~&K~x(KH_J8*VnIL^N{yk??xwdboWLmkQ=LG zlQVG)U@G>W>pu-7&^dP!0)PWMFaUhO@@a_}3`2+`LL0kDYNu{N-mMK#+HP-exjs8L zsa;5#Cjz8*m(~!6w;4i*)@^Hj2_pOn$zh ztF3Mx%lW6&?}n4px_xm!?#E}f9nQ*HQSwuvNx-S-sAXZALQgRVu+uz?SQis#=F!ca5OgW~+gd&3ORqSH3YnY(N zHhB{oX5MAkO*<|6!p-Z4M_0fsrBR|ie|2|$ksa?2_3-TMis7e*{+SEuv^$=>b4kf- zT0KXh4zM2cxM)&h-VE!6*z~LQxC>MjN{0c#D>wkyFqma8Qme-rbUHmQkM`Wl{{(&3(b@&33o)j60j^E|z*swLwz2_n{7U4e)~ z3>4T+h@fdBqBF79vVXY$?2Ttw{bD!4Y?kI=ZqqcW8xbxR3j?PVtIBq}1;aMBrHJU* zrTsi;$-Z3n$Wy5k25(dAhW02l0Jxio0MKkyfN#S?2qHGyC?cY21#lvK#4}}vit1O)1t|!KrCFcb}$7L}Fx4ksNgPq(KBgGn;DdV{Ek9<9^YH z>p!r1?!{Gxb0X^cHpb|BIafE;Qi_C_Rt>#=araEFrsh+FT?p=vP$;Y93tSv!EU2YO zg$U2=rJbt6P74xfC 
zuf{+CK8@p?ZdR*3B3iH4CK`yln3#CZzY4BZz`a&Q19#Z%cA~E8MC1RTy+04OZM)9< zurbE$R8ezza-5Xoid{)1 zvE!0#$x#wp6-$)WkWoj9gj&iJ0f-=8f8TqzKi%$DGn+9+{#fTeKorT+gCOF8IKMw` z-FwbGd#`=Y*>lY~#y7stF&lHs4&5Q>-=>>?&V2I5JpBrm!dLt{2hkuA5E*OG5-Nz$ zo1(o{%3{9WUQQAwCQB8)H<7Cdq{`>Qm6l;Td(|yCmWp| z);;w!uV+bWhoWV3-jmO1ELBx)H#;O~+ZGY~*n3}npw^4G9-N=e^hS_qO?Z~tYrE6U z%d{Nb4Ch=Ru|OquUg>D|&6tg~CFLRjA|>UNA|eKBRe|H+P}>S(mI7j(JL~hqhdXP^ z(6~?sv1ZJDn-D9Lie$+Ez|1*Ez`|S*?RwRxl*TbJD<}%1h&pQ#(ODycQ}R5b_1<>F z&Up(!plTP(>rrkaHy=i~V+ssHhn)b**qz2U*@c^V_ha?^Vg)>n>R-<_EEROq@{;tXC$*tdg z;~)N;;r>r@ecfrTehK3$2GPYLWCa#=wW;^*gICi0mi^H0xv+mQEN^zc%=voEd0rm^ zIeGBY!&m>m=JZjt4XxhTd*@5>-p?$b`RwrgAAYj`At~OB_tpBuUCT#ow3cm&iVB%` zZN4u_u>I)h!?3yU&Ev)z;A&`!%{7!lWECmJjOwhh7SU;m$^)M2GLq^S!j6_0*^gavw4g;#du@y=!I){H0DS5?mxNcH{C_+=3Tc%1wn=Sw%WpZso#CBd+ zRZv0fA*B%k>Z+a37w^3Dwhi9S{n&5L(#7-x8q;XWIcrgc3<=;c7HYhpuFqF!=+^Zk z<7svAjz|Gv@9p-)?lsk&8#9E}? ziV7i+JtzW@H^CIc!~g^W2vc4Zs9dP>WcL`&0YEIJOg%^unbz`@lE`IjoV7Mo&RXl7ONo6j#u2=VnX+TPKbt-Cb7^4e=3{rD%woQ4#`-W-XHvCf6Ms))cC0@b!{hh1lkL1UVxan4=d zl!?6eU`RwV78#}6?ox>i3INuHaqJLM;MuywXSsb@+CODJh3#=zJ@|J2USIL;VG4E< z5nv{m{wl}<<~w`Z@7F7an5i53zIR8^zCdO^Uq6v**~D!R8`+5&wCwSlhd<)7#(_mb2mnZGE!wlz4t0z3wjU&IH*kG9n47x<74E8- z`Qiy4)o!mwYeHo0j0UyVDhm-2km_WrE|PLVDj8IwA0Pu9400t-w2>_f(A+!dSb3#>0$P^YIq5Y!=>)Y4w|G87@pM3g{ zA3XZv$xBB9AF%lXJHdibq!8uyV6egWXL9XD9XosPi`V1%U~bPp^IP-1f4KPSpR!{n zj2!P#Np}DC7%|E=&JJoM{)NBm-g@x-(S8NpFs}O>zpZ@m56R<~#V#Z)rDpNV_VtVX z*Z*sC{w1}+eDJr#!{3&>j&NH-CEGqXH`H!why<8j5Ksdh1MJYxWV+9*B7iC)O|gyo z9#!*BQGpdzL5nIP8YEyL$x5~AG@6KL(V7Nafd0vQdi+j1Py!$W#v&kPg{V*f_E4Eb z1r$(#5TotN?SU(~d3$UtLxhWFs~S{8qJ^b^FbKotX78!^eNqRMQgY5y(cc*3y$57#t%&5D0U%UP zL~@DDc{0<#bnw4)AQc0MfYu;@Rh@EvfkDmUi&RHTLA|x$QRoNH}p%q|e4#6RslnN1e=Z0ZODIp>#iHP@pHk);6 z+;p2@owbmXOzoTX#mbPk){QY20kAj>k+`_va%2D_C6hW!IzaYbxaeYTTwV2qD!PvhVb0H=lRo>en z<)<#dfT+_dZO~az^}2)I%a1nm%Ji$;HV~Yf&*xHjlqp#Os;W`dz8_CcSBrLE>g$Pe zY3Fr9yE6!t!Mf+J^4^%LQ^kI_=V;#UZ8j%E8pKhsVeT8N_Quh5boT7@N$$sGGdo=D 
z<(QqCK;DTRh9Lyk)U&o}rf{H`^L*Y&%9r!Y67)!h3l{fGaxht+ZsU8~z2X1~2?kG|*Z`QLc`%$MuRu;8`P%IE<}YFOkrC{i7L9r+0lc?1m#D%3 zgcc2RHpVDXjwuekgqd-Ufb$q*X7;VLkoG>6&eNehu&UXEsj9f`O1Fi2X6SNZXD%TG zLMxo7Rkkg`3{5t5(A_US_wxT~pc`iP{lpjc-e0rBc_`cUOMmdm@BLHeVs&))|2^)%P{qEw zU^298_QH7H+XYNA8UR!PREvZ_MY?Q=NRcxTSxGdgG0!A>lwY`c`poY`MrXE<4?jP< ze{Xo<`||qBQV8slT?Nf7w+E_%35DFq#kRK(Pyd15|FPNXl@-Zi^G30X7@KCUbkwKO zp@w9o@3zeXV4v1~-kqkoG%Z$m@wQzaI{I0oqJZ&MvKb^_hSKL3Kv_HC)bW{SKp3#w^UO(h$o zu^-+(-OWC>=vXMY*3XT_$%kPa$N9Xqfi5+smr1!ugsOrFf~o?l$}pwuPO||zSqfQe zrmE9ac>#jyyy2XiOg6`Hyc_~R5h=`EN}3#lrpQTS%;l|J3bO(*B3NrlLGjW+u$00j zP1jd7)l~OEFn|c6ETUH5wdI*EQP4~q0wrK(WhSzvlx@xc;DX1(F~+{{jkzos86puu zxQrA=L}S2NhiDKkVX{oluN;;N=bRNNQVb|wGVN-3*HG?1Ob3xU1rTWF{pD+ za@7$Qm~8eiX71dx>Z+p2CW1{T6* zF*{#fWWT-T_Wm<@aeeUVJL|8}xXLzMdC7ctg2n;>QBl>&nOVUQ5?oFDy)Vf`hWpy9 z;<#PhtbXp_rnmo+)*s9t_-~C8F+;Hx_N#XL`h9o2h_h<@#%u@{=VY|+MMB*38B29^ ze1M3t&&-qenqSQKn#y*)>2YZ1wsD@J1S$0{oxgFny3QzR+25n{BC)phGPm;t;=G%t z(}<|_1ptVsOa?%z&R57V73zB09B&;zm2m zd(W+xzxAzy50NLBFK3)FZ$$*vsbXO^C33a&vAMSS^A7v*d{#0rJ=eYZU*C?8-u#|F zBT}@)BJS{O|5sRVlUaq+S0MR*tOK~=_UBGM{l_$(1UY+j^LJZ!C&l}@=^C*ng=qJD zNmE-_m6Ze(3J4-7dH@k1gaU?;5lTsZKh+W5{<$!FP7Mt%r@aH~<~TgErB0RC+9DzG zg@5ZSG+em(aaVs($jQNHaq~6%_^(oW1MPD@G@E+ihErE4tHhG(dRF;*eJ;|U7!88X zWga%BU*)R2di%w<5tb!yvuV)HF&xnN8u92;zd8DzxptxZ`%QJwFFr>4#DOz;Si2A` z8$|^r#2S$iBa3?K+GCWT@I%H-@vL^NsYc`ECGfl`X9J}vr7 zDa9CL98Nm`=Nu8J3K29-CCtVk08RgMGN(313j=d@mJ~%*k>1T35=EUvZ=?h)29Yki z8UUiq3?cxcL;!${TC@}~h9~>!9Mcff-h9#b{Wy-y9Ag9kV+?}x2CVZ6#9Ro$R~E@m zuSFG*&{_)!2pIqY6iHFgS~A9vnZyywoXH>} zq>?iWi!07x&2`XbX6I~>z-BIjv2H?4%Wh1HMZkgw1{`DB?D{9`lRNvzFCA=-Su6g< z{n8wq{_TF0{Gm_I_U2tDXie-{l$Xaxv2XIl15JZ1*@ysPQB?%FDn4Ct#kYcNL&%0{ zkjkZ8u@v6lJD+|3jr@_2MdLj1&m5zPzBMPvQBPmqRx{s2q208 z2xLuNxuy=Q)yliDzkgU%1k@VTB5@cw7w1goG7JM4s_PI_8M?7BTic|ZQ_e&-=aGsJ z9&<^E=v)J6hjCPd;7lpP%!p*+0(u?A}Xj+m<)mt zkq{>_ni+Eh0BZ@+C?SI?5*gB5m<56AsuAUBIzhA$nW;}O4L6ihkknez8J04R=h0i2 z#m5~k{gSnupRbRPk42>Kdt=OyMgUNf*&IZ?H{N;~N{*V^Pzgjad#J{cOI@sC_{1}d 
zXO8Co_`%%(H@nJK*A9+rs_MG#`<;l4<5<@6-7beo6z-M@oNF80ChX9OVUwS_Ri=0-Jhyo`)hpr zcd|(_yM@$|qL=M{^HTiu|DgJX|ES!5o$40q3RSJO3WGDgseLJBlHhpbVvK1Ri72R} zP>kUDYPTvygY({L=dG_o+LU{*JYI}&u-D9Ul@rabTO0sDfGiOa36V9%dtcW_1p}gD zAx7)TdypY-(jkdG1U~4_H*cP%%JTI?SBH7dT}q{7F~&J#0f3o}v85EzA|jd@01(l6 z8>*`B#;GR?IyFbx*vTBr2oskw9VMo0GZ8QW4f%=!`es~w^mOsPkLr(TB~4|KJ2wu5 zC59?00+O%MNdygTd~$8|v*)z88D&Uh&3^9Nc<=u4;a@nr@k3}}{_sB^@4e;h2ixnt z*(4R~jz@Jzr=R)bFl;{bSO3+`@+FF{x%Ojke(=9{Bcd`#^LQ?}vj%`v5Xl(ymsb2B zwh#$3SObD+1Pz+hxnkEBdr@cc?|z-ziIWS$If2F*UQswq8fY&d#e`t~nS^7z51qoC~ac z@(RBCr%yleZR6Xy0j0m1~(x7RJS5XJ!)AZ~L6aNMM{N ziKSl|SG}jl@AGCF6CkvTsE}C% z`r+vSfr`A>uE4uo9?1(6Am_5(ZV!$QSF2U-6C$dLF~)h%%vRNfkgnPTS6p$$6<7ST zql(SwODPJA+!V#N4$z6z9JDG7FTLe*W9m??k4t>jA2|p&hhD6$BwOhR@`@JBw=L1l z4P)!{23}l{7*`wBka2@r5vw&B#Cu=+KpuDNop)8(TN+nViNuK3Ai8lJS&Ko-%+?|z za#3N~97L>H*n5u#5Yf7+Mz3keQ#k~R3NRrMA+lI90ALV>Swsv15vr%B3qA7ai?1-o58lvo=FsAJ65T5D`6OsHInDvXjXL0#F(ns!$AL)yiJWEEI| zZEc33Q?Kd`ihzhyRK_fIUibz)tf-A20=SHNi6$XwFf+(ig8IMHveDMQ^>zLT;@i@2akIsT3uQISD8!G@)ujRRw2_ zF-9jr);Nw;-KJ8sl#~(+pdnE$!YHu{bros=n@03iWvwJ03_4Xc#yyP}zN~8-W2ez_ z1bu34N6Z|8d1@S_szziwS1OSJD&p5IYgLhmoHJ7_JTsS4Qc6>J9rL9^4G{?m5s8qQ zfxdo%B?I1A004zx2#WLJsNX#h?qfrf$I?ps*Gj?A*}3%>rCo=_nEl z?ro~DfJa?cCoMR^;o{@V`ng$kvOUj2MoL?^OKZWp?bxI0`T4n5^i^G01XT^S?R=?B zXjp(#R%QU0?xbkQAgaZfbKADF#XL9Wh z@iYGIKQFibn@+b(>tvd++0D#@vm2jphqvJTPozBfdcXCF$*4N81qqSZIRnBXL_|49 z1OwjBZEa}AuGbx`x-_ecpH<_o8^NxY&n;)m#dumak1d{WpZosY-0;?wQlw1z@rG4H z2#khVISr6v%&b^US%uv@wz)M#_hu)qWlv)jpBVdQ#HbOba(@V@3W%69tK!g&))D}5 zdFpox>MBQ6m^G(74aTKZoEk&~w9Gh;!*o!mV|F5HW{tJ*s^%2+C(OfiA zXXs<;N$zLLVy<>a~@b|*5g zWbazZK*JW&S&2z&c?dKAfj>HK|F)^VT-IN{X>r@kMx&q+SO;=q5PX3geUKRjyDi%9 z1Ov`YI(}wnR>40cu2>toc#U|K_794@X#C#T+5+fh#`OV9v}3%ub=$8b?KaDkFW-Fp zGZysZ@bjC4&scv0@xlN)1XP{u%m4)tLEl?cEQXMY3>v2f3Wy-ON}EPaZG8%s$r8*L zCR7z}OX|vdJa>p^LmtsjL+`-7z!o8s{}7T@K`;i4Bd5wmiUMBQrUJmzaS(vZe5Y@? 
zv;4BW>tKBORZn#v(`NO(J+-Pa22v@i7iZ_!uV1gK>W$a$8ZxSoO98~oU1Ize-_@?# z16N#e#TCEKiGCw|RN675#3{tk&YX1=vjsKL6u;Eep)Fb3#0&D8x5*j{fNHk3o+Vt@ z&T$oV{cX8^Lru@C)BdZ{-cW1i`()00+dk`&qa;SCL)dP+y=qa<>x&_(b7R`Y903%; z8UtNFR<&o=QUp;EMFd48#(|mD(y}&Fyztb4H^$%;5p`)AsE7!P)5PdBD`2h7!a3*Z zu5Yc4sjsSPa@rxHQ2D9faJgMjHP$?Z5z(V5rC9G(c`Rj`NB~d}L_lW@7{I4)NF_yZ zgovUlg;f=uu}BIkNW!9u&IJU@LlS`yf*8p;WgtR>f`|qTfRMG8Q31(VN64iNq~x5# zceffSB6wNMnl>nS>6%>%=Mq&F1<^WH6o$~$&YN)@jB&=eVHldGN+ovP&NXW@FrJ-WXa|RdpEZUEjqxq<-jzvoVQ+Q9*Qd%BAvw2&>A2Q5}*cvWR`` zjSY2E?akVzvVFJNZPq80rV)_YYzCs#GZd!82wi0>E*M#nXdH%|hUM%)l25X_$TU|e z{k?~MHbeK}S-JbxD-krdhs9R2%2ic}aYx=$-9jwbN036OgupnpRleQt@GGwP&PP*U zns!LIr*U649n4pHJrP_izrS6-4ZHgrir(1g4))J?JC`$7vq@l;QkoWIYpt_k-EV?* zh{()nl|-#kktr2MRqGH!2${=}hLq#dOP{WDKMfl#xR$X zvm2PHim(U=r7|F~6JbFnFuPqJ$LzfqVl)tp^WL^_`%Ald>QbLa7lK(VA6vKRcj`JS za{u(vHP?Usg^%pj{oV0=x!=0jKoLnoq!-)OVle~ey>{_rvt4a2p1pCCjENL*`uR_?db(sV_@id7DtiK>RcIP zoDah;MUHJVx74WWMr1G9gwX4{uyqt{HS~`kQTwbBatP0z|JCO=KOIXuORFcm{)LbI z5$9aaSyjCEASzOPRgG+kil&~7qV-b7c^MXU-yiO;dA=RSs&O>i z5dn&dF($?wV>C7xa#Ii6IVU1Hl~P6kWagBLa|Qqbh{zg)B4Eg>P6KB&ePS4Pgks!z zvpA5&OWXaM36lp?!mat{+<_`$uWC?@P+*rkG|)qZM3mI!NV9np!Z0M$kF!MmvDk4u zjolvku+;*++nEjyzjybkAI-;~FlhbjKV9~I+v7j*hus_h7_a~Ew%>%^*Ea3rm>if3mRrk^z=GjmVxPpSH?t3|kN#C0`=j$y7x!n+rVD@Dhc@p{+P8jkxq5Bh zqCy3?KiivqG7a4NP=35KZ-3?blP}J28*M1Ne&ms~dkO~3=3aS7i zzJ{h10k-{e>?jgBde&6k1mg!T7XrFckF4K(PBa=6pF@`}y z2rs|7GSHh90B5ceO~0J z3pC;MXutZ3{f#4g3{QO+5YzZ@n16h%f9THkt=Q~`ZWG!U+P%7oNaM+^-aodxeOCnTb&xIaY`htL8DsO`2KV+1v&fF;dIjRogn z${7w-TZ)LXDjH)_W;9c70a{DUEW#q-OoeIz#2G*(DPpZrGU$DbG37*@y}y)#5>Wz9 zkQ*>aXvt_n$XZoZc2NaYF~*#8uIsvPLQ11mlXJG-Wq~Z5QbA)&Dc(9&Q~?nLPys|M zjK!4cjwxg)M#T#HrXp5q)USSF3i;BA{Fo4yZ;-wAKde zN-nBkAf%L>b6CJPfsKxs47j<&<2oEP?V8f2A3Tv(Gq5Icsl6{brBREpA^6~p8%x`C zLw`}yS=_G9SKDoeV2G@7HC1Le?20BD*s!WTN;xdFBYYguJ2FRXSkc9 zp=;fOcZd!&atN+uUM}|2n32MmAr)3MK#0!PZf=YVHf3oc#}j?$Hhln*?yb86^NSy? 
z{4YEJdhu45UXaFd~t{G{_+$WE>)8VOKl`8B158 zA@oTEtsz!aA()1EfmI4JBZ$$OugX|g{QBX&PSv3ET6j*TB)!0WMQnAu^e(T=Ca#yx zFO7&xW68nchT9|s@Z0>MW+4b-e$A8esgA=?#5J9 z7}ywNtd*=HDn-YfyvLk1r-*2bu~S+=sJx*|WBsY(gCynho?=y1lu5}NM~+yl-$tdFGQ?cI+agoo?ZZ+Z6Mg@tcw zd^E5r{;+N%XtX6mJl{MdqRlq#H~Rz1z2RiLTYc{Or^7?Hp%U})Hc41v* z$_m&S(`(5EhZs{zb*LOUSA`;cdUi&{Lgnqv$#M+<f?}z>*p1&UF*L7Z% zvqv_aByypD&-D%=TNK%ULgbbt98>LklBygT@@|7Re+hrt@SV zma$eHTTD$_eBda)aq-%i%rp6G&F=BtL49>`xA2EA{^0@qp~pENVyDGz-4XD)UUUmA zg}t=iu=?oM15tCFme;%GZ^d|O!=pIvH@kjscwEuF*?ga~ZT4&&lC5YQN5zsQEf3lp zEmF1FY$_j8iYj?Fn~hrPx^9~0{{8#T18ZuUCV-P%x^56ca%7C*T$b~B_PXnK&Abxu zl{sKqp1Wr#mYc2HI!t-BS9<1bEs}vETtL|%T7%iBPfz&vUhfNL?w2W|Z9J9r`QjN~ z9vrV;3)l7ZjI;Yl!e8|EL*sA$4+D%9jPA9c&De$D;}h2HLs>qvz5TD4lh^jfzsvid zJ&xz@mA|@o{?loBTkAQSHu(d~Qn6@#NIdk%pKa}Ns-7ABzK46A$Slnxw)Z~yzmN3{ z)cSGsyAA4EX{aJwbjpf#8|Syf=_|)?{+aOTZ?BqNmk%HRWl=1Z&sd`og;K5l-b?Z4w)M?& z?RRbJFC<$z`+)M33R}wa&Ilq~QYcvpd+*iSF2|)-8Bwqph=9sqR1GQE)E%m->Okrg z!3e~4eRoYo6cLDucYbvS00c;Ai&1l#)r{I$Hv0ubv3-zQk_zss{zP;9 zNdd3MDnX7ZpCmZJ`g*s2jDA*cH@({+7}F;~&l<90vfztD-jT_m%bd3q$bik!=EGxn z_EtVWb8o#dH?dIjuIK){)$mE6@a#FlPY&ZuhMNFzS@w9^ z6#xXl%jlDD_Z44p#T8dv@ykKqnkH^WwD9oxu-Lc!Rz5xHIMi={bo-H${&bfv5~Oh) z_h+q$K7R7#g%@6cS$Xi_K}@3y-Z_`jh={_Vs*^^3GG~|`TvbbnsyHbEoO6mI%&L>@ zltcj0AS$9^E;(mwZ7#`}DYX)(+{kHTlu}~O-dke~i{^{~^gs*0%xrA^u9{#}+kVP06WpjDxBIAtFwm5>3-|-H>9L&*yRMQz}D0ih%bX zQKx}MvhH00%QUlKnOTI;?zY{OB;0kKbH;mLVp1)jX*sVcl#*2uR5J(?8etvBF@%7K zCB@1+Yi&}-l4?ZC;6V=#4j1#;m@*3AJh;}ldK?oG4Q0%nwpa#*=Z|k6`gYy#UOjzu z+HVnnMgboH5uV=P3AL$wEJ($g$~bSOZ?HtAum~tZDG30aa{yqCK}W&4ck`uH_3Z3yv3zqj>^6`0b9&YXQ_e>)%RGA_ zo0DKc$AXJC?X|jnwCp);@_xPPV$SH$))t`LE_XSN0vQFRhJ zfCI7sIz=a5am9Bl-zcz}-rdYBqwvyh;uj7!lH)78qzw?c8r_CRufW#0JjU9G*biVX z{e@JZloDff&ecr?VES%kKnOl3VCGWtFbwmyX?>k?an3OU1;+@n3sm zcAksmoau69<+=|2CNk%ytxiu*n^{%0wV_LqdyG*FPfPC8WZlcxHN8M%$vI~&OjiqV z*)Ud70GL8<0ECdH%o{`uA(XkmTu&Y5LQ9 zzqhv+haQczc2m)=b=uDMmNQhbhkN@EpFCEC+FBskY&On0Z{65+nh$~Mu}Wi#hK(7< zeXpOo?wX#De1B_y5sdD&%dW@Vp%2)P14C&;4T!;eK)m<(VeW<(XGcNstn^u-X_{@{ 
zfi<-cxm!EIF2@YapjFe1-PRjxt(^uqSTkf21moSwlhb?S#aTS*VhA{!HPF`wf2aM# za1f3=7U8uMR}`~EgAV8_9_ETGz5`G~Lzk{@N;va@^MgY?Sxc2m_MH8CrMzWhcCbXiGLAsAUa}47bz_ZRcjvWuu}9&S z?oQr8ye9B5-MkUp+HF7;EAN#G8&nV|#Tm#&g7edXqG_7xdyrBx#L zP} XMUJ?H2qyOcA{CsI?tYwkNn6Z`H?bzzJx=js+d4!MfpO#-+S^C!_mJC)~R+> z0>PnR))cHi*M3Li85MDft*{y^R26lR5-$VIyyLO2KQO-V2WQJYbMmMpHRG0sM^#l( zTb1I>kaoMb`QVwi`3u)~U!0rG93Zg^G-}v#^n1=)r;$)Jd*cW{*53P>R6oC|!Vm)O zQYEb&E;x2_a6RAoo|8Mj9ZlSFjvf0)|G2q$fQF!G$s#Dh`lbpB&Rd{#|K`0vaRh(y z@zHNwUHgKWf5b}9yLD}t3XvEfDnVHj7Mx=}K(vqz*@P9S1c?AU${OO~6KVk7c=sq8OG9Ukj_2R=N9B8oNVihj7qq~5o zCWP-gtOJc|kmXrvJEo8A8ZM`gUgGN9z?zZ^oq!|VT ziq28FG&uwSdFr|UO=K`!@vCEzB$q)Xr6kNigaBxW5Xcx2Aprm-h0AT{H}xZU|Fs8x zDfHB~X?l%MJ2G?`3jW@+!LPXDiYu=8wV)8d({0E)&}ed?Iu&Z37gQM$MR*zsOerz**Gmt=MIfa#srAWT zemRi^07MpnORa!nDO}B~)WxZoRSHji1!FKJF$Rg8wPtD+SZl2{Imc2G5m`$jLUidp zZ5Sn&0_dDWh0EB3obj@XBq^1gb6p2S1QcZT!2)8=3;@O$F0x&3tuxaBV-jwNh=Len z3|UZoccV5%W2!2E$vG1R>upLY=j`0%jGg;o6RJiuIUi6>&X9)D_?nrGG2Yvu>r))& z^ET&^No_Ht&{%&oKd7tv!D@ZBUfrC}>d<;$joUbk-Ew~snuCXr9$jx|w+?Uo!qr((V(%>3DdPchDN{m1;!=1d zB4Z3F#h6Rv%8RuX0vkhxOUhYM2`;OUm9v{Zj?5thG&E&R8)Hr&&*t23-%Mpx9FKk3 z`GYQcp3e-=tkVWI2QHlLKYu^O!?tX8+t{l58k&}TAQHe*@@Trv#j0BvnA_M5>xiMU z)smz~0#KO}Qvp!ODH3D6+R$9_-Oc6THWC^F1YwR`2JJRDWB2iAwr7ENx$(+l>VObb zwWN`AIXv1wI^0hw?RGoF5MvYpYrTk$Llkj{##u+?(l~fuRaK=@QXWCn+1h#+OMbFl zA((!*D^Qv`P!-~OZ7CQ-#+cx$Dg;$!jUl+s3;@!$?c~i0IF3_Gz>$$D;f6Wq ztct3L1cs34GH4h;gq5DE`vHiVfkjo(8fHek%utH07xlWq{&Zdy5p2mtTBMq%zIb$`xt8j`>k*5r4RMqV)^jl!vX>T*89kX zRmK!cDGPp#>#w$pz0Zaew)XUvky*{$};&$yd8O`OnP2?u;}89BH22ZqB1s+3?#pr)w~JMlS*^pe9i3}~hcR*-JQ!3cngIo8zN{}6<)`xAY`LEA`V82h z<1tf;YVoyky}M`UrX3`VAL};T^P9VaAJs9K?n#^eRsEg?= zrR_#c=j$7I>kH-j_uakrH7=t8CD%qW3|Q(pcxtdfsXML5Au^PfwQo78nl% z%9=}&q5z~~Qp}B3Tk9M@dhyYp$`^m_^ys&4kAFDNKLBNojl}%aZ11Ymj z7|SwN;;8i8gC|c&Xj#(Eo@po`K%Hv%dhg-uhWp>{CFR#44yoiU=`xrKY4QlrEG#(v zZiq3cs*};qH+QOh|F#E2reyO=Poc?!a!UP}<_J_(5$x60?}{s~xZ-^Y(`5&j6a)c9 zVe0A9dt0~7@eB7Z${Wv0TE>{bzPsGuQ$8yG_69SDU5PR 
zF-B*MF~*lt%4C|4I2DnnR)L6k?@bY7%-7S0(E!M0*dy8LORRB(Wg0v1-Xo$&nUtR1 zTWf9GhIi8os;cs8pGrqBCoU&3>GYS$<0$7mDKujm6u52z5g8ODTUQ=fP{<caTH}|y|s4gSSW}Jh=>>}$}DWy6fDy#B7!2~eFdN@P}j9cQ4wdY zwN_=?RS*%C#MbyiNdczs)i88`yxf~DXKg8ED9I`=>$dexQF-;vw;rv}X9s)RoKN4m zXY7-q>-(;IroZw0ty{rW&+c8ncY5|Xjxo5#xF97D(Y7^;dJJ5OMJhQXh&PrYYjCP6 zQUp{KBa5|`>KZ|u_e7L)W>DiDBIcZlFa%HT(vFUW+4u`i*9*tep)&AID)O9n)UPX4B?QGV7utA79PZ~^X zeM)Hocmt4fW)=|v z5M!)dy6Rm{5=^%bRb}Ss9t9`>s)7J8r7z@ErWQC66wa#boN?p~;B-rx8sXCzfpZ?9 zKzhjetb|tT+>b1p`o2d*L#B^|fC^~jGLGZI-+XTI@SVqh_g3?4-PRYobi%eDm!JLU z2fy+QuiSm~_MI<$;?tk{^tI*j*}aD)mX&P5fQU$Dv<5SG>&@P5{@l&mLwcM+x~@wp zX`j9KL^QAG&PkMXe|35|Umk^K6GvP50KTd!=bSUv1z!l_uIz?hDIPXd2Va%#llJ+) zcl5iq?Wb>=3%nRtmIbg8GN_YLcOkusj=$oH_dg-v+|>1*?Dw=LP2g~w9Wvy^%Oao!kXKy8W~V@|o7w%m$>2%!lg z1psK=&0)1)eztr3L2BlKZfbs0-L|~__x7Iu9ofIo+j&}_JIK!NtF`(3Jfr0LXusX3 zZV2h3Ph%M+x4Yn4S~fG#W=(yvvyH6wE%X*7-l{Wz`)MbdBjb#oh@&~JeJCPbj|`Y zh%(D`a-jSJB|MG2zIZz0s8aAXV4l@beZ= zdg-!OK|}rKs}eE*2wF9sIAgp}E`p*~S(5;h$p(=`MF>+$VX5{UWr=iz6b6f`B%t;N?+j8@A5&KbM= z{*$S{nUM$w?@JgCv)zq+PJNioxCu4Kv+DfEpB(>@z572d$6uiAlNC0q>d>k~8)`nL zZF6V9y~fW8ZG`1IcoQFm(iufId&RbZg4{8S;;$gLC!7Z0bkY_~k$9lJtcPsK9D=bT zo9&(NOE3M-T!9#7c0n@7m5=MiAVtiks>9R6?d47YVfx*xxY=KwEY5zKuYH0KUo8E7 z7=nhTXi7Lkw>AuzfE9`?sCF0`xPW1>D%_)x0U$Oyxdi}_PMe%7uhf4bkbtTH!K7w` zX_pAf2n3)2ieLaxK>@z$ne_MXVHD9Qp_!ze%$LKVm&K(^%cm=)-4$0{amD+Yr)j|lEji}+je!a$_le(ThHBQ zvzhJ`F-GsLbFP%)y%iA=o-B8!3QiU3k@J)wG;P+WPi4tq5)tJb0YnwaP5*_MIfMWJ zBE?!Oa+xYjq{bK`c400f{60+kQeU@kGGR8`e{J`<5Nj47o! 
zW^1i64nVB6bzPTIntJwB?@zHGxJXr1MJ4AfItl#!6sI;>9+gs5Wx8e&an4j#ZBzlI z@B6;*7xS4xD_pp!WU%Bx$dNA@ciVKZA7=A*ND+lgDLLiCgZ=&er6us*rY?pM>bgBS zyLj;EQRLD#^FV&bMG!$5>o5ymgFo4BIgW2U`niINhH@^eltblgR@!B53=&QiF(LrC zbi*J5oH9ByFsH#(yhPur$d2f}taBSO#h?IGHMi!pd|C}=cwsjMN>-%vSB1J&DxSGBhf+Ii(( zS~l)^cd@zn>_CTmm3RbLX#JyRqEcdvf}iBFdQWuHQNQ;741SbId|wTN9?x&YUxH)~ptC+AZ7t zGd+AQJ^SeT4{!Hhy5>xIa2om)=O%!yA}3NcXa%pFg|4{bI|6e#*m=1@M#RY{!b(aC za_{x+{txJEHhcT8pWgUybSHP~U4LA+=eK^(YQy{0mmiMGP&h{&u^%EL=bRPTkRuGt zTGtIClv0ed))F!&AR^e!GJ;#!!qwYVHJ){!zQ8$FBlHj2zwf2z?tE)%Yj|KCg@Zy8CjR1h>v3_Btx18B4 z2HoBP?A94+n^FZ(xiIDtRIPPr?0omoJ^1l$i8xzGA?d3eLe6F0|;dB*gi&A_pi>ouWXeCKse6Hl;D{e7%U=+#uy7HBFY`E7;1>d zid2gnkk(K3AHQXmrh4m(?KY0?8a?~@@$l1w9!a#?JcBuE8KJOATR;IrrCLztU+Jy! zjT&%ydVb!+7UL#v-_UkxnqYJSw=ev!F0MT<1f>YWDrY7td3HnEXf|v&J@MGr3o}HJM4l8e8Hb1lX>#ZV z00FRQrY91Ss}X{4EeZ^v76}X>sfbJ;2_aY{5fNt8OCi08zvbc#?o2|_>YX0w_14oI!7CIM_psT9t!1m~S|7Kxbwz*;xP41k0d zNfEPi-h1yXiKdjZG331;W1eOsR%1VVW~P(#lA$zNlq~j+kB<7hef8~Ei`+|U2~zxf~huNB8I-OW?y9Z>`_gopzh>uW51WWU+} z{B=+J7yoGFj>%_Ebz5alK)CBW?|oG_B6b`TGlNj@_A-thiKVa@L@TPpFtl|INL;jO z7v6hIZrRLY%prs-EOH!-Gqc64sHmzTTZ93mZrf>P4s~V8TJd#}IxqW2yYrd9(P@ZP4>xu?CxY3QotEF!6|AdR**TE!yZw{;kt~xBKiLnSbq1l#4gv{uklK7Z{JL^(j== zkS}q}rp=ZNl0m1$D1*%dxYO3rdIU2968LrT~p*kY{{u2$!4E%-*P z6LO5C#wa1rp5we$?~1V5JT_=bdo5-{H<~=4W;<>eG}-{JCW};AG))WZYulf&a%RyC zS_jN?(y>JKrmmL);XD;U-Q#%e!B5*EvIVJ)tOZg@3tKcqJ+^QE_}=5c1l@g@UvHX4 z&eAHcAxoCR#UMKGjdP_~G_>zDWl21yk}O)M2GE*zPZgpMWx-x@b^3Sgy}xO*3A4TK zncu%_pXsVw)-%POawb5ph5sqx+}8tsr}=vYfv3Z8h-0a3j$wRK$oDs&_k9Xa-VT_X z)jR9O9hlAL>y@O7Le1!KKIyFh;2RnY0HCV*?DXWrAA9(@|F|jJdcC{;<^PA8;o{Ek z>fMXrNVw(k9-7BiKSk{9{DOK~{$)Vd=?E7A1lHO0k5Pq68KtSq^QN>bet45}f7;QV&GCCuc|7*nzom!Wb^TZxc#gMwqFVJut}Q8?v` z^~HQ~%^ROm$}U?ZVCgS5566cW<0iCje>ofaWPBNcaqc(W!>T%18yLF>&d4~U+@F79 z{nqj8|Mww9ML-K6J04duoN?+B+rw(P+yC?{H-9S}ycEYivKg1pdOAbSHVq^~tUwAx z?|rD|iYtB{qN>xNFe-p37~J=nbFh_?S)$8^8V!fKr zmdoX`Zo<3a^VWJ1Non-n&lfXe5C9Ny7=~Tn*C8vGN_WwNs)np}yLMSw56 
z#Q*{T%9oM#RaITq$fkLy%cNq+ITHaHu-0Bacwz5cwV0isull|xG@_gm8-ugiZ0I8Z zIOnG6vFX;cTCGI#qA}MFu5C1L*26`=b!|;zgfPZ5l;na(X78=!l5>9YSnD=iThEKA@8kKlc1MEv}J~Je-SLC}PlP08~weqDDnpEC~(dYR(3wAP9?y%UiNL ztq(KqWv3=vQOkxXuhgOXmdQ~$sk)c_?ll+BNHS9p4Y|6V#-1L1v95Z0?sxUe-*UQq zCJZAjpL3k;>K^duGkz9p6^O6=)SV}PX^-FB?LX&TLoy7i0Lg{g7{kR#E{JMSlL&JeBL+{# z7%l;ll~JOIE}EHgF1c_dqNJWFpiksBv!DMzw+Aoux4*DD`rcR{`VukcfnC7#ProM+ z5kx0H-pNf~b&^M;(wehVhGPbUTIim>#01VTE zz;vE1rBuhp?a}TRJfn2HX%4`ovN0SRdt#h zn0$DQUivU1A}CD>zLT{dB6#mqQ%bpT;-aO1D%W-Gy)PvJz$E;iM57{N?WLX3)DkFV z@*om;Iz2E6-a#Q$&ihsnE-o$*bvB!oQoOHJaTsD1Y9)v zW-hZ?gM%rY#<4?W#C9@1VrG@%TtK-DiSo{?VoFJ36e`9zBAUS?w;_T>Tkr4hueY0F)itvjtL*wtRn-_!AZP0kQp!1vZ8Hy5rD+%1HnMai z(LpIu#$*TzYtuAk9E@?54>2dOHmf$yiLkR|39HIyR_S{%W;So99BD)}sKSEEb+F#; zPd}+)*QFQ@5;$K4XM{v?PR1z-UdW1K}RY&{-q z@`fM&<&SN?`uwfm``XbDJ!+0i_mH$TaTLiKGqRu{8j>ru!xi7<5KiJzd^%~S0L(mR zzbofKhBUk0THkf+SMv8fGhTne{*BdYbGAC)+uN_Xb)goKEb0m7i`F@}+YQE`b1r8G z0007DVj4J?*bh19+c$3!(G=uvjY<6wd|iiXd$F~_r&y}0vX-U=+!(_O<2WK1=A^2b zv$fW$*&#`;aL!p(t#vtPV@y?52ox%B4CWk%VL&uK1OS*O5~`{yrQExB zZ-0MZbGoyCJlk)#aY!o`qgu^ac1Xg;jz+8K%8>hQUs>;hzu4|BhTWs>#k!0O6Q2LrCw2}cW+<{s-M#aZ z?Zqlr;lcLw<43nX`oRyhbz@jZqlZ=Po%hZL=X)MwjDoOtuzxU~vH!k3zYIF2ZO#y9 zCy#<_17p;ZND)my11zGN3|?gqTye$wpDAUE5daNMHWHu!(v%phc`l=kvCf;cxINzf zfyK}MJ9mH6f3m+jQO*b3Cy$obAK&~}UMzpM^n}X=XgE9BrIf0AK}0Eylc8uzNpkV7 zLZzV_E!yR>uBjk+>oask4`^Yy>tFWAFO=ginC@)Yy)|^Hqy2I378`x$+DF4r{v`TY zDa_TuI$TTEj^__+XZrfkPz#>C@Hm!;3w!kJwtr_$dt~DOe<(OS8ZI*#4Z;#KSY zIF4v2I3I^$b$+qi?dI2(Rb_n@Hk%Cy695Z)@10tx$<+=KyKY-bsjA8v8{#I~4_p+h zRgf|uZ@pgcr5`sN+cZLy0ZnkEiZL6)UAf5N#w-}!afM*K$s_cAfBxbhSS_Ae4ySE> zOP4P}BpqJI`+4E^Pf2@*X(=?G%J1w zK4~?7^KV|aKReH-*&dyJrOS}m zKs1tb^v-z~qKZlpF`^}B_UM^8Tp8N#8qb9<58&SzAS|lH4#zo!$hyhr4|sVzRJXO>xB1S&>wmu1pIJZq zKUp1re>fr6Js8^8brW>VSz!S5h)#`B4lK!<$*rj5oO4#u$&7bO__*R*g@DpiF_}TK zNK>Q0A{iu6ndUVVp9XEcKidQEjy0&N;MXTSNq_kvdQu@I=HiV}1Wp;Pu>g`rP@S}> z0wM;1u!#8YvTfgF6Y-7T)u*4iG9|d;iYwk{eEnRmzw`sY9D?BNVACv0ZxF#8^&u@& 
zzs?%X*mDg?_bz0&?mjkuaHp-lHc0Y)+wAY*wI@$Dk1kGU%b6>t%hJ?yg`&iTdorb} z{iHh2IhrZ13`HOTf?$Fi(5VPngvuKh72ZN|C`1HRZN|7E0+!^+BJo&~wZ7{HL=!@l z(#VpmB^DCp`FyUbr3j)EDTHnuM{7Kyh?a320Khq~s`Evo3S2a2Mz%TUZSF(mCp+o7 zF>xGnF-_f`pRMb_eHB98_kBC}ZCj@?dpBF|wd?heA!Ds}Xr4Ws^}Ed?9HEEBY|a8>iRiqb zU3c4|=cZ{2cZ60IM064~Aqg2y$Xb$SC?&Ra?Tn3F2m!=UF-9+Agb7e{YOFO@ys^%? zocQcwO+-yy<(z{zB2%bs;4ogGII{a)|AzVMzxT1_PaHk_`(8i%{9`kw z;$glxI7Hr_ruC^K64wf3DC~u;NF*(etRfAJwg7-C8e>$DRVF_xOAfwa0C@rt6*3~S ziU1g6P*qs~0Prd+?27j(c7Mid6b*$Xbb**oFoEc+|_B@=r zdwDBXBW!NXk5}Er!+ZBnyB*a2Id>pc{rcwOt#{rIRrUDsV?;bYKCTZNeY{=H7uWZ0 z@Bl~9zFE$BJGdJ#E5{{|9C+22K8@7m?UN^8S-iMAdI_5Anx7a-vx|*)3{GV!1z-hW z004plKpN{z;FeiS-Mw9wpFe%+GlBeguwR|s(`=Spoh^TJ=+9pWe=&`FXJfI~_soN{ z+^2)R0QsOwXHP_(wM1mC$vK;VzODvJ-7sV}b~s?Wqt329*TZ>hQSJ7mdg<=%-(QZd z(Rw>;Y_ZtOV)onrm;3eJ*{(dKTFuaqHwSy0i(O4Uad8ybSoZa49uC9e&U2qT273Li zyY1p8OLdYD(+juzVYSB(7y9}rhKzR>oL-#varB>ir#$ht|VxL}SNZ6@e(1V6+g57CE%XO!orN zNmwF~1vP1F)qM2z|HD5z`ggONbvj;u{MSp@&1cui=+^CqzBxWThzeaiUDwwv)fDf} zVoGF)XPvtFl7 z-T&nOcG&+^eg0VB$(rgQem0&({{bIQl8Ailw{PV!e&sJ(HwFpfalpj7h@46s03qjP zWkarZ)uXTM;PYF5)=N!xzZO)lHOURA&n{`a14$SS`hPmxbAG^6X3mRfR`~Mm9H?Cj%RI~i> z%XhwDWNFM{EIG!>H%29pI4L_BI# zQ4J+nYYSUUxw4i~)X1dUV?bmAV=w`E%PMDpu7Vo6a}^YX(bDNl|5~;F#hX9(zi`Xv zwx9ktQYV;SfHt34yuo8*?~G*x*fG=STf-@qB0jX&53%ca=jVad1Elx34qT2)Ozn`O z4vhtpTuS(MuTbv~Cb1(aOicor%ISm%I(1?!UM`^o-(+t9er=`)p7y#x;c0Cfaq_78 zr^&*WPz<7(2Fqlc$HgKH3R7w!0;rOr`Oe?=y;t7N9EWe-QR<2-uDIg;h>Fc@lu(UB z5SG#9+;?#}-!O?vS*~}Vp_lI*z0(n$)LPf|(c$s6`SHU?C-E#T_V+TUl(YAhD(8}o zM<09$!CK2jC;22Zr;?{!;wj5FGyebC`}0^^+w3k7Tf;NF!`}N$cl^eh%R{-0?P`ND z24kh&!J&sh0HqO~D6x``=tL2%bkd>|Izfq0x+5iNMMP1gs9Qh+BrTNgFfv3Y)?D2vRDw*1qqXCgki(op0ic$)%JCommz!M3~bMLR)KP zHXmHmL^BgnGdmmxGXo=lC4bRfSS!RVCJiMH_shsv?Gn-aGFI07?-6TD&;AuFE;A zW+G~uMx>;amH^Lj9FNx<0Ifs@xfU}t6+q;Uy$1@b&3-o&sY&bkVP}DY^Uv?eB}1a? 
zx)@{Y!*1BIr{J5WYZxg_2@%22Id`?&=A4@tkB>Jg&8oVNZ9&GqtF9f+-=n9moyh<5 zcTS)CnHT@-ub-Up)#ZWJ?!Wv%%CviXe(8nHoaE`1%|#+3Lo9}75*-K-k*OLGGLixj zIp=QlyI-*$Ff%YiLtrvQHC0hGU?4{Xj9`usz!Xrai*$CHJiU39r2B*) zckQ@+?@D$vy!c|@ob8_OE*`$Tdx`6ZKh!?GMz_0uY8?8$FIq$- zr9vc(Zi#yt)94%_!_m=EDdqb5nuxk?Ri`rM0lcnH*BrQ%Qj3Nd?`X%~?YD=yY#*N9 zx%;7yy>b95eT@#z z4rXVHQy&hP@VvanT6Z}zCT%tQ_Tbxh!~ex9;s-R zDGuSt34mzz#@L2pAQwjA8-2jHxW#WSmgUXjwC0yksSqBAu);IHA7OhS7!N0#H|+<@ z)!FLF-%#D&olulu&-EK-=+Iaj2zCs74)O^DMl4)vm2-;K=*E$nxv ztFv{CC#%&8n$|N_hm+{AOK+Y1z;S<+*QuBk(=rX96Kl4>Dmspb3f>c!ice>0nua*s zd+8JGY?x)g3sce9GD*aNq?Gyke%(gjJlW27r2Bq)shofD+gI`FncH_e+fE>Aw34=` zEc-s(zrNgO6ER;Ewd^C#usXRny^~JH_xy`>R`#eRRdEg(m`oKU!B|iPRE$|RXC7$A zJ7K>&OJA;G6`B)urvqOfI}f`k*{c9<(h37^HgPD$nxktuJAuRD&Pjef z&#PkfqE`S6&0N`5lXpYD^6po!;~u(~A3XdNTz_^(Ui*%{UF_cY&=>y9>!19eLwh>5 zXXAIgBCdn*fp=g1G`@3u@xk90!#cj0@P(f)Hslw7bfj*2_djl4`%5_O57WiTm;a9m z|M62jbee)qIvR^RJ%L(>i#NunPhNWTSCuxt+eqA8`H$r8 zQ@H9M_Se_`0Z^3Kim#?qEE|%x(iLjJuXsA(E0qBVOwN$WF(E4msi`Oc*z#ilLohRt zuUbewsXXuBlI8j-YM*}jhWEpHEeMTo$!+`B}iUnQVh5d;jG z3_$^m&xIpY{t=`4$u$QzfFPWLIudmG{ zMsH9>N(jy3v{)tOTmYbn9TBL>^PiSdYL!w-+qNR20%o!tjQ5K@HO^U`qYzw-K14T; zxnBon9!8CUymu0n7}a>rIoHyJ;D`z4G?(DG%rh~>u9KpD-veUKBLK)eqv_e{aoYz( zHpNn>%ggIp6(KOQDx{Q4Do{NsTl50;kdp}zArUW~kEM3DIAJ>HgU9j=#K24*JE?UE z56ZdbTty2&AXYUovgo|`QABdiIqQ-PvAlwF0YGXMA~Ho0HB|&%3gS8E{eFLPv<4%` zv5A~9t-5~6jp+L>*Fpqc*C1I+vvV#+KaI6k%}M4YIhShHd2)_@p12LcH^F-sd_mdH zgNsU2C8EW9D(8IDQp(vo?l$XG##}QOmsI;zKMf-zISy^pMBjjF;Gk3yWsl%UA@5QZ z?EB4{S>7GK{Kbzy{lbHn{#&12e{?%t1c#3vexwH%zwp|;>e19i22>OiLT4PRP0j^& zHD^>~CR0^aM$%_30V0B%uVML-A(fTp8bYn2QDF0=^jJ$v>|v5^Q^@O0cCErlwq3XF{C+O}=m#}`*QXH{ysZJOq} z*W6+^_bhZ7+NMEHwfe@+_|Nd+b zgg_{OVo*(&xYpp5No%8+3={~^glZM$>NwnHF5KdM1XW$i9XE>^)n&NmVdw6gl(QE# zzv;$HXz6PG@zQ+asCnt+oxggg{DPg_IqYQjbXuv%#_=r9F`1bu5%;S`0Sf;ref~2= zK~mZYY=Mub9f#usDJ6U9?Z4pWap*s3cRtlausPzC!}`0ZKZatMF6{Kj4#&TvfBX~t z_Fs?h{^y4~Kbl|qAfFtJb(V zk{t(Ch%qjPu3%14hq;L6IUU-L+hD4yiB$+$Iqv!r!6zOb%kFIp@9s{v2mFD-eK_UA 
zKG+I)<2I8tRW1n*nUB28|JY-Ki*y()7a#;CSaKhrsG7;I7`QxhZv1to2^Flm34Ts= zQa<><4%=VozVz4U;~(0s@9FOC5d0iY+ru`vhqVIoRm4@bT8oxpSZLsgrm&>sehqj= zby5Q`vT6omMC8B5*SW=S1Yf7@f#)uw3+$_}T|6-YiWwNv@)x28;E)6jiIfoxK@1Jp zpnj{C#h=?`FHvUS=)<|iEpBm(-zbQb1i-3QL1uF1xY_G?P2m$d{-Fnt-TwR^zWsFX zax;boU9;&eX-%@*?;*tf8($^@7yOt;B=Fva$jgqx%#cY{p6@Y?5T2Pi0xc$ehKx7c z4>eT*wUVl1CN(pxRZFQ=RK(0~%1K19TCLbYDQWR*Ua!|gRI4uW11TY)pQZ|^wcZ$U zF0q;3dqERf>b;7HNQOwv<{TIl=gCYFG5E+1$9>MzEbI{(0ZI|o5~8bEnfd5wt!wSN zZg;)UIh!a4c8)ij&8GC*?bggVI7GSLZPP4_R3!tJu3aH`BnlxkO(WTmeF)Lic84)* z2_aT7HJ#@~oXELavvaQN`dYNs>U~_!8>1Y*da2Kr=|@#XA&WFOYkNl zB5|$R3{?$)L{wF4tq5$U%&clTk3=_tY~y$!0Q*1$zfi}~apSn*mb+EIyWW~v$=MKy z*g5As5ut&Y*)Zl>WuEel;DAsC&=V37x>O4iHBDnxa!#eDz|9gSu>@Bvk;pk`zX@d; zLle_*$W@5|h}yo(*?M3J zWikUZK<9i2sFrh1rTSaXiCcUVq6nax^X5QmP5@|zxx%Y=J~E_v@-5YOPV;uQGEnoy zH=mqsj@boO2q8j61KAw)!!XSA+%!!Sz4xw-&OiZ#0g-OPX{1zhI?uC!Dp)Q{T1ha) z=Rq<8md4p3g%Fw$bI$wSo*ZvZj?d1{#&I0SaY+SGyJ>3HqRh_wm~)=SL=4Bro8vo2 zuF9OVh!m-FE>)$f%v1JV+xNYKVJXcj2JaZq0f@k6-ItQCFRwxqnQ@vXCht61wpvR{ z=$u0`Lo*N`ye1S-LUxWzL1Jh-Uuvqw>hVi?vq}eGE!&Ay5&!_%G6JiDRtcU(EcP*v z^OK9G>#pzDt0uHDw%*6HumPiKny#;}RTYT=(R#0bMP$Iw3osthv-+S%# zJD;+YyWzc%uZ0348d5dK?^Edj0I+JUCJNvhj19LfhoFSHm+aVN4u?xWWrP4m4HUR7 zquk1z9Ab@?`RXP)kk_kQqFPVesil)XJuh(~w5 z*86t#CF+le2#|7_MJkB_6Fa>^Zi&*XxwxJ9#2JG|`9C^;}9VKy7nZ zuA3^VBtivMN|91iuF1g1W)*3k^X@R4_eodpO`2v1750;NO+ye5d7d;=qV3raS5MTe zkT$O0plv6d#YO*Urtod}(jDjb*xyP1<-_)=)Z>T!KUBe^CU`1_>j6LXUjNB25B`VX zFYWo2bAEueMlRylm<_3^I&rNHmLSR33JM5b3!X~y=p6{JR^6fNxZYWZyZ2XRJVNmi z>wNsM{JwvO|Mq{-ed!;-qsR5-Q>yuFbrugl*FX7lUp#HX=IoBYGko~JX5%B6K9Al3 zBq)N^n9AZ3`2UZ7x}LhTFTL{PwfpqZqrc`teEP~CYSt&$^~F2-h5q2?(C*K^C);{D z`Q8rIrFZ6XT>=aCI30xc>Y=Fwc6|8QAAx&6Sl|AS%>3z;_N!BW4$6-zY_jV>qjw;j z!hFt;f3c2pISW#8rVm`d@Y^Z9cXsiUB|S~DUzdb|rmiI|uF;AN0y+b2yr}{c0ASTs z=seJM8qKKIiU{l25s_4>HJ=>CrfJ@LdTv!!aXTg#nz@v|0YpwIl~Ra=%;l^|AZ;Z{ zkvJ5eMr3lusiwNUYTM7H(0iZi;~2%15mVDMNI^l(0$Fx!~%whq@bW|BwAIKfN$ru 
zuP449?SbbWILjAHJmXip$PP)WkpX}LAtIW{(&{xc63}-?hQLE+vgu{RyseCl={^-jWXMFfu^DyLan$}gW#oDHg-MY&|DT*0@ zq7x}1;(|xCwv8b=kt%9n<`9>1bTzFai)+o|5m5!95IH0U1u;_)RW(wahmnYU;27gP zCj@qm7UKve*LALxGLB;PcN@69>aTtsBE*@O_LG^c5P z{q@()?9QE2=iGk3FQstwW@bcS#2mc$DmP9P3sb&vu83$Y(=5UQIg7~A(NWiRhiP=G zF~%E5ppuqto2oWV({`bhGS7*b5CKU;bY0gipE2hwB1`hKh!v@-i{&8KTA3X))mlw4 z_yz#>`)%8{!8fItNNzieF|60?tE;Qv`}wf%x@L16bD2}gqF`q1sMb0R!)o0*=M2z! zO1VDIg+|9jKtx0+!6%SfD;P0i3}GExcFu>+%yKOvvNYfqA>(e^YpKXos|IqhZEDq& zO5d)=VQ4~#6q=2%wT8~AS({5$m?o1#h8!XYYOYn*?ay7l_u|j~*&qD2r+M?wKgw+3 zT3>qMWc6y@?bEPR!iKynCMHvZt8V2zPk<%`5vz!aiU@>T8;M)I|52HV!8u@opoX;q zx(2Z*fgYZo@3!^Dqldn|`UNi&uhzpbsp>dSM9{Rsd3KJPCV)9a%yU9S9~^@bdm;!f z0HK+A=6RmQG_-N)*P0=r6A)DmAs|^Ug@EUhwMz7%@B3WK_OKrxKSsnD<7(BHQd3GP zC34KnRb-wMvtQZ_b*?wNp>e9J%uc~-t%gMCoYPXo4Ao?*f}w<_@0*oRb1hoDcOf%7zFsj1o{Uh=?Omogzi#X>9$;`ZGRW3yUpk4LAw20e^Y>)59u01;E`{%2>{qW@x zKS-E!O@=f>}q#ai{3px0)SFVRbggwe#-N1*dx;2 z)3e=v`}FBk3cNZw`r>O}Xu5W&>B;tT)%9&`npIcCN}fxtM{RF&ef8|_)7Q>F^Ujyz z(dvG)Lht&Mbru!VF_qXh?R;@{^sy(WA3eSJ;%aKirGDzK>`1gjaXKt&dIs`9L=b&FfPKR`s!2{g;HbFtzS8{y(Ho<79myVd4q z`}k=4F2VNv<&V(uho|P-cDsLoMk_k9a7nNZ?5Am}CQDjps-@#_c6`(|%{-4H(nN1! zMly`~(6|m7h>de@xV-R<^_#mYb&^~tCLsn^fVqe&Fc>1|v1G7dR!T8RBDI1QK!}Z? 
z#MAo_S_SMw7nvNrG=U>37-o6?zf-h|Ygc;XH)9 zm*JCdyKfuXJ1RKuw%5%E4&4Xmc(3(y@FxfKt)_|<7@Sy;>O~9`miVty(P<%!b(>fB zE}IXorq_F|r_LT7e@u^GfWupQeW4FOGX2P(4DFwVw?BjP>|`iMXVB!;PkhN zH4$29zR$ZX=}R)GkE`B6#l95tb-(sKXvp=c*I2K%)gR3t|KmKT)f=BHCtV|}A>0e| zHF}4VT}=ocsqA0)ZhP(LH~94O?x&uf{cdRf8!8VcCnB1J2DWM7|&^1 z<4oRrFaib#459$&IC%08x(yNS_xm}GjdLncM7;Nsk~-V;o5MH}`91_OBj=b)o)ZAA z*XvqKXv4w`QxmDS7xcvDSp{^+rKaqw%0mY?yRu4_{J&=VK6Z`N)m zv!UK;7gv7+0DP|4Km%k#12cogUj%S@h?ZXw{1pQd`ZWz$wos%kEfndGV_+PHefY@q z;vanYxqo|+Q7mb;Q?IJ@DKKXCZF>s!&;q-DqZLK!~D7NCizq(9{E?Zn2b<$`wR6owABRRm zbRGgSGlDU*l-x882VY7809C0fW+qk5&`1f%OwAS(MqB=oBLqSOQ57)K3aAcvNf=IJ zB8$Sd-wlhAO9-*nI!)6;xAz`J>UO&&Lm~tK=Nzcc07cJX)vmJS?RKk8 ziN0wQ@4W}LTI+~)8TZc4&i1>*SQ0x|N-0tqU5ugRnsWvKWHJL!p>5km zlrztB&Y76G36{#z6+pzU>)W=SrfJxZZ5u-fL~b}tC1>Yc&E@)X8$CM??7T>=f~rD} ziCii5;_}+e$YCu-f_6hGw_4aZ<90UZJIC}!FMOz;pO~Z5;axKyIT8mQv#mmd{u4~xSkcK&vq`YbSRP(gY zitx(W-IpIe_{A@O5vJ>t7eC_X*%%(}-rF?CtFXEr_vP^3XYc>`f%VRKjTOZOK&3hb zF#|7N`F+|RC`K4NTAxU}O6W}qE0D6!l@41(gZ3W5%FZ8Q&D1ux2isT*Vm7P4mdLMTa=6&*MwSBW3fBR>?<6pmX)ZTmRKRbVX4x9C9=WBMgs4*;6 zlg0dP9;f|w3{5<}b36|RQX^}I94ovqPNg~S4DE7zIoNc!hG~}EU~Jc>#@+!~&O<55 zOO>1=GD_WTvuSRkPatDazgn3YmC`h8L7yde4kW{Fwsjb@HpZoTP(Gn(kw=}^WrOTH z7S%G(g|*hU6d!JjvT$N14mo#neKBb52@}+}{v8(woIxDg1hnXHd=6kQ6fA5cb*DFMj=15b9dQtj_c(j?PIlpb`)Z}nz$O2Bo z>*ef+aQww?t_H4>M*~v9hd;Rcu~+7em%Mdz1MZX~^6~2W?mri+UBC00(&&SvA-t6Bnc0kmYRY2EeJ$=2Xd=9Z+1SlNL;J|-tPBI}Ft z4^N-w*H{0QUH9^Oel(_87I6as03b4eG9UmHl_j_Kxy6W@RfENA*8tQE0O;2%+@C03 zBe@yt>S&B{1+V)fo#b~ad_4ZtpF8=U_2J|P?C=I3H6y@!KbhkSC@O5`m>Cg_s_EA} zD-(jj;!af65DAe>t+$~%Ur&6U+5^v{iI;DGjdFG-1Hxr(Dyj;ofX+E^0Lf5|#K=Gm z6v5EG)zXQdOEGRJjo)J3kz3s27Vl#qG?jo&Y7AKRB33N_&|UqWzWYx<{Yy_C5BKi2 z>(%Be>DB(NT2?Rh-8@b;8zSDjd-w6>696bFD{9WO^FZvsqKAqGaO3WeXb2**lyca4 zk5W}c3lSQEsv1*tp}IOv6Elm9^PHO|c<*<+gZEylrj%}8)uF1^Y(DTZGS^xSq;1=- zYmZD3xRh#U-up!#ozl$4X1H|37d@Cky)l+YSgkq{2_eK7V+0WikyW)6F;HUko)xf& zm|+uJAA<|NRsl!jluD}2@w#(Pa$$tFX;PU@q+7KqB_b!)2)St+A}mFwX=<9#ulsDp 
zlb;JVu>ta|?7a)YPt$~mOM4*aib&Bp08S|bK-0$9xLR}0a|lf-$qdb;6fv!b!#o^v zj4cAHpj4?U-g5}S%(R+469W>toO8|<5m#+H&$$+1_Cz-qPDGq@TG4Ub9d^e<3{Arv z&1{;cb?|-P11d8+$0B7I1}0C$&XcN&$Py`d6!rIczG=Se+^YApq;l4Bzj128?_tW= zW9+&%#(2HmdoNk76e&dvfhzzIpquktYhAC`&XXBdF)(lrSM5ryZVz)RxrhLB6Psxo zSfca3ZKDL6hf&46^GL!DopWYTRh%O=&SjnkaD6zfuf4JUp6%vSvwW`8T~&%*s705& zcQK4i&N$1 z7Rh%`&vxh6^I_~(D@g^w)C3WMi2-+G?p7@y_h&2u`jLmPGT`no##J+?1a;e;#{JVb zUxRzE_umO+uV$4!Ah8CFE6+Azp(;4b@T7lZGdYlxhPnPoI+5ZOaXIUh!3@7fkt zUJqLuvxnP&nOnTCpsLI_MSVnEG*tiqLWDwThvTuhuEt}iH3y29qdU4RFMj-S_org_ z>dSBbhi_i{r*CKQ=j1`b6i*W#_Ri(zwAxCN~q zvl9=wil6w1aufR5SGSs}0`3|mwYGG2v8g{5{y41r%CTkNIe=1}nWy8Se0%J>$W{<( zPDF>IQ84>yuu`fT7^wg<0jYwbUm?4JPo%k183rm7RYP=qGTy=kT32kxVp@z|t1 ztR4c4Q@;xP0hneN84SyzQI7tsN?)7%p@lp{sM}6bnkGF9+Yf!`ZbiFQ)8wBz-fYLP z+DQOtrH(+>dOw>;RcQS@mZEb?)p?@c34GG3qW8)%t1rBqTS$73G)(0FvtBev0W zs9M;&7{i<@0%G*O;aa9zGr33+EBnOI@7e>`(iB?x#>W6Al2Pm26sH@^!`bjt8S5ke zUmoIVj15eCSmXkWfv1T#fKtBY0}d7+^#yrQpOMI7j^=$DUw`hQmM ze_J{Gz}@%0@ZJai*!kTT=|i7zS3ieu{VmyiztoNG%*&fo>@loBMN0tyx9ma|x1+Cd z=fOyzio{AfS8+|tv6<7{Y5qnJMc+dB2DArmnD1XDo461VQD{OfMT#PT0+1u$dNiX{ z9UvH1glbk1_-noI-WT}F#=tWr!EK-57Pq*?H#Z2{M41e#cUCisd60kZu1in*V~)S; zV;_Ie-`gIxZ>$gmtXzBMxavRu`PWuQC;ROV)Fc;FTO6O8rg6b7#yDn*NK0jzh}gTu zS-#eah=Rz@#~5SV_I+;@q|TmQ5bu559rl~ean9Mf*$iu~$H&JAwA<~LHb6=fJ41xV z_0yO_2+X)R>I}nBYjtt);Soesg<5L}fw++*X0~i~mh4IcEj88b&2-hfX`Dqwv^d9r z%Is@ZW@hzEmT#@qIfvf6;HR7x8|yBv358ixttn+IW!3dQ1VF6QVY?q3fpab4P0+%W zGhtxnlqMfIrI+Ae!hGm9|-Ks8-XwTP_O>on!{dR|Ng@tw!cz-?3~dGB!#qYqL+A>!dxKIQu-$Z zLNmpM`(tMO6}rr234pNd2LwUUlomfFGZPh6!1pJ+_7?ANG-`!80|F8;Y89~pj$Ury zQn)$$;Q02>l*4Yje(7f(=kLf{-yXWIBc)|b%egG6C*yb+$2mA3Ty*H$*f|J<&M^0V zzeoXs51eS4Cf|i_z1AvAH?HftQtLF$hU~nQTqTz-Hcivic~Z$>HjX1R2k&Ey0C+eY zaxROXz>_CWyo=0ydV0zZOA)K1G8fS@r<}7XFmo~0Dn14vu3D<8C!XgirE;|1uv0@d z#HMQnE$igGUv1Xom{OWj&arEkK~=@L>i`NOde5a;&e=85IXBN^cZ@~reL8abV9V{a z`e3{J8Su$DH|H4vmn^}XY%?MPBE$81X^40Z4Y#Jj}u z0n~z0)Us-ZzU8AMH9u)W)^K`;X!-EOh5j3T$hWw~ZyJ`+sYOHJoP%e2g6f>N#8=nJ z*81WLeAfHo1Ba;J=q9y0N 
z3XRo4dG6O~@E0Mzg3f*U{M<~A*2g}E))7I~uqt_+1pz>i$U6p6AkEj+oB@DwnoDGA zd|OJv;K7P2#jb06g85ujz#)?d=*dlLL&3c!>M-JvuRL}(HiBxcu~av@7TAozQDX7f-arJnfugL=ceE`!B?)47+WHf(-*( zk?I@YxOGmIcC$gLQkj|19S*r^+G;Mz4p{&o1YfNBz-n~Zrw|l~Vy}?OT-0%o*vu*0 zdg954na!sm%+7fM9!oyvl6kx)KF;0Aqn}&9{rAf1m5Uetwd7wk6EH(n6a@oY^u*q& zR#jbs>kMx;1_Xda#&qL8CyJmZU%j2o?|A-gz+4vQF1K_wW@$tF(e{}ud!{9YNARW4Os$&%_^+dc~0>3y>IkT z^eu$n^7a7YLZwti2;MvIMP$n9t7$}9HBiz0fdLo*79K>JEzJx72n7Tf0FdFU#h<+Y zK=7<3K%@YG|5&7m-S!4*u++$~l|!gd~nDM>Z2;kWy^fb*Lg50GpIr z5*su=CZTy&bb4`d9UFh|{vFXemr+Y@{OX41j<#B@M5Gicla@)E*zNNWg7dBSp5{Cv zQD~d0Dzg#EJkP|?IM!0pswWD)A+ieQq_ygjQ7i(BvHvs;-ZOKorAoCBLV*H|RkT)d z>cInD578nfND@VaGFMw z4B%YrWeNG2N|GAZaox40lw7lOj#xMXnFBClFq1irV&=WqN;M@F0b<8&W+DYmOzR>I zBvKO%&NH#7nz0YQltSdR=;3+@q45?~7KNO;;6miN)L~9VO%#S*L@Z6WfAAote7L-b zeZLxpfqQ~UyW^%`wZph)h)sw@6#GVN6%i;FsYzpL8>&i7WjB>(-IBMOH6oK`L@FTj zLQk7>+qNgG)7@@2rjb0Pls2oiS3O&AcH^*<$%4bNu#1Y?wySBH`q&YJSOt|1u<@ZX zW-UHCiaZj|!x%yha(p=K?i`=iZZ*8|6F*FUe;2;}-Isp&wfJ36&xf9NdUwEi*H=g> z`&fkTx-<``M^*c!z%9OcF|&i41-~i)I6xq% zFc^bfcXv+u@ToUH`?FQAhP(G(o7dkH{)=O~xSYqS#Mm{?(Z!*zN>E6MygoW6rD->0 zoykE%RAxQw_Qlz|<9Q5isi}z^iSnFtEn?a>JuuZ-8jPE`nsYW2_EaF(RGbe-)DOcr zpkXR#aJ1f-SsR;L4^_)NnR898Dw0Hs_kOkRh^VP^DV3cxu}VhskbuB9vGF!g60NyR=!1wl#|~+p=hbR8&$CzsYsSMtia6t@2{q?_)90En z`eFb0XJ2?V1&s3K?D~8wH34hE&bM<;%(CesBegz|furi%CdSw`?d9cVsR;m#j0&zc z>)mdsRflN|A&hxmuh*---89`A}%vK7T19VA@T?quySqRm|lvS$qj)aN)8+f6~W5D&XEe1qVDU&2>d!D6FXGWqCkLNL|JOXFu2I2dNVZ0 zpx`}I&{BcRZ~RSf@pZ#ZVkHp()LP+LSg}Je3=+f-kHTA5P21(o>M%@wMVl|+x{%hxD?EQmaXptv5l!VnaM^w?Z>wPUJDOg(u&KRIxZu_o` z^~j%6q+)xmm&B(vZRTNEwQZh;Gz}^xup{qso;y08rjZ=SzBRK10D+M}rV%{@naNzu z%-sV3LYzgNS5+Tz-C_~QrI^Lo>1@?1067PM&Y}-!X5N#e$(d0CV(y$PTAQw|J{q&u zir{7Cj9P$Q1fRL{bqIM9bO7K?)EFRgEstx{xXr_aWpp5?B$I5l(0uldVIyR+}U zd+}NIQX_6&_Mz-VpWH&$Du%&foh#~1%UATPV1E<5qeArV- zt!Fl^H46bCPVN1^32A%9We#jw#RadbBo5F-QUL`tnRgY6QyWOxzyW$7G^i>9<^tol znwQ=Z(PtLeEr!60-`a1koy>3fT4CW85-m}LvUuatZ8iI!49{#a0G>^aX75k&>@99_ zi(8-uq?AOYN;NIb(c0Wne6b}9H>GiL`*A*?DFUnNJkMFNN(QYb 
zXB_}c(|#e3Bl<~*0@g!HCq(1&Hi z0|=%l)y#|;opXrJEQ*NtKBaW?`-r8K5JD{~VrIz9#7IOTxMlZbCMhK|skPS1#9#&| zCnq9BL78!Rkt`ZQFjD~Z-UG4<7%Gzk097Dz4nd7otC*p&bBs*2)@jm3mVt;;RaBO< zw+IMSRcc-0;}(Y3lAQnm&bb>CAp|h0wJv)+W+ulW1d*x=W@>uFi#OEe50|BnfGv)C z^IUVz&hYdF4ofEUYSnqqQyN#R)$V$KadE+CH`3h@0#_(13~&==ELuwu z5EfQdAY&x&oB@J>nM8J7Z07w?b19|75Tw-W>+2)=tFLUht^~sI1pY!p(W4#_&W<#w z)|uBYuKVoc3*c7uz0X0|{8K(@x46YOKg?6iUiU+2u7%MwaPfmr$@)Qf;uH1og=Q}9sm~G3ey=>1G#50Ght3;o@X=j{M;8V zEqV2-bs3}5T>8GxHIL(HVj+Za9Jkvo5VMiUsT7c!YJo&1Wfml&*fh@job$5E zwJNaKxgamge&ogNr+Rk5J5@yhF%)JaCNlss)5S}3d6An{w@ItIWGn# zN`MAfb8*ha7zxlLsA?sU*nrfU6RQUHQ#k2{r&APnebv!wjFQ|cqP(~HzM(qdL%!?W zG^p9ZzkL1C|IxQS{X35yeK}RU6W;kC{7oF+eGgBF?q7f44}i8)KR_BXG~Bp(!s_2a zSv|i-h`;``R$(E%0vLgT*$vg*p1s0XmJEN5`eB>6V6pUt(&3?uB&Om`+kvRb=Ym~@ z-at{L(Ou?^>BT;Bzo!6UJ=KSb%`*TP5*(1s=rx?pXt>lsH z5VRDrTCB0z*sQ8|9Ag9p5%tkG?4De{XJ!i0_dPo-Qw4!_*i2=f=S)OMFx2EcHBDo{ zP(!J}*aU(yVFFP@F(pLsj)~M%q&nwBWlBT)(|`Ki@Bh>CJ^#bDc{m+jL#iu>2Z+U6 zH%s*oS{t*1wEkL?YFI%vtCM;v80ksSqFGtV<)()x9DB;suJgqjN39*<3|*xe%AFTB z&gaX=+p8fw@V1}U-&20a|1#hCK0SZBZ+dNttCJ18&@o)j#;^@-)RJh5zP)q#n(t1{ zF$jUEm;ew0B2jb{7b$}>0~o5AsQ{4`)c~#qpL4CH)+t%Bl|w|-ML|u7j=h={gMWg1 z?EQor&j3U%b@4w~Om~+m@oyG>%a;pFft!d#W!dN7wg&#m@s$JX?U}p9EpGAtMwyEt z6MORB!$D-6sr#wJ@tex8oZNZUhdN(P!){Z1eS7%A<)=9uH%&K}+{6Y@o9GE`P6tBK zs*Gf$2&jtHKt!tOvMgJ#*TLITCs5y|qH0l;h_5jX%urN)PnC70|RtC|TsbFTN!(NgF)Gt=r^ zjIoU|0uUIfLKRi3$mm^QW>T!RiqsH-XJ56dDj0ob1kN*?I}j0vj))L%KJxNm0U)$Z zEjRsxB?drD%}`2KT{vO{Xhgt_z?5^XwHCDy95WLwqH;s7h$3pv``{crTQD^tVr)uD zb1rS`kbSKgp0y}^3@KOg4yEQ=+9qgD-ZMI7gy5J6#(4tAP1gc2qGbTpI?rRyIZbnH zo4)T&VBQ^;>Y*l?YwkIynsW>QCTgm+6z6EO+2owfga|6sB(;cG)rfq$+N6|LP3w@* zVA;_r-GBM&3okwX{MbHxy!!ZS!--@);rR~Qo9W(VyD29hdndhhV$z-5uaBNM_*SFM z+~O9$f>KlKT9iEHsp;4E?>^9J`)GUh*3jRlP2uYhtaS}Rttfe#w05Z2QmX|--b2+lcXKaL}yI(AZRH|+Yp4b<<&AO983Q zIp-V}%wwJB89evvRo8WM8N03{qH#C?z~WZ7%pDG0=F~TBt#vMQ&Y2vhYA&PSZ^J2! 
z)AY0Y!SBI0PLeZ25i3d1xs9)`NyXuUQL;%pL77{*LuZTfl zN^QO4!cWYvx}y*D^Y-Ky|IEFQ{KzkaPZF(_|O2c7jB4HLf0LMVbo}Py}-fA=J}50l7@0rO}bGgNkNG!o>CcskkRFFfasV z1mMb0*cqdD&b(KthM*vZMk+|~b@Nre#cwH=l_vu#f{N66CS;FT+$qi3dl4UK8r?9k zu7U8;%fD}a_+$6p`RnxfuX7tb-#v_F?sLRuTb>pwN2_u*)h12mx72j(HcbeTG157}B$Rton@7i%YZDdjKelmHjzwwNh#@{F5dUhX*}q;DtY{2r zyJL^*lU!RrTy|=swh<`XQG-^<0fNVpGz8q$E*5;q(a>9+O(mDgSyPcN1n5pyWBTBZ z&U*LJSM1Fde7SMgod={U)?E)wE@~@QG6;)Gbl!PaN)ag>v27aPW36=>X2HtnHOqXE z!j*hzgP-Sl%Gr4m!y!-J``sj66O#yt`ruM6RZF*84fCO7bqyO<1#!*+StV_D+dJ3q z&GBQheb?12(NET*KLOl>Cd}uPqc!W0uQ;?_<*Md&MQ&GN9`jul3w0aoD)mNVg{JAo zU7pO*lgWQj>gB_K^rzSLy{i4l&Gkore7pW|b9`2wyhRvxs}K79xlfNYoKX_x6J{;4 zrR?nZIF6r}lTWt0F)M_p4e~fjpY^EWE#)n!JXYihvvd&VzFQueZ z5HaZ-WAxrPO~cGu7GFq1Z~#C=-g^TfBJZ8|UInLVsWBzRHDv^%2C673 z5(5}i$p8RGM2?xXS~UxS=Xo%*w)Lu&8Ds1KfQVJAh^W$%odARjOH?Y9vUEL^*f9el zs8j$C&)U_hiiXSdv{3q&JWxb#01+@VF&KF75z!kIH8k>CrDOrXGACwM14LeII#g?HLshJ`4yse0fXFwo zloCWj@Zn{DX}3ih!#L?x+Rl2Ba;uIig=cV9!ftA1La?Sm_GBOy5jotY1}+L z?*&G*lb8R{-#Gghk{l&}<<=SUZ|;Kd7Pt6jf;X8&yqPnQ3jNUA=iXgM{SE>yziI^5yj5Z_9soAOV0SVa3cmyU;Y|gqX>PB_qeoO3Eg(w3Y!Ol_F;2 zJS$ksWT?PU3C5b=e)@Pyvs*PsVU63X!+dyh{xt3PFWi67#-E+u=^;7Dp#5p)dXK;EA{l5asU^P*cNL`J^JUor!9W}rYsy8;ssgH*I-nbl z3z|u?YHF(B&^L&PCRU3YdhgLdOgMNDUBa2$wuOahtKgh(+Fro+^?3bV zKmW&H`{19@PTiBYcBlUecSCdji)Wwxcc&NrCGH=w4Zd+itM?vjEC+4-gSbF4D}_iIjquv( zN^k~XCneY;12G?R^g$kQe(n6;hXT1eZBGCj3;mn_w|@IZy0;2@oGyzU-oOw4OZ@cR zh~i9}?Gultz9pP>-b&}IQ>bE19;e;?cmA)BUi{SA&>!mM5qg=G42TdHBU?m7HdUJr zMS_*021LC0s3TBpXp#h3J7M+>g$|l^*HYrgh{og?Q6yUhBYM6={MLa((ke#yY+DUx z#_YdZrtd%HyZlyv3`8@NB}k6}fNtsww>^Si8#jUDX7DW3W4RB0b1(lbZgGoS{Idfs zBe@eIRGEc96!QM*qo4n~|K7=W|B3JW#PIm@_dfLM`1TO8R$G`X0Lu~bO+VYnEy$^s(>Gq^*)9WL+sfl{Ifr^ZmQ2|9vrQ|(U0sc zZt)ER8aVH)Dx!+PIH$|E-WaAFcKat6k2fF4hv9N_+_Gybmx?rx6C(wG6JO?|T&E3tLxV>#4Lj! 
z(p*&4RP$WYl$Y`r0645qFE6xIA_BF>$L6Rt(YfZ>HivOw<}7u4*mhkvrFoi5WH$`M zSmvqZ)lt80`ytoed^kE@V-xajaxn~b-tV_ZP}i%~TyoXyns#&Vc)#CYzqbn-&hB<; zI=@c+;r=Jr*RNBB=ro7Y>5Z@o}LS~}K1_;EkEC3i0tyl$SMNj}#ZM5Jx z`3as*Q_IY;Q|E*_2wfDD%Cnh|F?2o7)oQ|O3`}P7z73#xCSf%4;MjPf6QpV9h*$)~ zj0nMlF@V9X>h663q8krxMj{5#8~5$YqxSw6|3d3ehPywQhb^RQXzw>|94gTD7)YLe zEqVowuZrs@wmOOyu+JPaq^2D=A?O zzPOc4Zwmev@TqJMR@HCYZZi(hY!XD5 zui32->tJQWr{=V3>-i+w?AjC7F3wx$!cw{vEvkTQ2*e1?=FE@~jeroJxdm!90stZ- ztOOMW*is<4cO=uqq7l^_DySkMIh1d$@qF2a8!cOD6Okp5_BVHW|1ED1JQv|E)eT%s z72X#sx!+Pe(+%7xO?ja;I+w?9`t+?^+~O9u_*FqJO*%0Png}2@q4#$mAKm|t|Dk=@ zed$NO?SXa8`r(V`ul-SDt&vh-rPAld3^o!dUr4chg8(swnapd z61#SsGCK?*80d4uKF1-68r{rO!Fh6yNE~wCg>f8nDy8geP5{=gA^=E1LKoxW`J-Cq zr558HIp;!fjyX#e5df^U#u%1)aH*{m(z2Rl)|@kdHEjqnsJNVqh)naq%*3?pF-j?= zWQPusLvSH9F~%GI5r``EU0+HuLq=M95<~>3>|M@tF4g&)io6-b7?)H70Cmn0Ibb8= z7^8FSJtMm!H!=e68pmNFq5^qloBOk*(w$I5a*4_NAsgCjfBXPkE{;j+V3G**gWOT9Bgm-Z__Z7Llc#7k%6v zN`R&%O2bss)VU2wJMJ!r^4PDsCz8tk^Dp4_f6;vREb+vd-SQxB@hyx=%Nk6{B_bkN zHSHmEo#XaNiPLa*eO#oM3T@8<<>{mOwD+r3M+81PWJo19>$X|9btd+_NU6*?XDL`k zq)Z72$uR&lO>?6pU=E>aVyh;Il5=L}u3PQ*dqm#`*ERh-&#{d_G^LpsR_h}(yS%y( z(c`1jQsgiWP1DTNxG+5dV7*?8NQ~k1ZV4)o zl1r|&R#4AjF*RDO|DRWj7A*oA09gplVLAZZ(2lH{Yb#ja!i{?d2?`s0dh*EFuav=LF=`g*L<(<90aI z9L^pbw??~Z#Ncdq-FbhqIXdjBYuqv^>~_0S>r`iDo99_c(esnT_0d><{^Uo8qg6>= zH+}lixBnaGau)Pmc;++l|Km*kO$~A6ia7;ss0IeJ00uRyj_f>wQzHN(3MgQz>rz4< zt)=Xk(%ml1xYVba753nXMrxLNE&j&%uE|l22kgU{N0g=6P zb+Hb6zn+C}Jv`^o0OE5KOGG?LrM&t_$IaaeNcLsJ^Wv@th%3@0*rvh3=C$wt-+$Ye z{-^Wh#gmhN zrDlf^LjWW|P#`9A#EPJ1B9hG*keR(>RWvhTXJ)nN(m!a(gn|@7DVz%cAV+IfP?Psw zL{NdfBT-0DRQjf!=b2rIUCYcS25&*Ud)|Ef(jUciZMCqmvXeY|Y&JC4>^}Q@{;dYKJlwf{@qhfn zoByugUq1bT|Fgge?p~7`r9JBNV-7F4lxF3C^gJe05f(F5K!vaJ=s6?=1T$0xFf;)8 zRXD_`&)i48N(FI5bDj#c9fr;IFTHdBcb&cdm&f*gaed+qa>JUX%wp^6I}dvv|G{Se z6O;~|zq>Ylp02oNksjAaFc+G$+MF6FoB~W|rg?q8yz;xZGttGT@6<=!L6XWGs+J`z z4b_-EGpcHyCsyZ-MKw(`dv<}Doe?zaP-`WjWsa^XIp;Q;&E@&E53Z!b>|zWl8GsVf znC1|ICk6v3partjJjtrcoiF`g?yOC_@73_J8eg2tVAEs$;D09!PaB=)X0v+hFHYCL 
zJpJB(Pv3d#mCyg%WoWZyY3@M$1bK$?mTZU3cYmmNpLp=;f4_hG|Fx6Q_Zy^LlRK?r zdFd0oqu)7}eYyA16kmn)7w=&73cB5IBAH`u-0e|I+_fXu~i3 zfg#3F#0(G(2w~ZJn*x9%UMOEne)U2hC)1k6cYz$wQYM%{XMzZ=Mt8igwdTPSI5a>~ z#ESMcdRcwbS;+q2oQa}i17U|mvDO+R zNrg14naM^wL{h~R z5j;_O?rm*mA~#FzDw!FX(M(FYi7Q}-wQA0l(E(X0MWiAwZWuQ$5Oypg2oPh8J|JSw z^OCo$3N;IYt5w+{5h>JKA?Hj6OhC*_0ZUa)tdy#1A_{;@efqiS&>{#Bk(85j&J3RO zm;hi&Vo-slBDTa2EPpNvtoV#xq(HVb#0{!*4h+qpn&q6$kcj$C1Y%Gy<>j0&t4#6^ zfdL(nBHFTP^F(z{$fUI_CQ%HPh``Lds75JGRZFuvAqooAw|#I}O3AgP#rmUxXMG(M z5ipykX~({A+SO{M4(8p#5j*0hY08`q<6tJ8B_#UN-m{> z(eqyEIF7*u705X^O|#i-Qkqp{p66B5RFx_P2x_fGiZL3(GKnZCd$(GxlGdt{bIvj^ z&UIx@ecvzb0Yq#olI4C^Lzfc3B;Up^x-C&W>JX| z5)peQQbZ;LDUx$8S(rQrUrHfnB=h8b3;^Jq>(-s+DpH!Jd2s*ks$I1dncM3!TwGrI z%}I={_k4D=QL9{Z97jOpz{Tq0?fEp!eY+~-e%2j^XJunTgb@!J@&q#rpxz z*OsbkV2c%~f#v;U;=XK-EgpH-Fs}moW=y4-5S;NeBKi{E`0)Sa1JnO1(`#>j^xw?p z>-y-ed;i^4`W>q$|BHR;@ZxZ>t9jFEDTruBUYk;Ju9epon$E;~N1j-~h(N0%2r%6Q zaEYkM!tJbi0%Rlx(W;_=W(10XxssD&u1=5-s6y)pv_A9sfJEf@MtzI0bbse@QibTf z3tk1pgq({$h^a_Lrs$d*i%ai_Xq>fc*9=&#mXZkpeH4g}n$_Kx<>B4WjqY;#MQ$EF z4Ih5{mEZrl`=@uNH?X@$c?f1x#cX1@IVx{t2tBTmpH5SAzW!J9oqzE_cJIlTYRCB) zMG{yDt%4q>ZWks1z<+GGLH#ubXHo;OXBr>{FeEcD(|>Hd`gK!|Ghu6R&?Y$VT_5Y% z@4pDs+i>T19h#4>LL zn)b)Dj@_fb;jRu{%%0A5ZuTc>eU+w{|9kV@U8>mu&568db^r!y4jOiqPZ1~`hrt`PVgcma$Mo=&lG!$4Yc5hOs5fHE?lHs+kO)Ni#e*)48yi*Fsw)_JW|=RHIhHyufX;7;%S@lSsK z`#<`TnsgvuW2Czue*5CR<0&0)hRerKqYtaT&oxcsc-Zf?+Pdp6uCD;VITu3cV^}Of zvuH|#8HMQjzBj{Z%&XPP(&S9>#u^cFsyPkC`Km(5>|!ZK?9-f7Wr-hnCO#<4KE?(S zr)i2Y-Z;XElv2<+W;Rt--E1~XIeuZ^=bXLwY86nKaN*mgl zn$l{usF7Nef!O_>=0RCPIwl+xl1>)aA7 zOait96)bIorSRdsCqeIhs%dG5i^vk^JETM7gL46H(hl5`y*y7z#h7_XbO2BTLd5AX zAhM%?h+$E7gLRt@)Cg($&l?IbqU17;V{A8I1ZqvwbnVIv=Q#^m&KVKCivUonO3BW- zKvvEA)docmhl3|3Pt!E^Z3hA=rK4^WV+7PJQgf|@Rq7H#;GFY~MO>60${N^veclX;vYyhgM=DV9V!BLE!9PJLfIoGN(rPMUd zFbvg}4zVmYs45_0T^BqC=lyPfy{yL<&;4;60krKJy{T(}h_r1RLt9drhEb%N+4kxJ z1Fcr;%ZqD7Y+@&3yWN4Ao2JP*FSAnTnixF*fSH<1({N{f!pd#iHqcHr*IJ2~*yWu2 zzIVY*IptE=F*&Za1`a8uWeg#rw(Ua*!!XoZ$)WSE>0$_B+K<7tX-wAv?m0Y)AN%?8 
zdoS9%C%7fJSAlP`RR9(*f6qKV7}>9dG}|`}0q|bN}DM$64H=d0q%yjBA%dlzr20DxDaTQ}2XPbHP-N zp`Zb-90=)1CPXHH!VR?jEElB$W4pHMp`8-s@rh06%-&^n%c5?Wmyi*EtZJD6(QH-H?{?gAt+RgVUy0P!1r$hY`S0Kr!fPnn3w5JaT4BdIgL z`9-Fdc!WA=;h_&%+hZyax;!6_#ZBSvr$6y;zWSN}-GPd&UVP`pKhp7|tJUdW%m3}` zviW%W&vU(-=;7PDf@OHbN6ZIsyybr~ZH0 z`_o|E_Vg|cdxrn8)_SMC_t|H>W6!r+-I|ahfdC081QnMEc48Y+A*2GSa$-Bqmkd>e zr1HT87fA(N<=CbICR9RP5aK{ZaRG4%Lb8OAgoaj2J>0%SpL5T&hj(0S{fFmy@?q_B z`gWr$)NO>i`|jU|d(Pf>zx%BBUGKZr|Ns1^TBo$2`twP!aLockO%_50-1!b?tm!0V@<20vae-RS}UGV~layWgwzRjntMp zRLc+p27*E*Yn61;?-#BHkN20|llX1N?eA$AY_?o+fAaQEJ-YRSMe*<%umoE22!OC{rU86|4PjI~<5+tl4>HwxShGSq-bb-*dyK)x$NeagA?opuOit-ph!H z#kCDqlg+>=GRQ2fpJx7c-!3{>KT?=^zP0EMZ#+CKUHpRWapz9_V&9behbK4YFF!up z7u_@PjH0g^gwGy5J$>;m1~;yE03;Qv;N@=bf@_buzV4)G6QF4Vdn&bN0Wt6)Lei9y zQ1Xsz&BOu#X5_sur4V^U3&Cd@opVN#$3|gbKQ>Jek?4F)*?Z?3E_KKyH%;)Kixyz0 zRvB>`IXR9+i&?Ic)oKXgxoLoT%2!o(P!)o6W&$Wa7$Q5zO>pS^Fbp|oPeDvYW!|-i z!(le}@GRa344i9nhV6Dcn|EmxLS5~oReg;Fb!fzf_{aK81v z9mn1`Ob%)hCKE9LaL&0Y8x){O@!nrm4IAZDotq-^xNU=~j^juSJ~|{&t-i%t$8P4G z|?EmK6>A{X5pLjiwg~*naxOH)_8^)bC}P&T*i(g)?qzd1;u*iN$Tsp_pxa?MAgH# zCk7YYMLh(L1vL$O0$k1Jk9SYWfDos5$P^3(!5Kh7h>k%GQW=L$6T3G0BGPs9?O_W5 zL?8-@`o_gFBvV>0mnnAAr;bKv^LBaL!*0EP_@VwE|6KW@d#e^N&Ro~_S-{9UnqF`% zdcD-T#`^(-BQj_t1Ln1m(99M`^qnurtykWe^2&K4<~}zyJuU-~gNy*k4q34I#jO?_JTfEKmCJ zk{v=-@K{iC*@01Gz{XA~e~V`Bd&mbhXqOxovv_;{yD&K;s$|;zyL%MzyT^$B!^%Iy(j?B0Dud$KnWB|b}nQr(w_$o?|k@w?f_*! 
z%;U|m)~Art{J-{*xBu36%TL}oeQo#nN1yU4^+@X{FYW^ddeD6IA1rHiDvwR$dCE%~mo5%M2Oqm;xuRHnV3_5_(P7v( zBdvxZL2-5iVbi#pbKYkP-K{X===b~X&enyHFC3%z4ywUntz_OgP))-C0DP4KryyhK zW~!}oRBILVu?ZrQUA(~HfIpn}$$`ol(afc->`rj|%Y5&5FAi?qSS*7hghH7l7$J{E zr~m~B2HOnA?99M3I;=2jlOSyGM+5_hh(rvgW?*1271Qb!o%8$*PC{WcBSb@;#KWeO zFT?>G38JVHnQ*u_WQ>W z9GbHEH7{ZN*-u>j_z;KM(^f8x_^9@@yfc>VT#jUhhjD~jogt!;L)Ty_DHTszqkwO3 zY>z(v3&U7A=Lx z&3>n<0vQ9&lmxv-|m-?cLvpxBk%X#%l3sy?AhDVCLq<3`_3~gP}11_^2MB2M7QH3gW<7 z=42A*{oLvA{-3<~lYe1#`77h+|NV>a`EwL+c4t4I!!32u#x*pzKt*))4ey4-4A8}+ zzDlbD>9O(N%oIq?pn@@hXGG-MI|cxd3WQ;LFY)_~8-Qo?VJ%gW2yxP!shJ1KM#(%k zv!kP<&BYVT`#|>f@h9)U*Yz6LxW+ZE@h=35K>w6p?bi|cJ?~tZ9yH0!8Q|alx$!f* zAO9c!E5H9!ANt^aT^>C-JGpr>?#A_&y7T!u?cRC1yZQ1`%6@-wzXQ6=x$9;jgza`) z1}#$0o;=MaM~h`(C{|3=z}PWbt+^t4Fja+|YD&d8AaW_9YLh&F(!ZDT%n4_eh!Blv zstIyaY-Y)sy-kYv5T0d0sBmniRDHN=GrmdpUj*o|!`v!Sm&AYe(i`}Xpvw)l&)*`1@+>M-u|*yo`Fz*H(e zxtiLxbk zg%CwpN{GZ3TI)D5c$(!PCys`Sd8)hDlD?buh0;gH55f>TVNc$?_UD;~De7M^k zuT%fj!yNsJUBcd?YaJJ8iuO&@NbAnkoQ_s549Chr9f2h$d&9BB8!nHo-;iY`2I#>O z!6oyJ$<6##0n`c2d#(lr02q*CLhQ%GtZSf3*2+==*pY>5J_+K^sO^JCciAS#;kMK> z@GW5i05BjTHH51eGy=I&tWNIXXZ7HK@8W5nd$P?8rkL@sI2qI9f30%_;0g$XL^eed zMj>@V9)j1+?4iQbN7=l#ECAg?yF~+W&>!C^gra^SE3+B~nl2jEH*u~IrC7Uvm z3=xQ#C`_>_CCg+McaA~~&M^_h7?1d<_JW9tP(_Ftnbe?^OpbhT$T+PEkX`i7d+&YP zY@amNo}8RG?k^v2E;gXnjH$7<+ip_*iNAUB14ko$4BxqgFy}`PV7RnfA1%M{&q%mA z_@?S?wf>2^ytnrs*q**tZ+w5g``@wd#cua>{`Ld&_?2hmE6(Gu@yTKW||~g}-(*{7m?IdHWsfRBC8-~93a$EV*K{=g4?&mZ~T7v^DnQXo3WC3SJO zbAtuGb967)&F1pyw!%C#Q036~S;cz}O*alBITlUbtOba*)?I%OPO ziA_P$7(=bK7Bw>>FteNsGQ}nWKq+Nv7f@AXq)5->EDW_)xr!ue2$`6uf~uMe6y`2? 
z$1Mn|KsB#aA>};OTvcnzzVY2`u40LbnK5&>Se@rfM=W4NKOjKUbX8@uUN^H-MzpE~ zw=h?SQZQqPE`al%ic~=D=AD`W=<#Bi4uiRHW4;{fdZ)P< z^LTc9c=N5*cg(3RS%PB#5LE;)RhT>q*SN;FI!JA5H=n*V5HnP@bE7mrNzDi5{b!qT z*k6W&99)B0wdq>t+&GRo=V2J8OO_%bqH_*FMKnnPfhGh33NG~hJ~m9m!!UXm5wV(9 zGm(;OVTaQ-Py|vc#5~o`%if%$Qbbj2Aw`RA2rjPI>y$_IsH&}ZA{EUTkN_PsGuKi8 zu#^%xAexW9bF-Sv81k4W)*y`|AO_!vmf_F?`keFBf-rFdQ+nGJc?2*~w)kcC-FNc= z4tea{gM0m>H)6Pte-NQ|%`_PpbV^xZ(vB z%OeyK5I_?!G@|z|ox^DuXJBSzptYhJAsPS!>g3d%)?fhgEs(zS9xy{CXY3)O0f=D| z!K`i-c|B4;nrLN1Kxc^Gx)sgW`1PTH1T-~`5h$CWW(7+X2YzMkULP$KoiFR_bK(K+ z-iO1sl)T#vY0b@i&FFP~Itw-)PGFdqOHF4OZm^p@IsLYA^}_jZd8C%1%|RjOIzZJB z+d6LR-SO;R{}ZonhqD)dRa#K2#Zv=SGe{vYB&TLJ zBoXq1OB>B7)CORvoFF<{Oe^r|IetZNW%P#R1W64!l4Bstpi%|c5ZDl53i%WxG1T+z z>Gy?gVroYx?1un1sP@L70y?$t0@hkd^{cjZR~xshzfV8;)z_`7c=UG-m#3eYLZYVK z-?IiXFf#!IK%FFI2uJ`-%E%Q|E1Do10Dw7D0-%VSEH$h4-bYEZZC{x(9ijpGZm(#} z$z0zp(Z$$wQb0#R2Yui77V>cJaVeT`XblX=DgZG$oCtkf=q}Yt`H+SY(ZlStNB=18 zS5VBkf+kw4sUweQb!rJ>Mk4s~6Vy@)D5(}jXU5Z3aT1ET8BeFOlp`^E2$IBNRXhTYJ`HL=@EH^4E03dkI2Tb>q_Y@!P z$+eza+wkMF-adADgVferwYI0f|H8J$C_w`V^Q!U4{!`?7v zZNivnWmr4_Ol=jSI;A%Pe5$ls{kX3 z)TRlglxHD7JdL?kH8>(bQFTm34MZg?8W{^+sd%R1Vnqa1@xec58kDN(RkM%^3?)sO zFwS`qDOqaK5JD*>jRmnzBX|I)n(Qpq6LA8X!GqVTl!r-cq_RZ7p zgV~X7E=f(IQnu}fq_mPx(-PJmwsl)+=m44k4Qz(7`UpaYTx&h zAB%{J3NY1FYAIE6t<~&m$HgJkVpHb|Wl+f_i$?|EVqd;|<)kVwb)FBF$vZ?u$D7Tj zo41bG6n$h+L=KYkAdK-U`A>cvuVU@21XP@paRuQt&(E^$T_5-3(Ot>RI6(3a}ATQFeL>LnYwSZl!CzCMbk{^##95gRn{+!Kl`xzeGl~m>W;s( z{KK!V|2FrZtL})(&9nWb!XZEd&j?*bNSdXp^Uu9*0!_Mrj%%f?c)!g|9|N9pV-Vn7dO9iuEXV%&*>{P8&m9tR9e_a63qnw zNFjLwp;{U(;2b+{1V)j=Y@{9EMweXKAIN#gEtNE1(U`7cc}!Imz$Ulxl6)!2sxpzO zmXuW$O9Dhj^e%W86BJb~BRAZU40Ba610ysdL`0Q| ziJ;v~(QYmBwuN?9soLJ?+*K^X655>)VX-L2%uTR*Lh^umh&~?Or4Wf5&B8IM@lkuC zj0G*Lm{~)vF#;$0^dVNUGMHItmq=~D>vv~|(_3gnjzvU7iYS2q8i7^k*%ONt6#)iB zBLI%hj1djg6qRUt8`i_V4{l%Xs(jw2#LxY<~1-%I`eBb#L+H zXXDxB`sLqs80mO?TMcSx!5r7%lWufuRKEOfc{eF=y|Dp+cXj$Qm>7sit#wj|SS?@@ z9IIM36%k?&`t`UjuIYhmT;m$oxW;cTeuE6byOl3f>DKg}K=J6sm&Z4Lei3GGHn04} 
z3;JUJ#sA)md1NDsBka%ikJpdK0Nw0(;o+dU#^4IXqO#pzGIQI`a?Vo-kVu)rdD2+5 zRH=|P2Y@MA0sutJc?#rft?w4*Oes35ips=-h~9f2iVCPIB8L!sQ16k5FgR0m9s#T3 z)h&FQ)F7fNh=^nNjC*j1Sh9@cI2DUsxdltfIoDdf_s%uol$$FP@l;PVH30hhd?E-T zxCUj~X&4d_F(M)d9BKhZ7g)8b5+Vk6j;UzQRJ}l}*0_M)q1g=GrSbl-8(f%oGwx=) z-7b&GId{l|W9*vN&*SOo$DcXoN6bI%6*dT1_w8Ehpn){`fnGk|to=bY0j%sguvuUb6 zBchx$F))ybJS#}2i8U-m(pUj%t+gSqr6N0}|yTstSNbrAVH4v&kh8iHb^23CVdMy=M_obyrbuRt?om zn5gD5DKb;a6VqHv(V-|KI+l8LgrB)Hyta=Y1@jN$hi}~YoeY;)6G#GAe78sn(Q!)2 znXKfJusA-qpTGQ0(Ts zJT7_9WA9bK5nd@@zcP^O8t*Gi?*Su2AtL}l6CkC$J>_K|;?4VioA(!eGxxJs`EXbb z5Bfu)u_pu_`Zxo2ErRFm?ox6I&9c*Scw9En($9T@&R^R6>@T+8F&{2g<2!Gqk00Q; z8_zGz-_Xw(xmiBrpt2wN?c%Q&X%}0iZ}F zM+ypHJ_bZAsTP%~jX_OC6_F9Z1wU!a0&qXHL7s?a>gSgGdXkP1tNdT_@nAomY-jn<5^Woz0_8eUF z&a3d!cO#`NLL#%+?3^E~s97~IWJ9iMj=%_uiU0zmbB+TT$klkIQ~`%bIpwBlCS_=? z6+nq7gpm5ejIkjij4|XS&a;Z;JQ~0xuOakOt(`fu;$pCOZB3uAc|pKAKd}y`I4$wb zaJu~?PNJVGVgQ0DqQxNrOcD}x&Y4Y_*?L7$C;}7!3(@=F2z{-Z^H_?A*z~%Jn3U?h zbIuW4Y&e8KC~a(FW;!Xbd#SjH)SBs$N`h3a$0h=>e9Obr1MODS!P$MdCgq3@ppfP$T#oK)3u z%(K~|l#+ARqVrk%>~nxm)|XZCYPH%82N5|sIb~*5^NJzP2v8Na`+l}M*&jB$lqqmW zzCY~S^%qy^d^~xh_B<_bJuEvNcTu2n5)+t@mEa z)WL(3HA+OBBQu~bI`pGuaiCSs_m>IBdpGW$o*oy`Fl$O0F3;AxUI>AJT@2iW%iZqD z#bbeLDwlmCb8j73dfrpKv%dUY!~gwcb@sQ9{>afSNqvn#^5p7KL29-nq?FY#`@N z-JaE1r)BVDOzgo7QOw{vT?|aE#l#|ldAoTLn zzrB6*#e2h-R(0q?D_yfq`ohfJTD|txqbH9KX&92Gj5XEGr1&SQdUP_1Mo&`g5_9<#DRP;zP8wZ2I+Qmhb%ZjpGl#2C*B`>Cy36+p=3LuG_F6 zA}%@ikNerH>svqaVE(CHdoSt}YM~BgcKMkXE`P4~HyeFhSu$R(!7D6x`5ZHjs(5#vX zJ!^-9t3MIo>QiNAS4#7L>T2^W1kk?vJI|m9Fg3~$8Ul$blQ97RRV7hD5JCb(bP76k zvLFJI8syF0Pi#XwAI5QZch+XQ$ck=g`pjLf#bnf6K-l@#-TmI}3%51j%w=~G?q+q4 zAT&#Xk;Gqa*1({usufLBt%Hb1;$c$(qchC|nZ%}P{BZ{`%7p@gNFb+7DpsYU$F^%G z_n~N=N|L58y>mHdV)QX0VyPJbkZ{(Jv%c*!aAy`U-4$LV)r3yLf&o{YIM>F3x6$&nPV%TYx;4Rq*H{P#J5jE_8Ff5uh{L2%wgh zoOHaxsu_o8ch@g_G!>^i)31$nI_XFjq|9y@HvZy^@%F2`laIW8{2x4g@H0cPTi9dS z_IH2#+u!#WKkzgE?&(kbn-9PJPj61XpX*uCu~?>lh~2R<>2syvbc4G}q59Ume|@c( zq<9km;7WmqP__CH##%EF2@rc##jndc0N3=uHLh`uYh2@B41VJtf_E#t5V2|S$yRa} 
z->w49K2RS$D&c?k=O9 zz|MKo3IJ-=`xyWL#A$#I&lX!bm#ZaXQABahF?%y~&Z$}{>B=x@sH&w1I4vquQccq| zA%s%0suEDGN<@gDs>~Rp_ult?HmgYBJQ@HqIp;*!GZA?NmNMmOSRN&%=9oqadzjEBQ+Jl-QA!J}g zGlA>*$2HyuP_SuVGy&BR)D)UvTjm*u<5;%&=Kb}Z7v({SkVK|Jk+EcvGDVqr@AH^Y z&CHymKIgvgo2HpAq^gzJEn?>bt3cnInX;i(QDbKE92h}F(CAtHJEojPu4-fYJ|laB z(#|^XuG;BEDgcs0W(LQM;1STkRE=>WN_zk^tSTvGLnWd^-#g|hq5u&HK@F-Jlwu~p zJlW2kna^PAy`P9=?|n`+rDSGpta3Djg0p(I4&A9fIKsbH?UT3t@!ribHlg2-7&8Ya zhgMv??1jc ztQ#Miu3Pt;=sl>p!1td#x?FDwZMAH2PWzJ3bE5~Y#l`vC67Fs<8NRgWHs$Q0Qxq^T z13;iJS6_a!4-Xn`3=}Yv0V`EiHLWQ49$R&tM*m=n;Fwg@5viCl830ZqUo}8Lnuej@ z?61(TjS)i*ZAI74)z{X?ih^3qiIm8t0-3QU9^@CJFqf5 zr5QbZr*G$br#QULiSukerZ2w22X;~P+#QAUETp~Vey%UVX(mW_V88$^cPpd|8mjlR z@+EBUl=+9p*=s%DtcAnbKR!maon7AkZVm0@(;xcc?LXM0vy=T7T>64@uRKrfpIlrp ztzA7p6|2?8k<`#(HOL^dTB>AGor30o+(aj!2Ef{tv)E*>6*cGjRL5(EMl$AjyKdh29KYM#`y3sC@Rl%iL>xtPf`&wZ}= zHq+ppkHLi?B8G}gL<9ztotnrIkg*Doh&b;6Am=OywU!Wq3*5L^)})5Z`D`49^T&^7 z^Y-Lqb{KX?kN;omTmQ!4@^#0ZquPX%F>O*l2E-z2B&A`n? zzy%j)X&4!V(a2Qs>oE{q(*xJI#x<^Sjo%a)es$dKug?&?yPA0Va=Pv9WjkBmqQy$U#wPWAX%P1J{t~)VH``&VLo4V zE?3>`cMMpok|XE6imFz2jtGz-Mo-2nHRs7QaFzZaLLj=5?S~MUjTwh=V26Y*1XpYI zz6lWkq}GbyrX~PDNGW;mLvUscFir3o5zP@?t+%EELVE5Io@Dp{!0Zu$h#dPQsSR<; z6as*pvzfMSn}lkuL`nv#RfG)0K*q73g!x2RN@b>|akWCtB2r9isTq(MDTFYa&4%Hy z*<4l?ADVz&siUeA1G0D@n3>2Ek!dkAB6H47rNqzA+OwKstyQj)k%|I9({v#?sZfg$ zBLJz^iB3=xRY1HdR(SraL<9xM$h*cluPUV`_RutqCrWkDqLu}LN-1DO#x{%+q1I%@ zTyO?3rtFF8n8Iv6>pJf}5MFF90DzgRRx|T~XR~g;ocSh*)gkHfXjMv)D$hzsWxwCo zT1zaAYmBVsnsdIYm=3NkEHu|y?2emR&Eiqi#XgtKZk_hM0cNNzhu9r=!$azy^f%zx`N=L1 zh^k80`kHIJZ(wwFd%;OX1%!r3gn1BLU;?T+hINfEW%+zJ*3=<0pP!%S?dIrciHPGk znpyA*cy?L#ej0Q8Tn=WSR?TR>=w`E7N@;R6Ns;MBz~q?>)C@>X7mLMy*ar&Uhm;TF znCJ6gW=+#=Hs>W*Py?+^?3_cXR;bjmLP8@304>!-nOU@?VzsIZQC0h~H&kE%qf&|? 
z1s6qRNTV3WwkajW7_W{^q-|U8IVE5~Pb>zW7=2UIUP|%aTYIaKy?NT$iR)|Za@@a7 zeSeznZ?PHMJ8A3U$PISq`3o~^vhBQ&tPG%Ll8Z7r1VXYQjYMuzS`tytBVxB~I`3Er ztK?krjT^U*=O@eg;%8p}^wJzK#Wozqf{rE0yqT@n>x<2RKFqt8p?c;RV=wx&cnHlB zPE{|@yI23N{s8PBUmhKYdZDUlSCxs=;9#=n{Z_vv|0{t9Q&BTOFa+@6j0G6sJ@kJD zfP~1XVp(cR37mN*;HiUJRgDd7lFWb8zdY{=*9P_>#kNMy!&%H#l&L*hMc6w^V_!#c zfZ))Off0UVnJ3ryR)ykKv^fC*fH}%-X77&1Z6;emZBx%M>)_!L-0wC&FC4PAoDN6P z@6(tcwKqHNX0Wx4ZP#GeOD$s>X_f{)c8%vUP`2vk5^L~Jw)+y)rFqbzVbC47-O0gi zam^{nBFr{wGU~mic_)7K^YGGG=H4|n|KMikAauc6sO#9waQQem+k!`TatAjUy3O&o zdj{v+lw9hZo9w((3jTbbc4KeenMY#B0C-4IjmHBaNMMhU5DI8wN*-N%Xtpu??>eX* z)i-jGVwXrSFl~v5?5gvOrlv8q_o{z+uOOPMyY;*N&y_m;D_5=4zE1%#IxIpWXdq1F znH55n1ci_QJOVmq=2CJw^y_)sDV{-Opz5#Orv)xA5uzP*DjWG z6q<&=Qo$6-bFM|e#$i31-5!QP1u0O7tg6n15KvVQhk==$b5gC8;+&h$n_(D;P&H2+ zzp8rgoyU4ep|Kd+TI)C_=UiwUm6nMKDdiMngsB@-vO_?}A_V~!^RCt!oLAA{ebY2m zq}JLrU5wFtuS!J@Ql}tGr=sTF{*bBwS*STQGX&C9)OiBP4nmPyvBox(B8Wibi-{E# z(TS^=W<%4R+IugcV@b7&B2-fYi7eo#n>pRrrVXZH9Mudq>t4iSjHQ&L;{|~3b~{hr z3`C%?ISy8=A9m;n1BltEc^0~PE#6>^ai_(R$0F!-&2|$POPh^O)`!bB`ph;+zJ&hl z$*zqJBASSEuA~}57|YnT3&678tqmJ1a5V3xB+#2TPRB#P*=&%R9bw?AWfmF$R8?T8 zR&&lxaD=R?3gQ}9M9(hI?ws7Zxj1_BjW@Ev(R`(B#zX|;ez$15g%2gC%hV6i%%F_> zXvCweZY-T(Qt8JXSH02R#E}gK4`s}V3;{Ba-2rkew5~_{`fxE;7jWoPvzlcQ?_~5C zOAGjQ`bppCFl9t7npu6e5+XoWsmRx6jBiLhW1628wj&rSfI5?ggf zMQ)ahC!0%RDToDXDb;%y0#W9%i~x{^%q}?Bu*W_p_O1@8^D~5)lZYr#@eE>PE{>TH z5k#GH=%a8^fVv0gKs|^h1#r$yJM$4nQKKgIqDUl;LU5u<#f@Y2o{4}w3F;w@f^fdy zgJ@)8W<+z2im5ng;(i#Zo!bce*_^fo=jjOO&~AS`%s!b)=TjNE`M~l9Z0dvE*=ws0 z+?G!&%yb=}Ska5q%j`^j%FGsHs3dSSRr*<>5>D?y&F;)R?)> zj$pl+S#=Bt$buBS4%=fT_RqD%B(g*`BqatXh$T1F>2< z&vC(}I5DBCVFH_}%5^o*`y2XI5~V8E2#X0Icq?KoZjq^s{$%~~e|z`xe^bW`8Xw1W zX8q%`KYQWHuO9r~pj{#lw(EgzdJ-H}X?Yqz*TpJ@c9>Iz!;M+2m(*g*4QFHNK~QT- zR13tWknBTgv_=^oJl%fo;?5iGYxTzRBreUtGAswzxtNy+yzsN7zA|=&a}B%4jgIxO zzKyGav}_(W@d#&KzTErdH@6&*^C%bVsx2Q5J<;-$^ZckO_s{hxs(3alwW5iYIk}XG z#mJO$C{|Hrjx_L{pw;UNVVi6i+PlcG+#i-`gUMhHC^dp3DmrZhp|AudQX2*dortw| 
zIdr$Yjyp)+flJZ+5|})c9YyoQ@xGBVrr?`&pMna7Bs0%Ibqmlioy1-l_c;a>DVA~! zoz{A=KIlS50Zm;OFMbv7e-8EX)%4?x+#G%KoN9aa84(oFg!dXdR2m)9co@o_%+xuTBwZXeGmmaT@4T?s_QU1j z0i`*@Dvs*|=&;CxkF)G=pk3NQo2D5c6A_GnPQdpat%hN!liv>r2+|=TBFCnh3LAQM zmAYAbD3|K;jAj^H8X2l$=R^t$?If=N+-W=nZnxPM`(ejgXTg!JFOPrbfB*W2{_8n@ z_lx<-T9erjVP3Tq#C0=AgCOcmyR6C#unzQs_)B2#%7PV#x zp~e}gF)}!*06@Nl+>hqFeecJM7^0N@?&QPG@OciQ?4QV|{^XNx5#IXraXFVqzw+kk zI2(uV(T{CTzeiS|a&9AO4HSqHRR9ogkXe$(sOaRNem1iCYKSO+YQTgJfvkc`fiP7U zT*(a`1As{RM!T9-HlIO(0)pgC-6F?pIkKc3v4gmnNk*m08Yt_lg!e&t)o<{&d z1*r&#U)AaG?DT%)(E|X8NYjcL;3^4Pgs=B!?@KTeP8>CwfEj=iDMGYgLxX(H4P4_I z-;&6n4PZPz(N{mXe{#+-91c(0ruov=e*EqAzkaLUd+9WA`^cZ!^ld3vT zsxpo}00K;f2WVzereq8TK%1l-rbNzrfGPSAs_K^`t)HiA0su3UBQuz?cP3x0_nw(c z5%0YzPG!*|sv?KOPE~8oDMe;NWD%)Vz^ICtbEAl=1`d(E3#8SOBoYO!hm?!MuIZ*1 zvuTDsP~sf;;i00GhK$S5i?|sTTU3pYBwle_80suC}%}shq5k<0G@?<2#XN9WF zjE;j1dKKY#1y&70E|vZS)hn_1f}m&=x8R+$e6A)EMhV1MiGz1_t?h&31IXf~S} zp)W&3fHBFeb>@rt4(svJa%hfj51${JSAYm)k~Eruff{~Y4*G9N)NwE*D;Z45oEibp zuhUmAFj@Kl0h}Qqn5mh4BfAaWJ*6s}b*yw6Pdy->TXT1auQJ`AABtdO4IZ_*r9AFxuI&8+ zS1pr9&s0iELuecYmnEOJA%_vEY6|O2Va~7J?2orU_Nf2;4IsxD&pvni!H@H@JA3i> zieF*P`7i`cw-5V`Zx|QSgj(wYU+U9^jqe;O53MZwmumD_yJak9H|BoL(_8WLtBcR<`ZN3RpL}ex zyf_P0YK$=2!FpuhHI3BjyyB#|W)9vPfb#*1h?dC!&CHArQ53{MbS=!R)I(N;(;466 zdUk&4tF_(D7}H9BmFnB~&jzmCGx22;W+ymrFpt^6!K4`nKnEU%VIT(A;vTjwbg>+R zNAErJlxS%7+%4TIrXIC6zD-6L=+5kpro+AyDe2G?A}#|2*A*oho+dt~p>pZh*dC3G zZO&BG*V_;>p8wRn`gHr!;f063#|yOuf&wri5?sBW zIvGlFigL7RS6j+t)Zq8qJ%F)z6O#<*iH1mJP_#@Ep!szk^$kZ4=oBbf^?6DcBBJB# zsKECrM6c{^J~{e#au$YZ*B+;D#y9D7Fum`d)tZaz=LD9m28Potn3wdfcP6bVI{*rDZGYtuBd zS!dv(AtMw&-b5}fs zA;6^j03ik>L_i{NZmRgATFiUO#Q@BQP$f^R_TVaJuc=l#}zU)u3sb z&^Ta3#9l#&_p zltx^ygcz#2>}EOVX(Vh2)wD=mG_mh{0Pw+cU^8<_!Fd3vrIegYtxaet#?4_*%tyzo zk}^AFPXdYzW6A5yhMREvXtmkzQlDCI+T|$L8W~;59a%Ns@|~+&Ic1^MglO^YiZ=yjp?%%L+r4z3ToL*3GsTS^v#6=jNGsu z*n*Ox!@a-rMZI^&9_p~q^QQRfEXLx@dh;D-s;0o?WAIEu8KjOw?|k&$6QM~V_T++9 z%Zg3cD#3nCsqY8NSKbVI-dT)=|+p$YTN*R8{-ry+0fVAL2C7Cq`A3 
zXEv;6*0FcQY@m82lLDwpzy_L98cQZ3W@P3Vqj3fm7oz9(a5$8)Fwyb+=;_%u4QW1~ z6J5~{!!YDr!HTDVP}l|05!r+DMfBuwb|P=xk01H`%|H0s?u&=@6Bp-gvwZvE{c*p4 z<=#uL9=}LE3~AWbeUGIRX3w)`o^viz89ArwjOHgtZPyLMU}lbd$t45%u3Pu}rx)kN z>ftbCsSZh1ODSGq*3RZ#=g8+V?Z+Y20%Yc0gR12`R9)HnHh=Nn<+i8weSOhP6~IgZ zY#J0RUVloTA&tOHq6E`En$gUr-p^kP3OXt9P|cwMlA!_FH~zkRT3PurvMMS%VlzOv z8X)9S%nS{l>mGiqkM|niOjy7M%AsZ#lmLj42#pNhx&6I|#fO?({d|0b(xWZDwr@Y6 z?wxj2!KH}I_2E5v`>4Lz3||UmYmE-+P=rKL6lO3B*{8F9?M8poWtH_ieRPXC2*aaq zU&a;u<>TGv@StGltqbvDy*T^-e&J((YP|i4uHqs~Z%wFchruKxj1`I@k%SU-U(lrs zT{^>-*Ix=SKh9$UmuG?Vp{c{(mdY;LkbqhqT|QidnbnOedI>`Z!J7k&8rjUe%Dhr( z7hwdDeRA_k)Jdh(DK@~nu+#nOq@N#!-KFVXTFttN){K@NMMm9a3z#G>a{AT9PyHWP zOJ8zI^E=yD|5%#cZSz^rqdWOnRB-*qRM3FY1E4^GN-=by=Dk~}jdyf zOYLHKeUI}Jm*wPk-Qa3`ba?3x9zOZUPl~g0DQ2P45StWaVY8gaoHHW%W`>B)5iqGL z5+Sh%*ECH@$&d{}4T)i_=2GschSWk}iD`yGdq}ZA{V(X4PKiUml(x%K|PJYHl+tOn@ABn8UM_)L5e1re$=hGiMyZOOo zGxwLAL+=fO0e}gkpkwj~zE}kS9jap=B$%No14(L3Rjmq`vJn6R5F&rA-7RgEN`0bL zDEpQVdw)`j@AUCzF1I#6adz|H-ahz+j<sHSrZ$MOS4T{fQi8~PKk*s0R=rA$gy8&iNX|JkZ`kA{ zX1PihDQ(+=x#w3}Gb3U^f~onyg8(2JpaOuI+N7{HLUN{vaFvAU-K0Qd6)`X(^xmmz ztzUkEh$x~t7bdR?Di~v{ngAfBQV*G#qw`bc12F<3AjUQVLzNOJcw)~Dq*{uoHz+o0 z-^4IqF5Z6h&dsA05g{Qn2Sg*)To^H>--eAnsXh;u??OY9EX9KXS3Oq-e6`*xJ^UZs8h|`GS&o4?W}Xo z?T5p1x!mRKd`x{B^Dvvu4nzO+a$Uht#F49_ggVMFjCq%L1#+OqO5x~FZk_;iIbVkP zt#bT<(8fCUio}4^WRc-|NcQ#P%iZHd6Pb67Z$_|HV_(I&nxp2HaEPnd>&IT`9_#Mm zrXS6RreRei!&*jQM50{EFbr6jtD?gQ!_p*1w}S{PkO+h zAYwHZBh*3&1}8|6hGR)y8+ zI2#8O&4|)5LqmCSR8rr}cFhaX*28M0aFMJT0gXB}9{BFr3xDYHJN`sE`1Wu=mQviT zk28p?8CccgU1(hJ%u_ML)YD*weZOzX#pqp#wJLiTTnJ3GJs%Kj+g)fulT!jMHSaQI zpRpXLw^#h&0!)iLZh7=|TrXPe|S&>zn5XTs`-yV{uwoAD-+BE2fu}7+{Z$Naq zK&e@+1pq<_a~DU;Nh-Pc;^_*iIpT|9z2lh+FOJ=f)s%F?+ewin8f+^<%6bgMj+ z=9ce4j9W_xr-SSFa@3UZ_Ss**eDYGd{loRePr0gEi(buY%NxqHZIv6s_ATv=aw?0yjK3*RERQTyX+u!}}+i(4~ z;pIPgHv6ts|KP&c`Cg=XdhwAzvAy+e_dfeqy9YnI`S6d(-9Oysn8H(+55ZLGmQ!fa z20TNElh=(*1*TGK0swoa!!zSaAu;XW!M(pER296s0ILON@w+5|uA2R`B51$i=z*bT 
zG;n|pnZOKTI=6J~8+~6wVEW8YA?C(nYO46<=&);s;2PKXmIsMJbc~MG@yXG}J1Vj;4j?IAlOArI7PY zY?yhPZinEWMefk_ohgXM6me?z0=P22KjT5CiYk#S)43`65Xt<|7VlH^+y_kCM(=gt=b*o4{40}Rc z&gMj9hNhTn9f$0_2UGwfh9Wub$7m*CJ_KOOWAfgc59j9>?P?W~L(?_E=8MHa>UzIx zXI;xditPGb1P=kVXsHDPc3p=f3-yDj-!bMw06!oUa$t zEH#fKfsK{`E88SH{jEN-YkW;mG)QC)EjdSXUhDyT`oLj4yEE)}`#v16Fl-M=T6U#e zHr>&DKA*f_v$kPoLhu2N$+0a?8`oBm$B5o_=dT|(|KQ2^ z@)Q3%Zo8l3KIQIlzX|PpcK^wvllf6#_raH6S#IZ_`Q={_7Ux_l89-|_0z6(EHK84K zthFxY^VAO_7J?U%vE+iLV4;a!*ELN803{a%b4UPGq(aVzG>$nzBqJQQ!`?<$=abqk`o_mN@%EH z@QrFi`=`UN#VR5S7#JC$U5yKno~0vAMu_Jo&g-|zHGU&tQ2;|Sa-cwFFga2Bd@6D_ zyLfPl=YG~6nqz07kj@{BMb!+}M74IQh_g1irub zIPbgLHSL?+9l9gsJI+y!SzvCLQW}D!_`;2&e1_k<9p8*+e`{IZ3^feoO+&HE zMUc_^Jy6igd%FiRyZjP-r_651IEp2PYz_?(#Vi}zT&fFAJ7TUDz%6nXZ>`Ij867f` zRa8-^h-&t=-WZA@f#-VZyssH#9Mj2b67S5Y(cAyh?Vtav;lY0nn@{h*^WQ5+FEshn z#COv8Xqlv({LmZU`H>sH{8w)M(qA6l{QLV){%`2yCCHNK82USdL2s8XoQ8TZ1DJNB zV3SJ207b<)=O?M0!o(1~4?ScHtB3t%aii;dUkw~)(=pfJzp287CgWMpdgR0(b4a z<|(hnYORRK4oaB>`=|nhj?giy_a3f7c&-pam<0GlR7!y>l_(LBYMmSdO%ryzof#H) zl~p|{@~ad8Uu!K@oO8^GOwKtH$@^iN4`i)Gga*o@s>UWFO>}MB*2+2O-EOZ6&+CO@ z;tFymBE~xAM1+Kn-IV-;GR6KplOKqPh)oSar3gAi1XV3HO?z_Z8~~P5QW~crtg=eA zF%3+mopp$YFi{w4lUYmz8QQiLvmuSO*4VTFV4}5JU?wdB&^RBF#q4~uYds$h2dCW4 zXS;qol!VB69MDpG9U`RO+DVZZFIi;jlRrT!bEQD}4 z^i9{!=S>>MQl;y8?NM`0;j9nY%Vdw`Nv!twqi;f#ze8GQY@L`eryjhbLU=jo% z@B)C>L~(kPw`f8bgN|fI`1Hv=O?{dh}4NoAY*c^n|}p z_QR9;y+`?Xy;^>B2rtY4PM52vm+PN?^V9RTd+o(nURd3*S3h=t^Ayoy^yqN6-YjOz zony3^RRr_%i1lPEy=TBCvm6>cdo9W(n@7}%5#W{zN)lwQNXho|k zu?^duIf5h4QHT5C`%k3rv|V0&dJ9YjhEom%KTmeAu!_iJ0QjC^L2O{k z)kT|`q@F+(!P(awBJnQ(zh*I}AWeyjQzHlfh#4ZXUKt{|TIxzaBnSlzb5r~-} z2m-2?wqnNBp4vFE&?e)VCxM~Xmw|&Rf zuLrr%<%ir#Ry0KU9Ik%>s_sAdeZShi7PgnK;b)fp7l;1x$kB~?*TS`<@w^n=Gsi4^g@KzafL|2=W}#@B#=+P{O})bF5SgPe*|OL#cvz!Bh|D> z5C<6BtsR@c*@2dlEcH?LBj3s(7jb`s>#p32!`U0{_vND(Zc0k)^~!B<3>Pu(n0?YU zq-wHI8HePVfv8Ajc0d?HXxnywxJ2KdS@7nUh%SChG`SG${zx4$k&ak~xYN>Sut3-fIgs6t_H7<#RmW1Sh0!DN@l>ziL 
zY`!?w_qE?#5c&+b%P=Z{7w`4PtdNDL>tE+V-*EK6atE<69Y6v2vD*q)_Sj&P%(qHPo^=o+atJSPAs!%AHS+ z>BFzOU;KqPtbmUWkwsEYL*Ms~UDxt9%dj6@)YyjDxSWcaBMQI}qHEhmV6ortWuNZc zzS*|({a8|}0J`YNdvB&{rPSKZ8UO$z01&ZM$`oPW1XuF2UI9b^U}ooN9P0Gtv+83H z&w|bY;F+Qg0N_gdZ-8j3RRjzK07OA^WsbGhz8^)(B(e7%m&=Zfrn}{HzG3>O&De}< z%`yoq2mriQJ-K<}c}%JFhq207ih$A7uNy+BR;NRn><%ET0$^xnV5|bFmC)BZS^JEL z0LZMCBGdBg4qLZ^o%h2qlreRU>$(mAWUNBq*)emroNGZeM5>xYpbRoyEfpj3N5qV}9P|GF{K78wYvU#4gtY0BYJajq@0z4;ZZ4=3+BzXEZs*yb&>*2^6uK z%gD^+(0k+%(vB`JHnS6U+64q188}3ntKFbEos|3C@cKKmA9ySL_7Bi6&fF07Xm_#p zr0SqT`s^E@Uv`U=)6-iK&n})8Er`%G&5h#|$3coXPqWZ??<-QR#~g>~aRUb%dYZyba=PJdA`004GVb$5~rat@t(%TmK0MzGy7$2qNw%!C?h=c=l? zU^!!}{3NU*w$7}L-Y;@omf1_jaBgh}c$s>1-!Hd|mK?xAQlNK!Q9qXaqg(h>1g|sg zyL2wy?6cqT=f=YqKKl4?ek{KA&gzFR+TXUh{o60cvyVMIyTLUbQ#v~Rz}Y*ePk&Ul zpH9aw-fI*emM`A@6OV8Fp^C{4sp;=@;MmPjXp2dHG-4-2R=j zH~v$@k8D5sM{RkdDgQrs;e+Exe{g3-;X@H^b}m`+d1Wfe{$Y6Z49E}B`vbE zCbaHWdJ^_S<}Q^u#M7|}$LPk^52nXL14D331VDs}X6hU>G8u?SEoIujAba1m(Z$Lf zE;EFf=Vq>6j@=s-%K>k7v4v(#i8NPR+FT z@8C3g9GA zkiQZmh5;FI;7mpg$YU(avK7)KEt(RCCYwD}S9evt`A%op!#Aw8^2fgKb+Osi>_=6z zs+;$RLgC@Q`_9>CpS{1mzO{bC&7;HdbW7PC-~2>B#eD1IF5WR8a!cr%QnR=~u*mJO zECu*zxbxA|{v%ELmBpZ!#TeJSY4kh5#<|i)+AHa5`qgQ?S&7AYbN7L%H zjlQw${@B@{`1kYQ`k#ONZ~PC2ANb39{KF|dh|}A?bQ%_6`$Y%-?1%ri?*7jQr=1*n zJ)iVo$M+7l|N3h4{Uv?`ESVGlBQ!H3128D6IcH|BB4(zlRS`h1o)N!iuqTWzw&wcA zB_-gN*C#;%;PPAis}J0GV6*R`7S6(E1iypp@EYG!nEfIVQ3;jEn2;G6Z5Y3y1#-PN zxW+ZUS7E3gVqyWVn5!+jc;j~Z+M5zaw?0(y?1%oc|9o72+@`y?@4Wfw2{}WgY0Rps zQUtV(VbQ=OY3gg!E)EVByIqf9tL{je>~s<-c^cZb%|d5qXEhZ+0Kf!*3CKxGAe^$*aTs$7ZA-yRE^|C`t#zJ%EVZi&wbtg@oJLKFXzt31 zXfCBlZ5CZM%~Nrnq*N1iNOJ%+0j6=9vn9^Y&qLQLYAF>>9Fb}rhXILFRu#y(-a1+g zeV>N(kyl?+)rMRox7+Pb`|VkO!9+*x`jA=*LWWY5*e%-b&dKd!deL8Oce_nL#OPFU z%3D1;+OI#{mQ=PEv1W=`O^h87s_J#o^gG7$D{4+0aR?3x@S30UU53{BK_WCUD|oA| zUD7mW!oyp~0&h1V6 zVel-`dFr|l{G1KrfXKih5qm^YDMba40m011G0jV*sWM}&S;Veb&#LfzYx`V$#vDS3 z&d+i2rIhD7vm&+D%FF2o!S8%viJth$q%s+55NOJUhP_ zKGNKgL>ZuAAGm|;Xn!G-`~LQ)Ss#7%^wAsm;~I}aa&X?FtEs;c<8B-;Yktsg4^w|@ 
z*&*W44^7u{@TC;Rnu_i=m+R#+#<<__opX_CGxcA)|F*MwbacE}EKZT7o1 z#GG@Ctr2dwTh-b$!4dc-Z*)ISZRg+<%@^-;-b-78Yw8?^W7U+O_j>Vz>xc8Z4Prw; zL_0$^AWcq-XV z_suDQRAUZ{7O0cNTyM&ls8RKy7=uj+6)47d(cV6myj!l%@BUbY{;57)-2eRdJ^jCZ zDf~0;=(VTt&wgz9n@?{2*jhi|$YcNZFPzweyUW%6WB=8of99n8^t62-M|UXtH{JeZ z|BK77el;zzAL_0gr{VSKxZQ`3#{TtvO>Z}^tu-Ay{w1uRq4j}$^((c%01wu-=7@ICt zOb5&H=Wc!NuX-C7@%Gtv+O{j?zUA^Hok#kVzy0SwcjG70%l}|^{&DKC^`@=f2+@kxzPFNF9WtLQNg%HbUlZy_Mzuet($r?d40`r~5MQ z4%QzVp{df}S|22d`S$H8)$J-(r#@MiWTNTF4Y2ZO@Tph6?`4_v^4EXP!3x`U(EXx4 zP}N!!u}8$5l5?)>I!XeiJbd(K6S60vaFE+u+i=pY?s}mCz|=QV2&Gb>3TDSQ`qvSr z$C19M|J29hcHO=~?%0MgyNYd4H`2Hm{kmP0&&vPzmv*n6yCd?2pO>Pa-4xtR()tox-wpP38 z?DUmC(bPxk^V1*spWgezzvdtPz57QmcXexUSXv<4ml7sl5d8V4|J+263&n%&4*CIC zE1$+PJ%a92Koz9C<1NL2h&a_krOc$CIeTC(wchWg<#!c^10r(8>Vzr8q`HmuV*joG z)dy}o5Vkwt#Nr@{0U-#mKpPiQ_hd{&rdo>{0y!1~#t)WiaPE~nulQ$2(dXTt-wIE> zFI5){->t@Rw0^%dTQUvrQ6^MHBfvS?a&|JEw-y1f?r#ADQABja0AToaR{}I31Nl~( z)ZaajDG7iGKmmaG@ZW>wM|Jh-DUv{SrC|Xp5yE69iqG0@iU35&2KDdv2(EFBYrIb| zW$*^6fDt0wFdi&dZ*<*l7zR~y-6A%pC;pdrZ~V|={P3Olbh~_G{~*xxqd)i$K0M#P z{q}!zYjxv-u-fJ0Cow6G>v4V5Y_>S1>Evdt3Z-~uyx46?8)r7^-gOJviI(cfr&LNw zfJ#JCu?VQj6?M_Evze&orOsAD<~?_d#t5qlP>7LKl^u4mE2W530B|0w6<~C(3C*IE zlECq+>2^+Y)0c1rbXTaxpX^rIJ!wbX`i>J8+IdjMJ1>2kmyd6)mgPDkPVt zv{Avrjx1(& zIgjia7?5GPT9i^#Kh_F!azPA%KuA3%==Ws;jSFC3Rjc!H9J7cJkt!1Me7bVZA+-Db z{>F{tTI-OeqoV^N3a*-&jiYl;wKRT#>{3doxZn2d9g0TZFrbO)n7G%&TL-ofK>$_? zVH`)Npl#%A%cjja@29a@F7j@_Y}$Sp6|C!;G-#CxKo1tjx%8*Q`Eu2aDdk$63m|4n z?0l_*Ll+zaP>fN;0G#6nJext48hv2$IcHNuqHHA~ymEYN9LI~XfBEjqhuxAk<5`yG zAcoFpyT7|UnfC@^xd_{>s&(;z(QX_bjj2D~oIN;wlJ}>37Z`7a{?|1$kJkS%td6W? 
z-kpkb^F$z&3IGu1LC!UPCm`1OvP3MAL6iXyu)+(wtzP5(#8j3X&C=l2HPLO{ol)QX zN5boW@!pMJKP!D1CUHfvOPg%m5(q@U?9}BZO3{=VsZ=)Hlq97xh3Eq*E_^%;vDRX2 zg1XzB9UUA;ADYnAT0xBhYt>SU2k?X%TvahMLLR2f-iZ`d_0B;tPksxzMUDvBDn;*O z(8;`E2rkzGv&WHl-ubdml2Q_%&o8}qK5z(Ojx+J#cWJ}SpZxSkL3JF9$DpbJH0H5Z zHKm$`nZ?Mp%k!t3r<=MubZzIH-*xd=vxOCxcE2aDsXQ*|{VRvz$&ZKs1gZ|xv|1cl zHZgp>>s@pmH{R0PL6I%+dbvbMH9^_E{rGVl&?a*7+U9X3Ew@J^d9^?w^RZAYm zI!z@BYST2H-7pq(IS>-cFpgr`ML*QMSS*~5OFQ5&4!GEjpZo0F|JCPi{+ScW1;f0< zch5{m%?uUaD^@rns#Q}oc0?pKt5)yhypPjQxJdAyJW)jVMk!m_CMRF<3%+IoO z0RZ6ZPG`s-ZOi04NGFHz5GXgYyVb)77h2%CJBI1{DASAcFvE zWjusY0hWkeJp}-R72N%?r+0sU!`4whHT7rOKiv4{(*I$Xri<>CuU&q4kM1$P47u|5 zb$_0hYaQ9fr)T@a@7u?d-o2_Xf9e!D>DV^wP*(evBskan)MMQ=s~>rK^w~F#@4fkn z-(QbDwAw!k+rMWI-|E|2XN!-XfB2(*JV$>3;U@S4L~M1xsArgW3X!OZBO@~?`tr!= zXd+r*DdSJcmg5? zR0F|~B02-55X)7{xFU?fN1ZAYH2qWv)HB4~w&}nnCc*%+b>@&f5JWSZvN|$!h>QY^ zE;i0U#<5Q+<-!h-0HX6!5=zbEj$=?XXGOHCA)+lB20BatUm3*38>LxaBqL{z57QJ7 zae8v!Ik#-1PD+HK;~linog9r1iZ-X4qq`sbvo`h*zVP#lcrQ=OE>Q^H8%>iiH;2mx zbi`=`{oVRW|cUt=D7BwBU$B&z^ z>0Y@peD2OdA2ywBR-64~INn`ch=!V$k}hryf8#ek^;hijE40QF-|xaQ@(N`)aRW7H z*=t?iXw79x8rZButY`oEZRZbI<=Q*)rrk(4}zfJDpw z{>}3@&X*_c?!o5hW;>3kad-39jmLlE{*9k%RDoTU1hJiLkctOmz)%3@M}&b202six zAe{$wNs5x9oBM}MieyxSv8q}%6J#>wM zOoXzlhw2AzJaEOkH1R%k%O!|S`+gjIbYN1Pk|QR^h-e5OK!e@q425sA&MUPNVdH%h zV?-=cD5t5uXUW=gG?P_PRWw5|Lt~^_L{CHvXhddKq(0L)e;45V!~eV!bGD(TN#snM+^Hh%x4#%gD2|%sxr#^sOUS96^+c_I;mLoXFDd$&TekGYay*RIux7)3OoLyez zoPE2D71Ra>X}a4_@BX)_u19S zGB2pvq7$RRoBL85S`%dgyCA1p4; zcYVK$t{ujGtyRHN$%w###BfX#qKRoyA@7*cq*jp_0(igOZl%|?NT05wK((GftgSTr1v zd9_@ndGw#fqo26&?IPFjsILkkQzX3`vX-(SX4a4Fwp#OS&T0xW;!e%*?=^ zFLVUTxrPv!7yvLAE7dVC**$#a&u!{e)B*7*yi6;uG{uzGd->3Iv4um@4LA9k8h7YBaT8I zZ$ACoFkYr`hwTCh37OC_up@ppIUN}h5KMJ`6sj7Txyr~aF$(9S{U8k4WlIlY3Ujwj4rA1(47MB z-p~88>ss9R|J>*N4$1)a!i}?tEDrg=?|p# zDn-%Tvja#lP;b`nO}aGA8}sW&%j@pCbNV&fuU}5~`&(sz3iI9qlX6{u6Y#!Ud={#w z{SNIE>ap_Lj1TyvBS`-=?kLplAgo6+jv5)=^5Afsm%nQlTziy6axOcp9-XD<`QI47 z9g@`aN5P@C&+>&R{G30k$R3p!r=Na7(FfZ4bSV9BR%6~GYR@=1eYbKWD(Sa5Nc8;m 
z(}{#a`OEk3F-ny`&b?^X;l~14S?&0sZk)`ptZfuZ)S;zpkPKkTa;k;W!>J**xc`Ow zvta)-Ebf-<);O+e)xf*i?a$?}pQ^FN_8*ttb)GuY>^(W%eSLa)y~W8UOD58jl*#og z`j0P97B?oC2U3I&434q`TQEapd3dWZrOXuGRTSL-HgXYzlK~e8OFrH4hcCI)TXcKF zO2@fRgL92J%UpkJ4g^V|6cS~diSm*jbHA;hOLp%&`!%sy?((N+J+pbm=Xcyit^<(a z$7zkq#pdU!_j;3cN|n)V|Msf%py@y=%C&Rx3&+gpW<-#)Ov>%KZU9Qv(|s<2&2 zK^CjYO6bw$NObz2d2bdGa|3n^-n|o)2pfdcNE>sWmG6PB-_*xY_CsGalHq~qQV%nyxAZ$v*{Q!$_ zJM1!`Cp1=Qh|wGD8fT4H)Rr|aJoy?A962IP(M~?)hG=G^LR;a{&vs#3;uSu}?i1t7 z9xDMf%$D>a*K{-L6Wq-b47VVeIgi)M<{vcCkyY?ahkLK!6rNl)TPeAAwfqFvAmvgS z+--gL;qTudia){&O5MbIu5nB|X23wxuUI;Z|#_+f&^Y zL3Yl7?40%S_mIY#A z%F{F9iv7&L6y=c(@8v3yj8V3T%p9$H&C!mW*%b4b=x_Ow>*qmK5Xcuh6DwQA(vsVV zGR6M3Bu@*0y>@GCNL-1(4+N1E~?`qRdp+|I%2Ms~8 zSMPQX_|)brfNm0jh|&gss+-fz6a<7GsVm6fX^to@E))NG{P zfvxwUi*FUSifv^fQ6zd8tx}aH$jGMI4^roKpy&8-7KW8rZ*Q+BgU;lOQ$j}hOs03!*=tO=qhZWJ|mYlZ$ixOFk=xoBas*)D%(%P;k{37 z%4P-G3BqjEW*@JrScK{mfzkM6-pf@cvzpZ3;86G-rJ2Jxwp0}x1pW!}EfR>alXyth z`GKqlkbCVo$1%$e$bbX1c15aoFr&!XrH93-TQzG`uq7Y z`t}a*KlERd{#a7~c;MRR`9x)LUmG;RAW!$`T!{cv-h1M-}5E0OluO3Ap>RjcI zuZSfN6vLN2-)>eQl+)~MyaG-%`~)xmH5^F_tsblO6~U8vYvhaLe7)8^9u>UC7GR`u( zr${qzwBf321CWVVA9Cs*qYkIZ(@(N3UDkYe!~^Tz44N3Rk(sCJpQRaMA@U^>e;cko zV0{b0d`ngE^r`){d@^9&_(Cm!(aS6TE@E>^P5j~a&elZW(?<#-Q=e)3rPNAg@X|}I zZr8_wuo>h;n>Z#JSfiJaO5FVwfRqpLN)D3a5qx~TctW?Hn=|IzgMOZ1fRD!o%Z=9S z@|YA+Vhx3V94X>k60|REQ}WRusXpeis*~m2M70c0G7GF-T$6?Mv=YoD$|vHF!123v z%{+NAzgw>a@=mkCfN>hNA+NyTAkyG2zd@|zwKvo%L(({wW<#a;11r0O6VA;#p!(x1 zBG3OLhO+;*`I_X}i=fiRwwr8t-0+IlM@V!0cxj&wr^f>C_=5Twd?O_W|6RH;tO z$u%pUOpVhRB|d7Cf}Y#xfcl}I?KGjs594Z?4@Xt35V%qeKLx8<8ye9jkwpa-8R8o2 zsD_w~7H8SixO3iLp4RN@RZ$`6?yLkFAHf>WKY}Uzp`4{b8f?O$KPfvmVRVTfN6}FO zr!k4}T3eCSB7Kk&cb0K^3cNstB<(#n%5^Wf;)4!kV;!ua1+77cv?NQj&`v!T+!HCe zaZ2oRgJ@`t-J1djrL6_ObleePti5VsGFD!%fU7C1og8;W+rEgE8-mIX{tBByW?0z1 z(r!7vkMF>FtyQ?}Ly5qa|L$mjbp@3FvRzEI3K=Dq{iY&P2k#3CQKPSvnS^zr1RYHX zN1g>4wi|h}F+k7`k4x5TLA4`;BLNuYlES!KCIB#>pP$cLgsKNVCt;*2+;^Yc(ySaq z{t3M_5A?4$xT&ag(2Q5mtV#P5u2=RI_hr4F0`JFH>6*&&9e6>>$;q(gbPJ3KQyUpE 
z@j!ihTCLt+1_Dz<#+~PDekW)2aY1+}B8A~e{?p0m*z0?N&O!dwz&Bj379WLUcd*_P z(K#Le8!FH4zIm7qeI1_^&6@i5XY|+jWe$D5sBtkh3)-K|9#<6}%41H9e;iDT;Zj1` zZHGg)adz}&XxBPuQ9C(9uUXj0J3w_Jo(&5h5dnh3+@(D@w1~GpP3?!)@n>hIU_xj* zqac2A5E5Q?ttAe%L0vyJRs%H+e zaHURu{v7(zV19V+`SJ(hNlSB>%eF^p#(hG)CjQ%5I!{17^}J;2oxbiRWLHjN9^4Rscd zeh4pnLSCAbx%)kQGFrUv^7UG{@7E)fE{Ris=HhUZD41v^uz$wImTBDQNJIbLL}93X z+)RZ1ii6V@ZRRX{G_PEVu-7vNZOb)fmENBgjGo~4atxYUy_GREoo^Q$ z`wYdt?x#*a4MmFCKzZnGC0v>!w#$zLFD7%lFGzBt3_9BU&p-na#i01x4&X+4eOfDWWK2cljWmJC!G-_IM*@7-CP zhZQoV#F!uHB>1g3W*3XLPyTcNZ)#u6=t~kixRn=Cj|N5YHQp}f@Nc_6k*sb{Spm^S zXeJ8SVId}T$XoewHJ)dT@9tH&UrHB=J)WSRptrwmMZ|=c@}@^dcAf3QA4|Ji#qWaE z9v7b;d#|6|n`;Y|fc}a1{ONZgpXA`F;159QNDu~gKY*sqK@&i=kFQ z)P;;u9o8csfDWF>2SF@t00UPJYyP{0F7}}I^-9^fVq$5Rt%er-D28 zlV6YA5gNdWzkh$)FPk;^Zcg+Hjx`+9p1Qe$I_l(0b&kKrXrMVX6Mv^T@&51`)_72+ zEChx=WKqG<7X!nZId{nEL9%Q=rhXKRnb%ulK4X7I;K&%AbJkyp)t<+_^HXB+p&FE8Nux%s4{(LV~DmUkEGH55wmi6yJes? za&KsM#SP-!%26?l!Jy2*kBmux$pC%E37Vt0(alCyd?efq5&iRSO!}GJd=e4CADlTt102@Q z=(y(ICU5}i%7`=MhJU4?R(l3TRtLPjY~;vJ+xfDgNq$)Ld;-H1{_5v$W)8}M3TYD8 z8lQ94wBq!v0#9<=p`vP=M`6uGOUtN*mdd6wx`kbHfw9Sz3R6z<@GX@ORThrV+F%HS zc~){!`k~o)WziPI(Iz?*$=1A;Q+n)m|M1`K!jMtG<@fH6d1v_&{D8b&lo;!-v`x4F zHT~^bt|YSQqu<@m&DL|KUayMJb=Lpq(Z=jKU>?2UQfE-YZEd}V{VVj_7ZaDDy}swJ zqu8VMo0DT#00YKj636*sspNARap|tU*2-5Uy~g=isg0%Ppx8W{LJamf9&rUq>osM; zm{kA_Ww>RK{W5$NUghF6B_)-E4x+w#Y1vB==cMMSD3>{@lN@Uzk@#U@YlSmzh7CRy zfYXb0G*vz~)loiWSE+@}oDe|*7NrRfUPyI#6I&TP6`OqNhP-T#%?vAJ)r^&@<}1Vs zGF_g$_mRtay$s9Hguaz{yPt6`ghOFI9W<3urZF8w5$sLuAic4Hn9HJH%zfB!du;NhaMq9=-q**|f57)Q6Y2HI=ha|8CyZvg&v`9nDpoQAVSfZl~ z)c&E`H*2b}s0-Rmkd2Y_1WxytosI~dA+nMpi=7jAryK;r=AhVrOn!ZqGktt8Ik$bo z^`R!5>sdmAq7}mL?L48wm;;~4v5VbI51CXQ%L_4#;z;mI&ED_ze`g1tUfvnLTHPBJ zWKa3=G(7#Z2mZ&!AMtR9^uB6JONj*kDW$7@z)h7J!LGY&t5)=#(};IbP0b59WKw&o zxjb3NUnO>#U_TrsqLh66M)b7Z*hl|K?2asOp`A-7n^bL_-zs#0oE_Y<2qF|0ibsUn z^YIaXCX8)(W+`LB$6izxq`Zy=!YEcG1G8I&0AuoI8X8e-?$|^uzGQ}I(nqzm_1r>P zYGxHOWzc}7<|9BII!TjTXGsj@Gl$?**)h5-d3?)pubqX%5kqCRqz=kjF^ReE2x#iI 
zTO_XeXsIIiA^C(|8~F*zROMy4B)--&#U%^!0$~-0MIPLPeq zhIIBg&%s~fBBhxivhv|sc+-I0s(D-`mb%BZAyJNE6S{}&VFseDrKgm06@G9 z+EUU<-1Y0Kxe0zrWN=xHL58w1X#PZ@4N#^frW&_=3uavK3Ku;SjiB~Mf#l^#@dMiv zq~vh3FF^!=umYIeQ76P2qyYtE0;-!KQc?<1I80dDjSI@j3G)$@Yscr;hrj$h0f5{D z7~@W7zQkl7#pu}6oAJ00xQdGB24@cvi|8GPIxyc`f-d9f9b}wlF1lDixxK3_W?tyc zi>>po_wH(2Ll|Q&8Zg66qB#sZjJ0hsNaFj>ubbFF)gSEv%juebm^GgRI!2u=F&x+<03?W@Z=&yCcJQQba9?>UWbZgy--YDo4Z1 zoz9wC1gJnG@8FQkgwFVeQ%(Gl1P(qr+Vrd+OQgltp14woN&@ih?sI6YoQ0A(;WI1u zxJaar8Ol6HSBw^4)3GJ5noWU42W)zDFIF}6E%PI*dr~hU$^ckM%3hknR`?w4D`Xy0 zH=$-xe-ZnU6$6dA7Ex)QU2k)pDg4}=o01hxY0BEzMJY;uoy{rI<-V^l#DMRVl2kOMLxZ>K)L= z63`}BY+d(lss&HVjK_*qBP**tXCCc;N4lHzftyG91Y@$@?Kba>>fY8fRds*p^E+hy z5^E2TO5uU9GqHh?gM+i?l06i3n}^#Ii0N52h*cEb8a_1y)`)nVNnr6%j_ZR(k z7}(^ApvqAbmgoV@Hrn&60^4&wj5lux`?$u=WL#KFQ~n^6egW-*a~qgiH%vUo2I^A{ zr(}trk#E?c6M((+88=9wM^RT_DfO#g7;tBQIihKcvS|6&43H4C4~>%_r5%nxBjvcj z`9f!(RgG1J^%biC8lPIpSdGb(c6l=vZ}eH_$wQ&$7cy*J+~6UqU;xXSKP^`FwJDTg z#ujLz5CjuQ-o-%&`vN~3A`?p=ZWL71WlA)Shr90jT%V8Tdw=H=6)L zOqtOT0t0v*pM#Cjpv3(f-@Tqj30?jSN4lMs`Hoj6kXiHa@-2HEEqHARJ4k&dgc<`| z-kAGY5G8&ZZQxE#`_o?34?#C$#OhDz(>_nH&&oq@bl}{(xPYBiYHM(G(xVlzj9l6u{W#4TNuehDR{9$yj|2d@V!tUsJ$W1YN^Ir0? 
zdv*ENNA34JV|YA1x5rO>2a*~oO^H~JWpCGImb)9y7PLRnK0?2^ zA?ta8o*)eMc2*V_9P@Z}*67sUpqJ<>>x<6sXOs08yE=xFNH}$tQ)UF?oH13pWY>Ja z?rQ9sw~K^wv#=A*%yjhWO^I#CT8gw+e58%_`%X9P-wd^vxH!tz{a|w8~#-6wD!!Xo?NqSmTNljYfP=Z zK4j0s_#CLTDjM;&xFp4`7Lk3e+z+6}ybqEPs0p`EU{0`xsduQI62RBN@t?byJE@S!gdZ)gN!>d zhZhd!#XZISK?Rv`6=KrSPR%-L&9am*A z3crxRts$Ej|I&7l<8C+y=;0zPO#y|tzW|P=6cM>@H&HVBnebqi(Y(w$~ zGV{3en$GZX2LEZ|)v!^0NpJ73WRyui16RCkh+?7vjVqyP_lh)_R~|Lwx_l`rug5_i z=>=wMG`*yKPilB~y0bm$^xqa6Xuhi}-G}Hee&tcsI*^m9-PVM2SBy2XGKztTr@_dl zscY%JTT2iyDbVQd&@4W-;MXPXFB#G9qwCEYv%B*C|6TPTSW@*GE@A+L2;nlROCeTn?j8ml zf5qq1EnTfn$={OLZVTT2Wi7q5y2rYGg1&g#EqjproY(Smt%Z(+lA3Fsmllhy$gJm- zm0tQ-!i)m%Fm|ye_geZ`^4ny`*9+@4qd;GeN1t^1W8I;yMTykVu=GPAc_)%)S<^qu zbVhHT@8)5^Y~r9)Hd`4j&$vYxY!!3@=N9s6;(HCj#FgX1W~m0~Z?hSb002AxMgGlE z-;tNA$udI-4=VOUL|(flVNlm&L}lrNGe0j0jrH~pH)K7x&06?qm9c^Zhoj6U96bponCcpUC%L^d~sazSw&9hvvZfsaLf7v<_u_BRPvqm|Hu@VI#acmATRI6Vvu49 zyj01Bp5tfE{G55gA)D&X=U@zs0_u61D+_`523@)ml2$AxjNWto5Ht1Q4xRLtgNM) z#E=U3HE;_BZTrl}h|WWx(aYtUuai&wa|2oO(Rt4+_W>z6xVn}G6!Koq3RvPU{bYbf zc^=a+jtCJG_fyj1K@W#`8b9Uk1FT|0whcZXm)Y87e{+93m|?ef-{ojNhYc}!n||+F z1&_9j=uwz=q1i5 zN`Za{SuA1o25-?*%;^V~I>ZhgcAyXn<2o49hgOv`;v8>?K+G#Woch5Un^+j$o}mkT zDwLfVltn?dsb#kR&q_A+IKm=1pkk7e&e@-V0;L8hC43C2M_~ExuL6*5eh)2hP7qsa7 z<(PW2`Q$hr9^O)bq^j1*)BsnC9JZw*VG!(nM8vNf`no3zuXBI}(E0Ia;mmIHO%IPQ z{+jw~XFLlTdeq}#i2l;B1am|QzL0-*USxYA*xKIh=ihkxdfzD>5w%+riWDkASHr17 zhg_9E5$eg=vDIE8+NvzPO9t#><34^!8W6S~P*UCO(yeK*cSlT@yrAKo%y|z^gqM}t zG64$olR{vIb02D^`GPBgMBD&9P#xgayRWXIJ$dCo-S`0WMk?Jk@s&ToPKDZK&{$z9h=>nt05kr zja$#ucFH13VpM>;4vJ`(oj}0+wZ(4_L(Xs7=D_=|{0QEzNa%pN?!K&|9?0#F$Jh>K z`PTmgr4@9(dRYQn4!3*1$E z6c4F7d(dLy^=ZI=Z|SteK`>#y(#sMqRZ$F#9~Y~rEHD_V$iaiuOq(9K_ijReD%S$5 zoH1d+XveU0P>|Ry1P6QGno(yzH}ws+3H~HFvR+<*zfviNd9T6~?B|y0h3%B1 z+XtWPDFa>P5%O?yCEt^tnyzQc+AAWkdUu*Dri3$@G6WEvmMG+-AAN^=Y7OQ1<@0G0%e2FVmUatn-PQuWj1FWyFbb(Ke5;4YrnD95=E= zI~er4jVt1I_Zub?6=gwTR>l?^kI%G#G5gXF^_z_Zg$m z3&45~$&@Qib`V!ptDJOfB1i>f<);U&k&odT#Etwu!&5r?3KauG+w&?7rK`@R 
zrGW+ik+-iPWZobx-1G#=JbP=%_nT1-VNTg&-);5h2O_#>IGn?_Pj}NI5u1C+vJQu>SRAII~ErGMM) zfp`DiOPJCU0xLsto_)Nl(<7_ixYG?{ZV}@@$D{`tJTbC1$iZ zSu)`4u3Nn0DxTh3u>v@glAECSGAj$$U3FT3fJ|R|qz6hdPd>uqJh*c%nVg?@oD#^s z>k~Z#%pC6N0fAQF-{A);KQ=x?hTuqAh8&}~?{LIneys;L-Gg_vTD`=tfuys6Jg2`? zfq7wS|h#+0NZH!0l!P*>LzHT|of(xOG+YgHG(*FFm;-D|W16FwU8o`e+j%#i@4; zn*E2b@kXm=I7UuFq*5S`2~zv>8$2#4D;!&>^j8eG0{4im*$xP^Kx7D_6OGU~Q9j7c zpEenkRvxo!R#i@)@p&KC@~>H{UKH;qK45yp zEqtafr`$|APa$}NPI2hfDIWgka=QJ_pPRHDjd5vFPEJ{REjY67riJAZjicF9crm)M zc@WuuSk%l1FA1yt0=FYC*#AJN8t3f2(LgWu+jP30a*`@#)8#aXi>DbQO7lFwcZ;ASb zcMM+kNGU~f`?`Ex!#j@PVN8N20RZF^kRth1uDkUKy+&@*8uf8#3_@$4kP&U~!rn`$ zfpdeZ+zGjs%D{<&^21gDt=t*H9Ewuu7^+RpA^oTVQyMM#m<6TY>So$=VN%<)XB0HM z5={;?-|T9s0E?DYrI7uUdwu@=!^}*B>Nn8y8RIxx;6BW*w%233|A*hp!LG-|6Z%J* z^}65XNLBrDkNIx2!EDZCG~Jp#Fhk(Jrz6I30*O^=%UU|C-x$V%*^p6KNhN~P{+r;O znhDlbC;F<`2FgtJxs?uvW zOAzqicfp*l|Bkb%vc_A`0~dgeOMtA*;6W2`g{POJWCpw}!SKBfR$MMDZWj17tnub} z&+OsejA0{nMS!)K`(-lt-qpK7*ELUiqjgft6sy_^gUjr1HCAF8vcxF(jn&@yxTGXzj-fTkWGw{FKznQ~oqe0uaM2NI%~DOx-EXo4GO zVWj4t`_*+gG^9}P5*ySbB?bth=LOs|ygH`EJD1-!xkgs{Y)^d*e4HD*Huc-QyksYL z_M~uj|3ns2Mu_^WQ=eGl3C*}pf3zC6r6BaCqI=qi_m2mC#lR!ro^$*H%uCqgE{WB zb}`_3H=QaHX?)Vsot15K3bAIYx#(Af9t}sw0cm`$FR$)@?aTHBcaXe}KR4?Z?f!9P zKE3z$QstAatF`EHXRyW_9x;j4A!dG4t)?4av>pCHJ`D*cO1BlVN*s8fN9W&NR27uo z*R&+=`BU6BqE?Jkty}pAU7QZVi|M1otJuK4G|8xmj}&)IgV8@8PmDjQv=Pc$_LN07 zocHlvr4IwWO$!RE^)Qi%{Kf8tJ&0zbFzB=F)DAur!v#cHXvvh33M3)8$nU#T6+z%0 z@8n6C(nfs_T$hq=#NY}6tc^Pn00|U|U#_~eS%pWf98Px!2t7(p9z7O6e3Z0oLwe4| zd}hF#^ld#Gw;cqnsyMZBb<#vQighJ>_BeUDYyxbTp?zPmf>-9+WPU=TGYUWGoLhgbeb_Pl-;t{d=N8BS1YAzd=?U5g6##szzrSJWeolKn)etjs?0X2T4tz+IBv zvlmzH2HrC7LUWuwVZRKY)9FU+yMCU0_ttskQ`V=^1s}UC%3{13O}UwWZVfBPSNENq zFZmCfjj7+$lRK6p-IM?f5L`PJL|b`zd1YFPJFsw;8V&gQS(_d2pT#oy!v6~b$7~ss zYPr?bJ;^N~KowdZ04NKf9N*g7(NMDOG>X!$n;#qmX?whA03OJotcbOM=*VV+W5;== z1#&ca`uF?am4Dj4spoP0I(t(au(?}K#~HBI5+ia|un|%hUP`W6e5=$x$X|Q<`D8p{ zodDQVcCaF_+OD^H%JpXFMsrxjnVD-RmivvP1JnneX{H8);&JP0sz%X-(&BUiB z0fCu@<{5}tGpTr!gEPUJ(|iC17TCHFKqmf(U&I>|C<^9h 
z#vCyo3?~?Giv~37cwb>N19;tOqO?CS5i8ZSA9}8QF<~}?j~>A4Nk6b&%DN?HZ<~#a z2MK5xXct$}NXmUt9C4s|4{&+Q$gZ^9?V(vKArZ52g@D0 zNLUO`$|9;eI()u-l(sVTI^dgOWq{q5A%!dnj!P-Qf7&(!B57HHcuQh6GC|6`5pxN< zKbpP#RBgP=W7a!65p{12&S%N2iC3NlyoIVG*{sqCF~XkHatm8G)($W#);1@>Y+d7& zeN@?7tb@F+GrG{fuj~`QNRv}US}9EcF1WLv9?{BNUu{D&*5H3SecSz*IuPg;QB%jH zB9kThH}PvqG#YF3`kd%rU5L&{{|=wd>9S$+X3Qud5gKJAAm6wYxILeR*p%1NZdMPdxYS}kk>M_GabS_AA_Eti! z?2_x_^~zsAGiX3!mU$L_S4(9ReMiP(+?<5LgW#xT@zjAcdM3c*cV9@K5p(=_9{?v= zS`NtB4`csei~t1bA(fv$2ha*ipsm%IY+$O)Ot9HYOd)3=SCm(8(a~R>1m-cthd=v5 z_&?!RgAKOmJt8?|hr0<}Bd4LoDpR1;44-@08FyoWUXM5T6Gp0Ig8@hoPnAh7Fmn70 znQrt;D?z^#KNI28qO$OOnn$KJw~|Ay4vBTsn8PA*Crf0y7GCTvAT9-a#bPSxYwBlQ zgdaZw#@r5XGDgl)ZY7vwwLvIbR=DjE&1v+0g2t$F3=u{>{o;2lz{W5~AqUXZ-`8(mJx8Wa>c?9h+HYx#FUqdVPi z%xhNaxUEC{L#cJ0vM=;m@M5&SBr6q0A_f5rPMyId8}|w$3O@rVMo+}m`|LZSVXuB= zn=h4>A0c|YlfZo~;jxA498HCrt1=2?(=M;3Z{8XL;F(6B{ICE)M!Gfsf9z&EU<)P4 zapxrAEzcp+IUDdj$v^uT_(wH8;4)IySL?6Iin#a9Rj*rvUW;5Sw*SRe4!)f#^fykq zRUcQ}w02a*mVWubUcGHro4%zLBxirlb`&8Dv8vZc`sHyl>;ybCcBL3?gR+oCc4UAK zuT@bb0RXWyv5gNH#~#%$)sY4&2RvG`KyblW+A=bvI9L#tEY7dmNZ?ch`G_ztj$1<6 zc%06%_oS2#`sjwifW78nsQPp1kjZDRi67a(jBQFxhC3j_@;h@mc#DRSl7ZOo%2ElU zuP5G~)SN>7by#!9t}7+U-IH5x4PZ0f1XB!))EePO!S#n23tjW<-VWR=$pH^wk;A;c zW|P3xcUkgfqf*V}69B6AOP<61|EAA(|7>s1f}*sY8N*xknizlTKLZPWd1z&Ca950o zi1_>g*5S~ewk$;W%2GW$e5#RTnvZPi`@&=)PM(qm5+t+Ka&KGFOg`l!G&2|u`#947 zxYL`+zQ{MBo4=Z0_FrjnGHYq2yCb=J)OHf{ln=>HrjLinSCaxr9lmq{7tX3b@~Z>Rt}v#zNmdLq z-ne8GcjYkY7Iv+9t)c(NF~T70qLeE@g>W0t%hiRdz)gGSC$+Ap)rgF>fPKcAzND4B zogQAyf(4MGb}m~|V^s++vk5>GquCA@B694l-9Q>F>;UE{B{jboZ`QdYEvjm6nJ`6Y zA=(flgT0K1RU8vs0xlE*3LeFCeOEPuT54e*xq_*)WAkQ^UsxrGx@$`|h@Ue7j*cHruUa=-SZ7IIn7asL2~`UTyjBqEv}KQPDs zKH(W8F^Uq+wJz6rIBAp`2;R}1hC?G^U^vxVy6t4SII3Y4M@KwKmf!z_t|2poO zh`td;7#BCN_GP|@&IxJreZwTMk;r;BSok(RmI?@k+P6Ro@a<|oUHN5j8b!WA#o%sn z#`SpzWL+&Ol^B*=4cC%$AD0Qdyb)-5)mygq@&mtTJE?h)Tf@cgWPkz^{>E}QNelBb zd;!M;o2-pNo~Vd`?aHAs96M;ebMf%c%{A-gU zYYy#zIEQE3k}f}&CB8`FH?uaE;&mrA;dL0sOD@u_G&xPB8(lqUlJ>dLKTPfDj%}C^ 
zW+dOb>On`SzYMn+EpRDu77%Vd^8wVPfyWzU<;b{s@xox;Sc`8>HKslDEaT*f${yO(d%h;5*Yz0IW7SI*YwOsiAANGom+~n zsYy$hl@ZAx9@}K8MduF7F3ad8P!K3^toW(Z%qkn7y*>n!y@3(uRXN|E3z<5SrSS49 zY*D^~Oyo^o_A~LxDF2~I?{~#%F(-S?D7*369o~2FPbc9HQ3^Ie2VB^CO_buR-D=8! z@NjRWLSXs$#u83Q!y7W!Ab2yS&h)jI&^`_#d%+jSh4YVL+Xs&~gaJ0pXVgnyNHdiCS_z&nTIuX2CiKlItUSh-zYRVHvO;Z{@2 zE^(;Fy(rhFTMq2__+P!~3HP{C*tQ!hGL`4(`s-2);&vb;lNFidE`tqVlP@X;VH&)8 zZi85mN(4+<<(oCKM?KLfQkpwt?b1Z1X(lOVmQ|*R#F=xBqIt_(8O5_0U|29rroSb+ zOel^-jD*w3@+C2k4>rri>XZ-*^wo)l&{d_v@qlubwE!l-T&>2-Y~re#h^|m31}vpS zRqhJFQ5XvH8O6by1B+M;aSH@{GeDtpKPk-+3sIkfEcGgYq2?vFSxwq+aOG^-tjJ$Z z9!N2V;l^~>52+A?Sw{(-B>XB$nco?eC^Jmw7Jl!mO9OUEcFM9L6at;)m56jm7-SdUKAl^iGqYIz)X_htK@T8`Nbqk11pC8Eq2?4+wCwo#O{ZHtE z#C(pa00}TqX4{TgI80e_++EWQ%qph=ZQc_qNqY6M1}Zm0GU4~pm&H>-7CNzMV@Tj?^sQE4uXZ>W>{&{CKCt z)(T(O?akrm-_L!r+S%I3bq#^0R_`ip+INfW;2(M$yl9q-_I08MpcLU>%B)2wG}+8W zp#PC8;MG2vY3-M!?0(+DhOol9MLa&%j!*(=Sx24`A5zg`!Z9%x-D~i>_C5Eqoy1qq zABIKi&a3cyqgUT4vG}<-mk0_J9>HmFKE&oxYQnvx$`_h%NPQ{?^ z)G*R#JlqBtgp2M{?JnOjw2=Z7=2fCW4}Q|+G+?Zk)bwHle(yK4b%>OS5;f}WiRChB~+?xW@s*PcA74Q_p2IUs~EauJDNHp3TqAIkM0v~ zGzvc3$&Xcjn)@$_8Ar?ND1lc>KAtcPfZKQ-oq>4D#JtD4ybN*~>Oo>{6j6IbGDg9M zQ~SEEjq*M6k;={EfuR6MlDpNQeyUz1uqoy%&#&73i=Fome;>zC500LjLyt2zJCj{! 
zyn+8+@CM=#IYEAK4{q3LY|dBOZAKVoZ3KMLA^Sj^M9c<}>Qm%>zqrGkotds?b)EqL zg49oyls*VIDeV$=;?3G^4kjgTX73X%-K9oI-lU^rG$l3dVoQP)6TJ~_5-R4Ut|-h+ zJML{vGQ-lw?Q&hAxGG)9Re+dE!+C-i2~DmMvt4;B51#7lT0dEcn@nQIdpX{E5mCGe z!=DuGL;Y+n(2?O^L2M-^@OmIl`AU+B|K;S?33D{yja6|NJ6m-*EA$SKe38AU59nDNM?8@eojNO94O(|U(Q&diD|MilhtD7N>~p)n zYZ3tJWse_x+swY0OCET(=h)A2wwG~MIiXF%=2_ueGGYd_Jm+GO3kNOZ$~FoD`JJ`4 znYV+dg=jO+UKYU4MqO#)$V;Vg!pYsuf5S}CHpdP(e#&dYZQzBaE~;p)P2GN*U?eKz zdvV)uYb0>1p)5j4er#Q$kE@ieg?IP)=#% z*VPuGDIAw)XJ;1|7bD59DQ(!aW(ild&muN9G|n@PZJt2g{}NwHj8O!*7FpVB0c$G8 z*!qEN@@&Rc8))W=b2zFUfy$k}eO;qM^5@gOWOuS|7N-{-;i7lO8xmp6JpKX7c~Q?W z1;n`An3#<=gPZ^s2?^Nh3V|%Lx*Rnrje3n6q`5=(KL8*>-@Zj={qEQgbM(51s8q|9 z7=!buti+{`V?P2RGtX(m3RlK-RfPaOGa8Br8km}bJ?~1)zn@YjVpSPS_1*`L*%d@| zj5%lTJrW>-+7(kQDCU>2#hI-@Rzl*T-c=y0BP zuh(ms;B!dd{yz6R6bv(8=VpkrcJT#2 zmjS3&D~T$)fQlXnDgswpqVB#rS+X27?htWtTP6#n}hkR0HN=!AmEW60y{Q4le5AmCqPakaD z58+a$%Rwi#rU`7Mlw4y$@-VW2J!66!Adv$mhfQAavLu9-7JSEsTImr&)T`XPXL~f6 zO-;P_a>a`W5G_-he~sDIlv7Qf9U`(rtEKOEO~0G+4JPWW9{$R|d*k7MwjY+af9e17 zwUdvO<7ISSf=>c5hOCnFHUb2#3<{`dDpti5t6UKuWKYC@?p-~l1EAw!M7EpPCZN^+!(ou zOXGtZ8Kyj(p<5hzy=?B9W|rhL5_+Z9%zOdy?~TscObaBF5g5!G5LM4eRrsc;?B$ep zCv8}6*FSbaam@qf@`A759+mADZeG0f4>e?A(^Jyy4*A(6;M?@?DJt(A9?U_v@1bK$ zc2+xG&gVFbk0dhXZ-~_jxi2tBIkn-DW#M(?{s~5*v4kN&zJj+vZ)!P zE0WEusLVzG!NGyrXaXSwX181}r)h#>Ip_U2)UlqQo~g%anx0;qx68v7z(;PsM2=5S zPtQ*;mdmyGQFRLp)oNr{rvxUe!^7Eo3INvY^}~k`bIw=wlG$>(oa$6-eWh(K+B<){ z`Jr%h`(%F}-o6hcz~`t`6-0#?*-2!0!o~)Yp-Z^I}Ta{vp0BQ;Z&M_L~Tt%|uK*WMNFWmruWbBZD%&?~H zoGYc2Qiy) z0X->77A6S3$+e`En5bDU(|(wy!4R_4e!C;bp>0x{4!cFWYg@lKTsE!vEIRHk+U4G1 zQ*3b~dsjfj}g5?ZaG%#i3Z^tR< zJE~n~z3TJ{%)<_QHW+)ccYJ1)tgZ?dA_vAS1eg_Ib^#~vg6ld0gMJSxgkXg88z};+ zfg1AnW;^v7-`jY{U91N9-a3GNz%j2FS4~fg2;dArt4UE*Krq&X?J-`W9SD?^alLsu zHci8~+VSafb9Uw{q93u_8^t=7$hy2|A%OGVi~$76fJC9yDKLjNB+y!`n1O(j)to}Z zwbrT@y~}0VoSmhyKR7zkT2v&Z>YQU2R3J3XAAKoZK6w2%9_jAJM`;y*;O4mKzvx%1 zFXT_c{%PF&iaWb#+jz(&4U0$k(W4vPMP8--;7p=cuK;R91pv)dROeCtoD+w^Rjjbp 
zQyto7c`L%f#ltTfVOa>soH>Vxwboh<(Pq+;$knI-LA4q(dE#7(W1gRM01&(*X36Cu zmD|*W#m!BfB#I(aY?7JD%#1!>bDPub=5N+o9_;&G1e+u>G9wkiEAR54r>dd^yJ?`--ff2r}in%+})j+NHIi+~o^{oCM&aeC;-&o9_ z**-pU3+Vg*^MR@t5p)xao~(WYrZG#_|b7V^}z z!8u>CI!6XlYC=cM#7u~`UIvN4KnxV|uDUQbepQ^TQE$s!bJCQO8LaLBSfe2sm>ChZ z0-*yisfGxpNGWv{&Lbi^CWTe5vQDXTSsVvM_Ozy92!Mjw^?u)V?HWK-Yp%H(K=e&$ zeB1h)o8vq!Lc$@405G~33|Xq_k}1e0HeC~4R46Xx1-ua=3W|uQ$c#swh`Q**e4O|D zeQ=?qOu*^@fkmWN_1-%d=4oHbi8olvf~YZi0ME=-ETzO0fI$#L^qY+%0)w*Mbt2*& zT|YnV4_C}SWhL}xHf$OnoRsQ=i#`y;lBRSsmr{%n5T~@Pj>D7^BDx_?bAEF5yl=Z{ z8lCqX868@+U_O=nc-U@tyJcBwRV4IG$UIGxi$PUA(;9+wniGl=kax}quhY1bjUSb_ z9RUXlj`-^8YAM-Jn@xQ6!6QUGjE9=bl2dFOF&*cr#|9C1o6T`No#s<11wlR0Y0m%b z^}jYf`o-6--)!e=aCTydCW27}6pR5;vI8{^fDZ2IfqUHJkAMtT3H6$@q6oBF%PCfj zR@Mv!&(`*+(xTbIxYwyic_-Q?)8poOA4OjVbP9dzvRyGtG{I zl(gHn&gC}5YU-S8n?^u%WG$^?VM+|*bjV^ zO*u$P86Lj<%D)Bg|J~OQ-TtfI^hT!9s(6QaM3*N;w(4BkWqdtM5k_RNgnm7S! ztg$VDe6Hq9NSHjjC`dI=oYIT31JDoc^zXM?_xRI_Ke7<4cjQ0D6u}=&kU#^{Y9_EY zxTpb>p|4<3`GgNUNfm3d!OwewY0!1N-*@UGEDM%^VQ9PO!y}DOh@61kUAU?;0$CFC zU{yu5h*nh-h6I+ z-CwnBzPWkw4eVe4(qH>u3QYC=-%oG+O4vMv?sJctFG_hApPb%o|DxNvzTV8$SIa1~ zh&pC4Ff?X1WETS}8ac7*OKsD*+0dhmzn9BF!j`=_1E80!Gpi&YfR>WAR24y616vS; zRkcbDv2n!NvWTcw0s^!1ZF}sxp5S^ZJDJPr>AP8$9K-D!i72J4%Te4=rliSX=MfiKeff?lF~w8(QkoyDK*d(dU^C14_Aj9{KK3m*fo$`Kp^%RTqGca1QyLsGl*#o+AxY*69>@`R;*}v#cv(*j@G;T@>!{)n9jur7njao$cWuxB zpm?|BDA-4>{Oy$$tbz%N(%LcrXx47e3Og7RF|W^Zx1IQiud{%9r1cgSJzxUBjn&=aX+r~02<)>--pggB(opeIHh~s}qL*V= z>hynqYxnEJ^gnrg`|$DZ$#ikp-^^rf^d;rC?}vUf{N(oPkV{GotfgwLXkaG9+I9W) z^|eTO@bIGRx)>uOsH%6QMmg6~vZJsnAVuq%CB07VRh7sP7!j9cX66ur_hG&2thWu0 zgNOhq5jn@qxF!YF0!CMl6I-Uq zId^@1-T8R7*{*r5ZQI8fvx>6RT57Fb*KLMvDYX<)%LWAy!3dFv$RU~;5pOozQZfL9 z5Dv#{T;S2;2iJ#ZXP2*-)az%5;2YCaq_jOZeT!z<4Yih>yt-CW(AKRV0fjDTrgoO5gT!g@Pyh{!x0rfHfS zc~E0O$!W?{Xyak~=ifg6*S2^|@a_SJUi%&H-)bJ6$!vm54keX3dv?TOS;U1;`t-ZU zJwAOP1ASrUwih;1Lzon2jE=*XnN#BH>uch(B~9M91XycLDODARfthN7q3g{Q4Vb8n zjhR)kQYs=C0W%R1(AtG&s@1s$5t+%%){cQHmUG^0hLSQ!1+|jW{^lU1^lhhAYA%SV 
zW<`sFgb;!YbDCAn2v@ea)~Z!OjlElgeGFF{*!sez@nB{nq=>OI=L(I}nADd%2b-+5GQN9#`SbAX=_7f&!6oLp1P#(Mb1Kyc+qNY@ zQ}y0wkest2_!!J=P6-LT_fSPuODV%JAb@wyF}ocJPu^DtcuU>6bC4+kk=E};4?uMq z>jZNmr`=h ztLGaX1AqcB@fuu#2A&ultD%B<@~T?Z5Yc%804>!Br=>$TF6W(p|LxDd|Fv>H^v8t9 z+j4rJ$)O5bRn_HmsKxWeL#Sc^>KVHvs<$rS|zAOi#gs|Lz| z2zau{svbE4sYxwb7>Kz_A%;HsDfe;QgB*3BrS~-p2?YpT)bhdCJ%k~IR{Ah*H}32w z-hEomzVV;D{_X$x`ltWL{7)M**jV?^_;iX*Ywcqw3!Em;WCm)6WI%Vpxwzg% zeH4u8+ZvA)zsCMckiSZHAjmq3U4nbt z)cFd+!-e{|+_FfD4}AXQMIw zgQC&L{L%FCDguJ4YGCW2Sm6)R19vqM_CW?2C`cwE5O6LSauHEN1rsDNVUys9081%q z=widQ-0`h{@ECn8#{vKh*N+xp9RaT|^P{if`OX%C{DI*Z0H7ZBmAxy`?;6r!e_uRle?(t)Z z7t8qv<~Rc|uhU)Q*B+X5dwln>Y+rluIj3}Wn7TGUer+?H zeRk9J`w)-UN8%naEDLTn3pyn2J?@ChG97&5iM<$;3xw)D=A0P4R`9MvQdL`r_|EbA z(%5mW>YS^!A_@_SDH5`02Zm-u41iLl7E**#EI4SIkd~>W8k|3#j;@W)xt5!890NE} zsZy9d2&FNHj#U6j%%qe&Y`Wdr6bc>zc^O@BDaJOF#7&E2j=j_}&neH+_d^dX2DP9`PAU0KyWMUv3t$?WM#YiT zWk1OxtLL@~=VC4<4A~UsqFnYkF}Q9UVH7hOvOyeYc9(% z51~nQfwDoBqQ}t3(1g=C8sWSw!!XRJu?vle7-K3W5I5{?k=S<5xBI!ailgEr_1o{i zi-_YXwXwfF-aZ{~OytqUgJ9s0o?qQ2lPQ;nmzO@fH}>y&MyOp)r)1n}_jkYaKY9g^ z?e(FIjc+x%xW^w0)Hw}jHDRaOT&=|j;z){QRR`in zSCAc#-Nr}pCkhp6GBI^S+efxyr6g^slfcvQ6un=mP#;6^U?4+r5P|_AlV_LaMQh#0 zmV8JliBwZDb#2oiwojfsnTz;gJDsCQ0<~sURYWp_T7^-O&``TJs8vyI9WlczXB!v%+tb0jz80}+p4pR| zcv5ekLQQFw+rD#oE)2sY(*M5c8N3n-G(g4F?o z=Pad`VaSo? 
zYNDT1IF+qa8xXJ#DWgS06{H{A@OoH@ zD^6T?n-hm=_(>ldjBzp&)lSVp7H4jaVUa>a2B<{l{ZcEWNu_YjOzd1lq$-w+R;!yh z7@|mZWE#9_EM|c`UtG>bn`&B@T#lC=pQk;2=htHg&ripTrft(mGSXN7;%|QL-`UpY z=fdgecFX0@Kdk`z@U!i=C4D0W`tw)+UvL+1K6(6FmFDdD*7TFdi|yru<|;420BN|a zSFgeTx7~o|ABTAH?f@HnclAO7gh)t;27;)y ztTqBda7~+2MIz>)HKVC6S^ZTUzIuFm``N=kn&IYndc1L)@vII~pO_Rp_jpXKgHEyg zY=)ld6qziUnVUe+&=pV)3;=2X@=|_m$_Bu2cP%hLKnDP;95|u?f`McZKmY;<04fGe zFiJAA%T}B=9cL6M4InrGNGO2pg#jQ_$@BiyhUh|L=U?6b(*NqpfI{r##vh)P)6|G5 z0I@lcT&f}6`P&d}&94FcBh%vV3vKZuyw=!7Grj**ev8k(%t_jMQ4I{LgWz0JfCU85 zvFw2C<3fDiKz)y3gj$t6 znWJpg%-Fyqv8_!}YudabB9c>L07TN73s&_UfUuZVG$kGu22i95B#P^woB}nV?r`A4 zQKd*Oij@thAfYQ#1q=|8Y|YCx(eJg6F_;h(hZPhNY3*!&QMvLx-2;n2I2RC%;+;UUHK};GA1aDN{<$xfr8EHnS?al;WIIwVYF~8bV-ZGmJ5o zHJzVm4gF`NBGpV9AG~KWVxS^cOk>xw(-|0dPN$QqGIQIuE)qH>GB5(rQi_WWf;#7F%7??rb71D@&!5Mk%cXQ7#^9NW zfoUmJwB5GBdjw{pWzI#!OuhF-%9`B?FG>`erin2UF#-<55ZKM9(M$|%zuzO}WiAdo z5htmxzM14B|+3|Q>lc~F|0{}$q`#z0R^g&YI4*jz5;dDAp<9FYD2|7{_UX;p-{^zW zd6Akuvzm$`p|cNabnbDFdwd@tN3}S$wT_BW8xqGlN$JD0RDORm_~(RM_B9v9yMe9Y z004jhNklw@~f&55MWiwl97TVl2S`4UDpL4isb0S=HW2Usnlv_Yhyealv0B8 ztKNV0H^1y_IUSBjPDRR6aFH%H=j}x@*={+{`FPyN*n&Zlbh)`8)AF#_ zH{VY<8?q>X8k!ldwRZ?epje?gRs^et>KcVoN(r+a19@6T+HJi7T64VC&a(yue7QMS zFa`U#xPgxmtPO}4!>Yoj!9=EntJMDmwqeKj}UG%z_AN%0L)G zsI?ZUp7_Uj8vmyqAqgoVPz41-BgQ6%ZeVmkXrhFGj0DDt24c3R;vvA=sU{FWRUJXT zBer1;+X3Vly@$xmIp>s8^xVZ}ma^a9483bqO>0hB(qatcBE&BJ)&KQlYg6<7mNxj^ z-^@I{5&o^4(@lq`ncFAF=NG{|weLJtvZISmzx(#nFOToN z`uZzpule%$1gE$;#>bc?Ucd3#`YTW5mACt!{mk^r~v6n>ID)BXy=#S0DiTFj0P z9U|tc>&4fk7yuIkfU1^K(fJSZV^s+a0L(RIIllVdcRqLhjYA}^PayAnu4(`7SKj=O z-_S?y{M9L!6kgqscsTxnFpQ7eg7yQTQ?+@rWnt$VLNSDrVIGSlFk@y@S?^n*ibFE^ zcrJMvYlJ1GM9rfpMdr*1A$%m@UCIo=AIZ$uQkhx7ic|n#W&~UfA|e6=t04wd zR22fe(d*uD0-;N0=~^W}JpY6t=#?C|#HDlj7= zq5}d(hoFcADnGi9=N|XC$35DA zT@9PF$hI&C;^yqEjSU$zAf-c!$Pm6JkQ zN5C)JcV5~F*FUcVku@#ht~S6ivNtm^6tPmo3{_QXWkyh21u2eP&betC7USSW6-}5a z*Q#1$-v@@S>sCoiDR~^nocGLJM3!kDh9P**%%GT3E~OxNsaCQ`Da)LPZp(y>Y*m<< 
zT|g$|s)DsvlPXI#kfH4mt!Ww)$fbr5%&c=EkOLwn+_inzv{g$XQPs5;LPW-KET$oZ z5M$1HHCU_`FI5A8&1N%?Q)t`Ev-8{A+i@8;yX_|UB^{Vz9;M%OwV005$Dx#RI-PdI zCIpA(7{SQEfQU3W-!2K4NO}e%@-)ye;pw-?q2U^5n)Mi>J^0`q|HH$;v!g&d$3<7c)X)HM?gA?s1Rr zAK4gc35ZNyjZAVbC24JXel}C9Pg&lxrCfC9hm;zLC1oN4rc#QCg*LFGS~8XD+4(>o z(KDMFvv0hgb1J1&5ml8U(R)W+imZh0T3RL|%d)J~@W@UOn%GTgKP@8~>@F`%>pai9 zv*G6E_UiVk-wZBrDz%98ZPzr-`XFoG(YpU!c>*)Q55i=O2uw`@cpb-7tF_=-n+!1L zB(-{SM2H5p*0EHI=$r#X5ivF&Lgb*TecyZUQ^_J~RlvDg>#Va_Wyamv`Q_R9IF4E+ z=RBw7(eRpU!Ahzau8-GkxqZI-SKexV*5`7{^CSD6*L2_P1~%q05#Y=0R#jMsL2GA2 zq@b!bOUes23Z94nNXBUjA%dC-Ff%&@LT0K(3P~W`!hC|B)ss@8Il>=sRK48Hb1juv znu}%lC0DJ$Le!vVx%etAzbE|}xI*D99^Z%NbzI(RG|&%XiQ-vtnv0f2xh0)F4!hX2xl;?O)I z0{}Z?wUQR+=J}i<&r}yo3FhkOgf)S??;pO$4+S*HId_|W82a&WtR=0{-kTbdEnnZ(C-1`M;f-CCegE3uy!L-puiyUS z)vtZw_MbdI4EUv=|Kjq`UwQk#e7>i%*M3H=97gV&7fT=NU1~RzBSufmtg3ebumGx> zbH2;GMO+mFs%oHMcGQ`reqsKno&L@YyU1i+Y}HeF{_1c3Uw;2ff1A!<)9l>-ZN0oq zS({H;ze@m>yOFmzW&n_?swzlqfP@5yqNWCBNbci0u@%(;WO9U^2j!ZThpQ&>vl)R?~nnt=B{|Set$ksj5N+_70E)L{%BtIajNSDF_N6dcSQq{qAhMe%C}n zOx9Rpw45`XM)5631rP;r-YckpiK+)hGrWsMQ3e1sHDw=GHVzRT5-~bJP|H8i()vB= zfz#pGH4)65bBsn9)N;uRDvAiE1_pu(6%iegz&$;1k9*wX9{2dH0J4W z{q(IThyT+F{>DXr=5FVvxq5oYbnJGQr_*Ws=<@p6;quYOK$;*rIa;|8UR$a)|zt;A$aczy@;qP zv-cj?E?5iJrj|4RK*ZU6+{G>Pp6YwUDNXg%oM6ht+nqr z%mhX%P-`*3oa;P|XJ= zSun<;g^g(~e;~Yme%*9<*9<|dwGuNJX)W_STe1*>Z(>U6e7BpYsf`iX10m()f=|ce ztZA9C67Elj=>0ez87#J;f>{-N(F3;RRBL6%rVab?NZ$-2Sz#HMr>F_5`(RFibV8 zDi9J8f<|^OIt%SMoe-c2(X#^ssg;~tmgV-aUya*R1kJqn1e{WN*}=B9(65sbGh@JM zo@tnF$1b!;C>Jd1;i?e6mIVrW`asc3V_z|=!WIDwSs)x7L4@O}uZ8~dqoC`{|+AlsXc-}tw|MZ%c)9>op&u8}_=tRI~V5UZQ zs!#jKnYBN1RI7%{j08qN09MR|(0P()Cp$eyEm)Kl(NQ7f`^(Wiei)#EDH$-}OLL8V znD)HKrwK;*;WTeE16ea-(9A>yfdCBDbp3n2lh41n6y7-rPRITFi=xUzF~-$nx8#&- zi9R^uYFQOSh{JZ%G-u>}nNM|EN-3^wB6}-9<;l^-4$kWSxZ$pS<3HLQ-{YVCCDJ#? 
z_pcZ>&rf^OTTHj_Kl!GH^DBM++U`*VEaJ_F;IYA-!trxbY#kH7w22ECd1n9yNX%*`1_~x% zjq^mr&coWzim0L{QnEWcey896{c9bu3=1OKlKtFaxXwOzucpe=Sfbz7voG4aziodo zF8dQlA_Q8e$0|^9ZM0=$@FsV}iKh>2lpmM-IT#i7yxo*EajDLC3s$E~`H{WTyAJBL zkr)8}U}&Ldm$fRh?^^&U;|!{SouI8JMzul)xF+o*s#bXMIjVsnRfQ!LP^-0C6-IK- zGeQxW##4Km6?Dx4Q|A!^D1usKfft$sMFYJ1bG~CI0P&?F;Rj0L2Y^3;9*~>o*)tjh z12Y3+@A^5XCOQ#OL+^w4zSde(R!tvc&A>hGagTf4;~qaSW-7sal@!n|RV-IHrhl&f zOWo$LY?p4H`0?xG{`$Qq@3W7rW$1cFijL3EcW2wp&Fv^lzCMhb+U~mHo%f%donNZj z?e#H4IzQj$yv*Y~kF_QuU=egJ_I+OytSXX~!)InQD5Z$dno1NxWOfWhL{dZ$L>M9Z@DZMUXd48w4m zPf}{4R2A})q7OyNoMtt>J=_4mu{j{#oO30od7cH$dtaoA2s5s=Ve5dbXklhS zb&dBSEGhL}7h?n-Yn8U`K@p8)sj>6RlnT_Gb33%mm{LN)c}mNYyAUrgE^^MrwCmft z%w258TvI8Chzzx6Vu(KGJeML@S2u(fLRfbcCVJW*imwE7+kOWF;F!{L|((JJ~g|7d)(ts1ERt)1Vq71|JC6%$laDwXS^U>J?iT zIy3Xg-Z=mWjX!UPx}+jCRL2yTWdi2Q%S&Xp{^H6jB5ExZQGjJG)?0;u7reVUPJAe8Pyy?-?rZS^~Zv9C5xGb$N-R1Vnjt)a&pA$;%rq5 zmYlO9bWLcQKn(Ldrj)YDJkQ=c#@q-!cbum8w-(-k_;CKU%kb6A7JL(V%Jp_UwX+yD z*i_Crd+*mv6IA5ixRzrW2pSr_05~hIl z2*R)h$banjLAhZ`3oXvoWL?KuoHesl)5A~xPhFlURBIx;$aH1$h|AG4@uu;cfrw$l3=`s5!!T;BcZ_^U&%n|J=M%>DfEFU#S?%%-|>Mk{qh z@TubmDi?G@P)Vh#R`g3Zz;ND5ar5M+**Or81gv1GmX3a$3+(RkqXq*4Qd*WJO|@3E zyHWC|>#VrP3&aY+pn@@>d1uPSqd5qM3V?`cpjFHa)Ls(6FEl_%;0Vb;RSgim_lPJW zrPftwzuRt0$_4@|AZkoiB;^vL<(yT-Ap!t1E1IOzb(df1t{%v5-@N-TE;LSuH=mp` zeEq+gX7yRAKNCGPjy>*5Vd$65_7&DH=om>QFV(!-#W9_B?r~|Y#F86X4)4e5SsQF@ zyT`Y0&FK10q{-u{fAGeyA2zSe5C5tYfpxrl*Kuje@b|9tD0{^ub^Rp_Sodp*?;wc7q6 zN&6>)tgya2GgVbEAtD6yV2UOtM8t>yW?%vzTRJKtn^fn55kX$mkjbii(5CyN03zy( z1OA6>f9u`dy5EPD54+<-ga}pD)XadH$w0K4;d*ue03jG)O7onSAc+CjJ3TWsLqd$~ zdC7!e>vlN{fO+%{QjrV*cbyl2YuU1yWs}e}UDuh}JRavIc_jCP4(GooJ+O^vQrDh^ zQi?f8J|Jn$GZ>NcJ_KebB8ngCe7MIw?s1QM+~X5P_83!_JtWCpdoB;WJC3~^{{HWu zzVb_3lj|lj51V(dZ~EQl!KOPnJkHmp6z}-)!%L~K+xFw(^xo~$S6|ycyf{BCbJuOm ztZmv-iXl4}hPE-~Wt?sfr{SX6J=i%%@M7wXFR-S{IOY&S2%#)V?(C)jAgy<63Q=%w z^&V=egs7@{Ny62UCt^eu5e$A!ndm}eRmf3vzE(}kQgVJ#PzcWX5F#R)LCysc0VITo zNaHxQZQJ#YK{3Pqeh-Sf;oOmIHiN2~2~l7sB2ocR0|q9FzUi7a&5Lu+3|BL0INL0x 
z&L*92T*YNs0CnRVL}b8J%RJAysv!cdH&0+@QtGN3g;i4{DkAed_iaN&Ap|nWIZw-6 zvUul$bL}R8Nv=f!T=1*r$g~2ecQJ+_BC{j|Da z`cy@lrZG(bP-|tPu4$X5KaNux({|XzA*7T5714qS7?YY7Se?-Rq$LZZ~KU zV$3SX>BuS`$joBn8Dma$nwF+%ipi2^1tn%6BxWB2$x5RF2@n!FKty(^N+P0S0BU9- zxOIjO09tEKwd7i|gx;H3-}j&J=` zx9xgIRHW8YD(IY+n(ELFQ@Ne0caP66+ShVe-n@SM!QrIrPNx9$PCPk039=_rdpL)K+%d( zHUc4IsrEevjMal%PCf)MYRi)1t1!P?rBMO6Ez1LfmYl88;&(G^MnGCuh_zz??XHPdRhi&)oD3nheF&Zb!34n8 zv{g{eX;#&y3Es!piIll2GNJP!P19Tbd1bwT35=1IjosG@M1IgLGWOjWccFE#h-Ie zzjIL9wa{}%sI2klie`+2NMK-U zVig@LAg(?MRU}e`A0+wyk3&IYn_)h|<}dt!oNp2+pIcDtzaq|G;lbLK;ed2%Jyf^WQ!^L&HkYqH?Tb5yYTcrs**>x0{!#rrh_vs-jWOVpiMDW}fFcO=dO>0})k`Hz>mJUcr(J3Avz zDJ^H)?SqT+Wy#Y#ni4VOyf`EmBM=(M%X+b)?Q+g#NgygH^PF-L=Ui}Zs`Yk08Jb$1 zB&&z>?WPdWvnc1;|NvZ2?!^;Scbz`t*S;QER$0GoAUFVy4d)T)=dhgRb6A_v+ zV%Ij^W?)Y#rLrVbEktYIL*v6Kja6hg?-{9-(u7rxA$`|QX}R4OF>Sgw*Wx{`;)d8X zB34VK0HqWHuDLL9&iU-&PDz~S#z)KO*xl^+(Wnc}X4_k~n`ciMa9XAZ&4Wn65Q|A& za!pI@`ql+d?E5~1fR0b|1OOp~rfH5(_P6EB=-fUdYu0H+0D;&hj$MAySFE$@r=Up2*!KwU)On3 zRRdt=p=lnTUnJ2f<(wB}AY&0pCC~Fx!Pq+=y^3HpPrSyvm0C(EE9DtuTqoc!NA^U3 zgjO}g5Mn&*1}Sw;ivl5(oD~SMV$(#ZsX(1mX_|%{JI7{Li=u&wD6bvGcLIC?TYFqe zDQkusBbwsMi82y8ETv4Mk$uxNZEQw2dJZ+&lGU@TmPEvP7BglFO+*7mTBr6^1pr*| zExS^y1Xl^v5qsYRYuR7loUZigXS>|P+avU?hnt$UB_)0;4uuCK#)X$P2QIRUD zP|>iM88P`7d<> zQTzjyxgRJlO_VHF7WL86%v-KY0UEr+CHwgbo3m);;@$uA)5m|ie_FJ6h$y&9`2Z0K zjlmGONvyzb-0(Bv}G!vE^=KmDEduYhNhx{i}j?XD{D zQ#J@C3uJDM1poj9?m|0OgxhP;C9x7vHK?MVphK5G`3coM{uIN^P{9-Hiz0VaGrbQN z`jjG}DFXl~7^q=U2H~O-9RjX07K9gaaYZoGy8s?jQB`$BCK8=z=CzWVh@_-iN-0^n zdM4+bIJYcIS{4BEF2a1OY82bh4MojRR7a|#E%SW!t>MY9ZsW~~9x4dbVj!Vo0bsy6 zcj}6Zh8o-Q$T0;MYL!$cBC-NSpc!@>{tOhUiUao-U4-rHr@DYEx6_-~hkDkvueLn; zFaMVPoS|#&P-*v~Qts~Ra>o#;@@flHt3^$Om{lsGm0Fm}-TlCeIy%$Kad-vF7vt#- zRxm>0{elPXJ})p`(}u2p2fF`0jJf2v@4wL3{ii2c2ml-j02r8o0V0tD0992rLr??4 z7uE_ULEw+=df^a@E-CvtZh9HXS4E0HIyc_S?!Z6xU+*gD%uo~nm0K|#@g2@nk=i6#|eT7!ivya?lkyOXC>1dn6{fFx*&R!s)?a-v{hcj_@j3<9Mr zWm*ssP#qB(K$RbCUHzW)fQCTCwG_4DnXOhc3xU?SfGVa2x=8o>p%Xrr|FkJgFid?n}-je8vt>c_atf4wK1;4 
z=%opvZ(CIO%xkY-@1I^@Uv0MivnK~c&S~<&ViV0~LhPErfm59kxnu(=(=;Js+qTZR zRY|%ABS@_Pum(B@=bdxQvNX|=shP2J08mnvTD|w^0wQ)@rwXf4w~77XaC+^v*WP~T zt!K}kdCw~1$)ifce#uLW-mqCijaK!+X0utAW!1Eb$l99Kw(T@7U-*fK&!0ZYImHmY z?;T|XcR1X>dPy-xW0#AF2vcYp?|s{F^PTT}yX%@#%5pp^@GxvufosLs02uTFUY&O%)IOoYT zp}`u6wA6CG-6hRwUd(Kbu3nAI>wz+}Cd8a`n#b+%F#2#f91e#=QMr6{fxvy=Pe^56 z%xoM-D`b|SSU3v|7(!Sdq-|S7MAN2etg5OVrz3fnr^L)JHNpt!m>uB5^NYRB{jQx( z2amCDS^+DiD7e$LpQ`jeM8s^OU_b}} z25axu8tr%Y{#`_A?7bbPWyo!WNLwr(yjN`6}ArfEdP zK>EIK*v;cK)d~QsTgVJ+txGMbWC}i)?1dg*^gBS3@wN0$%Zf^EX)41S|r_*r0b!i&Yh=?4#Re^O70iYtasB!`@?;W!P z08~&_wdx!@=bUo{P(Took-YbX#4+c2@yJ^1>2z|=4Z|=|o~B7jIk2fs(^NQ{nRCuW zO|7PUI32}Q5SpfG*h|f3cATe_5{t=PZkBw&0lDT1>Yj(Ldq7`&PS3y8Kl`oI>DcgQ zd~Z6G02u?du9aDlu-2+d26qhly8W(J%Th$ta|j_Af~cYaBE}Fz6_I`C1EC|1-d#Pr zGP1!xx@kY>3XF;aM}sQ>|HpnGu$YC`Mk5~p$7Y_2e}Gm}J|4HP_Dj@gNnq@MVQN0# zv~L;Q5VALhH7$TuNE1lUL1iKzFm{QM%g9zHWWFB#O%4RFVS5$aYkVo;;KyJ2o$i;B z$Ou7z4AkImknw4n^#DMy$Pg7AI4>oal-hA#Zp$W3)m!x()j9QM3Z_wK-h*ltNku|vV&j%2l_JIIl1i0P=e^wA{434x z>9gNTeA@M2E(=e#oIiLy9BrZ8Yt)paG4`RUF0cguRPtvEw6P_yUel3moRhux{@WMb zWe26W;nX!d94`(hc-OENPKPHUA? z5Dg7X4H!hElmY-E!f0#$%#?1HtN-7{d;jkmn&CV_;gs`j?_Wzf`64k~-27&F^B=^^ z|JLSo9dVyOW!H-eYo0t-Q&U6}LQbH9|07nF7z+@&L*JvFkkOKe#LH@pgDk5Nt5S?3X@oNs1n6CMdQeizim;k`E3L|*u z)b4a70=Cv2gPN&EUn^i%GxUt?&5;5L&aSR|&oxvC01%OYSd@W)2nfY0uC5+6;2)SC zFt~dbfYlRZKtTXR2nYaFfY5=hH3ulB$iPGds#3{-iB&-vlhk%I0Kj}aMRp{nATQoF z5)rXOL@|^);fsI;Geo`n`pl-C`A|`ivKMkGDoDn~0|DL{GtCS^84VF?0f05`V0ED~ z`&z5}h(|*ZP!wMcv)2P5}%ZNgV1y( zv2?A`ruD|RHA-Z{hz3-I4J~j64f4@UE@-g6H5cyU>SVR^e8k7O$35=xX9p<+#E2q5 zby5Ht$fVcl)HIEC|JgU*`}{Az^5xC&-S_6>@tEdA)AP3P*lA?i_5G65c$!{)?Ge54 ze#QQj^H_=rocF_nVF$ThjfWoMPk;6&V(`oe8 z5PcM~eac%N@=~_jvy!Uhsw$uuTr4FI!{CU~U^@(@lqSS^o~ugNwo_V~?U1NK%d@6w z88^+4r%7`K&=7n|V+Ngd49>TGfIBe`F_Bnxj(y*zWuf4f(-_+gQ`{fMTCA9LL$6wh zY?)6()AZZRyp*F(YWe1Be?DAPNdA@1x|Kx{veleh^?=@we8VOlG=Aq?|9(0?2a&&Y;v}{8GiO3zw*n? 
zuHU@>Mkoau!_-t>2!K8iml6Qna|8Fd$47!ogHgUgxd^*=kEe&zv%mhC!_DFG_4r-M zr)MQIK^WXm#O5%6{&JiSR>ibKgt8oG%CT8WR<%Ihkw1rbP)_CInC-a!HJEOsT5)Hj+r|C?Z<$4(WD(tfe^QDg~Ih?c;QG zxY_tVhY+deId2tf5^YY4-#zUfyLa~C0l#98PxBQ3nnLit)(VD^`R-Eyc{xP}(6od2c%`di}7RS&CF_C>4!D>2Z7Cr_*>vVY?Cw@{uP z&we_WT8A&&?wenF`*()Nak|yMTYhWOCBOHj_x^+5`1$|i`RP~0JIq4ZD5nbI%zvNX zkP&*#L?)i=@}o_1xW|t@1Pqw~07Mz^E_~*m z8~CL0QYHO|wCasU@~kjX)ugD+E3QqAQcnhGML>?a zpEASNaDUTm29X=c!Hm||JJdiH+Z z|Jvy|T@J6-!m-SK3|I*qoVVR+8m$ozM1Y7`vx~ezwF3LWZ}T{c$*t7HH3pvL2(j04 zb5ViHCKrF!J*rk0Iq^6J3%vXMVHw8FqnkGT)Ks3itg6v@Gg{}3L`=*&AHDZlv#Q2; zH>xs(no|g&5n0&KSvrof@pu?%x^B{8q>VRg#y+y$!l|ioJ}X^5dqi(uRZ#6yf2_H- z`C*UW&BhIPI92oh2+{=Za+z@TCeo_gUtQE9@}q9?KnSJ)0Q@pq-T(lM5UVyoAf^Do z1fucd^U4Wk1^|K`Ie7010Kk*N$I1<;5h4Ot(2T%s6Nh0G@7{S6x6#+JR7CeWz)e@? zg*_kuScM9(-V6f4-Ov&U*Fh}<1AtW_073vnS<{e+0anq#-Kj?achQ3g$fO$92LxMx zPz6C8H~@HQXt+DNrl5LvWxnHM?ot5x7A63QT4S!00@>=zQCBAaMOj(gPpHC!0i8#qL7Y82Ll3DOIb8sv#rtD#ENq zOkUKTf4{hksD5F+K}1Frk^8!fd)(t59|>kQ9*#K9Z?pS<{r1hj`xkcYJI~&k<+3@O z=5#GNFA0DyFE4Ko`+2nM>uZW`sRckuWz)CLLn_NJ{=zSO{jG130(su=ZvfzPpMU-0 z@xyoCdzY3nFSp&MbKW1W58zO$CdtvYfT*A)rBce;T1iY&%BZ6!P9-0gDTH8v^RzgQ zs~!}HOmu&}KiwRMVQAY1ovmTks+uHw@JaJ_b0#KPDk9cemuaq+hrU^s1r&>|QSx!^&e)I#K(lbo|tXxWR&;M#4&X+|A^gd4GeAetL>DA5ix?taKa?ay8SI&)ZzxL|i#?O4YvD_TLg>n;BO~FXN zm-_V{_qfORkA2K%HE|8kW&e}q?SJQ$@@j0qdGmZBO{f%ns_EV9X9U+oB-K(b@!Xl} zrt8xpIp=QEFDVhD_r8&aoFm7~ma3}5F!&G-$0H%Gu{`5A=A65(Ti2)< zqYz-pMe15uCnA+4HqB@ZO5+E__dZ*OnsJgO8!*M%5>TP?*k-uC-hD$EIqQLQN8 zN8SnB%cvh+2VM3jY^jEX8t9bHdpl-0!SL#5rhnvrh+qDoUzoS==i8^W?Hyju4>$DeI)BPI{*N6WnI6DZ z+V;%jfc`Q)qZX4~!x!F*G*Z2O^$-P5PfpFMlFJg7L1;Fc z4VyOP<^+iK?0lDveeB11e)QYqLSKBseSz?TmixdeyN+}{DD%)Xzf?4vEmU*6+ zWqI`Im4}ZWAiL++SLEDTx4XW*HH0c9<=k|w?;N^S#K}`xnfFUifDv@(Da>v zEj4*ULr^dU%e4>@A`=oKit5@xu<`+G=nxS(=Qdq0q|35|CYV{#%FHhKJe6FsBhSnX ztVKa_Xos0J=e&me_!y!OIcFj=kj-Wjk-hhG&O}Jw`#{2pJ#Tiqw12jwscGW0ET_sk zSmI|F?(z z&z{Av-)4{Ry_a#RSyjb!0$HXhvg=*TVi7s|aDINC7P;Q<4d6JPHhnK5K(x}Xwbpg- zzh;_cNzOS(oO3=-W661FyWj(=MdzdQD><4{+Mo6=_`r^tnVAgOz&UpqPcC@p+>Y 
zX_X6D^ydzi;8wW_cH0U~=wa?Sb%M1}3Rccezq@o_=J7ps9%?2v z1~&Y3b@h4jJ`iFS18L4uw=3o7WOSx1#t1;5W`a-=z@fXpPTk`kKaTk0=gUsB5VyUx z1MSW*Hl-?>t=%xN>XbzdWF1Defh*X2dS-1$#8Q_j%h1NIYjhe7vW~~w@p2~C5RAs`Q+qADuzk6BVnaW5U8G@oyE=8AIGFIB+ znQvn$C3?2v7)e0a3r&b_PH7cUwcv=T)D*mTNHKb5uV#o?O0GG}q_6(M&m7-)_n+fA zoPBYF#mX^LXhhc?oO2EUM1%lS&W>OW9eSCkw36|qDgycn%7&(vYn80y>9uY;ZL@Ot;jz!n!ec7CR zkBjO^*ey_Nx$MBAwIf$zz7i-z(C=} zcW~tm9dgooIvfFj3>>k5zzZggz!V8zHYDnu36ua7?;b}H=$rr7SDt-$`GtS;@Wp>W z9bQQ@lrX!IpaC-jyBmlD009~Rvj?D6CIbLw0;2TsS!={VQ4Jw56R6Y*)9i{s_+&r0 z4;nK*S@!@TuipevG|)9Pix3s>s_Q}2?wBE(GI+o9zZypc(r|tP!Rs?~R$}kM;9iGo|`&iXh=%C9*jkT)J)>(m zO+}<`x7(%(-ZMBz0*;tHBdP;d#Zn5#Xj<#IXk}tzW-Yc3gJO(^)haj_%qtOqJC#ApaI)}pl^hFZ$?_45##Ztyv$o99o{Zp+MuNCZAG z6XazA0Cr47o+;OomPHkqIkePTe8UmksZLW`+OFwqKb{V%;+-?I=lf>`jh8Zyxe^i7Wj+x-LPSE* zlvE)1=!&+L)x6xtHr(SLKQu!1g0S`BTgS-x`CHqwn{<5Z>Z!#`holCe)-=tyBvm~d zHvNN56QV;(DT$y9eok{$saEYy>Vzr^cLrH4rFeFp*pc_%AIFoHx>gRTYD%f=8fMNp z1AwZ=7()|NEoE6m1l170%zOxlMg(Wu!F#^GzGlX@i$(KlU0UWvRXKQIx;-9B$*V+W z9Zw(%5i!$RYPSw1Jo8%o$jpYYvI1J`8s(pIVrGX#p48Y=Q;=$2R`EPAoyOB`y7NYihl592b{ zxx`ulvW8%@7B$BoSj!5jNZK^|#;goqP51=5YO9zyI24_vc7J9V<#Sa=_(NMh{d#$cvR!UEuRaadSbb zQE>u$Gp(F|AL$5d3=jP(B3egDJ>YyTAF1sfuekU zl}2GdL`|6XTeyC0{MCB;jU)D+I!{4!tt>vE51j#{LK9+1OP=P&MH6Gd7-L=*PB_kU z(|70{dSoKw5Qs}DV0Tx~d7f)FN1Qj^5p&lyUs(Fn=5+S_w2zN&ZjW*>crHL%lT+Xj*Kz3TU|5?x08!28o*npspa6iN00@de4ycNt z*?xQ{q9lss8u`rYq29Jvwj9%scZ$hJrU&Y3zd*bj?0+z%765dNcMAj)tZIW7%MBtj znBM&^yv+akaDj=L-u{*U;PCW~akFWccRzRfzX|-M=VyPl+7*hyi*P!85zdQJn)E9PlnF%Q+Fn zY6wKmt?58o^rKc-S1=P%RmV(p=j6Jt!oJ5n?(rw!kFJO7I?0>e(rjL9Pyd6z^56Km zpLo@wU2Jy0|J~oaIDe4xNXUKDZ~D!;Saq>E<)zeWW-&PD++jRS(`j?IW3(9kF!XOf zd2i_3^Kf>3eLZyj+2_Cd=+##)F3x`SpZ=3HFQ56$Yfqj%y}G)h#>rAa6qtYvk)(n` z0pxAB_1;hOTvfZSvz)6I7du(hM^8Xi#7w{=DhXyqt0tw^Vp^mkQ`0pmC6CCCfJhOT z85mMp9C`1#6e-ITd_=%JRjpd1#n6-`OHnIwtdprX#j(!&c|-;WxRhE;ao|GZ=fl+c zFrCI}e@v&T9s12?0K}&C$K&yo5;Ny<91lkhe%B2M>YR%)=9K0Vv@?X9}p=mHRoJ&skxA=&bfJ>i72cQ 
zPcO`f-h0n((Ns&yT0;miHoLYjbD56gG)?5O0~nH9!2FN(?dCo1@h=A`35mxAFYT&{g?8K5n&!ED`ReIg4nb8*UZC+_ z;jABCy?j{8T|VqMjzv(=QYlT-teb5$k*asyW=QDJ5Q)fzfJU{}=tBr$&grgb?FHs} z4k3gPPKOZ)i_}^Q5+NcXD#EzT)zwmpiQMI9=IoPqty9(G{xEDe^E|K1(Yk82ZTm8l zhs;n7Kz&?`i8%py@6jwTO9%mo2#^32fQTHig)p?8MFo;njJ;1}%QO=aGZriCDNEJU z2@%_&HIwV>eH$CcE|3c@I_J!c6q2U9Gy_Ca1;p*7+xmVdnY%b$xj zz`IUWOV>0F!;2tU#G1>hxOBm-W$%i*_S>nch#ZecA464jN}FzIV?5jLPN!2PXaya2 zL2uMk0I22+f7tZ$yUo=1{sB%$WVeJ@hLl84sk1HeSyb`1oultC9oC18&ZO+X6pJw0%b zd;FP!QB?}uAh#)1heBvtGorf_g1+z#z{*lvRW;`viJB-o^pe^T-<|b5%|jnPtEVx^ ze?9&EYn*HUH|6l`J<;c}^}TcR0jq7$oYWHms&x6dEl+bI&`~3JT>GUg|?LO(3 z{3FCirUwckB02!nml8$e%0NI_(02HRH6!4K% zv5L#o^XWhA+_&ck|6YFn7QkJ{g6Zo1Kt#kCiNHi_&5Fukh{*J@HP@nJG*hr5+00DD z0Ey@m54ZVbO#>f#87l+CMyN(BWol-X*(*f$j8sfS6hC?@C;$Lb>>_~Gl+BEp*2LBO zvc`Md;~w7+J{-kXi#~k(=>2E!Jj>WI=r{{D*beD5kJEBGoyKuwM2{qdb)MK@*AJ_1 z@#f}coW`?Z+cn+o_07fhV%QG1hkc0gI8RX2v-|De`I?CQg}?9@&NrJs_j5l}ay}ov zxZj=q_FHeJQbb+53Fug)P ziU^7|J~)f2HKn8k1kO1JW;xAkl|3WvhE3mfAi7NRJkKH`&YE2lVib|S?E>=c{@N6_ zyNv-%x*#HZw>#VTptq;&iM{vH2Psub?V46fiG9+7rA@%7hV7eEZovQE>iV8J``&df?J zRx=`dc8KVlBVtq&snJJfUUJH{kioiFUo&%@b87~Xs`|jp7?CadyV##9s+A0!a{;l` z>WDXezgiU>AS0P5s6`*NN-d?7;-mN8GxM@6sJ5!Df^z`s9H~}DAjf8gXw0ZpBfH3+ z4Q18YILFMUCTa{A12ej%S=Nb0mO5-VOG?vxYTGvFEKBbDezVy~t;?JP@zC|-IPRCl z5Q>-&?7df2H48p8*3J9dtNP##{^_}G0MEWv|7%_jtW+b}KiqmcgKh5CS8? 
zYAI(z1aizkreJ1EUMhhyHeKI)Z=xwBC9Npi*t&4J=yR4madZe3jDBE#01ZJA5$&!H z{*Of=7V_t5Q9hG5es8H??DOxX!_B*{=|c-DH#hTm``sL7Z5iOPYtG9m<6~fvYCq!M%X{489-khVS#DY` zghD7L2u2VQEt|3#7@&d~=!@=wRRO%r=zM3U3QL+`y2&o^>6>BxXJ38gm$uW*Z{0rW zHt)Uan#b^;U-C4w0E_%Anwxt2kz^ML<6&3~qA!vLZOt!z88`H@fnYs)YF3J6KoU>| zcHYN$ti0Veu13Gv$nA2t88uV~Y1W23JWusA)fZko)Ug)qD(a4OZ?KCLd3ODZbmH+Md#h==K^S8D+ z)*tKkfsafNxV}XIC?YwN+DjS4%LIJVwPM*m{Fg8BK^P9gS~I+wV(4yF!D829T{EGO zzkcO^=xkT4&Oi}?nej!dm8yamsx{}^z%DQIa#AgYzz7ZPV;de9L_k~LCmMQigcgWr z{h0WN9~z&m6onYfQ~*H90F21UAtHJxC3!O-OQo74@0=6~ACXoeqJ9{f7^ma$u-`As z5}XeqRQ+_BbMJAFdwlxgN;uollgn5C@lE|JqkIM!YJKh1FO*$*|H)gS?P{&(=jQ}) zT}G`XC~v#I@j(%4t-<-r-I-O{b{j{uzuEg3ecPpR>V`i0HU=*RzxnllvDs`&o|a|l z`+nFouU|fTzQ37js#(}fOo)g>5T#ONSyJ0ZVyCs5Xz+f2y;qS`7cry29Gq_g0C|p%VMG; zlIHpH!hu>OH|%!g>|u9SYdxJ#AmRM%EQ2mN-|TM#nfJb^EXzU;x!x6aC+FFA4Wh=@#~X&BwIbijdqYvD;({vJ% z7zZQEIj3c5nx>8GucBk_x@OyV6k5-2a^8D?JRYse+DL4Khcqg{n$6w&5ZP_p9tcJ2 z;dC75T(m4@nbUNAb2I7eoNGcvgL#~!l)mjM+ji5pu0es4_kA15p$nYSygwZtynY$B z&Ea^QPREo|AG;VMV|XsPdWgL%3o=#qzPw2JpWm5hiCxRMQPd_OT0gH~zU;2b&Ua?aj+GfO!mp{kzu zTh%q2&By@Es_FBa>&sy)BC1r1f^mqR9h#Z*e#tXBZey>ts_Oaq`Quj}QUL&xt~5>4 z_x<~#&-BU=mYS<_DaCtFL?vYq9lCyZoVS}_@7cc0fA4GbSF3C{eb+D3+P;H`QfjT0 zkqAju4Z#oqQ5n#YBT})PGcgH*m{rxB>bSoZGS`Pv<_K*Q!$uD0Wm@45GM(KoR86VCo&7jIK3nVOb2j|W=>mTiC|z(S$* z`6dGhaJ2>S)1|&QDU)e-5v?ILsxwm3iV}#5ks3P?Fa=TqCA{y*xyL>JEMT42mC(7i z5k|D?JSm8p0^?dXU@y$IYx16%ExC3eBeJBUi73`b5$65X^Yhn!&R<{Lel4E9_Ot%W zyt!EFsW~1weD@15U1R*+>2cX~$jrgC>pzpPDthk;Ftvx@vYX#M&BqhKX4iNgr1HFI z(Rs;J$7h@2Lh2-?5~%aRP|nV`U~WmpIY$H`I?=-9q*ew@DS2fvV`d~;r~Fz(Mf%<4 zcU(i)`wpID?wx<|Rb2oN2N>3Eps2!X)I(%ut)MSPf~$vMl@X{ErB&8!LV%t*IM>zL zc8c-#c5KL#*bH=rP}Lm*>EAWK*vgrJdR zwc4#xS9i@-Svj2PpT6N8_ORCC;yan0qVq_cR+e(8d@fG>|2g0J-f6#kuf5i@o(Frx zWpMN2$wRr>inYIYI>ILb&mBxP%ca($n4wlRdC$b@*mz6y)DxJTJ1WJ$tBUoHWU&$P zUB3aNW1;3L4+cZT!!*RuaUS77)!*bibh0QwOvK(9p=GF94U7OTZnn#=J?aY+6M&i2 zDi!$o-$G$_veZ2vAKH|)jOS!lpA_Q~Hh+t~LBZ@<$w%_6pb*)*xFf(B=K<^wJH75WN 
znGK6tWjk7Fm7~$6RH;?G_oa%N9aCV3{b8E(YPDfz&1Ky$*|6a_-0Wo@PgW;KLO>TH z_CV~-(ZhG%Qb&s5z3-Nb!(rSX4qex^izOnC`$KWO?EBr-O)j$|jxo+TbzOH7yQc4o z%1|>AQSjSoXu7sRt}1iRH`j-f$}|X+s-e1=avI@#yCz8Z!HO}P~{ zUBjd8)|jEV2m+J zWoFFQbzMrS>DpXt^uco4tk$!YTB~!8j+&i-sfu$>Ye^!%1Z95slXA-kYpsydep zh;0n2x>_v8N%vD8h6$kp;4wME4pUJ)YQ2hhcCqoG7ATZbJh4@I{OHkkzXO1I&O$&2 zHJ7f9tJR8#K!BO|zI}zhHKZSQP`f{%^&zUFL9Ri#K3 zscaf!d}=LYMvkVbUS40FHH)t6%%KF#<1Tx2fj2;ReEu!ngmUeYxs9xUDfH}A6U%*kh)b<&hPh+-yD{7mbW}z?6L08m|8lwhQgi+99QgZ>_Fq2XKDz5iQgq*!^4e(*`K|q%->BnNbNUj&VLmkEsnm&# zL)$ssX;{>vhqyjv}oc}~-C2+lKF&BgOGJ&PyO1))r46tiSiB^M@` zT66#a24wHL5hKIV2jL@uIV3!qJycTwK&GQy!v}u+dD}n>vT^~gpaBekwHhcyF2YD2 zNkouH9Zgouv(L_vu_f|x=VO2WE*h4@A-r=9miD+^FK;dGy}DDCt4D1aYIs&Rjw7O+ zXCiV)NOm-Ki%5)wr67QFP60D25V~jFe=$6EdwF6rlbR9nXr%eUpGRLMp+_Kw$&z94 z=B?Q8O$zA~iy1T;6wvAj%>*QYG(@ByyPeEW6iVQvkaGtd!LbjR1~rHAqk7+e!bpIM z$su`Y42$^9Y;QP$_s{>ITs&Re-?x5=caP-atn0`1%YXT;M|bb#f9Xqi^2Il|<+Yfe zP`BvAZKd^!`HJ4T34J3P)y!-f_NPsA>;Ao?5;SsHQh#}M6+*|}%_YZ0lSpo4|ElUv8T9LtS@dZo6wk0wo=o4l-pZ1{xI*dnu|?M zY1q$FfSFY#gn(vU(+pEeQbQAk)Lh08*+o?~5wi;52z@m(ATZBJ2B1*UW7Zs0Ypsq$ zwXt8f`+X%2?DK3>t|fN(&h?LOuWvQ0w(lbpx}rG|T;!1=Q`P=f+eSAZhN{i=&7tr6 z1(0*DmU?vK?6DiKrZ#jU()CO48Od#LcG|gx^5yl-kTNojc?QSBJT#j#t(vAec%IT+ zsGQz9A&^OQ9LD0TT{T8l)Pi5sGCGrK&Q0I>#$RoBtERhmb}RPbP!5MQV;lF=;rei+ zRnFURIPARVg>#U>6k6=m%2=kTq4!;$>z&oD8{IXGH4hip7s$L^G%1xso??u{JS@9@ zzuza#3+*4g_2}%*-J6<&Z^*PvL)!#1s9KPbi5NL>Kr-_BWS`ERE0M~0x=Zg6EIAx* z)?Y14a*qb@aiE$pwa%?ZEh_jCdFm)uwyt8-noA^^6&k{-?qky>`Qd^pmzE{hB)aO_ zB-K})P=SJwRGCo#94KTJQ1s3|n)~o9BG`vT4Kb|df+3keH4~Exf()OOb>Emo1`MVE>$4|=a6~bm|7io8SsjBMQwqKM~8Xwl48$~BziZy2jI^Eow z#Jsi?F&ciUBoU94-?&7F6}*5b>jtMeHONhqdq)Iw;z zu~W<-E-E_QDeah+uuJec@a+ho&5O0?)7;|pF`Y-IU_RMT&z_{;+Km&mniq@WsvmEk1RO4WzWL&-zhIOe++04qN^K7&*^?(E z8E35(08D&|X|Y5!xzSL*W-rDfnsZee%lDvp~({m2ZRw0(j z&JTalHXybiosv0D-CfBWc|CqAT}|%hoDH0E7e#z^DiSs!GoYUktztI58tM05UvwzgGHS zR{pc15qZW_a0!isDul}Js9mg%k;^mk<)KtC3K+r8bK_&HYnPRR4}+`Y7Tt) zDU%sh?i8aIYy&|>6!X<9VflDX10Nb1Wg~|MSg|Ow1P75siu-gvcW=X;#YXqj?XF3= 
z{p;WO(dYQ}U%dY%ck|dSBek|z+1fkCb+>en9>3j(7+ol(__!qIzU}Xw-c?mZp61yt zz#L?$RMl0C81;;b7J-5SO$ZcRo{A#CKAfz&KDJ-~`k$Kr>i_jOzVr9aZ}(HFU;V-h zhi_i*$32PfR$U1GaM(3XGw0f^)>6wf49JAWqKZUKaLyB|F{7C{0rXfEToVxyK+MF@ z00BvfNUI?Lm;!;pu}u#Q4a_YUOA+zxwOVj7&7~IT{kn^rx$KyDE@Wuehk4wltNDt9 zAIt2!;5{QKR7q2*aA>=B7`Ff*isLw*pPzS4yS*8h%(Rr8_PgSoGn1~3pj5`{A*`DQ znOff(YAK1pn6Zr>0Kh?*oM$Z4Ol)oNNSfz?9A^S88BM`aDwhUlR?TgI3F61X45oQV!U`w9!v^-Jv1~1ZZR~U^D>JU|NMg8QVf%5tRVo$XkDp^VJ&-;p*P> z#%a+>YgJmJ{o32#xO@7dLs>TQg?o3G%jM0@&XjAdom<_$bN}SzbhB9h_^k&a1P<}E z?PIjZPoAWd+O~Jj?G7WB6asgPrfvIF9uK>ODBd$W=OWXfIgg04TDLLAVHir8julfM z9HNR!Y#cdOu;5&1nqYpM_H!zg)M?eNG60dAayq}Y$vKP2es?v_iGzCfAmReGjZ1leDgfvZ znMspeU)&6|FDdWR!G}hwl~OF{7~2?QN+|}cRY;Hs$P`uBkPv|cP}}HTbTv1TimpV?WAf|`P7DPpF;y@`-YbIC)wW+W zQeMCM^)!F~_`<%_^$) zjPr!&JN@TgE#3|5-cOAL03B4qvQmdPTE?N25_<2EToWxd8e_F0sET0Bz@PN$00e<( z#6V~Oq69$IlSYA0wV3qpLWk@ML8SnmAV!LL6pt+Q>0I}p1+PTbG}yufR=JJ7ehLU3O5gs%Ed%B17XLdVKY zE~SF8cf#R_2#(}3 z6t48U86<|_l@JKjLbb0AT3V2;aw0a{vTHjsO7-z^Wj?*?RM(^X9#;ct7(3pP`xP2#*?AM;%G}Cz0TU zfc0IY0zx1}15kT*(dAiR_`z{X6=OC4R_%zNlIRibSqC7;l9LqI!}gVPcit>2p@*s; zvntT>#+uo&?(%dyWTqdI*v{=nUOwST768$z02wj`7sCtZPcAP6F*Kb+^2TMD(r$`) zaBh*60hA;mniOlXlV*g1MND`^MBISCy};;Y`e5+{G&m+oj(uAqLJVexLT*Jwi3!0?=J0A!JhvAAK4_Ddv${w z*nai?+Qcw6mOpkDYCmbP-F)ue0*V5bE_9_77>#>ZJ{hz9bHkL_sVbBtv2A<3`pOH7 zx8wW~k(httP$+7xYg;0_|KjH^c89<8#)BX2=}&+49#5UZa(RbdCrP`59O$brtgNRa z_0r5zN+-8&9aoXa&J9Cct=T&<5fC-2wHCEf3NtbzBCZyT^+^Q)D9)Shrv2MjZ~TE@ z{`s@6J(N9d>|}HM^Pl^f{V;IjkcpYYT?YVOVRf>}wG3&dqh?SQM9iGLa}4N+Ma%$> zT+W8*Xo_`|voHhzLPRh+_PR1NJk<-MXc}p0y&jAG|@R{^0e&Xf}itLO`70Z z-?E~oKD5jAvgBG+p=;W4dmcG)=YUbz42I3Akw z#j#8JsFdxTE2XHa*UG^oxi7u;Rq_<0cZ9W8F~`h--O)HytE7}dNbBwdaTQ2>^aC;us$S$VI7cW&1DQZud}SR&wT`)sLuL@soyA znoy(ToEa4`u08M?h8Gq#s&gT_V&YV_l9G|bPw9Oy1W+_oMq1F)AnxyWR`NeFfd81_ zy>@})X81T!BO=fj#xnlt-~YL#hVJCH$f|SaUwf_X*P)Hx12xf$NDDWDv{-ML+vRHI zsr&pZU+UrpkbucY?@?d4zXX6HCL*gf)~c%7#?ZBk^%8EMw?xE@TBYrp7w&y-nx>TJ z<$A@;!!VRm#&J}$;5`61;t+zE&Uq3uEp~f#;*ch-rIcZNn6*lD3#Ma`@0|1AbMT6q 
zYeCUkYX~9dtXgZW9DIyXsEAlCf{4sstRi9vz9An%5D{{GeSIB5FtZrr<>jRz`v$>C zszb;Qn`PX5_4B`=CaDxdFcYcOAwi>S&qtAEBATZR0Da#hV&k}!QYCAZqoTrb0z{_e zJOe?~#~9k_&am#*o8{{52al$C5Gk>ZUDxdQ`)WYQR?&NZtfNF9j;k0mduDPD2w%Pn zS*mJXEfz7xSYn;8E}9n7gAl&_b1N6RJ2dT|SJK~730;5i#b)Yh|JrF>ITx7bxj46G zCY7S98cKF;f;($EoLzqBg~dhr!s^94t7{FxXt6pWqcMRqe>3hdn%pb1)}9;d?rmWI z!e;k_jX%}1LHZD;kf1*2`}HiA&hu_hfA>b{_~Nv**}!za@n2bCjA0g;DXn{=FhgHJ z5n2bJ%81AaOrLP?Hww|s(`3RJgb_(zUebwAUwL2NmIN)j)*0y_b#ol6pUWq*dN)bfs zHn2FI>fX$pV^w#|!_=xortBPaR=MUR2)tZ-Hr5<=WiI>S&bd;l?ia#=mZ!9ZpB1F0 zUpW`Oz%)3R;Itcn6g*Xgah_+b0?dU^XO zjw&L@wB$U`OacI^>V5RygCVHS^BkJSc~-5G3jolXypP_8WVJ;fdO2C2g66~K{t(+& z!<`qWeAO0?XTWAUbKqGzuQSpnS208vJUP~S>|7)Z%j25moCCt+Mhp=<(y0az-i2|0c~0f!xBl|Y ztAFC1d!K7@;#fE-I5I_4UAi0O~*$zx4iIx-19*6OdJ~V{CuioxQZ?TP}b8 zP7jtbV+cV+fx-0CSti5^l|vIElyK1bP8T*by>{jsAE>5;OyEf7Da4?!V8`?E`oYt^n6)tsv%LUUzw+WzGyB*=9_eFNMJK}J z76{Z3>Ak4Y_`_Moz8s~G@m6=%#GX{dL{&ZeMr%1+xvmrD3g|;4qG_64eo~dpHNknv zbAd{LC+fDmLM?2H;3x2j%m;H;OjjRx+#pZ4c0@?;D*!NZt@Wuk_q&r_DTVzrs+#0P z8USE;x?Kd+_jZpT^l_JKR%v~+mQ-JklP7Fmza4(cytgzsn{g>uKhxGf`Gq^ESRk`K zck}Wm9k=I;Km79PS+^XgZ6F7XSa`y*!SGR^+D`-uuIlWZC-7qVb38sibzeEV=RYDb zjh{3G0jbZm1e+Vb++#Nkm)-82YdiV;nU(-!ynTLlf4X_Jw{gsViI#IF=RyeMIIh?0 z?RJZZz4xi)5JJ^bv@}hlpeZG-4KaA{Yb`0Iwr!6hX)iF|fFB??oCCHm-TT=`*O&d$ zOv4wx9EWKFLPMyzbWMwh4VkJ6D0>G)CQ@saQmu~&%7Bb?4EhkUs&veylv4Gn1JJvb zV?D0SRgUtM$2HN+%*#B_UDuc>B1*L8>8u4$T5a@)4Y2>x)`Z8jT~bgU&f7aXx#Eu}c;W=B{R zEBGb~!8B(!z^WCX>)Wa1l%}R@J-d>#h^QHRSFJLmXO^PQj>TXgq8Q^ijzrYOW}0S3 z!hovEK7bmCGN3bz-tX(4*{gzx#6V3r{iR>{V=?-{Ktza$nMK5VPYfxgK#0JlNcc(w zv@$0|BZg%3Ui!>LM9NWzG$8+9xpCWUP`CU^7a?#^1@rm(P}9T#@Wn& zL@f2pQj&WGc7NoR(*bJ7Ewah{sU;CQpoug(DG~|5*bV!`R~CBm`mL7_F~kLDt4<8f zD^*}`pUz5oWh5g3aY&0zHb{qi_pIbkO88U3-iz+uZ^~7KS^TfuZLapGpmcxn@|@-V z{MDJ}Ha1uaBbeE}dx$X{=2SE_?7Qat3$K3di?2B)Jmfictru5HVdko)s>i9U6tR-m z>yz!xK7@vui9oGNsa@CAS^=PGLe9C?>SGX*TJ=3!0wO{*F|D=6z&&{>H8UWSlse^f zoyS}8JO;;rwbpr_1uQfX8A>U~i%m5HK#|(_EdUT5)%@)!*?%OjF=H3vZnpz?y5Bp} 
z8;M9nvZ(V`s%UXt!`rJN;_YAfg+FO#r4(c$LNR?hr`7Uw1(9R&#XIL}t$;P>%i?^u(s{+dopD%44HOz(wm{zp9Wluu78waGO1GyGBL3T7=o$vAf zFXZd736nQ0^nBv|A6|Lj{Zzl-`OhQc7wE7F-M!U`uU2Yek{DJs-EbS-iU^m;Bu8~| zBmJnlsXioBMn5mIL(jevr96JAjg8IW8F`OSE3($E=?+8b;`j;=|J+wz49;1}9|vE< zf0hskxAWB>dHJlvcmM^-Edqt8<&#|odlpQTqN^Zf0@q-z^3^2*c8B}HQvl&yYhl!; z1>Y8P-CWEpgrKS-a-8I!ru)cIWi2QWtnCoQ#>w43*>XHd>9pKr*=YT7xa}OH_nq+%UzY+bz!bk)#ANxrvyOv^;Pe^DT)Xl zDf4DTlx05tKtyJEjLAzWJ_fCVQqhE&ft)CC@N=qNaJy+ZIeoEA>1Cz3>VIZ=Y9qg4 zt!Hj~FzZ#D8P$*1=Gr0^0KE6)-H{TFh-7pu2^isf{-B766Z$D(`!L>8Ph~1^}@-BU4gEGZO$HK`>jo@|9({awl!WT#1=eN=7N*r?Xtj z%(Kj7&P1rY<4V3?uK&~*UWk;Ts*)i%2Mciv*5F_K#X{}aKIzXETRQKb{ATUH)>55y zmAZzFS#dK>&>a$h|vup5|DY05CkgYem2N{U7}6 zfA5DM_;n=DNcUSMz#|n=4HYb#h*^AwMKBo{y(d7VN9#XTE&Rc^^;u|Qvfs5XY6-d#5B#JS>O4yKli`+)n7w#si?D(K4wkU20~9eA&`LITgA(Vl+OE? zzy34-@Z#z>-h61rtO{&qM#gh-%-#iG0l4qmlMOTt#ul~fCICw^yl|fA3t9c$T1`*ldNMtz9xlgZG?OCEywdygagNUltM2@9} zXWbpn;vyqRmWL7e4}I}=6Zmg@>+MlL8W!hA1xCaSU%s;#zw+Y0@&Ep9PYm8PN&S$@ zPM;LK_+R(~fARnRYjrNZL9J5I8sCjDfP6Ze>>6{51purPGK4?=bN`3$eEaeCvV2O@ zVLr`m+g`!s0*3>XO1y~k#pB11zxOYGq5oI@*yqyz;>SODtLBSAb~o2Mc9=@JzP@gn zW*CN}71d(3Y#r5F7mGzI^E{8fiN{Hc{Nkwf421JM*IHu?S2x!p^4be8bb*uVvgSlzuT?)MYnNTDj>SR&N(oXQdPALy@`5aMnX_zIu=tC zXep)CdJKr4+G&azpsLndnFs(>o~CbB5vOTd_Fa`sz@GfA^Lw9r?Q{3nFa5Q@`Ikr8 zp51$4&SH=^t3}}U^2vpmGP{z6(bZI&(0b-#br=Q_TXcOX)f3P2jEG&|dhee+dD3-V z*L8@Pb7tmQCRG(FjMOxZiR4t5i^X-Cv4}$oZIe~D`yCoBLz7v}EajSN>0;A1q2_Wp z99-j$87OAfQbDnA7lA{F5Pe|gaX${jz|2k4Ftdy9xT`@#6Va?zr3vwJyotUcbQc#F zuf6`tpZVkejlcTW|Du6hPP-Um(=-48sGip{LSC`evm5& zA+Ar~dUW;8AAft%^>@xrb50i*7wWj8UEFLNM_>Npm%jY!FaLvY{1sD9TF0Cyc$h&| z+qN~coa=jsn8rg4?KGt}cE@}~2yULoySHy2or;^Lsj6xf5yx??1(><&v0utU2u;&? 
z?;A%{)y%B%6r3mA?sngL^M{|i_4<0fX24p?G);4!fJjVd%S2Q}M5~$!)Xip-Yi+>; z0HUdC&bgFwv>I-k=2$MB=UGJBt~0ZFo>QI_T-B*o)k0}Hbj{-RS6}@<{gwYE02~gx zEY&$jM}J^|a7-tNF!Q48j-6+!N=-N%4z(&XgB{D#P)c!X-uu39*`W#4RGZM$Txui{ z0XjBU1w*J+hr$+~eS)QJqA>yNphuiJ8*4l-xFic*)*8S{X zJJ~qyWPZR6<@$Uy4zExFxp97Y(EaHz{M+C7&cj`*p4LohJQy$S_RBlpjBmYh_np80 zCHILHm7ZM<|27HC;1Vdf8l#y z`yYh|*AfI6p4U9``FiJ{9})Gx_4_{mYrp-@-_4K1-8&cCJv*fyJZgs0O#QkzWBnub^Gsr<9i?Vsr@rWLs;h8G=1M@_lLgzZ~d)r zNG<_%a-Wa|;6p+O?F;|>*Zx<3{ckrG22uysfXua4_vxgCHEKnwkgmGZ-Eb3Q%d}LeVaC`XX zRm;D0{kOgT;9h;v8XNR-mxnV9nY(>1rez6!=~nCY@{q4e<@2s{hUS_XB+q*y66CR7 zHcca@4<9~^F~0obOKF^U+bwWRDWz1r_s)gm>bB^Y>NMwkTuMZQ$uqO6PH9HahB-KQ zTtKT@Xu6H>=Cn_SpZnRbYc40A{USaXxpUAX!R{Ef|F%w^FAzi6B439-Gr zelzEibL|!j5gDgLjDa1_Oywpyx9U^e9Ue^i?fuX1-u%L^jG4PM;o%BMh#VDUM+Jr7 z&C{A0y>F@(=T$|lA_)O9umeP;ufKZthok-Pzx4ogqT*C5B65`ZQ=0d1S^UD!huk;q zzRb55efvkg@W1|BKbYkJ7zhS14-UPETAxqPzx=Cz?)uhgpjW>2^Z#~yeCt52*}nV0 zeb;aWUvs1G8J1iM4jp?}N>MRlI&z4hR?ln*MFim3-b&twTtAq76DR-x$dPZuPaTGj z9NG^8yC^w>RX_qDRzR^dPR{>yg-43Y;7cl6lmSgFmDF(q^65;AJPR=^rpgAOR>g#& z7AvOhr?iX0-esVpp;je9ovWtmK4xXn5AAxJbr}zhqqzbDH$5dbcXFGW@&ESbkN->a z7jJnx@GwW&-@t44Uw?FcX^>Ag(R*(iQc6|KQ%^)WABI7Fv*`LB0FJimI?ae!YHi!L z3-OS%V+>6H^WRLbeedy~`r`es-t~HMeXCm)!BMw!DMskLr@+;!Z~TehsMayfecu{F zE`=R3k)m~7QUhMAP7Cdc3Scu55OI7Xs*1C(`c$oV|6cIDjT1C=s+#5({7BlCmX@Z%E)abl( z?)vJA9eB^1&FbhnP^<8f`{~+%G|i&`uIcfqGz10s^9X>y(%n`&OJPDJGRbfgqSY@245I?bbN2sN6z z!&pu>cVk#MZV{R^k8SkrBARJRsbBWTj9U!RdB?y{NyItVlG(eI<{}xAu|q^;$A*&1 z1O+^=&Dxr^R+Y+X9w{_(ZM&V_yHBxaMJHtFNRbsEp^hd%hV&_&{}GAoNYeo+6;w?Oft-9Y(tr3q zX59PR2rhE{#V>E}u6Gaa-@STs$m(^PUWno9?ADahVH$6CH`KYd=^i|K9HS?7oAsSr zw_gNq6#xVsK%QzE#`SvL#CX{4^EAa6iAbt+O{=P_u3atHs!&RbjqjQurSA9pW5a9H zG*3^e@5xBa#u<=FwdOQCPxtQLo_9rRGP9h}a&b-9_*Kixo87kWdqb;QnbA3?0IEjE zLHRWQprWE=M%5sws??J6EF!_VCbrCMsN{IgQ`KrSg2)P{RPsz1}w%M#t zopVziqYvaMBF7jwcpxmLpg06@!7~yNO({t!ecw+eE(UU-sz+L-^R8*SwQHtn>ifQl z{^sU}2$t*RI7~^$xz64*cr*thmQ{o_FgnLa%?e~N)GC4uXQ!L>dabIv?UjOwRNpvf 
ztmZ@>uYoRUolow;^@G5Uvp%mFthvGk(l|uzVcho&OzIsDY2d|4=62X_%kD?#+h6+% ze)H!$`L?#(+$0&1{maWcwTR6P!$oMiWIm^`-Oq16nL#^ZlDl83Pu@t|Yk}Qj3B%&$ z)@OhuGP&nG&u4WHd|28iJIo5w0AfnhA$7bu?YjY-BBGI~37`TB7y%)o`-GFKs~HFZ z5*U&hpg^vW>&I=s{}aCtS{?a3q;+JSGApE7op(QN_d%>;swgU|fXd#bQV9L0^**R# z@ZFRT!+roUL&YLqU>%*#GZKg1L)T=hk|v)=kP1>gIU=eE!GUT`Y3$2hutD|+iOe-k zf6}>gs9!uh@`vAufgYBZ7Hz0$S8z@3Zr;nBMnvpF>?wLMWFU@h4iy=^^A@?L(Q+!Y zt3J6h>~kBtTX$Xz91b^W8mA^KvsiSzXuWr_6e0Dh0D@9$8ddRWu{i`PSyVOnPF3ep z3@`*w92@6?3&9AjiM3tWS%NEM%9W;lRhsVY{yz=<;Cps+x%z@;gSN%)Jn1zNF(8^6 zh#4q?nMDvKH8f-bvUllO;&LO1Y>p5;s&8AWV@p0xKLEFrW}c*ai=NmlkL9Y3%Pw-Q z;=RAR-qslru%)=f!`K82X_Va!T+U5AkSpCwlplLw6#xYyz#^z%AjbT(Dqz2pJ4FLU zK!bqffY3mNL=b@TG>2uYsQ{Ws77-(~V$ZuyeNw>W%UlRpvOqPbpsCng0+IOOb*_RH zJP8EtR_%&F zg;3BN68w%Uk~1hkObCF6$XN}D#Cdu~lcqeQC{m8*0cebA_V#1pzWei?U$|XxBMx!q~1c^G_9{S zsAB_20tHKkyNkt*y!1C8C4bnh=l;vL`_Fah!OfFuI?r>zTo6&r4Kn4N6^y%X++M%) z&J#pv+qP*#+qR3gEyKZp3}hUK*ajELu%>!gtzP?0Tfg-c*JcOti2pnS; zH8n<40Vst+G*eJxd@9Pxi~vyC5H)G30BvXqLP}*!(=?~K)QXS}2SlFsjHLz)hguNP z444^_O07z!rnOAB&(2jLO>-OBdEBf|Ysp#WVYbuL(^~6(zt1@nIc9FVm1-?%2#&q$ zVhBvI+a90+P)&6zV^$C73{WfPYE?+hsi74uV>&P(u*~J)+3~TkWi{6dptUL*O*lHF zwrQF+O4TD>x0YmPF}6gMQZi5%++5Q6<^+t+*8#}od7h>zwxP((F``;kskMp-0I(-R z6-^?5B!nbZrg3u78EV_MIp;i9=gB!IQi;fUie+Z-HP0T?`JxHdTOFLdgZqc`;cfu& zP@y0&gO~sY`e^1E7a7r9MDU)MmmY8=OsW+aWl ziC|7+PUB(NZWgyIz?2g+Iv1C1V`g_wPM%yoMyDcLYj%!3I{vf?(PGiHZ7%s+cC z2TB*C_W^$N_(|q+(ryUkYT9k9cXckcYua`i>vAKo-37&Yh98wL-a32f{O}*rs@tFc z(Jn+JYuM*1_?#k0@QnTMv&4JoLLvr3YC^@0$eJ-DHWN%h0qiY%uSW_LbWhJBX34gxNi7K^= z#0{2|n`SYW8C!xR{!_XSQqRS*>!Zv-9-RlHVq9sc#ke*L3=09$B#;d(Dq=OO#tdA| zP^nfuOeLD}az$;g^#J6X`tdu5!+*HZ!{h$exf^yP&$KoemeyInz;Vwt_t-4j zFdg=X*#v0OF}G1vYZ`pS$jv;?VS#Dxyko|Ogr_Ns6b{imqfC9*5z#b`DP`vz2VZNQ zQg)s~aMe^qM3W(eHegj?-Ot0mh%df%&9dopk}V_XzUQ!BEQe_#M?kKv^e~}T>fHQF z{_ZQgM~>>AH#*<_Py8z;ssN6d&=d>_$*~Nx87d$tk#k6ds$`5N)I_0zRqru0J{UO# zu4+}FS^#2KsfJIfxK>6Mg4jAUMN(yLS{6uTg$g1IA`z;OfyA|SclGkWz>;PnUlJ 
z1OxzN019nae^=A)@dtjEcE185dL%Ry)tsctl#@FBU>2$h6EK)FP*g@yG-Pt}3`(wn9T(eMZu33iTdTNvIbGjCDecR*@^|+icwCLH)~bf;xbORZu{seMnt*slA%`YaL_9?cq*Hle9d9pIf9IzD&2MdA`SuV0YcI90ZHT(QM_R2`i^XDC zu5WhRTI)1T4}bV0LcG1)bY0uTXeNm0i?k=_X)fb-dvSHWTrWZsuAW@~=5K%Z_BbDI z|H|Y3_5baIhkqtre(u)k4}bXR^FQ}TV5SC~K^+GsPsXaMOoCEG7}$^m)rc9;9HVmppqfNwv+8o1 ziB(DpO^akeeAtie+O=(zYGp3fz&QeqJ_JDEh^pXtVArc^+qikItJSJzi!qwQc6*4S z5QXKsr&?>u>($y3b07oS4~NjSc^a3?jb~p4&3qFZQ{?d5KDcN2mmPYzY2%+tWk>%Mz(=@5LaIah7l z9#vDxY0i!F)yN?NKvkgvkj$zLsdRl@u9u)vNu@ zT-KZAYPqR1swVGR!q~cX13ik7qUWYzUM-n7e35Ls-Oj^Mt6we_ckaC4h{ti9#w*Xh zq@qcqYZvQ<_dbMBvo89LT0I;F19C1D0pzfV%Y@DY&r>=E{LCJ~Tv+agL*x4x!r|&l ztHu~a>B*Z{cW&KYFE8H~rNHi|51UrRl}HuIrY=)QBI?rgCHD8a6L*^XTw! zoZMAApIh5TyWU+yyyHEb+&;1GcC1@>?<+r?VMTLo z1;+|SDpX5Lok;Ga;=X1M}2DlOc6*MBbkzS3muc1IOo_{ z=+jvqARz#=YR;H|bj|^YOCg=O+vL@{b^)Lw79^{HOQOSEVH&Dg$=S6n1Gth{^1%+0 zrE!HDj|MvDe6u^-O}hN*S8z2;V&ok5=hA(5r#*RZZmMaQYSWX2;EY{q+7qqYv2k&# z^Ei~V_T;2p4z}Y^reTIzLg=uVp$j1vDa%#sT^NR;%G~v>b55#xA10Y=H2@<*c8rKA zXAxk<<;haL40#l(MP{THt6t0y{b0cRawq~#T(Xzt&0d$?#9_F+S-$;i=j#RaCs%m3 zal;S#ug>FSRU8t4mQodg5j>)(3K$tUQ%{WS%uEbxEr3Wy-jR&Fntwkjgb+0bfE988t2e)!;(KhIyhb@K49x$bqEFZ{*zq=xd2jsphp ziqWwGn^9HLTHuqSK0-|`D(0A5k5QG7j#U5v0OuiOtN}oe%8{Tbu%G7J=5xQ$w$G0* zTCQ4>?|U81{0ED=cmOzhVIu%S0032(|1thc3U^L`$$$TGA>KQ=1AMJhz-KH=&*-9j z_Ux@zmEcIbP=#X!Y%?z*LS#?#k>y2FEcK@Szh^q^guvOSPmxNHHai(Q+DGXdQZ_Fq|(o3)YM3*x@zD~dOJ!*79i;7gG zdiEGCl_Q0tHFY_p+xPb`ZN`hY`1(OY z0ds`!&V=tD8_#@Km8z;m^FTaNF(?t@OrAd^cHSoiCL%<~3ZFP)Lsl^2Pib`YOgL5W zO@hSQc$lk_xT;v`6XHbwydY8(r%08|ne#LtgxOD@QmJiFbYS4nffxWYGpHd*Lv%JKPW=h{!?5e`y{7X`o^Db~V?_|_ z+qUtoXE?ug#+5Kz-z~r+uJrSt|NNJ#qv2|e`gqs>rS!rd9dCa28M zE0xhV`LF{xPT6YN%o#eaL^{sY5Qsp|i?*#y3b-EzsMIfe=dnnJ24cfmrc(2*_QVHF zby}UAn&`ua50~pzkujO9P}poJgL=Lmhd?2MUe-*R&Q{&U%~iL#doX-5JYI%otx-J~ zNwLW@w?0x%-e3|LhN<-phA_{wi$ETHL+Ns0@0uo7g`zptY-m3Eu z_RG#KtakOmA%8I}?;XChn0CWvxGLL6(VZLqq*bOr7|3nL1TczozWM~}K@A;#O4q?c zwG*z0ERX=nHwP<@NGXYdDpstD3e*syV*aS+|D(t3QAhr$0)N!G@*k3Cv{FDoAT<+1 
zGgL$-{KPzV6dJuT6%f}d%|*D|+}Zx-pZ|pyOgJa?beKyxSa0pKXRFCw97nx;9LneX@e#cDat(>&*NuQ zu$C$@w0V}kUvym~T83etYmV_~V2~M*fgE!@IeY8so!4J_EoQsb^<$cU`1YHw3l{n8 z&MM0|O{0U5=CWEZLUhwS%p$oaa6HTtGq?TPV+b)s56(F%T9P1Oh`c>q5n-@S%?@cS zIQ300MZjd?mZ!HCM6N(0POfn|r6N0p!J^yEBN;fu*hVIt#|fD}e|~G{Y*+?lQq^UE zY1rF%Xwhxj<*w|j=EIa0ZQnH{c@&I?A>TSV9rrd4!~OgBZ!Rx?^y4>PeBmC4xXbg5 zP{8Uqo-P(I-Mc+N?w<&zR7w%2La^{~t8Isre>Co14BW{ytF^xOhIJ9|r0#FK|LTu7 zKX?1^4&gSGz&;-A;N__#^s%Ajm9jVUDxRq zcJEj`=kK`#e1~v1lpjxXlgH`i zN3woBIc#+bs)=I;W5>RXhQS#tm@!tW2tZFb| z7jJwURzFO#S%2r>di*p0J>7mcs6W8Hmy3HC&^ee44u~tALkQK-eMIvoYO6MpD+W+4 z7!iPpPMt0aYn4HLY$^%PJ#ilT=<2}HC^?xATt&$fV8WxqkOKoke2-9|ss<2(+n(JG zFWg$hraA5XcYnBbLXz0~4{MW(WOcVScHSxjIe@B4MDSC#EIlub&>>5ygxDbGU8$D5 zKPrUEQ3l~XYZOElXH_7T90c7kRQEcqqO&5mwV^(;5#x8zUjRH z%B+NSf8{s7`&$m3=Xia2vsm@cu}W3NEZK-d8xX6A$)rr&9I2`7*(6McrrfHmWwPrIz01ZP-+sxA~l2%d(VuemMWDQnZ09D#KV5e zfM|4aadEW1oO1$zzV8p?08Ko)BQ3ig1mW{ZpdFXZQ>NaAbs;9d&HqOLx+zw58+s0EgsNB9atjQ`g3W)wdu1 z=;FFRKf5JhfYi3F^IlAEw!3MXjvAG!+V{TO>78pzZ;4c6RjqIM8HRr5*Mh6(R)AU zWL1!enGK+CJ7z0dT@C7x$Th@Jr!*;JH&tDlc zZf2t!0RR!ZuDd?$_S5L2s~}z5V#DJ!HImlr)#mKv(e>pp4})pnH8ttUdL84_4MjC= zyLkWp{SbUsnWiaQ@!rS2Z`&Rb;~+7!R;GDOhtb-q)rNgArd>Yg#F6ax6}jInx!gA5E>^f4xMFp`IdZ;7}00=44>f>jlvWF9HOul z)Y*#7rJtb224ckEa;>I@0BD+PLYwa{`zJQmnZdy23s~Sht(_b;rzhRvy1)E?T*UjA z7x%BrDK8gs5QKVlb2&k3PnW>XCIL0o%FMxg6GhORGcyyBXLk&5YOU{i3m2*1-R@7j z1}apkrga=|TJSHP-+MCclRPz=Q3SD~szqyDv1`2vWRz)|=fgyf5j_?l12i&VHO*>R zYgT2cmXkS~b5d1AL}W8$=A(6x3RKf+nzm1NMe0fO-McS;?aBEs{dT?o(Da1VBg~+Y zLOAJmH@h^He%>0av|DY0&+h(`aki6uaT!U#d%S$S*z{&LO;b+deLM3E&={HL2o_OI zMM7}>sz+os7_*#rw{A+J=&a)P<(vwjpD(X|4cixY-uv#1Q&uaTWu4vRLQrs^1dvUr zU{{F{W|G;7Q|ge-acw#Et#H6GH|ujBi?&xdrRZWopGrciqK+7yA(#Ll0~?TeYWCL0(CD=jC6_Mk`6?1G95;ER(f?%SR+eTA(r+98>zB zZ0v{xeWoFh^VXk^a^=RE!)0^Y<9rBfvM06fl5K(~M|ptIL^y^g4$S!7Rsu6q1Y+di z!`=Hstr-;`@A|6V(Hsib9O{QtHF6B~=AfPx#34Gv0t$2_b$*6_0?Z8A0}^|5K8T1T zJ5tQdB!%mx5v7Gn(S^+h@R*T3`NFQ&(T?8U{eyt01jr1yQXQ2|k5@??Mb 
z=l{ek>B;=yhd=o4+rRbpi(hAbzXbH2X3zWw&2$7Jqwb2`okGqDWp7|B&oDH%z;cYW8*c|b%a5JLq6 zLQ@l!7^C+-_y!TT+wI|S@Wi2CK#NDxYtAK2xfH4O7zC4gh)g& zHpna@6%+uNIff|JQc80vH`|-Vxp^5M-r&1)>>0aK;(jtU0X_4B*3F$ zAc`szEt;q`Gm%x>?sr|+v3G{(oSUZ^O+(j0<^t-CN-5<1x^V$x&U-{;W-vpu-C;lH z(qjn19;q^b^A3@U?8ZGI8<>DN_F3wbW(N*g#cC>AYc)I8(*YULyL_1UX<9g3FWL^* zUUxr=uV2wgn-!#>Dg?~!iu}^a&dj|x6hG>>;WK>ncz>zvxP5*4m!HKs005w9hzM#& zUlbr3hy7ujN9(Q&wRU$AHDs?%P+l((ZBNA%gc-Q@#DuTHji^jDK0n6JPd;s%YHXp=)Rt}>t(-ay_>}i z!=ws*Y-_Eh6c-(4&N(~p(V?1&X|1(rVL&n14}*#JF_=Sc!#ad{o@pxk!;ou9MW<*# zeljj1|8Rfv!X1Co#_LCK{`%|x6)lT?|70F7#otnNqKB3mMfu&e0RUj4Dnt!pI#^|G z*JBaM>DA#k|1+Mx#pKqj%ezfsq{JU-bWw`qXd2#B?*3UadL=`rI=S;6Kk*a2S zYN*EVMk&wxzAy@a7!#0bO0w*&KOx;o-g;Sqkg@9{+QsF3c!$RQ(Sz-?#ppA9hEEm9 zNmY|#Buwmx*(g}ZePCuaRYov!Qb+~Zg;`TCOQdTFEsd!mPRo1xSYjO0_GG{NYWa^3 zdNT0~7w+8C1k(Y*%$biK81h`aVMLx&Vj}P!4Mc2`2@z2Z6vY(HQYrI1$F@;~VL$xp zH-7CGzWC)AH!G|mX;n}bOw;gWd!tnXhok=(I41$rxK0DNt%1hIBiWn^J6}rS8q9#4 zGsJ9loYG-VrIcHJCuG?~5P(SlRm75&Qc97&?FlikGxN*ka!T_mE;kpKceeljdH>S4 z`akiGe`UzqbpZ)yl&6kxY;V{7g$@^M&xiKRGKnjMQjOAn=cxwoU%QPr-``fr-Z#s| z8UPt+9<`3v_dYluqW7K=O^86%ch06mM1l&Um3b7MRroyirE!b#hu5o@^M1R&`9bCl zqC}aU-<0`anAt~g-C%%{*^Cj?xK`DAoghowaML3cO;k6FbaKm=%TTg!*f~;Uab9W( zCLMb;$VoC7v4@YyBkiMtQ7E|2?#2CA{*BlF!Jm)I@$t+5{ae5OKO3*VH~yjjL2j2} zsvbk~REYp1>G+YHr}vn}$c&jegGi}_M8sMrB#`Cf&T4aK?y#{2?Va-<{rhhJx5sV0 zhJC&EO4LIEG2o;3z4z63AaW_*3h$Ev^-VnNYOSn7_OuoK7@Y%H&KZ$co0C%Y>SjC5x$PHD z1~aQe6$KFlGX?_!(5KDp-Ze>t%{{XLVS?8ZggoZYD{d+a;|s>Xuz+gH#~$I+r&_x&OE!8gP1?0@&>8~?#? 
z|Ms7~xBAcj@)uwEnLG5(g+qFDI2gLau5^ogFTUgtsVp|9_io?$&TswJaF|x>PNXc` zZkz|(y}8AE=*k~><<^@IcI@iE@Jp}1aMB4iX8y~cdwsb)dE@Q3$kVc4Wf=%*p0hAv zbWH4mC)8=0s*KLLYKoYTa)5@YK+`md2qGT41X4-};FygJ(X`eh0T~gQ-O;k$d+)s~ zrHH{eP6^8}42N+T=g9$(0$6r{0!UzDmZvn#6C#Ef$DDTKq3afLr-#E~s&mznN^#C1 zB3oH3SKF)W+@zy0w+mjiR24&-=UEIRI{t~#fzs&h)Bh1kbt5ti=y`r6F8t`iXk4Bn@4+HDW( z)rqDO*e$z7o>Q6LO!H>5S@r!iO*h;9SQ2=?di)5buKRuw*1PS2s2~v{9^-ie zK0i6Hs$z<299&>;YI)9Nz(jeTnb|o9qBWOwzZ%Ot$UMpvFq}1^Yq_93xma+QSBuDq z^P$u#O#@BKsygH`sY{yEbvYbHW^Vh=5KpKdrzuMcAwVF?!_BarEXU~4=ui&h&7|!5 z<)T~_o8|HLao@J?{MO0I2@#F+Xqo4nhLTiavto zgh5xgQtbF}8D#$^EecwP&AN};G=OylD%9#Y*evL&YOeFtuGV)>&%>e>f#BYbHS4X~rDt=3()iu05V@YPrD(#`PX@udyBi^XX>KYSb- zX-3et_=OK!KWj-91F@JDmD7cupK_k)(L0b+9yJ5g*=y|2%Y*OL zbmNgR#3&-532U~3i0GXM&%n>OdVTUi*_$nzn~Su1jl1Q^TYvSEzwq7v^uKnt|Ftqu z$XA6rt3yjt(!_3Ou=q&M)2SJdAcz7XuyaP7!9vRrU)9iGl^8F7t9$#KcMgBc4UZdp zXRz0IC!ZhtU#~3yIDlzlZjavvhz5p$28L*eXj)6kdy?qPzW)YCNm8=HdLPrWb9(ck zTt-!6=h#3I)QpfS0*HQ=;{7M!srJItCQ~W3lu}Fas@{9=eXSML-Xr)&=aAf7>eYis zVyJ5K%|5nGF8j7!EEX$5KMg}I8K|%`A~>2}0s^E0Y>t2co;I2xnHd;>86g6qB7!I) z5Rq7sT!|orU}coU1wf;2?ObdeXO$c67vK8r@U^eimdc}duw}mcYLhF!^@B36-3#}( z_@K{|kW90#`^_N@N2!IUrHJ>*iMnI_Z}6_?w0&r1eV^eoe1@MgJk2JE9KBn-_XQ!P zbfT)htnMyv-~0OR>gNCWgKz$|fA4Sq{-1sI4}JOm*H3S~cDsMLztYx)ra5(+Tc>AF zE_SAj9BM6~*2L(T80m+H!>M(J4rSa0y8Ht#oGoL!E3j@LcFlf2Ekamz{g4l}R`1+$ z*=-L)MN7E=^FR#09;V4hS}uDAW*5*g0MuHUJUKR1Q;ETel-;gql^EOgYL#;?S)zA{ zs928u;T&J-oWd;W%Zc!sqW7vG7KQMSX>Nv9o|<^8*e*yqHK?|a8*Si}Fnh&1x7 zR%47AwUpAfttT#}h=^zBy=Ugb;V=xtVzB_QYHDJRL)*5cNGav${v{%S+B8jQLMf$` zBmw~7y*HC#*dK;^etr(f&p!G1$)m^K`_0)Yn`Oq7l7UpKIaNeHcF(%V3Lv7)9AgYV z98;35H8HeZ(=57PMDBLiyWK9AdD$(?e!r&paM_Q3`HwCh{@~*f^w#a3zw>4P;4w2w zh|({l3xp_L*J-IS#;)z|?(T{Z1&@e+8K@Skv-e?|$A3_ss#}cXe7)PE z4@^9Bv^lT#{cXxN*dl!6F9j1er)?8XgL}Z`##0a67?M=!wzued$(9!c9FbX-o z-YhncLg=jjcKwClJ^PKj2X8*v?JL}ht4)a}##S8K4j_J}rd|>3(`u(81xV{mxjiZC z6F_dw1`*@+1WRx3pm;OLjTivj)%h&6m3-qo)G{GT7hD@30IpYO(|DM30q4g#F{a+P zB#PdxSBsCf7xUe)*gSNIQHN>s)!dx1XANxHGB9AsPEqF9m5@UfhX^k#rBsf49enM1 
zL9JmlsTL6e2Xk&Nc^z#Kk6onw2p1pG_z4@DU*bgI?$ZPJ-1E;HX6A^H0zyfpW`@yI z+4Qh;D2P%JO^e8s>TrOA2-T)JwL%Bjj&tvA`q$j=y-}b4!_8mjuzFLk7FlGHyQV)| zHcuh$4qG!F@)#&uuEF^jeM7$Zrsg7Ift~l>L`B%lf(KDG1c%@`99xasw*C3_@^08x z(>8YAa4FtYx5Hu4$Hs@~8`WCbRhEjzV2X>E4GEbV*SCI=bCy{KE>gughvWiBhl*z2 z`KcaE#mwfMq||Ym7#Tc@2pXtrB-h2J)+`PdenAsYXPRgba>~%U;YHv z2u|)$`Ff#Gp&>J8FiU~P|qzp?wUl~@p_<`>3*q2f@IPZ~* zQ~(1973Glk9shW|uYOus0CG0zHxp0)f{vY=AUp<7AuFeIT5quM% zsL*Ss|9@=EGlyX4l2&GFyG3pC-=vwhdbs-1;blE+0EQ4< zC@Ayp7ILFlq!cjn%$^$Z2sW81e%hJK-WN%Pc#O!4$}7d&FGD1K-^p1-PV#c?L)b(s>zh7=bn3hBtEN1Ktv2EWLh+)9r(puIi+cRy4b9L<-L!e z{p}B?|J(PTecAucfBSF#>aRR}w7a{Ic6U4KFZ|5Ed~x-D+xVOCeB9kGm+QlDcX@f) zb=@OR!ljAJeX{k*gSXB$H+9>jAl|yzM!(PF{>j-%vwZse>c#GGJIuSydiC((<2jWd zeE7l5b~^|L7nhsfIVU0&vLOg8=bTH*>|Nh?$7s

C1r3j);ayYcj2Q z*)PPD8Le6*pYn`~CO`l+7c&dNhu{}&pQYBSVk$*yE}nb{UP1DlrfF(Ibi_=Y^K^B6 z9e~<)VIbo;o^Hq^r<5v~wnR&!aYBU+$(_pkvL+l zwMAB?R7*vhSw^poZ_+pg$AQSC5_l2m7fV7d%X)XSog|%{oU|c27d*SJ?HAnw1#8NS ze(icUr8%W}pQrtPpHhl3cCiJ3mjY-^(=_Isb~`2Smdj;I2?8t@i<~Q=Qw2nf(dRPP zTE#4cpqfi5G5RWb8b{}xq21iwG%+yJ&DHhU`N@Z$e7x!x;9Zd_CaF%PlqNKx@s7Mu zbq>fVTVtko`)x@H5mzTGEt;m84aad*u=CSX@^m?0MgNEcy*rlc9o6*l&p)1i_N!li z_TgfEg6lOq^JT75ox#%CNlR|XvkSNmeeO4kP({3VA76Y@N=eiFmnLF3YDpUcIS!4NPmKYp2p}?Z@J-jY(T5k;pO8tzp7AIc zr=Uj8dscJnpWxgWmtQduC+}dn0JME5%Gek7{)SZ}# z9Cuiu%*rR!pBfbqA-@`I|4x36AuW8lnroE0ZZ^K|=Ja9|R0bsT&LLUxPKf)^e)7q) z{eFL_soiW+pmE$!bAHgT+6H()QzZ67njf79ws_I~#j|=L^iJ`oRJ?~;P1HC*0tm15 zoF9|=ugawKhs%}Mn=jP7Q6(!32oTIH58EW^7B`%0*nZ-sYab)2pQ|=d^fMds`-ZlA z?)m3~#OkYYC5)jo9W1-rw7N}soIIdouA+t<`vsPqhr9k{@$ku$fbz*Fw4FZqSLWaS zT7Ok#{HyYJ|Mtaiyc^HXPk-_2i|2QTog1e_+=ZCY@-)g^TaM^y&M7bf9;W~~=GwK& zvGLD)hfGK3bLU)l)?91NDNSW+ee2oveeXJULZPS?6WHXeC2wl_d=YyE8V|SQzD!19$~|e&NVz&O*eEqjBWhgOyn^TZ=r2Dq6qP)zZ3-2ubNs zEvVA8JxKKHGv~e5Z`XX?xmBuS!0b9jsI>s(E;7_F`U(ECWiEw!Y|`Blo6Oief48j8 z>Nr<=s@Lyz?PJ6SZl2T9&)5eM`+~A3L^O+J7hMxTZBQu+x#^vNk}Ruqs0L_o(G%)mbvZ=$1SXG=UR0qxh{yBTWzITlxA!x_u^EGJ~a$ z)AA91`KxufmFFLAifVDy6rJ2|pV8=!^+d1CxL)xxKL9V6$3HxV`}ONR_uTWEyh?dG z=K!GAstz0PM?@~<*`c>FXCLs5r*A!|HcbAbbo1Z)&3FGRKY#W&-rUdxtnp$zJUV}> zb-uF~rIhE-pT~xm%aw>UK2*g6h*dmWp5S`<#!jyW+wUgAyG7SLI$v*xaoxA4XW#g( z-~K1HrPXq=YMOQE-4po8zqlFp``rvEN55Z0?+6vu5&MQvb+g&jT<1BX3Xy9$Zl!&*X}wrBAq3}vg$+~{gNq?}vf zd^$A!!Saavb3+ZWZ8+)+<(!w@;y8M6;1K*~v#GV#S_Q0a+hg<4 zm}VxAjwRPDHBb<n*k435iq>_ke&XaR|a&mII-dtVX?6&!(wJPiNdOr?LXiHVq z+I8LO^5lGVTB>x*MMurM7w?VJlu~L!R7L;=&!gq8>vBp&lxwNQkO7!XWx4FL=sXrx zt+j$8s1}!X$P`^Ag*h!kvss-KqcNp5=VDUijdz!S<-2eHi~XYOKYq7^k|M4$d5l90 zT2YIYV<|npYy>L&7G!OC%U zhWO$2MU2sPp=;WOfdh2eO3{NQjV@0In&vLHLRCt3&ZSCo<>fLOrL(sKN_jE<+)eve z-hpw5KsE7U1QSIg$r1eP))o>Vs44t#cKFdo074NEhZ(@{z>^P3b}Pg!rMoLTY>{aM z2!&%$g!R>QbUzv0bI(6FAk@mfP=j5oE`(Lj>@f{Q4q3nyb44cXJ14W%!#vOLz5nrP z)J4Ppwbkv$BEmQXTGTNSV5XHEeg<-Cg5#{{FHqmhktd$6xQh{4YLk#;f5w1FTNnbis`;iE6Ov 
zE;@e}r+)Wa+cdg%0cswcW$6|zR{+peBaQDssVOc(n`th_pZOiQPK4FN=fG$-f9lDB$HX%x>0f<}+XK&E?sa3IB zD;Q{tAt9wQPTVfS;zZUD#@o!Z4jk<4w151B%ff5DG13b-?L;utkphamZhzp9={UVFjMjD(qojGBQZa+3rF^l2 zC(zOU&3|+C@Gqs!uTuXM+SQeNC!`mdzGeV%eE*2#oO6diA&)Ff&jCzTY`4CF~rcT#SWBkF!w=k8RM0Tr%+i%NmdwBHt?D6A=@%9!8yQX{V z$p0J924<4WTxJIJ92_xwhelS7h(hqr6RNgt)IrQjaE($RYleB)YP;KBBb%?Wbjz5o z1>MLMv&<)Fi|rMBrkCYDD02^dCPQ$a75D=Ph&Y;R7E4<;CG}INh8eqAAoYh!~tt(>$f@94{7&gU#f)>pJ|jj}{SA zN&wcjt%wNNFbv~30)Uy7Qi@3k06?XbX_^3RwO;KG`)Qg)yBLF+IY-RipcuO@c4f+v z=9Bf(99-;f#+1pZU9_ppErbx8@h~}ZDo|^!lF>U3OhlH^0LbCh)lJQXfShxE-}PO0 zx4XW*z1=KMug1rv7=EE=y%aaWn zFxW8+WM*0o5rObwySghwv2wm#ef`1HRl9t?-OZ)^JNU0n%hN;qWc}9L?R?$cUd}qs z9frAaINpXp;-k(5lu|qO+NM9M$%g1q{2IK(=76#klIm^$!7ohz-bpoT*4X!OGf?oF zG?gj3?J7pAGVczO%Sl-Dl~g35janjbLt#y!ZIw5|{FgTp+*9-#g3F8^pXxo3kKt&aR!Ye^w{6>XotdHJRAvW=O{~Z-<}{0C z-;VPj1v9fb&%rxDMhMQYJi8dCl8aO`Bap_sn(8zwtFd>*hZ>Ay$fZnDq6<p2e7)=@R4@8q+0!n5C}2&$-NrdGJt0M3w% zs4A)rWR<-)kKbH_Hi} z^Z0y@(*1_rCne+8jPxhW}dsBN+hVQG~%c2MYW$ZeNd|dmL}g zga`o4YVpLN24D`|%m1q2eKbPv{G78Hcu$II2o)>)zO?3dX|s5+*AJ)Efn_#ErdKul zh)4jYYHXjLjsXB3-K!BYHS4qW=Ir$9>T-LP8}m*O(7I1t1$vmv_{NiU!}+5Rqd|5~W@!z~ALWdMnbEPT7EodWW)a7j%jc@$IzxUBcfA1e1{-e*n^WXgSpE-|d7yRSJ z*Yjn6cXNBQORGg6o7h6xPrR>u1~Ajod7f%LS++e&N_iQ3&G6QfFIO!9@YN@e+ZS3@ z+T~)q-{q8_JUPkJ12FsW`OR=Q-6m}}J)}8J)04APRE~>9%^F=?wY{0O!JE}$X)NMA zT29gIXWsnMei|?DZcMRhYiOEbN`|mlgj#hT6p@jE0|}^!NSW0t`xuCBrnw~qg`D$H z##D1$1TiR5YpT)3`Ql&#ZD?3sPBqO%Rd4K&%FMwVaH)!9PZwv-xsoNNG;W7d>Z)y9 zWnF}Eo{?#sa}h<+^*G5tq1l?fgEcZ zZIhaHX}8PJ4)qQ?Zpa11oj`Z?xXc&d`{dch#l_j#$%6+E-adOUb}eh}W7l|w3V_80 z@4V}lO`15cYwC&zQj6AFQ_AF+nXP1U?AdY|nR&b2 zGI<6Ef@v&_-pp!}Hy?jxf7s6R%)z~P@hrv&hz$p^GL>8phU#j<=v?5Spi|nLR0Z2C z7Ae<@>njFy-Vb-Q*1keEa+#|I$~#}!RT4#>G+*OxZw#pEDj_vmEF%Vwi{l6^Ck z?EwFvy!9>qT6+7dc)|_g11s+Eu?`0-*`p6NRH_1@tpG3s0I>&vV{YJO6_FYPGBSwO zVG5L#q4?+_lC9nTy*F?E-LJ(#&70@z-Ozfh$&EB5sF9xR%X?C4?^>S~p-O1PN}eXq zG#(DZbR+qZK_{#(+&O`X-&{=FF4`*1dv2(`IQ`k?$=cCGAN>xFgD;7h;#>vW5%cS6 z59mwRk(Y1&(YoPBcwc6$!9w65**Y?JcT4BzTZ7e$_uLy_3suZpt(5>ek%S$Az#kfL 
z?sEh8-18F#efm}^5ut&Bn3N z099NCq_%dxU-K|&r1!tczjvL?z+pvoxuI3?)XHXnasU1g%=2WqYwM6pj8Uo{oVw1Z zX=+?Bk>a&zb&gpXOaaY1_TJwe4gf%&oO4!{6$7xSTucc7qW26q9rkUE=d0D#)zv{^ z$$o)e4U_03MOB6PQRu2jonQLss@VvZ8Dzqos$-_;!;lcF1=JovUv(?&QVl z!G{O;C938FwadmaHFPh5iyvs5^ zY?mgX06aPMQgpt&-sjwHGqg=O}ucymTb|3z` zZ|uKwwu(0{-@fsm9o?HvO@4P-s0+5!Cgw0I3yK?wEdW47g`=jR`@C;izW;1)PIgru zUREPus=04mI0c(f3;;mwrA;m(e7<9V_x@PPel#RRGDHSIGlkDoe;*U_!*B&p)WJqU`a z=`s8dq=1M-pWE$G(lnsP7#&g9w2q+W0`gK2>G&{s;$sy|8+@6!+56DPcIGsN;w+Fx zE_}1+fLtIWCB(j2nB=X?%Yw&(8mM9jM@W%gxGySqC>)LPw9e@UbT0<}z#_0uobu5b^U0G(`!W9355D!AkGjA0mG#e)3$BeP3&C1?1&+!dYDGe^x)xxf|+SQm)u6IwU#O=rM79>u1&Q_ zo=eeMtEwK$j%u##93ZFy8KD6Hx!?o(X`eyOd)BJ8YE==bE-s7IDw1{y02<%K7}2Qj z`!o$~JRF8`n2){=-g``BfM!~Y&YH|4B6{yjt)AErYAIr(2w);r29?!jX(sbL`LG}Y z1SDp%$!QDW;W7^ex;e{P{!TpmGTr)ebz4sep0p{Sg>j6BDF`L-02~|vsi7sSLZF1< zCvIhQ&plr}s>h6X#^rL^ z_lpps^ZjwN>Nq&pga~bnqD=)tEPjBZB9&0AR#(Q<#U73>HK;@c1kCI`#I9AD=h-pWQjjrC^YQnk6jg28)_ZSK#dNLPZ810}lFbbGMH zx5KYK?|=2Sd!sr&TgRwSra`71))W{xHl~vf?6Z%E-ynbNXhJQI2@+AID2uoJMDnns%AQg zHp`sT?b<_JKG5givQnsI)e3}wAOwoYuUYl~ajYOy&pQ( zyXU0WnN*Y|21}(&^PRcYffW|8CdAOhE;M4QT*-0H71RvOI~UuQ4N^$~h!Bv$%mh`; z)Fj4OO3^CJ+_tSFF160n1OUfBB4TV?0LZn5rfKD6s~j>_DS?>T1>%C3MUM@>kU&K0 zm_@`@@=cWJVrVA=B0O2NTh-^gD>5>Y8AL%OaB-NedL}?5GiIKor0HO0OCPJrco=+i z2(Y`o>$>jY`Gc#gt03#`Jbip~k#&0bqCZ)j5nXf?Zs==|PZn=NTI0v@@cq7QF$Q(E zpL3}rIrcspB0FN{s=D274~Ijo)qCGG%`x^ELfDm=gI~4FhU0#_t0e(ZArNYs6}0ei zQ$HBv*>;w=ADffM^2zs53Dr9l!Z{mEtA##&v|PLy`$3<*$h!;q_&a=Z61(%@SFvU; zEOX5aTEsCiS5LC%tvYcaIE=8p!+8oN`=&kCvHi2dXmFf*{*X@ipDG@>>M;*xgx0V} z;v_=I0M*QnuL%PGXf-$e)o!)@J6B)%S8h(;X)oT3AzQ444^F?BaxO;i=i9nD)b8!= z^!_(K{9k=_|M&W4#Ww>=m${kW_*>mcsQhHO+O~&OB!JrSo)8=_fdIU$UijQ)fh!}g zHXJbD9>!(cn1tAd_H0fsRI7d(+Ga#W{9{Z0)l2~l7y(HEimIwKpLQGkgjYflV5yav zebd%jvjVfH)`9#7cVtN9g%@^URBAlnTd*;&GUpvwbmw{kK-)6t7Sn% zB#f@2g2gmfg~ps#uKm)z@euB=`^yU;JkE$OJ-Yx^4HcMC%*r?$p)+Krqu--5M5Zj_ z95IuL072Cf?!B<@x#ynOgpv|rMR62*??dxoyM6F?zWe+)p0vRxt%nK8wad36(5}2V 
zjEiAdE?3@jt=0QR%t|RKP2hPP_uKv0^=*t1NmBMFr;qNoW4*mi!)OSBLrr9}nZQHuokP#via4Evf(RU78O?oWODpo4glnWv{?>+il6EgWeQmwIX#xmt8AI3dE zZG9*uK@$NFrmkKcbass8k(=HL&Fvd!)5XQ%>SI_pq3eAUKsrGjYauDVWZPYXmOrJ_RwHd6*X6rEEhA;)d@=G2`onoPR8+YVWm%Vn(L{i{=C-OEqYbWIhRsiOoMZ78V)nzG>)k7_Ss{d}2NQPBZ0 z0$CnEy(K@{=>a;nSAv;|ngS|8uD1AbM3uk5)W|W3hlpBbw)yC>MnpN($;UE%|D#{} z?&r@7<|xjOepUkJ}6h$!k1ons<0Q8QC8JUUMzIASnB zuo!%N@<#dK1G>Dd=&@^wL)i^uYoSg8+?i{dB`ww?pc`{Pmct?(m68yVk<1VcJUbB$ z>=chy$%twu%7_zM=NydL6I+0!+AzmC1H~`u7Tj~sJwK%&a%^3d8BxnPx~|*2@y>TY z`0l^?fBl_*{a4=l^<`OVnfsG&^WgEaD_7;zw|94U03e3Xo_(ZXF~(9PdL@GEo2%{4 zrIgm|W#2AOPft{?(li&TbIB#8?cJUXRCO%F?d|P24^`yp=FCbxl;WYzbuhzu%4IGo z%!{t~t(!!cnFD(7RkV~6LTF={a~kT9svvN14guH^34>GsGJ|8z7ZJ!YkpslGcZeub zfeDYcy;WJkgIoDtt7xiB6hh;OQ<0_#B^3Z`W79Ove%O^XgL;8XM19|jYD!5;?Yd5^ z4!3uG-+LfMsAw_9ob#L#sE8>5uy+xms@9rQO3Sze1TqjaW@hG`YE{50eb-r)aU2mD z4T?z9G$Lq(%uR~FDIe#))ydgWOgzt-999KW<*J}ii<;JqB|`y*oR1l7 zCI&zR1Rz6?SyGh}LQs`b%DV4rE}Ys@ElOj0#}gKPh-VFEq0f=L>iCwooISan`0T7z~;VN(tIgy%j9!p7P zfWoW+SP00-n3)XJH0O+n^E^k#W15-SA)cO}-QL~;z%UF=(=<&JV?3_CLkO>0lNXDH zs%p;b#S#trrWFVK{l0J8#bR-6JBTql=W@#4`}J};=WJ@jFo={GBL_dE8EToN2IOVG zShxMUU&Jm<*SlO&6u2{Icu6` z9;cKNAgXE_r-uDJ46DVe=KQU1|DG~4DF%tRp36u9tQ|f`yD?-k5&bdlqA41yzfdPtv5g%)$ za;oDzE&Js+-~7g5dv$x5ebB5%3ShA|=bhd;D?o%yf^aOPWBVc%bXma=kE%;#U}e&q zYUKDQyGrk#d+zyTP<>gh`bvuM_)jmU9*P;vs^nz7vBe53I1U8~%~YU5CFG-CG^m#sd#NT(1mw@g`jHkl(c4QLZd~SdNBc_y(kh}5$#t=5V?&OSh%n@wbIL>%L+hNY zrR=A%b)gOMn2|sLhs53$sor}A%%ApDDFS$I+K9|YLrrolsU9PjstRgoCb zGj@c*`DN_uoP+3Uu|%@2X^T`r0z`Iz!4KnbzCHnet{7hIHComx&4=&0g{@DQmg#ak zyg1yntCMEahZ>!8zKNw)K|Sswm?Is-K#XUBRYFq_g8YbMg^kA$oV<*i|6UdaomwRdrK=Ib*FxAQ2SMtay06hBHDid)fa@re^eK6#O(}j=Wf6sFe+RZ(SZMuhC!__A#^@tPA+yI{xR)b?Ye`PChQuEU)@mkCzPWrMl4P-s6Rf^KNi4 z!ZP}kWtr88_59Q>2Ll8!1T_PE=~zpjyW}wfRDk0;WQva&ZOqr{nlPA4PnbFVa$r$)~y#55n$yHLkuCa_m|-f@rE{0G_D6|AUw_#30K}dGie{#r^*G z;5iUi>7U2ci8XqG#?HJ1Q1o{ zTq$LqXC+lNCI(Oug-`tsUv(&}s@1B7#tuQqGZB$T5mAxjuW6NK+b(^htVOD-idl77 ztJUpJM0^w4rg7dQVqn+ADAkVsV{I1!b-&*`PgQcR(gGeGei6|T7d+;8!PHc>NUf^G 
zK!)D?Ii=1uA%vn;YMpA$sXFJIt}UeqKnTG%Z5pRCj?sJe0q9tM%kw;IHK{hGkqtyk z+qPy>N&$d8r*28$2?#~RnRi54i>M|khUx+XK(5IMW80X?JkQI1p#aYN^~nkiOiJ7K z*r-j@erOPiYj(Tko8Qr2ndYw-J;*!-0U)mchj}tY2#btv)pe3)n>|Z?Sqo-gV3XlJ z_xz-zssx0{RV8?gjlbNM_qX#eoORQqD1#PY`uaT+gJ4|e)%2*cdhGwlTfArBu z%f(64H2dA5l(Jr}i0JnARz##!?>$y+*kjO|@^{|*Uf*`XaNRFztxfQLz5MX%#i7nk z6SN2-Hz5RM6l>W_aJ4F^kats>Z}wXe=@+ZB22)yq8>vY(q1Q=q^IiANAN0S}opvXS zfWrvm1?s^$m-y5Ih)ER%Q$z+rIyS+-=8#Au^FNsSB`w0=QN%VtM)UobPz7?8o#YEu7)f9;Do!h7zy=SSmJ$W;-J5jn?N ztJDhQdS0K>W>b-L7{W_&HUa<@(__`T*|A8}Iah0*=S*lJgeErQR+fi@f8(3)y?ydV zey`gOct|nNwLcBWWYC5vEW}(@t4Wc}M19)<05cI0c;7SchXWWpLjqtV1|VQmAv=z= zdi-`kQ;`g6V-hph1Y&LwbIxeSNKge0N-bczh^?xoQaFYs2SZ?>${}f)a}p8uF7(k0 zmMPu7xN7cZ1lh{;s9(ki%%qCmGa|YeL_|@YkvXWI7@?`IL*My0iaMmwG+A=3)jLN` zOT&=6-MTw zH0~@Hbj~~QROI=)et*#YRvhR_=Uqbx$Ti5=PK3{2P#Rno)9hG9V!aq05-R{<1t_3I z241DFUI{J!>0xGy1l`hmQ&q|PI!_Qtz}OHF13^hx6J+X@Rt|k z`B(53n#1qcddS3tGK4_-#Y7hO;96erJZZRD_yNq#L)uQ;RqZ4n)O^1h=YClRhWSSE z`_TV|S1XYp?lV8V2A`WApgcuvt849cF8EQfrY=vTTN{!XIxs{e5HkWm!_RF2P$Y+h zpaKeNs;X6l<=ESCRI>jwcuIbB2nn#7s%TB=u-}?l8h0W1*ZP>Bv~xTv^BjM8S$!!L z0MK?ABi|m_7Z*~U4+UGNbFGYObhKy&Fho?V^>g0=0U-h@`53roW@JL-W9xvKp@FHG zs+m=lVi-MwR=K{hzO&83R;`;WTZt}eQIkw$aS?P8DrYZv+?4@P)t9xQS=I4Txk!&+ zN{&eh03uK&*gQC&xBGfigNwOVv7;v1J@?#m&uf6uGpR9vGBa78OPZT*ea4c${%il2 zi}yeJ_n-Y2-U@&D>+2c`b`LLaevg@vVNQ9@$+_S|BVr-OW6Y?Hi{UV8t=-9Hu~>|9 zke$x+tg1l+BJQUlrSxon_4Mh}MZ3vUVZ<1t=5o8;HBIPN-BhM2r&6*4Dp-{|&vVM@ zScl@hPg-lOM`_W(-uqxCxg=Kd#2g%=5h24dZ)_q8RuQU6Y1@{VkMfa6g`S$r^N&7$ z_UxINx!~D3VDd2#Q40adu-0kLEgj{6uv&HGV+gg@Tq**a;&DTQKFlREbK`<@&IsGs zkV8bwwUkmMi&$kBh{$^{Xve;>DprfC>XZ`Kx;Q@W-fuvjfS7xvSP27TKy z<9HYp%p8AmeYp;Cu~>L^DNRk+BH5yC0aIv$IS>(MbQE96}HamyRUc^My%!u4>I2`rdy1vs= zUrIWN_!y5>lW0&xXCO^L{Kj zGqVw@LQCFz4(LwbeBuq!WORJCIlJ2(e6YLQJu#=0y!WMKM|^g6#zeQbw_W?_!GrVf ze)qd%7@`Z6%A5e;`Sa&ty%^H8tpoa1RbeJ(MnqL{#O4SMY8CH%%_ZkN&2!52?UO#O z7c$hldAIaLW%I6l`2+Whys)?+vWezvOhaKTu1O3Ysu>_48Y3bDn;B?*-EDje>Dv1y zTO(+VjaQc^)lR*ECk440b{ZN`^ho=|VV-k)x^xsgg0O)ZqM3?AR>Ix1Z#lLUL)%;* 
zc6YnGV3&s^7Br^>BgZ&xyKmCzdHMdgl>q_G01ZI^%m|PWzo-rZLqI@P(W(UKoe@Aq z`D_#aD@Epe?z!iu0s;hj`PK^9r`7qe!xF@ET-th_`woGr6eOu?QWXt}h`^2wY9ex6 zWqQxdv6e&zq%!Wd^LO97Ci+WH+ON^~7dRCc%P=kQd|xUeNG>L5rbR>;m9dX61@Tmm zgH0aisnlx3MyO(Z^iLK=11K`-2mrx(W>x@IEheQZ=FHxUNG$~btW+(v>le(3j&n|o z;vJkuz)hYncwrxr!kjpuX)YWRWSS&2O&q>hSBDfR27$Q_!VEaH_+Y)z6d zl`RK&1-rrTu!YEvjRgstK)N?`w01^$woI}Ic&JNJALf>AtxBQt^JSi!vRRH#l zTb#3?opZ-$k&!`5C6js3t52}3beuIoLnJ7m zQeRwFk+@lE>u@)qi$@V^0jpr4ZI4A*o=HUhsSZ0v0rP?esH#jJoQHl991Yu{)A@_O z75Ch8&rcb(6b@>Y5S@tumPH$K$uVmuFK~16d&A;?^8fuu|JD8Uw|@0Yp`)%{ic|$T zJv|4){cZrFld}g>W&qHtNci-PCua5L`gsU=yD!zW*6N(QyStlGHnSqN;dt@stxa>%*PQ!+O1eJXjoAGGEv%>V$7DrhaK)@o+n`=e!3Nm;BCBY844 z1tSJ_C;|XT%&OH)%rr31;{-^|42Y?g#$0S0U4n=-F{zc+g97FbE${rNe zaWD`^-jNG!Q)?xL;Jv7pQn(9J3L^?wWL8yjPPL9@X6B}8n6a83A37u%ERz-w~ zoO4{O0$lEHJ<|r6P=6$~&wK9q=O095#HvcI zs+KvYT=t(_UOYN`vu)PP#Yt@L*io(duss-%W2P9Lb15Z6%sHo&ymJT;LKw%9iQ2Xe zzA?pVEYp;`t^;#KL=L8D0)Q#a&z?Pd`t)hjwU;;7i^T!}N-4n9#x~D+n&zfyn7QP7 zeRXqodg{HGQXMff0ucw#k_ziorp;=#iZKF&roBE4^WhL>sa?M+};=7eYlvfeCP7s@>4X-sKSDG$~s!TB*C+I~fjp@e4_FQ)xr zdD=J5OU;Zl=S)^hs%TK=8e@xyAa$H3HH$HNA9uM>%~-JkLi0a>pZ|{gC2daHmMWke zrap~6^57dlit{M=vU&*sRJE$2{wUk1tjOxBN}P7UIX2t40)VQD zKz(q`Ad)4OG%+(d%Br>EvI!ztjW{&(oQINqzwk|a{p{BDNDN-5JeTW7{{lNDrHV{| zqzHxxV1$BJP{_WR_5npeJo+P>p;a+MFe1XAbQi#T?z!i4K?MT?Itn-g5E3Dx@e=#q zwae0WVgxy}DLJ-OM3j+U_UInH2LOywR0)ZWbt!0RoG-5~O8)2@js1r{c=mPp?jUaN z9_9J!K)!7~8`NqZiPTcAie`?I8Us4#N={iSF+1nFz904@8Az#)-7!GJ-mwMKN`P&M zMmU#(%po`fY1wC~MXJ<_h=B-*y}?nJ4?^%rM+Ip_BxVZ)6_b>XeG?i*0057`v+nDp z74noPq=@mcUl~>jUMi+)2g!t7oiRJ_T$*RB8jwA)@v%}$BD(^x-|u7Nm&-*;X}8-o zO{=P*Z+TW7a||x=B+gpomRCMcjqg}Pi@~T$OjVeGro#}Ysg0p)TakiQTocK=-EJ2` zVCH$wA~okcPsw}VbPG3TofDb_7d<#Iz~Ep<7NDeS`|$s?S$=hdhec_%d33kCk>!fo z6`q_*>m=`OnE6oK;897XdNFgNPE<3Gd3>;u&Y1&e=F|uPnU!dkqH2>AM?Sv1>Z~F^ zf@SBcc7Z?3^gsm(L}Hi?F+>dux!nR3LpZj35*Wf~>=0fzZ9nhxVwIW=-(Gd|0lH2j z2fPCde>Nxh2UzG{-3bzI4*NO6h;I*H`Nr_%U%z45xVf2Adk79M#rcYUo~mLFaEv8@ 
z0veD4sJ--d`rPz@_9y$pKx5uTSSfD;+bLcTR=kq{7%af+93lIkBbOsFcV5iufGJ?0U}6Nw2K9P`x#Xo$5|X(~@b zizRiPjK!sGs9GX+(%#kCH7up(tLrvp+)Ws~$;*L8)r_!dTk>u{%|$Dk9qX7x;_@$7OnSwoTtNX@0x&!MI@JU z^t?iVT&tDLzEMO|tF&bQQjsDO zx@KVkzkliGxswRq>8BS#^+IHkGdnyt>`#LV2Xs;UB!^PZh^rM@f&&T}c_ z+_i0RKIi<&kY0Rpjp=4M`Ni0scXdCE%SG53wssCWPXU`0RyDAu?dtMUeXSld0ao@( z0nkLU$;%boJ@?%6hfu8w$j*UCPKjN!UA_5#{5${FTmSK2e(UWwE z)oR)Mwwf;1tFhFE9id+@mXY0FXH>wd;HXGt_7I$?c5OGMGS73{wu*3EkT*>;He=)=JK`m9xKO`A{t}He`T+glwonma!;m^9893DF_)e z5qkmvF%?o|2l5B29nI_~+ZOPid+vFOeHz3$wha*BF{`@j&r26)c9zwNfOlqCN^$@S zrV0kCpaw`pL(BztGA}Lov^0D=T6F7uOgp+ z#_`XJ+A9smm;sXGh zGh<@S$fViA(&ib7jr4f>pWcq+YW;IRKNQpGDqAx1RXv~(9=$0UUTRStztI5LI~E5G zKvjV3JOEe@0A??IUJyzu;Fc{~+FkfSlOQ8Dz@SEG1XQl?5OEP*_vN3x{O|*shfp(0 zjiD)s-~wQEa7o~?^frh>(nlb<`e(m>GS6w?)F#C!*aP)$;|A1H=x!eoBqj^G{KyzYu3F! zy_-32o~+W%nC7Wz8Xtp9^Dn;nb}|W%A5YV;_U&eM()i$F^ZfGSgS$)eZnAu`T(?c^ zTyS6=I>>~M(Z~gtYO1w1!6EaET|N54J14@RD8N&xr0U~hKU|tj9(>!xBAMf&g5;dl zBs6@wS~KAGuw`acZqaekF2tr?IC6%7MrEFAtv-4aW%VbklagmkQgJ?6_cf>BJpyQO zhEQtlqf1g)%Bt&w4`Mdg1ccEDkyTYHiWv~7F#tFRKE#^SvR|rdq<}~u1PUV3wxP~I zhKK#0R;yX-p-v$LhhPT52ZvBpf+Lac>TV2SZu$mEj_z6WFrLPxi#>GBVLwUQPGz5K zmEf8dwHhfo4l0l}n?qv?40%qDV6j*_Vh~L!BO<2lgNG;W6yBF^Prk>VCrWr1c~z-Z zD+SaGfXn=Y_YIOzg91?izzik9QT5?IH*n8Ae*^`Q0TLORIfVVNbBBX-bJL&w+v{Kc z;Q##l|MUOS>MK9<3!nVXyS#{5ca7n)ZPkhk9NXovKPl!RM<>8QJmv(Bo9O~tK z^)LO}-`qTTd!DE3?e#448(;eJ)wgcH|NaMNx^Qc)VxaSIh@KgN%~W-orVv8TIpunG zcD7k=E}ma7^P&%%&1RaWajeTW)?E7K;>p8DcXv0m6nP8Y`}o7Nv$OSTfe4GH0Yg;_ z?3{ClVF0GFB-aF*GCJz}uGY%VZ+z3tc^t>WgC-k?9Aj*q&vVjR*Kt8=ciUSc642-4 zdpO{!XBTKY}vT+_w(U%VL0?rimlg_$C|cyY76dU5fkFMTQH+&9b9_4)R&^~9^L zZ%et^-a&BNI=chgBXVWV2w;r7j6 z{JG!$ZTrgp_Zr--+Qs1OXwE`DS+90;&T0Y3X#k_FYNZA-m(vtC)4VgF4DBb=L9ML1 z_QmByoAIn$L1-2i+uwimH}-EnTzvR@OBY8|05Ajr0RU78Kz|0~d7&<(7Zy)*uCZ-c zuP0kJNxHMhUTcD3!D#fM|t;hzx}yCj~DhhsKpT zkAV(0!C_bDUBh1&o2olNmHo{n!uXW@fAahXQ=(7&vKLtiIPO&}Hk2)ohNL(uVAR=nDaE(ltVQjTB>HNC!r$) z?>#8Y;|z$7ebLmpfTDd9r#cCN=7IxPgqjMdB0xZJ#13h8?iLCpV-(-|S}PGFhsD%R 
z7rXZS0dl+P&$i=TCUeeo8Xqqo9>xiY6e&9L%6YC1DTSgsVUavei*9jpcBTLoENCIe zx*xdW8}8hJn}^By0_`eTBUX}=)Vrc8X=?n?Lm)!-F-u`?8{eipf%>%1DW%nB-S{?Z zCC}>3p_^fDh&}hjfJWPC`{3j>#t3Ws_=77Pd_DbzyYSaK{MgOw{pM>9S5T^<7sRmJ z*Ya=^;Bfwk-Kj6x<~!3v(S)_SV}Bl1M4n-&ij4~uX0i^5nHp3?wptGngK&EgIfa^x zKO1E=JDLq6evYcjXZ;2~`$~9|J?TJe<1e-Qneo+7lT|f7-pv5;Va+>#+7sNoRD6tq$A_B??YyhbAvU%W-v7Zi3 zQigd#geC;@Y9uNmX=YIaMI}N4q^eR(2>kAGR?z0JphOZ24aNOYPH)9TB@NDv2)JMs)AZkB~>Ll%Ei5OdPKx(dKB}K z%9^y4VrYa|s|4o(oDbZF)-fWeRdM2)&~6q>P-8$u8iwI0?{=&h1$bHbSxmK-&^73o zRK>KaI3H9M0YEf(k5xtFSO`^0sVdF|WI{02TBQ;Z8hP)@pw@gW-HkDt8Z()q=NLUP z`ZV8FHFk}2eJRCzzi2x&C^d-)A!@1ndFH@V8bz^ffEn47j{#kWMBBqQF4i#yEi#S+ zJDf}5CR9zwi=5KrTi-NcoQF0x%$#%HZnvIYbly8xYfZ`>jPr22_cPEzx*+|@?yx&S z^lL|srCK#rB5j&V_w&>}_q+y7fCfkeqF_XhfzS|KF7fpASzi4w|MqYF&;E;lDXOLj@di!MFa@XADo_?Jo?$6|G6nkp0`%gi;rKt_k-_z z_|bbVaNEQ=Pu}}dGkZtQ1MzaXMzx#morx0haX=WyL$_Fr<5*L^y1G);rtMV(nTYw~ z`U)8qi^UV~*}HGQ`)(eGciw)}w~a%dZ*R%F{eC}B6CmmcF~-J_5zK6!XG`;Dv)P<1 zokzyy(OLbKXlINOk>+6_BJW(=b#0q1cv}lZxjB=KmrEB{0lg(i|^j&Mc&UwDM+ak+)wNe;o-4wWEB@8Rq z?0t?Khb5fW@yTud#uY!x@vYu3Ie6>evNWO;uyXV@BLuLQdOSa_M+FfL!BouTxa&lw zO%rx=ZJZC}Q#CE2pJ9!4tOSYyfr*{rR7$CZ0T&SveB%NsdxkmNoRhJhZZA^SKdLWsHR59|O{K$U)sRf(7Y$Z?u6 zbMY;gIWLHdP>UhD*gy-1yG2M>I+o#GJ?!i0SN`fRoi`u}sBG(+VOLI~k7&1w}9O`&Q9A|MQo1x%}W@6qt+77#;oRJuM^;)_V@9S~=!&btMN zqx=Ii*IbYBWmSzKFp{X{oNKLQ5JK#lcFZZO#t@Fym@e=*j;d-^K$IAcPCu{A8L(;{ z=VlRVE|G!<-?#=NkZ`0xE_m+)drOka5WKH}bI#O7=jf12*zHc1s|Snql!r7IBWhv< zLKlK}&QOIaRo8m)6f1O_rEqsqhKaE#-y+vdAagbZM;o8atC{OZmJ{? 
zoC20=(^Q9Xo+mTbOhJ5PYK$_IX|ALoNh?ACL@TDDTN1%^eL?O|{wxC=y>QR~OiU1f z0L@guYRPc+Fr2+9(=>0NyWPQ22d>RY>RAuD;*_fAO3D?8P(Np#U<|dgTwRsZ==%-T zLUD!5xgG`pB?3iIRR_)?6*Py024+SGY6gIRd?`Y(Nm9d$ej~%(;_>ZQ{_AJ^Z%;L; z5lcnK#K8za2tjJ90bkFv_H{YNyj zIW+$fH~IMH%utxk18^zzaI0m)ri-B!CiWg7_9G6a17sjIp_T)+GMX__LUl%mmv zMVIdO-YO!Bz^ij1qLMQHl#2-%z;UAm24-MpVCdXyTYoS!gYVWS`|-9k)INTi4+p!x zY|$kYMc^u`AdUb`#xVwmL{pww!4U%}0h!u;{pCIP-1DbR5H%we(goZ zS_;T!xe>4j56-0K?QnSV^wIX?k8{aw-K*L5XhmaJN;mSQ6T8iR=#K}lKB$Osr(-!wit;&~d4D8^_ijD(&% zArToWFtI0w22i)dp$(A`P1HnM;sO9*1Q(VT0Hkqij%Xr#Pa zEQ4<*u}`k9A1|D*cMA{7z(F>;(im&wFj2)qs3DoJW*PL0k*vJuo}Wq>P(__{71Yeg z1#k?^6-zWc^l$y$_doc5fA9HUeCw@e-~0Z^tx4Xj*MO=Zr6f?yInU$d7hV)ZY?`J3 zWF}X)7r*=azmxO)JHP$g!)|+adLnsjf&<2!5;&)X6%5p@WJGM+Hu})I&@>G>uBO5H zDzaF1+x;#Gc%s|wc7GWAzW2^eHD6y``o=S}56wG|-+1xy^YJiz_~C~ieDJ<=zHQs1 z6~QsByJ%Zg@h$*iN-3qpo=QojPQ$S8n&qpGY63J??}Msx6TSDf)E8G*p7^MDWTKA! zaG3kEuIcz}b9S}+WS+AjkwG6DaYAa6%d%fQJbzGfK3i{`bGO$wWZ1=~k3N;0Zm!SP z8#e9QE|poZtlM6}yy9@M$*#@y&}6T-OFczLxjlTR{zifG9mjp|c+oWd%+X0}de}i3 z83X_bKmZW=XoT=%drkn%#Eei4)Buq}m@^WoCt3D{O)++he&gguMyM(tUGNU9X28l0 z9gXuq>i&Q1{aLJSS(cuMjc(4ln!WdFZWmofwpEo`omga%6y>5t(xNCCv`vuogBt=i zV8G^!><0@LEcwB2HelF>0mFa^L4J@QY`;iuVA2+4nI%{xixO4EVpdgFcG2Cq-D!5W z)|$;2!w>sLR8$kmimXUxr0RT6_nv$1*?XldwFiDTM5OMemPRkbKbV=^ zK6xbH9e3RE`-MGLFMt}+$*Cz+7VCPtd|-$f(DbAZB19%c2?ow)`hgX*0W}0Z_B~>$ zq1tQ?Ra`QeKAZ!_~b9Kk?IF_|NbDBRQ57LW*gwWp_9NVF*E( zBXL3o14L|qr@l;0HC!NV>Gi~mDsgnR@?r(h)epjLO`^dC! 
zaO%>aH77&?R6}!U!&$g^v~*oD?H@h@x8*RA2!+1B{s3QW`$JiJ%3e1HjAkEG@-ynBecLV}`&vOm6fHNXEf&c~Lh8_Xj4G>N_#pZ?pMhG916~xT5cO&d)&vx^U zzy9an)BjQ|dtaa9G-J^krMZn*8Hwx%-aG$o!pe{230=;Aa|@d-zV4AJC|5%hGe`L# z#^Zzk|Izt;`M961rA7`Mz_qsHQl?|*r!=msYD9E zPQKG=YSmS$3$id4C8F+ZgG}@FHh$rO=e&RZ404Gi1c=1B>8I4}yyK2L?)X^5pb)sB zqj3oD-Eym%LTGK4z%j;U&ULZowpn$rzxFz=9<~;j3PSm8bzd|1bW4>!cmK6OagX~L zyUm7j&Sr3SaemxS+udzgB~t_Qi;D{*?9Rq-y!Tj3)hJ58wM0|5&1P-j$9c-yT2ps7 zw`v+GeE6()cXvZ_Giz0sWoe>_q3b$w%9WO8>ZY~0S}zfrF13TuDY(POKz>1*=9r1 z-{-^QHxqm(PfNSk?T#x7-bHPQh9Lw>;1#~%jJ+?pbbyDPYeNK62r z-rBVE>Cy7}@W1)Rum4~E+AqxKn`O$+A3wWimjWKTw1C&5CllmWp_HoC^1N^ir7X{` zZlAn(F_mqe=DzDNS_FxG%B7e)qR)8-z*5ZB!0_gFceYwzo^Oa~DQ)cf*vI{0k|3|W z`p9chvlwGcebKfo%l-TJHy7tu+v^lUjNPgmzwr9!-+AwCW;wgqOw)qEOySX^%XyyP zfA8^pJcbYsW$L=_6ibj&$Ylnw`}Z#9Nrz!5rIaPN)=H@{s=KG57v^PIaxL8T!!Weg zZnn3}i@feHueQ4{eg4gR4_>|f<*UtV-E~7GR)-MS)%syYYaM9IwI2vF~2VeRArd$7p{mFkEKOcvH!qih7 zMdC=%`!d6{V=IUTc&Zk-<0mgRd=DHQPr>801;>j-loBtVm$&czWLrN<@BTVkgTSAj z_~8?NF2G%>qNZxgYFu$F$H!Xq`y1~6-Sc+EP;73}2W&)Gpbxqf2=c=^;5+X4Lx430 zCL#eeEX_T*^f0U{GMJeuI=!5L)~04|uB|!(3lf8?p(;TW8M;Le*V}r|LhSCXwGd)pZmkGXZFLA~ zb|XhTy*6Sqh0>TwBve;+=(cil8i>`oM_HWCtv%}rZ7MW!*JlGR#UDK+O9 z0}`L4aw$Z2?^1|?n`)opsZCyUX}NGp0MK$_L_#Xoz@3Q*(ajQ5S<3DE&)&HFiGbn$ z`kt!Sa-=>oMsRblr4&gW#h~u_cu3<=z-q4Owwn&*-meBhj-e}JwbqxRn+(`o8wLb# z4av^Cb%Jue+&VkK%iy9@m=H6E5Esjg)c3s^08EIu8pjyp^XJbcM6{RX&6EDGsWP)H z;(fVz<)T}E?(pq@FFyVM{1N%qi>Kk64}bpc;Y%d6r)22VQSUe$%G@Rt3C_@TR@E4~ zz6V#oJk#L(N@;n?Q${$r0%5G(I&o|S94x>%hfPsl@uz0Z;z$7Ih)xR5Y@m+HZ1+F! 
z5|_iis;)2Z`B8oUj@Uxp8HyP)3Ie&K6DWLa-8ffGVx6-u#a5*R_698syf)q*v4ImB zz0A{f1xBdgr-5~JKz}*B{;}`BHR3XvE5)?C#Tw2~=Zh~eBwXG{Goa{RwdH^kIhy^z zU15IDcROADC7Nt3nd-jT;ws3DWX;t-I$8hQ`Ms;A?mmKm!=e??P&k?SG_|UGM9eve z6AI7=NdO%(;Hot*KwWD*Ran{#%xJ_sXywycppISQ_-fmqEDnFzEh zwNE_7KW~OGFZ0{)f+L5x&s%WcoIhF)&p-NYr@p1TA9maRRDNQ1LseriW`rO0e#;$q z-0_*mL7f7+D|m>qgK=hfbnkqs@p!l$hE*+#TT2r9p+D?z-hA~dU;5IQ&MxBL{d<24 z+RZqwcZX}u<@~_~BBs>4!}d2I4OD zn{gGRWXg`fD2K9;Ql6I(!qIXo)tjm+6Ge_ef}Ix22nOhewN_U%;?tVG)=CaR1QJiv zL?R(2)kZ{#VlyS6R+>RAwYa+wk#Gpn+1+)?Es5k(gc)2#r0G&hQE+!>VF8oqL+td> zh{c=c`3L~9OT^NOnw68NIoDG|0vfn!lgMU@0Hu`J^}0+%1m;AXb4K)##0*6^h}2r? zv<`7_Gdvju0ssIl(?!P)GF53~8aC%0ptrh|8iw_d)Omud3X5=;JX`0%)ec{bFyP<#g{dZf@S6+GT$&<&by1m`6SHo(x zVt{cRNm6aO)+%B##@32EMhV+855oZFwbmFDvz&H)rs_n@JoWF2 z?mMg1W(eu__V)Vrihx+S@B5r{&2?E8cYo!TSGth2w$1vCnQxDKZ0)!|PKU|6a3~A6 zZuT}U(>~8p!fLhY&uN@K&py>!nY3Vd(!Tng_)|l`E_p;m-avfd;|l7WcGpxEawj4K zbT#b_3G9ZR|RcQ6^b%Y zb6;|4%Mz$l)uL9l8GuA_vKXT;*N0i2T>RW7VvlG(Ocf5^x8 zn<{_8_w%7!?}r(6CktPI4}6);TWbw~!0Z!?!|%A`j!(eLkVtY>cVLK-y1s=DrWq0C zgHqAg9H)E@LR?n@-pn0#2pB^p}RU#j0pm@ z*1Wdn&O+mAY;#2l0KiPtTuTkYk;no4Wz?mAc=J(JS3Q+<+?@ao6wGTb%-l*{tyX5n zfE43tcu0uN49wYp9JEw4Cx;MY0-5R}6lz}P>#cCNNcj-k3(yvXybrH0+Ug* zv%cHh?+t8w*x%$S%=-fd~*0g^Lz6Ohmh_er}UOY+w26pZLz(kEeNGS=-+h6&csQFOVyUyI>)(V6n70d-iYE{eAn~Uv8!esOtu@f|{+VeR^$`kK?y_1btLY@bm@Y!334p z%J-aS^*w4qT7kc`<^SDH0)gMP0y}*ZJ1KZWN2HDg8#FUBOPdR<>ZCwu)eNeGfg5~` zzfkFgl$O3T;H2PVH${xM!t?g!iy@dBA_~&?SYCYSsq`VI!A|ZB3G%rmXhac^EcLPUr05<~=z$i_X)Z`P|EBCW^LW)Y% z5m9Z$FUIobBl`LcYSY>n(aet99RO&l>75tYB>~k_OUB*R0&}}tx88Bb9Y0`<5D^)* z6{ld?;@gMi&4+4fb-Ue$G=w-z$8GFF2;uSL$KCo{itrEq;onbErzP_kR~O@aJKw(l z?EKNA^?D7&XX|yBB)4fl?dRn=hHyB}A#l!fotDjJ)2BgIy&?yQF#

zO`E3>NJ5lA#`(pw(tYVI|MJD>z8Kd%W-HKyH4;G0wd|WZ4jf^sCD-bRmzS5S)y!I{eHvD))iTXp*Y$nh z_g&8U>iP4o>n<-ZiD+5o#R(Bhv7zfzh=GNOV~i(@IW1MSth*uflylx44=>9rZmZR* zG;+fIb}uN89^A_{gc!$hM8xy+^QTXrlF%xKq3hS{^;|LodaD9{v%THVlf=M5=2DiX zG*60-DWC%g1NqJV>S6b}b&J{Fy6L*N+MoKa{uHkyKn&b*9I@ZP*@Y#5+iN;r5d#q! zqJf(ed|yrl|3n-C0U5lSBch~k(}%CD+R=QMcgr+$pXT{?kmUPXwHY{qAxodS$PRV6 zzS&(|-opmhhr{#jetogoY*yRd^EN}U`PlyyzxuPo^1>7xiRjbJPWYrB3xwj*YipX1 zjb1mBKn9;uza<5nHA$Tyj$KM?YLgD04^| zS`?g^IR+wf1ZIwms@j%goes8uXmW3+MynOo~9S`w?}^P=Bp3? z_~-gR;on~W+JCg$KTE4ko@&6}%m_ftyk$2)W(`CLEC?Uizq?uru_>OyK@Fg_b}Hfn zkt5Yy`fe1#d7c3Pjmo@ewGe_eWuz{qWtphfH1s!znTWuhnTg56%Q_z#fB_B&mN&4#b-sZAtp=Rn{T8_lXe!IN}=!t~D%o-t^AyE>czCtZ?-$ezVmfFSDdKi)1 z$=n^-Rn8NK#BG0D!s;Fz>9tZraMTi_1sl z;&QjUt^F7Jv+dH!S^dUO)&KB2{4XB**E_%6hV!xS>a=akt}T13vv{m+SJXoP%CL@D z)r{4}70p-?F}RiNr0Ut1)_3a|ZF%;BKcyjTbU=3ks%YR0=8iUr{7 zTM7x4Z+G6BhCmJxz!A*x1mNQtSj8(MqY7luVVio+`-d#z;*~qj5qkKE`2i zK(7rx;c*EzHlo(pW(zF9u+8=xZZB~oQi!J4-gv!~{af#RIDlDeEpW^^ zLrSqrM5m6fi%4Q2a|Q-fM^|t)SXx__1(7+B2#JWQz6@4kT1p0qNYqqa?W96LBqm0~ zQmebCl!)l$mLx^w{!kC9H8(17x#1XU zzs^2N+5fOF;XCg5k-@-;shZ-+S{N~<#Cx=UkbREmFo?tW8&9`?=Ns4m;up`KzxD1s zO-z(?wx&*Xb#~s&83Dnu*}5B&$h6F&J{Kj1;OvHAJ`8EK+UyU9tJ|B7L*y7Jb`(02 z$lNhDH90?<=XshIL~N~HzqkSbapWMFQB_%Oj2!_Ux4YFaM&f=*5_%)L-EBJwXX9o* z9=eP3v-QT^J=yViJRA-&hS;UPZ_Er%ty-%dLw~xVtwt!NAd${KN8~Y)O z)LhQa&emsVkFQ^3P({e|LPV-sy`~fb3p*@_mt<#gH4A%x z*4BL(Qe+FqJx&MofUXE^#7z|e7*6#j|Fp{4=wJXR-5i)Js5-b%H3zTQ7S&Ses3GV= z6SsTA`o(c~XxZJ_TwAs3t<=qCqmWVLxY*Pz3_-Of>iQtD#_R6KLch-Mels>O0zm&^ z?K%uFnr6l!bcR|WQs`tp+V^!nPbL`fQS)I{00|+aaqPx{q0EP3v+-TX+Z}iOz;J4y z;7AZ+OQVV?sH;`5s!hR(D1<;LrL|gXl7vXD=J{~Y!z~`SX*r12E%*iR&f>-G@*>;A z2R)g~>*pbDzVdUFzk2(R#^3yPvnk4>)$n7BTz6@eLa1|Q##QXx-N`E`BI~K;Y(_-P zTx$&>FpCy-L^#PEP6aw}Ft<_^8Q?Sr@20gHruDkDrn5;9arH*0%-FFX%xqbfqUBV# z3=%}7Ek#6#D6kGgcM1|~t!QiDKF>2V6AB`R6jgN?Rzh?dGS0Q00*A_yaUet-`#}QN zTCFTDF)^SU$3SL!IBKgD1SMK&K`C{kjOjQ}p6AHi11WPkc8Ylw*?8oDW!DEOF-nvQB{G-AqSRHnrc;0iKiH$@MPYn zWOJSpuG1u6ynJvN+$27axcIih<|p*&kKg{SztjJv$Lr5^nvc2zZXn1pt(im9w%ytB 
z@M6XDTITK1x>X-mM#|8vY2k|4A({|5I+;pUIL@Ee^-O?f4*s(0Yi7n0h5>3FMXI&M zOok{a>OsMQVrpPQ1YquP%Ci0*KfmNfm_4){f#$v35U$tKce`wOwyFZK+eP*0=m;w4 z&KMI$2FO-Vy>4`~k8AT=YpL^NIrFY@8{oySw)DmJ<-Z>G-@+j3Zu6eZ(aAWM%35vF0TvV#W}{yKle$wLkIC{nXcfevr*i|I~ZfpRGgx z<{R(7v-|(mT3=kfxO{N``q|U{_SQ%(;@RrFUys}4WKH{ic=qBdr5=~%^y;VDPz(ZQ z(=;If#{hr|dQug5c0&jhvb7K8b{|B?yP1gyGqWH=>_TE!L~NxZlb6Qlml7j70HHTU z!ZCJJUI?L;Lf~UR1ZJ!?uFoT~FI81fru>KiYT(dGaIR5;HLbO}`*E3b&VApD$g(V@ zlqex}L6FGM5X~J<<44S?qY%Pz+Q%q}etUbHQi?2H*X6vQtDa86Nl<(`>M*3k;Sg9( zjz;dDLX0t%S^?V0JVAs-f`Y3VGS5>^aa{Goe4G!{G}pWuMh6H&X07kjN#1un&R$%S zEX&eb>tZ^U1rLWqt@YyUe4+B2-G8op>CfD|zse?>3tzn_*%kMSm7LNldWg1I5ya35 z+_eYpxa0RrJ1LFKnECWUPB98euI!du;bc1;H>>me|JE=5qrdp$zaY%J-EKYftA0S? z7gsluy3`GdL~f>)00S{LixP~W)!JboAut!?WqZ(4AFM7U00MX2N{|R}Op#qviZ7N0 z5s6SFJiK>*w>z9}v)epxHk+>NS~YMd2XRETUZQk8^{~_$yR=NpYj3=Xs)WvfqEyv)i8El_w?$; zaha>O5Mz??+UMVR``x#g8Njx;w{dlOs-MRg-+S*pX1>0?2_fwF`+1&g)uHRlygYev z^`)SK!-Gx?OJa1?3)}Q^BJ}LvQ#s^;V=Ip-5 zjgKq=``r?rKXbi>q1T0g*Z~cZPoa8_j-Tf2JtCgkN$w76W@doz%GbV|f7K3;?>`vF zacmRdgcjjqeSWCR-U<*AV?;)B4pO|#(=Nm@yU(gox>j?buH~Zx?c*Dp$kTl20Ey6v zz#OjO|Al*3 z{i|osz`slNm5u%azO~`sJimR)x8HcYYeOWJ;YRe8_AK0RU7G+QaUx0~7A-2oDSGo- z%IOx-i4ajO#T6*h$pD;)J`^-JEvKwtch7lwS^pzAJ3BiNZ1=}HEt~Zk-9VUQTv|4j zQ`bRI{1mMWpsJB25HqtPoQ8xDOj~PB0iaK*xw$zJ>$#%QOR_s-FIuQex)PT6NRf z1Q3bb{d~Pa^S+Caff21$K>~+mT8bkxp<`<;XIrdM3L#tDO=&9o2(4M)bt$D&btW5km+>Ol~1`&0EfS zzu%>yAIJL_{YGH|97KR;dbJbllVN^EF+naKjZ7j3`>4z3!)lwJJq@@uU^^;*2V|e(`AHV(4QY|8WfA@U? 
zKywFl1jmz;4`GUsBij+N%pzL~V;a_dN4BjFbASlsvnewOV^Jn?_`OTeF$#Lny3Jke zvdZ>(?bel!Y{=^og8Ouc&5|JiC;|{;2pmIGMf@k%@?$s5yLKMK-k?HVtCrjE#(>!s z<4O@)J5Y0m(Gbn@54wH+J52aLWZ(;DtDxRCls$j~0&<6_r>PqFz-9QOjSy*MVh9KU z*pVCo5Cnk9tvCT7sW~A8X{GMolI!OY0GL1lzzq}>0OH5J%bH%7>D!^Gj+axn%5lZH z@Zmccd-&}gneQ!B`RMB8qhI8g?F9h9Ps^wB2Z4ytRGqL!0&{SvfB+zLpbFb}4}=n% zP36#EUTp5)yZy~?{IK8hABbQ8BF!xjLN4{$3jvT%{tId-5dqXs*H|zO1P+iORjtfK zj;_s_z@4?uST-z4?MOPICFhPqYZ`%E&A^RNnGunzQ*}&U(5t&4iGvuh10cd@ufuf5 z9d~@jQRLWag$BKIRvmhD4dHBGrpxn-r|-W@5~f8Z_75)ZJ$?E#q>BgZ2dHh^wwJHH zadrKCza07p_wRk>$CvrI?pNh-aBZoNb6#}O8_H=|1;vN2zW&Yk-}%n<`)OPOI7Bi~ zRJcE^Q%KcHDb0Om#=h?*1XZUP5}{6$5F;31C{1%+SUgIw3Xpv+m9h2IgQs~uA_G<# zRz0+;j?Zqd@2$_zht=_T5FuJcv2JPBbz$tgB~L6kEt8}U2m^`q7zu*FvXpr#u^&KW zf1I%zHHaXgTW8k?=l7@GK2fY{98)WbPQExZ$(VM#TPH-;5GXQfGodD%zSatWXRGz$ zaO5P*T!M^UTMPk&su~idu8#rHwHO#!Db1~QK>z?5n4oc==Sfu~M=MKUVj=}p#eFIJ zcisWBRY=jXE)7yDo|%Jiga!cKauMcZ-UZ^?Wbi)9V5N?jcHzh=_x9!zVjLMr`1yO` z@T?+BY~W`B0Ki~b;Jx=ipaX#V{1abq@KP}e@LR&w??d+Mpa6`VV<&}Rq=GkKIi z-joRu(OV@zcZW}C5_rcQKL`W~)B-U$fUBDVz@rD3PhUKp3q&$)1crR?mFxNTfAoL- zm4Ef0|1tm8JB=1*gk?>`nGGV%K%--%E>>+oOZwaU{e;#1 z?7pt!I8Rv#+F?yXW!F3DFC(Ub;hPi~wH_N_$%g~j!vuVd+b>DATz-@kpz=eAqJ$DK zE07((&byXxp32*3zf8@H&Gb<0^>OW?0Li%8X|krF8`8^#{bHJ8O@`phg{sBie=QpV zP8f}ddrY5*e#zejWu_nyW3s)?W&aZ|M16?D#Qsses+EGqw3`6&F#L}L%=0@%j}fi!#KLP z*6QZqn_Qfx66IBn;dCNL0vURp9{lbb0_$U8gi5xGm9-E)Ku6GdU^EXWQfZ4sm7v&j z5CDp4!ZemfkEgWJByJ0jK=or&JpZ1LB8z?L-XpLy&f7zHkl&Kwf^LsShup+RouCtHU}YL;S@iee|ArBDQsi)Z!6at<5caP9Wpp`vCIp$c= zHKQCZ_N5qq-x+^Hm->gWf88}O=;C&#_uhx+Yxyn>B$|VJ50r*yO@Cd4UVJIw5afo( zIEh|muYhsxDI!6PfE;U%nDieJWX#W}#|K;3OOmw8#K`o~b*S+cPq|62(Ef`Af zI+0K6ry&T@ifc*>hbiKp(qx0)RwpTM^C3o(3! 
zp0Y{#(RZY)%}XSzRHpG%A&sy%aJf{h+Mm8!05=}z!7$FR(1;(tc>A>Koad?{y4gD(va_e@)js<2U<#*XAv zas)$67N5B~WX4DFbpg&{UA9((_U~kqBLMHP-YhtKU+S$3$1e&09arY~$`K{qwa)V^ zz08Xv^ycEit9c2X-k4^s z=05#f3?+bju79TVT42^!EPdF!I9E+x=ZJG&swoo!g zp6^ug{_t=`~#ksT#ew(XuMbo~V=2>a`m%g^-ssA!Z zBie1(>ZPYbcdO5R1*U{(M%Q8nh>6S898TwrL zTJ@JlY%1d48YcyKV}4pTB(g#tZnAIX@w|n+PqX4FNF)C!cO@O(`Vn5-o}k)TiFT)4 z8(*p<`}&*=X;eB5-svIGPTXD6H~VtSfvys*>U+!gSk^Hd1koUZqrwh6f%jTE-mi<& z{2dfwY`LubTK|D^U!4SJ!n!JTRbqHyWsFbx0i`e)a{&}{yH#>nr*zXwsW#V|n~$?}2y4TJJpe1~@GepjJcc3z6F zUC4krVcY<+$R3gats1)^oHr+<1q+H{SV=}RXz?>`4ZX(zX@_Z5IwGn^NLpgrPF&hJ z+A#A8$VA8PccF?t4-{H!8 zTiQJ_HmsPF%uhEbOuMcu#3!F#EFr&kYuP`oRDS4|yaC{Ei>p`=WvJq3cux!a(PXoQ?2qS$r(pL;sMn8*KF6$^!0@B_+ixq47b zz~dsZob;u z#Z>J~@n*7=({@fikOw|ZO|7cN> zj(Uxqdqi^6q<6f}yz1mMlxc>UHa#DK2e3|2iC5nFt0YVGk55815d#tCQDKvo@sBbt zS@UNgsD45t7!fI2Ub^oq51_LpPVR61+U0CsO@7>BLF$0V9%a7d_0E63I+_&*`tyzd zN8NlLY}lZW9*{-_AiT0mkB3eOe97eXBMb%Bq>K8zf_Hj+#(GikpDekr5g$O2y4&q) zVlVgh5mB-Id`3Ybnt0Tlpkfk`i4Fe298o<;#D*O z43BN8j!JdtcI%#6`G+Kzxlq4xmI`e@+eEK-RBqg66~As{psa5>7#q*?j$Wloqv!1s z*#o12nbVdnTcZ#-GFkTypt3JZ3V8gr9?xFwT;Nk!K!arCdaX)o-_)G7_g(Vn{2J(5 zkaSZ6@(;n*AVp!hJc3WCIkeGA>=o#DP7Cna%U&D{r9~3K1#zczX2hgl4I;r**xnY@ zLfGyASr#EjU&;EeK%sibqeQ@I3yAUgmpc$^-L$-AurMX9+-D{nYx@2VN_F2|w^Dw3 zbRWT;1F>;atqiIxx1uX)?BPQ#2Tg7NRSMvR-rQW$8|OIZhzD+t-Qhd8U;0p9<5#sD zUx=-=w$450`_UsEF0a!|`G#V50R!b>o=&2iaI`PI1-?D$;Yx1&)%C^pftPCF@b^Ot3mwIcay!2DE| z^_< zN+JPIr|UpMg(E_&t*3QjrJIgb; zDB|d+SaHqKPJsbXcAm>XsS)?#bmF?QbniR;4>IzqX^qJ+7-Ofge+-|4_%mBK0u_t87(-{Vt#o?d&h7_A?^{(-q?dJ?>tskj$ahdQe7C1QKz}`D!Gs2(n z!Sqr{()a(?bQZ$wj8cWu-n0dxL9~x1zgj4{8KE_;vErEFh-vEQ3YK^{-jW7$dDz0t zx~0+s#Qw>T7W>V7{tZB?Kt3i}banYZqv?sLU`L~>A}$ja4vi2|X8jgQdbmN!Qc{Cg z^kyatho@Q%ryx(q+o(s-t;v4EiBp^0hw|g!Gc$(|OI8n6>g`8WMx7(w{}jD_TK1X$VR6b%H@ zdsAQ!;Y*Z+5lPT_%~vvpP2b7NhVdrFFhq3tOyF#AmiTL@}Z1u0{ z8~{=9ZgRy8f+m`R4i0c+A~12AG^a1(agQ0(VPjp6MTCDkow$ClqRN+zE4NylV|#OTXo zL4^&ckOjv489h64SV@D{W+ulE0x!-Jx)qqGWG%JT5JGDtsf+>$U+H|4gU)04)ouFz%i;0?Ut`_TKpZdV)l 
zo12UE?@3gP)2P8ge&u~D{GYaOO7pGU@i-VGP@zX2wz(N9+ZH@p;w01sC-%ZGDO<^3u|A0k zBB+EeE0^hL9EU);dWh0kKI!fX^gtZO!nPRV*mu5_v>h5dxZ*}~IedXFk4xZPXVM^>thYy$# zxwp3o?U$D8FXk74>p)>T-V_wPH`l!R5NwInRUtgm?Tzc!ys|^=An#L!%2U+^p}EN?YsRVhmA#y0+^T z_YOira*ytxbH%!^j_v9YuKpsNS-IXhxum~2ze7y&(Y$iY-ly*h?$i4$Vc}+ zQ!0FYGe35<8Bx{Xr~xR@-N_O)u~gfST(c5ggnaj&304?hS)~wjHiT|RpZoqNQUyH- zKlufu24q)&iDDwlRtht{(-CYw*^FB19ps*OlD%vd7%utx8TWna34S%d`NX z0Shx2!vlp z$db7yWml7YvlNkTFymQnCLOIMgjG@2RQ{6kc3Ne?4o_$^%MjO+>;)NkZutPwm{*~T z+2^5sG1U||`}s6a_@GMAF-(a3=fBGXi<4dWWrJ9yYKZ#TPY=rz___Z3f0pMEU2)Rc z$o}8c66ber2&=+b&l|oH$#nm}rB+TwOg}~Y(xRNQWYSBu1x-~amw-x``{?lda$WCF zjVNE&9@G`4b5;0D&5t1YOAC`<$1lCCOq?#8wTioVJb2Ho(bmRW(@P;Jdk#EmVUrx= zVb*?1ExU!&=(XkC*Q7ysJ3<8{^TONI{-#e*fmQ6CfT6mK_)Zhm%vg40*YG5A38@hV zAUnhp3*Or3Nc#3j=@3VTSzqu4mtqG%t@C&G7rL?1eC%w+0x-c?3xOwKXP_AVS{Ku; zDo3D5P#Tg_&QDmWm*V_nu4ZcZJ(N`$De7!#fpH8$r|Xe5Y^xZL=0P zTdmH*X-PX()E&ixgQ@@JgOS%m=?$XY^`T^?;-X;brsn?Nt8Z^DD9?+RvNQtespr-d zRKjPfX8a#i4KIf7MNky13N@*od!%|C+;0(*=C1~F9anA+JNxVLll!2sX~00#7t=Pv zXo`LGod!ESK*VGulYc48d&c>+_L8;O))771AZ4^4`sbyKeX{83kSpa|fSkNEz+EiH zH6&{hD7Bs5h7AZwZ?1IMdd$$WV|O$#Is4_k`pyeWEO{9moF|GrVjRK*=~snOH){%? zF%BD@6b$6!&QhQoqD_2zj9O#k>KDiGx;ZIhGULa;P6sCB1KFTZ$VG$3!|X_qWAN#{ z*kMa6y&eZKAnLFDq$eGJGZSy|QD5?b~WpQ4c=5r2KIFw*) zZ3C-MJ-kk8uOXnHjv+QfA=Lkp%el&lr58eY&YL9_;Ou?wR4ml~Z-XKhsAQ*`>gOf7 zcsT4BXR*_tSe>77!7}(>l)H#$+}^z}1U4vtDj>*SvSj`H_2;v|6^K|mLExxGZIW8& zJEUZs!xLTizllxiEGgE|QkdL|<7M;aNG{6ipwEf+M!3gQv|I<%r2^db3oM>GnH+vY z@p#hzlD@^q^=|JNv*7X!^z?k_#1;s?-<$h^e~G?>f5(lexUya>9Kt`^I~E_Iz?^#y z7{Rir@{sg2VheK-t#@e>7Bh9m?C4HfMEY4&%J1xX^mH&v0-ogePZ3(bbyG6jx7OxP zH{ponZ+v{8?G;$0^&G<{OSt+<$p;7#ySXjihpD6*msf!2(Q8@1SA7a~Sz<-hoq2qHu*HjCnCPkC72W!21bRWCDPE3U2p8|;RUIF)qMke? 
z#r#`O?R#485eC}!WxwE)K?-!qIQL=kv1)x%@pNRgOr{&Dqj#RG`(5TD9-)Jb?S1sn z7S8+vWJ=+ z&UW`$Ki;#4aS5+z#qO!X2CBtGj>lZ(OelE_Wnv$4np~6wWyZtDFC_*L4NN{_jmTpw zXJF>%_rJ0gyXq5|NPfZ$DKE(Zf_p(3JHI1O&ICTev+wmR$4E19X6v$?66C4Xf2iQV z4u+wj7O5_V^TP46g|Lq~1@cS|%4Evr44>nqw7-8}RDYgRQf;K8Gg{pHImHn>B#Epd zfdkWXz~=B+StAf91k_x)OP%*78okw_Wc3@Wk9a+V1)z^^;%ugvv3$7&{0&{VCjP2c zo31v|gND)0g z{5{!&QLR4XZ?CRxoE+r1UV!8kGGw>DO-bsF$0ZDJUZpYpLCYATN)( zpY6=-)2Iy1$XvQ^8G z*`r_oc}NNBTzd$VJ*4*%nJbn2qI>=o4?%%Y3LawQ?V8~-;2Y5ZvU-`46`|$2MPFDWYg4ZsWf3|Dwnn{&b z-@A7L)6>#Vzri{9#YCS{fq}Y?#XCE3r7+nzCux~P;i5GCLJwm|DC4_fUs0}n zcPqpBpY)3Fz>`f}>J$G(qh`79HJ?tU89obhrA&mTC-hsupM@m)I(!Zpp9=sUxlaSlrX$!jK@; z*S$K;lJeR&Mu#-7&;Q-hZWgdZ|Ij~1Vjq%Vu4b`$x~E<4&06x9(E&ZFmFFmSQx~e``rk=>J7H0 zt6scov)@th&G&};FYVVQ$2)W0D}FYDFLMSXZuE#P20173wO zMEK?;9H@8s4wm@6t@M-`K9|chR^b4S38?KXmtZVKLGV{fd%Ft=kwh$khU zH)|MU7nH(=qAb=zREqw^eg~b-v-cvsLA9bZMpK@Z+$Jr%)t{zQ%h$4c56*2UYz2qvcH!5cu)TyPJbTJkqp`>6VGCqqs z0B3H;rK1a2v3->L%L>sF$FJ^S6>nx{W-$3dytH07O@Cc!E#fdAs3r6*ne6H`1`FL> z^z44ev}w;X;(NP9-+ev=<6(p9QULy=B&4h)`wW*bSFu=4WB+1#~d<$Y_ zwuVpd%%d9%u)$>+nfcvi{-5sMK5d*8Vgc4lok~PJ`LVl<9s`i4;%XCif9xrve$qlR zan*EU2$Tp$k=UovcL}8M{l0VRn*%-*wC~*TP;aXWesel$mZ-!5$pYOEZg&!J? 
z*Wadknyf;!cH%~afd&mbK`cz>9Jl~rqQ#zOmP2dnQq%^NNRWHM%U{K%O&pWoeIsPD zcpGUNxY52Om63k<$Q%SYFFpP9Ct3+lKu6JAX>8dcNq zRtVZhw4Yd?&TN`adC~4n__v?5x7=?(Y~Qbq6z(3zO@zHKs`-)bZtPS&oN4n=ax@>z zbhhccapr@6;>Kn}aMoG*m@cS1=wBz+>f5oJTZ`CHMKJ_HA5`(iW|o{CSLzCQ5IuCG z@lYv#Gqt^^F?g+(<&0we{j!^Z>%7CIo`lo!w^arKNVO(C>G@b0^SZp!7C?G{Md~xf zgp}IiED-O6Qr;X~8?$lIS|#xpN#^X8IaF?eHb z9dlY;E|@R_paXY9H!9iU1L7mV(1|{KLt+au-_4!$AEZij_^OMT5`b^CLw^7YnNXfQ zD5v9xK9FqWeB=Q1U6;|x?@WJD+G|$=-X?!RtFPzw2;_!K8pdc5BtY29Ybx{OuFcZc zmBD^?b#iUEo-uOXY5JEfF$Pcy&|HV5IM`01)l z%>Ai>XY;W1JYN$&fAQY7^tqwHp=_ts_z}s{(F`%qyB%w9ORtNrm)p0UD-V4Q57Skx z7_BKXccQ?#^$vAfTl4$+z1zvK9CUqvj7>O27cm7g2XU!X7JZDfj#3R=X()j8c8rxa zzR6B+q`4*w55+dydjdxJ0z)RfF;0g(nfW(N*=>jq?OSW|uxVJcsnMeQQuFF zhC4^B%X*4t^xmfwKJ3XclA6ECyG#nwBE*ne`{sVvXWK!+{=RL+M8L=>}FF( z=UPnVlun9~ss+Jv01-#r-GPtK?(stI!+7SQRFGF~drgUk=E^fv$K;o86>d8-$7ci| zW)ZyuV@e?r`ts;+C0B_KxQuUPtD&4Sux(8hcdcKjqVHyrS1zu;BCvZ0$WTEg0Bh=T4hvGFL`97OFv@e4(EL+ z`@+G;p79)yaU!a*eJ|(X#L4=>W5o253Z{;AzP-juuZNrJZxN;C7y4kJNY*?JFR>GvEGP zmHXa!BpHj0NWA>`eYHHKL1?ET(A5c9#HokDcAJR#7ZdwwsSaJ&weIS$6syL zeuJiX$gCO(7K=Q{vkN?5IuN?wJPf=y{Y_C-buN)BtD@o@)<<%BiwNG|z8B)j@(-{L z*u`LkZvg;zZy|R8qjbm0_KUOUkPo;2Y|TX=ti>$)0gcCY`0QkwH+OXC=#p-toCjpE zM)Bt6VXz8~eb2|nj-zhLqu}|y&7!B%MYbH^bOO-V7$E95P`Weo%?;xb=!{hF=5G=+ ztv$SCB;eI|=m1cysN0rH`MWF7#$A91M@3)7fiQ9UxYO#RX3HP1-9$Np?4F%jNlKuF z^*=f5AkY7MwXQ<)mGC;UalQ{tmu+4*R|+#9(DO&S62jVyMOZ0=V+I(CEiLfnS>d={T=`RIR>NfA6my)%Y>}1Z3QN^U9+GwJg82EvUi;GBjT#dVT6+B7bQDnHk zlHpyUy;$!@be3FPV{*~d>NtcS&Vd6+D~^fK`3{7XX-NnmqMWRGJ)oX~PJoXY=heB$ zkF@tIiH+5zw-`*85mOx$q;v_ z=^*Nq<&j}NZrx|Dl)uE29 zY0?qM?21))spH_=UeoLOjSwNdS9Sh2nbD^ABDVhfTDU`b1IWM~9z;e9mz9azF!A6| zPAI*|(t($#9=6LRD>oilb0JBz9dA+PuKH8^qGZy*XLa*eS0XC*JWq-mdyZQ1e{;UH!NmnbSW_Z3m3zHvSLip_o0^5wt$9xUT1fj1Hx6xI2H!V^`sLT?OroW$MYN9%7Y4G=^)#7>Gy$<(^PEa+${BGz?uTch zz3=a{WF^N z`{ambOrpt9>0#7X0Rm!VIw#qjIko!C>T2$!zB=h8N)?*JcG zyKl^hZ{OAl0akuUbYMW35JPM%%242{KbohUw-#GK3-FNz=lzsH)E70!U>=QGDz5Kp zghJOJBrI0g?tWsTWT8A+l}seeiR 
zl57tE$}W7D@)#8@B!*M@XrA&t6e0MadT{l07ECAN5^hd@Xk6X1RGZ0FrYr3T#CGF^ zJzJCsI~g|`P{{727KK88lqP4e<}p;JmWSy@qnF<4|l&R-7 zYGD>57#pRN8s{>JXqUWrmSR5m`-0Whrf{-71qFp5x9vQB&c#fnisvlR-8t&9tNdQZ zoRjx^f&=0h`^c+bl)v)7esn_6CVaCz4vW&r5x+Xly`HO=5^r4A8Bb2TDRole@S8n1 z-qUn>>w<(a5Abdj4>^s5sJyZ^fra^S^t1nd^^00)iwEtt8ZspQs>ys7;YKfJ{i1{* z;7vSa=X41UID?T1Uu5@mz9OkHg}e@X^?1vgC8=|KzQECLPU2rn52eOj%e!B8+n8h+ zbXn00x)Ou6R=afMvXQZu0Vy35<>wG`ZepCtln}-y;jy{ud6E5#8#~A~We3G&8y73F zL=tTR2K%#s8_ziE(gD6K>6h!hHfEE4FK#Gvsu;01s$5?zVsoCto+100gByXO79G?D z-SJFHMl(@hS?!ol79EVcc(rYP0ebj8i`ssH7SM*p;p-)$cCXNZTa+!~Sr#)43*+4- z{NWGd^Ct~e#cc_j!r7V~)$GE5MvdN4L>-U`9m&Wps8}8R_gR?r#uY@AMuShxYVde@ zW<))^eTEY5`yNgYZ_lM3_?;JneICxay=f5ZFZun;cRftbFIv5;Qk(o;4uN4beiXQX z6gv!E;|!Bi>(yyZMVQh#&+F#|*7 zlE6+SSpN6CMh-r=YI%<*6e&9a1eCS~cmR;!bQ_Q*WAk74z5ai}L+zy6r?x*+(>K0v zN+jf+o|={z&bHk#^zYiZX0`EgNd@P&y{UYm7Hxod!!!JsIf{~{kbla`Ko!(6n_os( z{!?bE9zu*lFpO(Lb{w_M$sB&Ey1$AH_gG3abcA)pPo_cM=BwjlX*N{gl5xpV$B`7_ zT{e02YQPol)FoDk&QN*ug=@&K6&041}u>T`Me%_ zH=;I>je~9F!8wf{H14PE@*9}e?YUeDxZ5xJZ~AU(WYjY?+Aq-l>$NU!H>NYNJjFMs zwaoB25h+6-^-OdIx6KOo`K!YR=aN$fu^y-C1^G)C-*2m90bab^{*@jPjuqO*av*3| zyi#GKCN3e3=ZNRGB2T9IjZFN;($hLg`et8JD=`^;;j@GM&aawbgflZfF!!9wchcBz zqXY*#GE>EC+0d z?+vYEayx(S942X?)|Qd#r$;_7gEKQT(&5i>9pToFxJ$o@KA5~LIRI?EcY{+^(D(uL zi$#E*wDbLG+!%cwucw*cedZEL%q^P2ZY<*^t(;Fhsn3`<7qJ$B*v=TLW|^V-yY z!%PvTug^7;L0{8d_T(}t-mtNF>j|-`-X-dW7gsKR3d{j9GUMUcenhBSc zWvuVqF7NLRu_x4E~2 z+zgv?M$zLtW}+m4SY4XCuEUt@t@6{})He+eQvSizl!qSXuO`TXh7yNX$5Tl1%n$jCUsSSj5aEU_Wd1fiCXc$5?gK+MV@na?qCxHTa? z@pRq|Vr?bmqEq```>pd;zL|4zy#J1MvS()yRfk?MBLwNqRZU%C>%}SRILIC8JuK4a zqnKfBO+rigzWi?mhZVNHu|6lc@q|i2R;`1q?H?36MW>yCix^HR8oA-8qZ)npkxQUo z=#RMp!JCOu|IN&((4t9SscJLab7Ww9+U6Wl2U44wL7cZ#r>~}P8W#_T|4FGJ!m*qv zh3|FK?~{Y;on5vDCl3_sVbd%v%nBTr-g>z#0{%V00xKx9lU-QWg|n)`T$G-Pw6KzX zi3Ns0?Xxk_!Cyn;q*2;ByGA9&kD#{Dk5E-fMRV$lQ+q^)3Rvqfu*@^CZjoeRN+;E! 
zHGU4CEpfhHU@=Q_*nLGHkV1m~b;_>q(na>}pjXppgN#ZW=ID=Y(LFD?P@=~yxV+}N-^OT{Uolr8zHN_U2Uk&A@agn zLmgAM<>mgJa($<;b$QQy5NF;~4U9>D@dpjto6+S+l^g9<>ig{_-1@_ zF4u7Yo&A+|as){6&1B`Z4vtM%<-Co96ZMJu?$uBt@M~(Y$T6>?7Zd|=7ba&EMgA?K~pSpAN$u$1w=hgennNxlDNb8JxH z!*-HbEDhkj!_whpH_x6|vl(T^>S*4h4{lFtDtG(sV0_pU<74aE00Bnqyg8SprRvO; zYJunMRI@VPlt9qKqdFeBr0}(>_*Yd&gi>#g`etw>dW_STDj#XIxB8xixZYAPKXk9O zoSzCM7~rLT${3mw5S09zB||I$kmkVEPEUvS*~RhFYm<|_6c}zSwi`%tuVi{FFQ4qL zn1`wKZ1z^+5TOA77Af%l z1f^l#Z%TqsU4TH6rqet>TeA(QnI>DCOzf}_vLlNHWk(@Wx17#|m5kM>t*hxqoF>F# ziRoI!2U5K%=ma*yAjFi3F~*+&9c~J>87gq!!Ae^*9J5dJ(!J zsGYj-*0%xq8SQ@&_ozldrKSIBm!~}uIfTPtVDR+IkLa*k5w!_Mav8@lFDvBcNKSgG z4Phthv{X|b&7m@UwKeVfv>?VyEeXS^_u&vRwYU9hjC64gVp^ScWQcK29I=|r!dV`3 zJH+TuU8Hb&v!&Dw2c3auF{7n_3yS!BD8&7+018Kr-eh6l&o8#0ra#GWBdP&?r$XxP zov`iy2+lk;z5QxmWESp=!(d!{w3KmUfeF)=-(I&T7oN<^ySErv4!&J^xF-%zzZ--H zuTL*sww;fea&8{RU7ek`AGiM%dpO&WI<+ys@sH2F&mgm7*0WT@_LRE(cln?&_a%#! zUDT@p5ynIfyYn$(2(n%So*cM*c+B8Dnfxd{aLycmboIQ}PztH`3LVi1%4K$uLY%DO7Ua(%Biywnz7XCwFzHn zaEj5!Hn@oO1`B3(eV>7Gv?~_(j4@cbC;ioqW6L#Ny8w6j(sw-s7 zrq2n-=gd7VO8#X>#9OaMWm6_7OiBnG@P>;DxdnP%ojG7MykoVL7>WulD{E`~;cqNF zxIBjv{ER1AKPeL*7p3Vc_6X?7XKh}sbJ-yApXD+0GdGV$fW(_$2xG~)hZjR?p&_^{ z@)x}2sEKO()Eg1UOF13Kn_@IyaZNI_@*r1)qWmudo6-^Ws)@GNz@$C_<9%Qm?|AskGC;(zxm8$?X-spkrfk2q_(G-(^0Vx@yyqby+dIr z;eQLJ4>7s-WeepepTtzP7P!shc>6-iXR7HY1leK9$(o-ag70~BaMAnXjU9Vo?^pry zHoDT0ogFun5&+uCM=cJR{9u7oahpCP!650oB>=M)G)866|w|EBmkr{%jw~OOO2XzuwJs z)$r!2HA&k_StCOReOVQCqwu){xW=1iI^m2c*WvImohb|^M1h&A7RR()4mTzxWvWeJ zrtcvDFg!)`C*`bpjZ5cY^y8{;KObq*EYaNWd`Pg>HfH=PN7n>Q1OiL%bo;l1^ORPk zrGbF`q#Q?Ug4_rY$&6BN6ab~Tgu08CnnE#%`@2VFx3N1&e&=anLW&08IGkjDT3mVnwihEa{(L26wGAr~uMb+_vtIcr zJ0hz1iz>U7vWOxDt=<~M;axhBxGNY~{67FELD;@FHr2Eus_MdpKDHAn5$Tk6YK*`J zPz9MhJMb)KA%v6;trG|kH06V7$W9T^F*>Ycg^UbF27%pXKlrZUCgd^|vM%`MxTiVm zDBJDEIj^doUEg$NOhu|QP%9n9TkV~3f7V@pv^x6A-LUEMt-6dsy9jpO^e4hk_S>U& zk<(lZH0Kb4m`KSr7hsGWjXBpcB`w6Bk=QM7-qt5$k_v=m3aA+crc8bJagXTVp7yDd zcc!E>%`?rTrDSTTVB_Qz4lSt(2muZMF=GQCI~x9{xdmpX_4*^Y{n6L{^`E-=tFJx& 
zf1jmykFWk#`&a(l|DF$v zuNf0nfWtkAituy9aAMc1s)&eG0AN5QWT0%u#D{-E#Df;I%9pNsKoN=X@NNJAE5*zh zfS6TP%nXf*)wIs@KIiNgF4e}!krNUj%PNY{fn&fbs$d4BfJnxkOlsu7U_!uPXj0XX z$v*d8{VTk}EBv@Y5eyk@7IOsb^7s?C5SyOL)(m&Mofx1G)0oM3i*;LTP1CHZj~+cT z)02}M`*F}}m5Y>$&bh#I$)FOOrfHf`?e4AH`T{4()|& zEr@t%sQXDzDne!=aDchWJXbVDv*>*(2>@%YspKi;>&*_Zs0^xFO`UfDpa5wuZQCXp zp>}T6DpRrboUjh{r`}#I^}OrWyP^BAoIGn@o8!`jnem~(8;1-jLsCPBKzi65{aEZD zU*Q$LlsH_J{HUXbk73RU5rJ^drCS{}`qg*tfA4?(zyG)XXMg&?*88^U0`@&30-$qV zHBHk*;LS`8gAXLS+ia>z%Ulc^QYMna?M5jo%3xaOqIS@%2h*l$R;v{NjN@FZvLOJF zhpq{3wNTaVc3VE~$fp26j&sh))GhjPJ04E02j%602u2s@7l9fxOLK8vVr-#T$H6%V zh-sgVaJgRjak{>^lsPZEzVXdG&qi2lO;h&HEkkc+9I0(wk$oS!)-8}~<Rz{1RwzP-ZK)YZ8qEJebY4F`wk%2L*s!N5p%^-1zYa}w07Qa zzE3yaR^Qk>kP0v$`a=*04uUwJhEF~h2v85d{DTU=s%mQgX^VpApcs~B_477`d4G9T zZ{2>&=~yNT!7rC>-}kksloCQf3{ol>lq_auK$q87yJ-f;7n|#8*v+VK9WA|3-S5wb z_(A)#UF=}9WfMfiLmC_#AenvsS%juP`h-`9mRER%9}f(!X@|V+%klNu?+@?&AJ<Z7HIv)08frJ_ABA@ZLLORb?c}1sF@M@aZCbzM>|)4rjVPLq&;DLG~}Hzki87R{*YP5NbzM8lBgVc#zo%xt2En`Tp0DFjU3 zIdouJ4LEx84oYf$kPb)2h-jr+sge}C4uUh&nna5#0y4m%u`&SwDIzdcEwPR0*vtaZ zwA;`7u{&OpBcnLv5gXpm+m?MbMYM4kz4wd75-a4Xthysyz|pk4;-PKyj(h$4^W&S> zztSz2-SGXR)wj0k&369~PD0w8UA5g&7a=hNn9AYZLTF|}1O{SejNrW&Km`(DbPo2r zCRH=ZTuT9LyexVe^-$&Z_bc+lZ>hRNvIS&t-hj;jB;)3p4i{wP_2c|qG(bfl6qNt$ zr>R5qg3Pl)N%AY*KXv=9 z0GZ6p1ggQ_hV%>szyYC=3WBOuRVQZjDh=lqUf~s990*KkKq%<^yuIo3KIPSF)imw( zc56sPv{q;s6d75)N=dg8_fNkvtaP#C#w=Xt)~&kAzXubSXhX_SiKs5QW~s)>+_ zR0Q@h4)c`D5Mw?%I${AIyqX=#1xnT)+$63_<`Bl|O2+-Y4oIdC^k=sxzwn`7Ud>Hu zkB*KOAyTPYKxMb3T)}YxbBcsme=PQnukZ?ATA&&sfPtWyDLMuwV$M?Ab}i$GN4MfM zf9LUU{5yZ?Klsys;=i)rUN(q1=kxRPNEI=5-NGt0UCcS3UtR8o{WJ|OhE#Ifw!yMi zF@oUPffW=&=a>{Vr-{(T7`vu7IY>R&LG1?!5se%>W<~VOwPaH*r6@oM0f0)XXS?(0 zgPGM_0F{|*(T2mK?`yJyKm-_g@BLzRh(6Ax0z2=h)_Q18MudQxrK00{NFa62dtXao z<~}wYT0D3YdPRS+Q02?(O|o_!1rGmpc_%uUnu{o>G|Wj`k~YeIBLH5F6` zrlzP`=QN8`p?=Y7b*z%+aT|7j!%qIVHXZC|(25T1&^yw|u>5v(co}b2Yez@PR@80ofE>f8EJNdQx*6+J9UqBksK$uC$ffrIH#$Ej{7D^~#j>9H$fv(_>(<%CWs(kIx7A#7tL~gs?reOr<0MSA$R!G&w2!z8tt0P3L 
zMKff_s7h&?slp)~?#z5_4)UHNVu}h-R4OVRqPzyoU{!O@W+t^-mEc%a2>`UVjTtf_ zW>xh;InDs&I*SXP#+0|ts&~L#zcBpU#7E~S(ho3?EaYkMihVz8Ww z)My@xctd9xfHAt9ijj=NJb+Y{w(Fx~16K{CR%9YmPfSe{$rZHLJ7Q#0=l!kYBMx3w z1S$GpxsJn7=46DBPa%fVvf8}J&2zo zG7*Rv0ivT^O9-KjU9GjJFmLx9{Ovn8uXo#d%Fek->M%_Z8bT*l+b)h%QdQ^t_4W0w z8@HpPdY*zo+M~A+@il(5rU(6a5%7o0y&mN&I{(hh-;lG-uuM1e*N^b>jLfE1wf=Y{e|>h<1MtbA_wUQ&;Da`lfe=sK4 z1#a%$zJtXxl9>f|bDl(G&eL`pj45_aFHQ-kn#XxUQIAwg*-XP!^1AOr+muquxUX74 zxNI(uV<%v2hKSyIGgFYJX_leMQpah^ph;^os^*CqiPy z6cMqS%ySkoa_pQ#11&=2`gQ>T`~9c_B6z5WQ-s55K~;0kP1E$T@l8aAqJ@Zzj2zco zfVk_I0JPbjZ}z)!91*c?jQWkd0Wkld%PK&m!wTg(=nx~nFoTt^Ib0Mf|O6jmFnkaj>SS)IRl+q07 z00|)F0sv^R-|u6LL=-~Pwe2_zQtN85E{lTKGeLr3990r|^4^^%A~c-BJ$|bBl`uN zr|IJCJO)Rq&bd;xns!}BT8lx`G#%5rj{$P7b>ZA*@w>v&Ywr37CIvVsFaZ$OY62e@ zTYSDaLM7>*#$p zIp1CO*4S$1T__g9q8^8?3{T_sJ8zua+qm#M-unwzw|-Z+{}=0P|JA1_zc|l(X#)T_ z=RSVDB2$Qnxbi_v6@U-L9|u)=L%6Zd<0aG@h%GviR5t^w@W}7Gxax*uC?d0i%K185(FhuVhSFKEaxwJ@%YT}*@Xv?06;{G6*B`h1|+~) zN&phTYSyqvglxtthvJ!6@P>9s?`Hx4Q$Yg{_`KqjAGPX&Ps(8qKR=OB_|3R+^Jd98 z4^v6ARypJ@G4t+v6P?fFY$}3;M9wjL|3yEKf7sv(iil|ZaYYrPo5`Ylys{XW9+Wo; z39y}}$ex1BX2MoX0I>DmR~0a=Qosxm2-N@>6uqedG7^K zKps`C7V%9|yGQe4bu@)W1ymIgnV^Yozu8AzR%J`^Oh&sg&5YDWy9%Z)BNOszYqMK20;IsZ}%E@Ao0bZqX;H zHtwrPqACb|*M+7T_fv3~bGFXbizTR??KclEo{8mRWM%~B0PCCpQ5jWLO_S7WpsL2S zTrAezf~t~1*L7m*h)@I&LyV66p?MSof^$vNq*{|!Y63{9Q^KN$;O=VCw?;MX9oFsT z*>ZgY4W=m<9TkX~RUu_fDY+11`+ z3M@~;66L-8=J(oPI==T6E!3p#m~$r_jrO6NsvHfMR3`#q!J?={MrLBKZf{=U6+S;4 zuCo!5h^j~kfrD37Rj6s?wnxnR^l7uacXsjQZ~ex1|ApWEb5kxyM<+3~d7PW3A@8=^ z-4E}-x8LuHAjVL|OiBo$<}@dPCUEqsibj36RHV&rG#g@UJ^NZ!rL;}ow$Xb(=h@7L zaaW7_7??c~fl<>mecQEdOGMQ)kJIXCeR6bk`s`VZvFo~Nnl8?-rfF*1mYKWtVjRcA zZ4Lm$*dFZNZm&%Q00@|f7)e!gtxwO+Rdu;sMjr@(*&Q;9*mTO-3@UN0g&V9^s}*R2 z)RHS~#(j}WM9hrNA)|9H&1D`F5w$TenyL`F#s>f}kO?PM_2e2KXcm)VDZ7ReHaC5k zRx2`srUhg&Kmw}>`J%o3mE~L0c;;8j zBiEe|XSsAYZX7%3uCK3at={`LUw<7ylSweFwPvY}^IAm>PS!^@3@K0T(XtM8E_8Y8 zZO@5n0>r|oh8_tK0R;dE{XhAXetVVJ^a`)=k^>RVo$%mLjT9 
z)zH*P%}hj5tqtB06EaAxX6V^D=bVeukmp$viaJBgMQ{Y9{djHTrp$wkBdQ9Tf+OPf zB7U3zRBEc_P-!>Mb2351wuy(gzZp#9Fi#`9s5J)#QZQ8_8Z{Gf0@SmcKWpGT4qMI7^5I4qnQC3kqaTP)?%*hy7BSsykD%A zM6g^gpIz=l2#dvHwOBoyo(AVY4UhnFE;%~#F?b0%=UfXh>~?zS60}$>Bu^#f$)^2& z?{D5a+g*z!uvlwdG_ChgwXo4tvVt~U+mbIiqfy(n4RB;%=j0qNnsz%5P^9zG#ZdOc zRA#N>ncKE?#8aA;P|!kf`-|)A>+AJ$wQRceay6&XSjppEx2!6|v^zdNiG%EukhE(d5vY9Tr2Bgm9m|3djI+d!gy?#qofr;p2^@(|&Q%ZN%H^$(V9)R zyImGN*+08)X{*+q| zaXCEOQuu1S@DNnzD~3HU*Qs4N!@lGj*N;mnhtmWX6`;b-z``r#x>tCG&l3mP8Tcfh zhP-2DRRh34G^zOz7&JCZx4!#yw`FE#b` zV6r^9B_e8;YiVPXa^COvi@xvL7#xY_RH`D8*_1|L`0(+=&2CH1Wl3G%`w-@7Qq|qC z>)RgDjpLkizIXdp*L9`jqvgsum-9?aZQIUyN-1r&+jc=!#G$f}7s>Y5mz(aV>Qv$) z975dy@Y@mQ|4o{WCP>uSykJ|HiT2m>uU){KpqucV{54Y`4x9c5A8OPUO zzp>x%-+ue;uImm5tL=8@7>kOMN|tdP$DCr*9j(?ljqmREgL77cQ8#xd_u4tBHVB3+ zhwd_HW`e)1Ymmk@}(@oF1rFlD=Rg1TpJg3$7L?u zL!-7&`D3LWgq*YL$DMCB7Z~JLcmNV zB~^7q#EgjEv#RDYXY66lqB#&dq%@AU8$)ctqou)0YMRC|*IE^*0+uu}vzZy8fjRGt zxn^iN)>^^9)cU^bx^BDOj^o&wF9*#f(UfNjv6`~;sfYq0`zl&%nR6PZ+&6x_-mjX5 zOYNcqH4|%yXEPu|#{f{ImMn-^O2KjBjvaBWHP4f(I_E-gX4V7`!EqaOm=TmZZ%L)( z=-SXUhLCGbQd=KDbidyoAKk8{BuUsXu>(8^h$ADRn)6OXIB3(gb(><_T%12WS$1ZY zQX2MSckB3iyNNy!xoMtDx9&`qr?ykS_ z;pJvbP`5i-)K>4*ai>+xs~J=k79h`R;>a<3)oLJ!L`(*ZW>sR>9{Fji^SsC6T=irS zu`0lel(kV7W?~>SGetmfMC=G48EQpi03fzn>EnhE|Co;Un}QajAGV?Eri*1R+J^!e z#)robujV&?2D&gwVT$*^`mbk0A^-pZ zFa&T&1jP#)u@Y7_RYR>F)Deq?!eCy=0|09QEZGZ}M!TvyS_U}2NB8boO8WRQ)qMhD zrOp7fR5SE2w}_>FDo?D=Pz^;<85opIz^bIE4xKY515^{z>OmN&+GoNOXHW(9j7T+C z5o30sMg*Vhc*X+{FdbSil4V!_Q9NcOf`x$(?ZN9fKYu*CAMpqPYauMvYg5%giA7Ku z*a2AAX~&K`2@q#Ae)&%^Z>! z!={i5Wn}CZ&2mYrfi~BNr~}X^3!6Aln%(@VcG;Li+m&o25S`DE0V)&nbL^@S0f^8! 
zKu}Yy$_AcQzZk!$L6uoZ0nCdy)0rhT=(B#Q{PgqYDK0TVHCK^|jkKbwcq>+3YP+7t zGq*8vMl}>7LV%UH$1|6J_)^>TqCBR)0VP+GPl-w}?|_x-B}?SogaqAWJ(hNfiOYi zymL(jH=`yieRW6yy^{*tVV;w?I&}R>zuC7w`epn0@nyg6hGFN-ub1n~akE}5%b3%c zb>Tu6xYQK_vzyX5l=((~8~|;YCHT^}T}DOcTQ5^Oj>`yafabkH9VG8_9&2ve_U6r- zK88)1`)T&;KS{R&h8^(@hC|xV5Q+fUcH)y0)t!WsiaIc)5zlH^*msagjXD=GPa)X!u62 zyP@hxx9h{rS9|xt{=uVkL;3CZck@;Fs=$raS(E4OWxT1Y0GCe<;mk$XxrKtOhkz52+!lU$XsAR;)f+M+Tr6C|b=Lvy(5EpMF+6SZVg^%!&h(VhlYH zuuTZ+W(L4Yg`J{+0s|EOa(!Q)6%XeUHL7w&2Nr{97Acg2#h2QyW+rr=6kTQ&D<-jN zr|F9Qu|ZZCBw^@prNn_w=v_^Ee>lzD^*z zzqV*UmXc-0gNpiLT53_b^5kF4zw+UGKNKQ}h?VL=!gJ@(>Q{H&z5HH(^t<@N@0^e zEPBJ19GcEKp(>4&LuuP6X#he3D!fkt0C_F3Y|Vvi zR7&f3);dUW9DE?r+OSgcpm7zNlmnQ7)D}5&-R_uJwGO_E&7yHk3mkNs>+B12x}qCL?TsTG zg3K8_i?jXZB{Y87uBlc*7>1!~x)xkkrCge2Pfbu&9cLR;%^84{H8ft5GJC0J00P$b z?VKtDmq~+PW`wNPH_=HR8K?c=*GF|ys*uv`T1xZY!wn(mVz|{WrfC9Z6>Y(7AuqH; zO0Wtw&#jXf*TeZ)z4GH5wolV-x8iR9g9X{nZXDYs~iddNXd}ZIo~=R5L`x^iMb6~zERi4a_@N?8)~XTl>+i`7}7ID{scOky5CZ>)Ul}QkxRmgzOFrvsO)xtrY>rlT`cF{bz zH@;#nrl)6hyD_T545^=x$Rrbu-i%R$CvG&Z%ytHVNOs6~H2{a^sk8o5$7Aj|Pl*wI zm0mwGe9)Ktl+5$fh(&R%M}UYMsE8DQa1o31ukfxrq_ zNBs-&RHG}moXrB-|KyiW;*fH5&+6F?Shyzc@{A;mG7Y=S>njAh*Db#O&2OBYJsfqj z+ijYgC(GmGRUdZEsfspjV*(5GzFma2yO=h!s*U5x`sNCK-}lLbsUSkvb&Bxe{rgjz z7ptX!?WVD1f9zx0jk$=1khQXdm5U|e1mzs8bA$uePEvkX(<~z)ei+`3m->h>fqccHI5UR347cCgUaJ;8K?90$0EE zr2T4J&xq*Szc%Rd*BrXla$c-E)oh||7gp;z0bT8k1zi{7LMj5I0n&cY+b!iu8{b)c zIs5{ViG`#FNl;kSJ9g|)<;&%#pAnC*Ai!wik*#_W1B6r&q2cPen#&^EyIOPfjsQ}c#+t`@2&qg`2FW#8*xw-OGW~c3r#6zDdq-}J?52=d2SZX zej2OQ8+UHGQ3Pz&^>5yO-2oF(*^iBJXhKogZ8t^pdc9uwqYAb!GYnRSp*ia3l*l{r zG*>NX=7`ygG@7nguG_1cEOLM+bN|ue>k&%T02&!Q5|FAPR$x~BT<1?O?h@i64v*Zu zN4NjL-TCa{sU085@hXI3E7y9Y%X!#hbq+fYBB>XaqWg$v&eE_v>{)tl}4S zyx^}pw}kP5bmNQe6TWQz_cdP1Ltb#Cs!(af$ zi1EpifXZ0pO|QrOveKBA?B-sPVXBZTg&+Yjw&0x?zRpuB&|`zBGl=hEYt1~2W6!E8 zrQ}&eWX_4b^UIjnpeC;52?ZqsWY)N9acpy491%%@QpRzQj=6Q3Wmi)vS`Ix&9f$#ttvLcfDP?EX6Lzr+j?65xhSr5Bq&1FoHGD;wcuIJYEF}wbFK{$)KbY(Dl!?-x;j(MY&BAuQ=L*|cZAeG9#L~b~v-n2rcBkk2p~H4>)T=_}RTH9F 
zk&=VPT~lI4=Q`ISY6&UT5T7l`iBeTp(a%ig4()@{l-d(7v&o!9K&=ta*d$L*=TG&E zS!Xk5V23~;Dg|m)RcploDY(%A0CWWiHIbt73;AhwUa0_Sg_+F61wdpa1{P2O0|697 zBxJ+|uhKMc;+)W>so9=c$g0O0H7vVdv@K=z-?qw`ES02}&7TrXz64lZ9h@!7``BLD;gnp2`<8o-c|At`}t*^HJ-+@_Lshd;cL$8J1n; zrdq{3v?kg-HL~aVKNkPu~LVKQKzO{W8p8AT{bJToj^A;JAR0rPfdO(qq{3WvfC0!N*q%$Vs@$3vkFh$K_kZlqRedUj2kRcrdx@i4~B zE=Mm~e910l1u`NG?HBTVLUCi$rP!ASfKr&P)><56{9?xQMe#_@vCIsX8^{67sF_oj z6N$alc0G@$u_hDN*kN>-Hg)^d_S>EogVvYYt{3HLaK@}|p3zLA1Fk$xVcIi<7vfor z*NoFi)XP+U@?`zd_`V(mD4#?XjWy?SSRmhY5n{}>LN1&wN}4l5&KpTro9o3uVKc|x`4Ft_s@L^KA=xnm z4SgbDfzGVYc_@QNbs$D*%EhL6-S+irC^xzV`!-1s(?aiYp zPqZst(qT#y!i+#hIA&*>MfQ@-dq?bw*qmDtPXHcgmAg%!s_dn@E{Zo&;95gfo8)ZV zWe5GDby-%GY|3rZKAYzUo3px`SCat1*fz0m9kiCFns;TB?_r;gZ zFQ{Eet$B0qx92Mel{;cTWpH!)Qu*m;#nVF0HB(K}xZGMHprPPyXy+NxBsv81mO?HL zP&}i4x%R7{6%Vva1*J0fHMPTaC78|7oX+D*Z5M^5VRdy3Ih&cr&io=$5V>G;PG(Kj z(pabcd6^#U_TQ479p8$te}$`;-B{1B+Bvm-|H0M9vh>P+Ul3AY3SFzg_d6xAT}tLj zh*HiAVw}u3!6J;gL}bo&fBDSXHjo1c!ErENgdPpK?}m~}DaYpaDOEL&j+~QPZ8!Lm z(feekZ5PR{Leru@9WDoicCoZE?H)g&wvW-PRV-x~cW+2n%-?8_2G(s&Wg6X>7sW1z z>#*uExGE)%ut*ZTFP>@H9!YRutBA9>x}Wka6#cgrw{A-_Zim`9bRGb}v;%I|M;F`c zVc2)*)~;coB>6gLPeoH%cdaAlIr*x^Ou<^Y0vT)`CJ!@Z##)!ZxVXM(TLdd}8y!L@ z`JG=c8i_SNX9tAkHa`UC|i|6q;m~6a;aU{^f zBv2cf-CR)0OKsQlc$hjNmt9Z|0RO^!e_8rYz=r>sRD4j61g(!MFA3#wzp9+nlefJlx?P zE*?`B-DO0~(xsS04IGGf! 
zO;#Q&xyTDnky9#Zv-M}&akCe!m9ZnY1g9yfO{H<_LYb#=KcHFcJJ&BI20>g*<2V&I zh>p#(ft6`8F;p%o*T{h!*GfYvrs@e%Ypu@nvgw0kGXqgNG%nsvZpsvV_B3m*W8$o) z=2y$MZ`c6015IjdMvAdvK+F}!#I-sX%u@m-sVs(SnXs&*Cw7za^>hM`IBcdp8njAb zGaoxAb1kcY0>O;Ol53JFX;ES8hCxL^q_AM$b5-8vKAH2xRX`?gf(wMj#$mUcFVNUir#%~;H3@pT5r zzHc*oaI_fGb{eu(>pTXox%gr&x@lia-1E(&rfU}*{5*C|(_qX1mg<6hJL^2p5*Sot zjKlfWs_6jncI-X)u3wFpn?`WFx?CT}2MUEXC=`(eAo-uVG2*)e6LtV=cu21UH6Qv4(x^r#%4Vrk$e~YV>;Z zseZA{F#A6HmcW`kJ4w}%#Kr`L)Yai=n*l&#sLr#XzUVmxJ(6-AnlyQ{(5w&)6eR5s zmD$uc;#*Vnb@1cJ65?4F=WA074PItHvbCKOo28hhb{Y{eb2EpwLUhy1ac&CY2tAt` z7$SlyvVnYDKEVIx|L}i6000PR2>6%({lD}pzw*n+-}s%sZ+n$v+^iQ|c6T29;O_k& z_G4DxZPr~Z!_mc~TaWIquFer9we1v}w;p_Weh=kJir~EiX?4D6JN) z*{z2k-TUs}yLWc_rLybpw98;u3+HLN_vnXrzxUn#Vi#h!Kj}cOZk^qK?ZJnutH-aO zaE|WV=hN37d~kgBw7)nHptNdRD7PLyeC@m6yLz8{zzbMZ?J6-GA5F@4Y9&7bmNi+O8Mnx%cn~;ll?%o5t&L^RCSS=hvQ|z4_6D+mAnd zA)c?FzJKS@gOjuOEzxhh_w6^1+tu!zzWZqVK zAFLU3*M>Q|Ri);1*F4^=2V43^qr0y2?M-RVhWE=dhmXjN|ZhZCa_rLvj;`JDwO^d3nbJTVc@%U_7ZR=_`g`ZyY<7;=l_W<0667bG>X`k+ouJUo$TNeoE=-K$% zhkKfHyX<+{t&~;|wu?tw8Rl@bd_3*^KK;B~{emvP)!n*t`}INeYMAWw!;|xTb4Ol} zH{s~*pZgNw_4x6f^Cv;aDHz&(@9gO}-+AZe>BE=8FXq)U$?(S)!$*JZmrt%X zJE!BK6^L&?dGw|7)6a_Mt#`k_eEh6`b_%Yhl_*dBhY$bI`FZ_~?>;W`aN}mH^X(@; zyz|lb!)7-d?~m6ngZ$5laE${BUmD~312M=FvyVi>+!_nEJ z+mG(AF3%NYZrdG)pLzF#*H8D$?cQn{{TN}t+Ftz&B>wdArqtz|-@y5D{JTH6`kUWQ zn~ii1yM^Cn|K!>|ylAhcoekJe)E=If_fCh8P9rjgD2~S`r}?92bb3uy=9V&4cy!Ue_jEYDfW{qv zo^JVapeh;E8h_)_w!6mTiKKS0 z7>;dxwEfx}&*~fRzb_>Xs~BLu`}o~A-v3^Iy%&j_8=vpK_vqo{pLqK2;(X&+et_W! 
z&+hEc^A8{9)7^k|i&9d#n~wH>_gDOGNDktvvoVE>5kEZV^I;gq{>Jg;Zu{Cry)mSd z@oJ;T-~Ps*xV`F6e*M>OKY0533+@xXZ0p9I_rLqI*H5?aKe*NkM*(Gb^637X@15Q~ zegCDl>v=pkA3f6OcQ=-7)4h-0Iez~)zLK&r|0=kb+O8Mn`TFAzZ#=nJoj+YV`GvP{ z{;m6`uRVNt@7)jXK7H^~+x5ac>*as(?e4Gt^}lob@ssuCQy>`It_J`0&-wo6_1(3E zuvxXf?C(8%|Fy^WDV2fZfX#TldF#Eu^9L^8y)`B`CNqyc>D8!Dukm!#83`gXm9r~- zaMnGW5^7z>)-c|`qK}@{aoR)ej+S>q*IW+ygC}sYjRJksc<=mvc273VCXrQLb{_0j zd~N(&e>b09xfme2CToATmygcI?IxpY-)#3=IlEe%?fq_+;Ap*4^jF{gZu#gmob8;N zbx{%I@g@H7v~K5F3iVAR*gW0S<4u`U_3V!pK_Fa>_5QQ+_?*PJkJ1Hs_fdItuGjk* zoLe*`#?LnHqYInWuUcNVoU=ZBDv!=;O*tYp9S#Z4Hgjqn+gL zYP$ZxN9Dm;7u){M4G!K;uk`zm`EsaeyzU#N@Zp8~=-hHGk!;mA72L((o^I)S^z1ac z+WGY4yxc#hCtHuC%eIK%qpRlKGrFABv$c^*jpuuKbRpvij^gt8@ekh*r`y%}Y^NJ% z82YYVuEO)>N3ADpf}+V*KUI=TJe{D%zl?PtR`pIzMDp1!ty|D)A6 z#u)BA`N3Q7{GB(SJ=*oWT^t+vH@^4lcOE}ExxUo8Pia^@d-D1_-?{glU%vglA84J2 zwuw5w{`iBtAH3UKZ%ldWV%R^sefq&y@Bi@5#Z!P(f&-whJ^pmRSX@0#o!{M9Ih$`k z_~DHY-|@R`F&Wl(unz0X2X`O6zq)>^EK|EEe)083FScL3KI%i+9i2bE^XUHZ)rE`p zyTAGRUw$-xL@ytCdu19szT8LWQg(Ze_2|EQl5b_K&%0v64epMj=zJsMb`F7y&&b004*pRyZYX z6Bh?H7WRp1f#TSlqY*F^j1?Ks2nAKOf=MI%sXPTT5fM3osOk+>c|sm%zw{Y=HZm|m zV=xmiiIqMR9xM#51%Ut2AO7b5>X#o~?Wb8%FRsA$3mMNB#q+0r@@L;a+x_j2o^E^) zwtzycS!In~Dk4CNB-KK64q+Vkol`^vAk|RK0~xtQZrZ#4OJ$c#2%(AK8$~o7xmMCZ zj775ZLD)1X)j#|bH#??(|2vO@qQMNPB2laZWb%9R`#rBdbv(`xoiEi4h=>3HK|~5` z`ESDU#9x2f5V z`gPX%YID}Lu}_d3 z2rF3S8o3RXu$D#R*}J6v8$a>0KYjF5|Ifer??6Wv*OO6Mw$0Klp=Jy;r?Q_DIoGud zFaeb H&k#l>>DY@4R@ahiswXU}flyQ5luKOt}9v>Z5q*?23 z*aJWV95@Wa(03i9bIujXrPd48Q0r3tTd#d1WBkFx_qWqUD%)JRhsl@<|L*PA|LDK= z$AA8R{MVm0v9qO1W7QgCXua%oxDfQv-BC9=am;95#F7aD1|`NkCGwuJpEqIu3@j^m zxpvysm%=YVIP&6u`bU29zxP+alL}jwWq=ur<>5=^r=Jzib!Uj!RzYi3L=bwVf9)5( z`j^j!ADlmBbuMXTR1$ASY&iXRz6XOvAuj}>Qg`}6!D)QsC)!eHdAYxL1mB^rN(E8P zA^?!>((rH^)~*W+rfrp{_ucv9y_-+Xh`#}?hwho_YjEpPnd@Bfp3 z?l1fsUpu*XYjKo{^nD-9rkb`gZSnx3jE=?FAo!+8t)^qn%nSf?PDJFr2Y_Y2V4p{H zI9>;Tr){=Z=hLqq{n68({4XyU@aYF(cc#Wdfk`|F09gKf{mY<;wFwnhUF-E<-Tm9| zo<7EIO~M$UjP>Z|jX(GY{-uw8?f*-=%TeLF?t0gn*&ubhAE*7qZS!}&^X;i!-@F;$ 
zio4%%f8;yA}RZ>>v)nLcbHmH$TYZq+czaB;xrEp zTwnex(|`2)??za_aJiZHaC9fq4kQ;;3w^t*38TlV4sd(EAGL@hijDvsU_|stxfb6x z6)ZH70C!i{I_K7f1f>+gwga#1noi;E-~TUu@b~^d=Z~K-pq3nC4AC*5Q-vxBK6gL_ zonp>8h1hpTAwe3R0U<^|X|1(J7aQjz`==%4abo5Wf)Z*qNjV@-4$!O%K6Q5GPHS~9Uv4DWFtf-L8|qj6iFs3j0B*B#bF^5 zf|h;f!d$D>3MM{=R7wa9W~swms4iT$AGdG++#e_=Pe1(i8@!l?VXYHnH2`Zo00dPc z5=Wz^Pz4bSGm-O#M5GQ@$FDWFk2mw^71A^hS~94KeD%(2_a8hMOIjWub&I~%Dy6Dw z-sNE$V;kBKZO)pjg6!r|y>AyiA_{_nxi(f2GwVZ_<~f(5jMb{-!bKwx0F{)L2>Q57 zqXWd0J0gE{^PLADo}3)5+ittx&0uEetF8NP|LkJ9Tz+tV>KBWX#i~s6ejMNa`Y+si z?XCa#ul~FD_oo)SZ`@h$uCB(Xn_s*8U;60H-|H|i!3up{81SNGyeYa{KrZ{`*`^=n z!ZrsaW&}m4SOm-=c|{;7&eeUX;JlI-0|sP5v|=T@YR|ckI`Vmrr2xj_`piT)Fz)@h zU>6iYssVuWLeMYkK= z(pRK{K+McoE0yesDjNw3a`KFbO;yrbQJ|KWV?6|j>d*f6UwQC<{XhMwfBjGW`9J@^ zF+vaQ006)N02lxQA{u}p7y>XYHC5_M=qd==o~;lCqkhDJAUR271prihB}g6&qIzLB zyD9-&Bo8*(v|k16JDrsPNfgI2E9v7%Lg zVC)kMBAX#Phd_#!flOVmpFSS7vd8MY9ThD_=Vj}kw^nCBM{mJ&&iY z$bi67K+K>TLj@{n^zc&K^`bmx2!LjyreMVpIPl6EUXX`5W^pbXNcj`L^mE_*8eL}x zW$v5)bbHwyA)AlLuBdOjnRrZ_akDAIQ06&0H>Co#y#D5!-~aBn+dhuth>aJ-j~?7l z+leYZTjIMPe)s+DS+i{4IJvQI+q2E(c1|b0%{gZ>0?8^jj&C$t-+y@j?rP-OblS>1ks5%8a50T)=A9$4I_I&L;Jio3`}tiYZk#Y=u|8+0=0^GXwX2G39)| z*{Y*$8n1SxQGHs=2bWJIr^s5tOVX+6|J?l zZEFB|E~>ih7uR+2-m~`zIMun7swIh@EOShVHzI*HjT(A|05{$z7}4nx;> zqoy{*uIs?l9%XJhj_od`Db3M0kjkp<3&ZX8(FcYlhqZBc2$ER8n_rNII>-r{LU7Ifvc^R60nrxg35tNx$fw8Kdj6694Fh)mASV~F7*s599?9fl+*gHpvbIVli`1p9aUe-CyC2z*D6p6}p zo&sVL-PUsdz3+eh&g*ZiZv>!3To$@m@mIKcyvdKBT_jg*&0mMVadCZj+uZX@)mVoMXCY4!Dv1y$LRMQNm002ga4BS|xPxXst zoD^LO0TEf|FpC)gE>lQ^g)5R6B8p;mCaBK6pr6tVFni4i1uQ5u&`7im>UG9CNqFjQnTu6MMMQOW;2F9ycFwUHvbTa%GLY5a3lbG|WwhqA;ejYgkUeSB z74OqDmy%di(0?K0`J#CI1RlYBnS`QYgG)uoQ(iJrR&vPB)vDv!713ZrG4?>FP(WuZ z21w)(T_~boD!U9DUlcLT4pd`SGt%7Fqmk!>w-5u18bTF6OA3bIP{p*O4G84O%ymWc zr;bOQ^PbrAOj@hD5E_nqo1Qlw!AKcG%|ui~s7NG4B^!S+&!>u`qRpH`6$76UtyIjj z#g}_NzbH>6APRy2!cGgq%w{=+QomgD`Lp5)7IjVv;(c@!O`VB3#sn|7T`$T*>I0*9 z&cWnLH8n=3)t3q{#FML7(-@2i5$3D2t9Gj^59ZpgU1h0?4sPDNv%R`jBv9+4zdn1m 
z-Cl~N<2P>Orp%k&HGwUfZhcf!o=Ya=4?g+`gznwA{oeVL4UfwvqE>r9J?KEDG z``bq+Wadf{m6`B-GpS?oepW3YQf;UenVMu)IRMY9qZ$~%G-DQPoT^rpN`#1It|DXf zHBDoTF*p}h90dTItALZ(_F9z4?M2SEnGyxPAyF1PE1|9_Ms#0I6B0PyX-IbVIJ7cKm(MWJ|%3J z#Qf|G8&pq6c(Hz^0jA4!d5Mrv)@phm#|07>lBTs>snQ1?JIT~WlG<=&=3Uxe z+YS+@X)?3@Jf)HW01+MH>S*0HZOYr-{%X~)&UY73=DVv^vpNx^-sJ%Wc<-^5k5(uxC#SEQw13Rj^p}$MbuC9*;Ie0K!;(U;r>S z1w*Kx^LS>*4lE#wa~WJ7tL9AOR4xCG<^IiYzP?R0pN%-Bif+FZXE-rHAHAcV9T{jq zjd0xE9QJ!o>X9T>7eiRK^E_M00$_xqI?Q9O)jMa5h|c#Ofq~0pFi*)0h#UZTb_&u& z4@7MnDfljM9;SVs$22#wTP%ZB%u=f5nx&>P2^JINh6SqFDE0rx-kU-&M8s}ZGv^$QANI)G+`os&IC=J&u~w`# z*P3&T@&6BSWI}LI)4+j3Xxh8<4A^TEh8UR;R6DAn+DVMNMZ%K9Z2EXU4Ujh!@1E=+ zFhvqGcbLrpu&VAt07fALLjhAaHEJRd?J>r}d|DT6odAcqh7`OmV=DVJn8EqH zRJZ-^rk_?!gn_-gf;L%h_Cw23jA7LdFbDA928v8)v%yy^KRQ)k_m*M00B< z1VpO~V+~*?Bo+ZJ1xs92Hf3z5<1|f}xwHECo?f5-{QdL4eA6c4 zNAJ62*4O48QjaDk#Tl?P?CA!3FVfV_kc;;P88n1w8DMV;U9$liflo$f{ zShWWILXPK)8fRuVTmzgO%`}L)M}@(9^8x^lox#9Sl%Q*(0F8_Q60k~I5Flzyy?oiu zOK1{|?mTutP>Uu+EL2lxH*h_~$UzZW#3-Y)DVYPfNArN>$`WG3<0k!{#}l2SN^5J6 zfWUS*Hd?`*WBomk=R_^Aax@E}h0qLR?}PV1U(CFivJe=j*=ech#(}8^l;G(reLw%W zKYF&o(V~tZlPYV*yNuDUk@%4Sbf!k;AZ^}rw>}uccl5#jqX5kW%PwDW&2BY7+ct?a=E@DJ6uP#t*%j<2DZ-`F=ScPls(Dp5NRZPt!Ut z(=rdCY>5-c7{glS&f@0g#%fn>fvBIC0#1ZApq8+hO?~Ce6tO#mAq7W9VuxPke3~Cm z=a}+#zr%n-N~(T3O{zzCpXT)zxz=R`e_ZP)FYm`Rj+-sVFxA;ZZSy(ZeEsF_&wP@9 zPRa`iX?yd8tR`t;|E%q9(%5z}?l<2^>-}Nei=+4EDPbDioThV@2RffnR;F$#dP*vT zn;0uIf@5{7MKGzjhVGn-r62G9`J(298pj5MQ+H@0$(6f%2?$@VdHT6M=Up8^y?cmj zR1FxqV4J7lJvKIp8XM+Ltkjfxw6Df_{kY>Wqy*H(8@VYY^`xLk5Wd>y~@mS{ZRGSUf=(Z-+k}(@u)r*WN`zG!H`nYNjPHP|i6M(FWM&y4&qcK{&g6 zjVzM{!-By(bp>azzFpvgS9b-CN@Y+65j{n2V6$M{g^yR`Y)cfK*z z)=WdBJd|Qhmx`>5iniL;#sZw;et&zKkJbg&z8ywaTU%YG`S^Hf(Y9V->hZkF-eT>) zeETOGjeNd`K2t&qWCFnr%wk}G_=kG`2_OgH~-{&zrOw0?*P98KRs>U+fI$%SlH(hU<&w^m=`~C z{bSvO8m((sAtol`s@Z%hUyb+Ik2}tNX&aDW4yf+1K%b~i?1BDp@53+KIKUD+Fep;D z+%U^fF-tiAnCspz+j;%C<3u0Xh=qC}LuP@<$QU91;U3R9nM)OtnyE)1LkOrhxV-M) zg!}RKTkUWVna`(31o@bj2PpW7WYp3RvyT5lUh8aY)M|zO=x+&PzX?HoM)Ear@!v079c9Nt;SfBZqD; 
zWL&aThTgi1`ni#6rerRGBOujQO~om)fjfj00l?AJ#N0Th7=npDi=~#{q_--u%bg`2 z$_hX!id$WlrLH+=NGX@C_kMr=WVhLcv544mp4VO@LGArX-<&8vCU2@Z%^5AV8BY`*r1<+PA;2R^sq+pZfYb*R7#m!`}pN2=Qh9h?u)xL zK1{WrXXgOb77`z}&z?Q|)$jZoGb8el0vi~-G3gmtoA&t_>g-Ehy`Qyi;QQn0yJh<5 zxc%B`{I;gPjUA~AZD_NtQR(z1f!O%$4L>W7kA_PKjU3e?LT2~m?KE>-#NDn{o> z?AXWv9Dr1#13;FTlx0LQoEx);6!PF|XpK-=v-&07frwGS5qfZ@{*`zhPC;YGDBwWn z1T_EvWTdz10R<0$=l}=-kO08|fQknLY6w6yQUE|vUks9~5dc8ExD5~(I2xQ-pK9ow zkKTtyI}R19QOi9#U_%Cnq?Qx_P$A)u7zZI-JdVJai~+y^R&J?*0W@R)$mU=UfC3TW z^BKq7oSD$cB(Or&3Wk`->N$ds?3@^zg zCi(K77eE96Kotf603$RAfQHfC8RBScV&vo)Oq>B75D35lI{<)!Yt$cgJOBnnMCt7> zhrmDr0rrT9$OOh99TA;qp*jLo@P)(e^XGRV+<*KrbtR5yzxR2WqB9{AIJ%IT8G7dm zEj9q*KnN@nMTzLY$M1*L%%gZj4AC*UyP+X}L_)$D2?+XE{k(qU zyg)-?Msjcfa-P9+RQpl);aBCne(dWG>c9#)tL8QdiUA^1YW8D|Gjq?nnOo4oz8ynS zhdOljb!K zrEd8U>;=QF42#U?BtQS|ix(vioVO_-rEBwaliq#${Nc@m^fr@BGN+K}TuBk(Vjk$; z8Y0HT#;Ud73>yKg*4DN}=8@xFNba6ExFaZ7RW}&1Fvrs}fx+!2?>5`>c|M${TuKKh z!-kn1X*rz>A*O+e57RjWdgq9&=S|Ygd9ecf&Mpx2I)ZyR7PbS*2Dq>+3o@ zn|4R!aoE;cvlwMM^;ULU?QO?5ptj*Ogt%5c)v2olKr#qJTu~lA(Fvz&ZI|Qi2GTh8 z&Zc!ZF0Y2KeSiG()6KtYoA>gFxkUC|n_mrM03H=W+TJX*Yy8G376OoFP)X}>zm#bf zXlt;*8JH268~_v;0FWU70hmSvfB^0CXJ}wwnR(%$8CI021sppXBuB$9+dTce#!>Vn zERk6uK_I8aJ8?>W0?8ck~GM ztkIHMp`grt=^yBuk57+xn=#D`nJ%D$R)`!ywRJ(xA;t*Eh9uyDIK)Jb=d}jH$Yj=| z*d`Z6T59*m+ucoTt+iEHGLru*YfOe_e@#v9Ze-}CY*>}GE@rNIt@1BGb zg2LSDdFtxcS_^=>E^q1yH?*#GLveq%8v&a4H|sJ>+i!0|3Y^H93zO9I$CEx81F!OT zhQIW~-8Z*k2Q`fvu+!?|uNc@9b_qnZ1?r=97<~ zeSJ&qeD+Rp#oOJ3#f?pA!%IKyN+hC)CCqdG zz7q4|M~#!w(8zR*z5@poCC2Lhb3glsTKl!B%|ZtLD;p$6#a zPn}RxuK5%@+xk^MuOD|D0U-lt2;k_Z#KBxh|FGYKMDrTQT#1KoDW004bPMg{0picT z^NuaU!-v0G%p^TUtM~fx*51F}-<{5faV&zqEOi`$qxZEwd-g2n{Lv3TKCkB?=aPnX zoh>H>kKk4p2LvW1b)c=M)>{W-Y0V%NG83Ct4Q`Afr=}tzfpHuLt?QagG;6j_!M0^T zgkq~TU_)J6C8d2VomJHVh!B?60T3M=NgcWf5s^W3B_a_e;CduC8a=X zj5#GyH*iJQweE&AfU?5Mb5vQ#!ghGR+dkS!5WGQS@@U>`Z~+9QP!y87Af6dB`!-{d zpdr>6ImP{xCxKF*`|$L}Stvh@-C?TF_cygSEa6z!Q=d1R%?>vY+p~C!#`~M6kfV4& za6}9-p3l=5hsX1@xw|>7OXL)d7+|e!-0jcvYHsT)o9z&5t+@0JYb>^`D8ZDvm}{`H 
zs??XSUv<^y>Ls3*DHO`*WF0Nh5Xx{fZ1$URw>_N>$L?F(+pX4hefRl`4?g++umAQZ z+uhCXX?}HBLVWj|%SVr&{QZCWQ!k#Jlic2l&1_R6;JDk5@1Bkmui={(pdvibP?_0;MWOV+Nn1*?t zd+&jXnbF|#u`l|(uUz@#2t(wcDGp6^mscA%ViH>Ksk?jY>$=Ve_T=u)dk27zKl~xa z;-Of&EqCumU8mO{d9OIljP$KmR;)*`z<5j%l}h`S1qSKK$sTpLp+U*z~tQ z`r&*$?YB31H=fp+0w)@+3p< z=bTOB+9ohg<4EM2&8FABv>J$+IYt(dD!sR5x7(Fc%tXO~oQTXU5VzI|u%v8eZS95t zM7GayGl*M6;uuoo<2aeVafFT7oxUOfeT=~jUqURP`YeUXsz*4JCyC2>ke)!=Je*SL1 z&JzICS{>-z-9kxWqMUngVrE@bohXWX$r%uH&M75ZS7y#bK|~QvB?9m%77GB zliqtSLyUQvrq-Gww6$KEH&V(5%i5~DhY$c#p)k+0ipDa;7#C&$h%uUUFc;T#UEMvU z46He&?6|ZlA}MD@sJ+$R6|}7l5oNW@6IE0Jq4!>h#ylpW5JJuc0G3NHM4}fjUVQTE z)x0jPwQqdun=ynoO{I+9+c<2G*7A_n^G<^Krs|vQ?}CQB{MiSfAyc0Z~Z*{>XZ1vIP+nf|Aidh#yd}L+@M?_{$B_aa?g%E13 zDaNmV{p-^-oeqb!&R}?P4_7BMQ*(D!P(vUfQ{@n2AnR^smsigvOhJGEjOGs7dlwOP z7=|ImQfr;-O8w$Wj;k~x0)Sd;Hw7T10AQE@c{xaTM*vk7k*1z=7E>@)RRFmB#JZl> zrMdgX;7CLiAOr?ib2o5g3>4VhL~E_;RZKZ1Q<>J4%u8T1O~@Ruld2gaj%652a+OUY zrT_$UU!?1PzuS(fwbsw)n=+dA1jLRmcDLD`mZj>7NNxt^?uO(DhKLG=fzb`rLI_UW z2^W>;cefT;0F=#9FcFtH&j(#~S?X~*Qw-bv?q+)vW1LRsD83ms3IQ29xPlXriga0* z)A{z<)+zRRjesV?Ig4uyxwS?_$ix~{HRRN~&hzY!6oS@eSr4skxt&Dv0Q&!0W3RhC*m{%}p6!}gtnegDP7 z-~aj#o*kjO6pzWv#;OOgXXC|E*NQm&nfamj?59z)r0{UNeI>u1~E55)Bud& zh~z*9g#YG0^2)!)ACMlne50zDm?I*>kGTHYssVwkcNIY#IRxsp#z4KcTG!Vfe^Pt9yL);(Jf!XIpZUdqj{tw`m;c|e z&f>mfy4~$V2#R!GD=;0``L}=dmpb~3yQk;A7{K>G_`#dye7D`lF)R4IF5Xn{%kz!1^8v_)g0Kuj0{Gb6AO?WTLRQ@#PRHSSezP2+_snidz#Y9L%Q<2w+En_n z)2QP&U+=%AcRzzKeyZHw4Z)x;wF^<;aw9&GI++nVU#cL?!I?rt!e}jcHzp-?CHTD7 zzgMnY`L_;v+m&)u@PT#dC_TjT(Q^NP`#b;O=l+xbfUp3E!kB@iS8r<4b4;b=C!1ki z*X8vA6+|r{U0l~V<~px2U@D_nM?~p{30+yG!pPLWJnR%t$oN^V*vb-{0RO5yjA?HxU46(un~P zKp=#Gh$0el=)H$rhRskbOzW}@Lm5J?9RN;;V>4Z)DdSmNDEVfyWrsJ@5)lBv%zEzt zP}?%SUhc=!^WFYAM4N0*3Q%P_veguMXz8*1+;83e+(%FT9N*k-+s8ilFY3AnM$pPO zA}F@?!{ys|o4@#1^!TfBatyKAv7SKN{hKLDI{UgEcVC0pPxr4+r|ERw-)s&a{pQUu z?(@yB$v5%YO_0S`(jcu7fWb9BaplUDAD4)sPv>xx$r+R8TzR0gW1s%7Z}KmcPyU@} zn}gB1_Mrvi^*GJU7;|v#O2CYc1^}fLGgA=&$T8x6FRjn>yi7A(q6f%Mh=@qcfU3eA 
zoCw6soQdGm2>vB>5IGPbD0MMW=@klch#@mw3U@?=h)N16C3jzXHvJlrb4Pa%iHS%Ex-=vYjyZ*ZM53u=b)b}< zKfPO~`TTf5#9kM0B6DygLIh}Q?Y)*Elp#2vG8$5^%l&$G_q&)86p$lOotDR!uZd$w zsRIT;AU1SHN5H^=hy)rC05}sQ4kGRZakm?uKAq<|g;Yv;oDRp+$$I64Q6eG^!vIVK zNPsAY=GfK2+`aZb*G@y;=3?0I-#k9u-$K`iSFfw<-8(PFlx3+F$N}!}AM&u--RyJD ztuF2qkU|JV!5pwQj-+bSG#j>3##;N~c$}tbX*H!#Ho1jrtRaNi&e<{n)TSYa)>e1V zDPe{;PVI;nU^%Y!IgHz^+i0Wpp{-cwi z7tH~_bqWD@zhw1AOriE;tgC%BL=Sljq-HyA> zc)v{2D}cFUOv5nh)2Gk+`S2%y=4U?o^pDwq4Gj^&d^3z|Z)?|> zL(UnUz%`XTaFo`?MeNrYitR{&y;iqKV4DosV9*D7TbL!fRv zugl{yjd^r~`8;C^t%;>lX|BTbMhU8x+c)E1d{zD=J^e^)P0?UTRzFJAsH zPWJBeecJ3$&+Xx8-Wa?JgqTa&JbUqCnU)XU9MzNqwzYdi3IPa>uscL1$G`zZIy#mV z5V?zXYhXyFY_{7fdY-0~N=j)pODPfHvaTSS%9v6TXZQ6fa@gHG zd3}Gs)cJfkuDy-hjUx!amnkhl<9OBcH58Myt+Rhzx%=W-+%f7)t0&r+imMDgwWP`yB$oVN!xpV_42_& z+---=dhhG`bxZ5!y&w8FrsscQz5BDRPXixmpti#1{0LvresJtIyR&*k0UzI;?X>wb z>OcIpwcq^YpDqL6?Mg;n+p7hAhs&?MezjJ7%DcOSpV;zPqksbXxcF*6Y|2SgST10RL~h*cH9Ly8taL`2oWxSO^9>~3sg=i_;K zd~EyM+ugoAd(vw=pU-QnwFyu_CIU=3Y|6%T*4|8&!As6vWv#-@6j(*NyiLP1AmGIz z6d4eZh``kWftV1TA^{;f=MWKbl?r9^ zc#O=<0IuB_*xU^uk2wuxS(df0r)hpXOhmy9hGB!qr_%x5!^WWzBT+~xrN{fjJkR5} z{S)6P=&%`gXy@a@8Zv@d<`C0qSu+eVZ;S9|uOIzc`4_MJYu`PMoYUs+UH;zxYyb4G zEPNBN?R1*L&F3>QoB+w)5R3uAT{z;+6ETAMKAhj6RC;US1DBB47d@5#2=XJ-1Bkz) zPTAe?ZT<3(x(Z!DSV45)2qDzoSRuv?dKFz>`Qyv&?d^WMQ&lu~6Le5jcjpkT3xguK zo12>cQ&TOmz$cK+w7v<$y$XoHaefKk+dB#r64nyD?%cWKsJ;%$gv2CT_l* zz$+nvLsP2)Mv=oHu*SogA5N-kX9ck2%p45b7k!IexpL*dCyaD)n-%af4td?9AIsg{ z;iJRrt@ ztp$c$GUrSop3kSvX0s`S8>mAQb@!MHh`4)ijY2|1?V2;Bo4PHn$o|_CSaE98zq(udPNT1HHT;B2!09DS3?Gh2spSx!0%a2Y&MF`@ixp z_?v$BjeGjqO=!CkoU-9l=$?; z-R+~6&1Uz5kMCD$oXXczds;-o_-vcck8j@ecmF^0PkgZXlWq6(&YP}B4C)9-t%F~y z;I3S`@?$e~efACB-ia*$!nGzT@19@&K*~Gi*{{D0zx-y#+ELd@`eb*v*ong3-P2fT zZEaZ=bKH*OGOa|Eb0&w=;W(epL=+f@adVNNA)*tTS#PRlNGhVtaU2VTyetbM=A4fR z_*s|%5jnUy5I7Jr)!x^2%{dQ49?}TrYSLBBi~=*WDJVEFU7{S96vD-B^pfFLhu-y~ zKl{w^z)S!FfIF$E>!r`DxtYhv7rk0j2}mJE0+QCMny4XSAX=ySqth9{O3Eezrjdgp 
zxw#`qH)e|zdTR_AnIii*ZnxX+RT-b$-9DbrF#c;t;=GfkLTN^wI5zI(uH+-)N(x6pRWJ-qd)yuH+Ns(J^s`42Y)ZT`&VuF%liE1AK(0P{QR~@pBZrz zqNoG&xY=ra>KwE+R0BYJ>->Ms4*a3;2df7zUL_2Fzy(+}47-g@b8i|F0~1}As9*FM zUb*rGqH8;}2USIY5Xe7^;9u%_nZ?E2m?HpQihO@hE-%`*9YMem@$IYE0RYhSGxtD5 zRMpGhUb%AR-zw~c37ks6^}L*3zLB(Tp6+tll)Nklsq4+{ZkkT#^Nbu%^9r%B_Xzdl zAN=}5$7jX zGp{WG0U}|j($Y{O2KX$S=%%XuA|FOZ3`7*b0dXjWFuS;!MGjqC>y3kXQ-o!|9WHVI z$blR#2>@{uk)VN@V@^b1X6rl;!>}30by`zS$+WjN>l}zp-BqpG_U>k$9=`kGhwpy< zeI#NIAST`Fyp9DgCaUg;Oo+%F(N=950>v16>}zZ4&KyGsN#i=rPqzECyM6h1Z-H`m ztInaq=E+Jw`!M`T+3}On2^Z5?6#{nXOHlSuntK6JPykX4KoJp1s;YT^fa3|*GbM)* z$Pg8Xt~H`pu6%`Y3ho{P0gz+yG_>Op(D(cOi`(sx!a0Oafy(yv85nFkZlLzlG`EjG zW{Qy_L~U+?Ifl&4&Q!*+&4B2fqO__C0X$(am)3B|Ip^3MPKV=WyDz28Ywf*XD&EvB zr3B_?_O{N$z1AhNy1VwKZeki3d#`4a%79F!8ab7ykpmH3(gVHsTC15Q4oyX*kK_2{ z$&*^^IF5iufKZ{<8bZhf(mR+Mz{BaN(<+{rxw;DYn~z>=mzYzG(F4s(?bZ+HBan*- zBCc!8IRnAkKYFoy?}PKV{~z>=A8fwKccZ6}o__6T&o2+BIUhe7@^~1(K1<(CF9%uH zu>C@=`w}A)Cb3l^^U^Vo`tqO4*MDQl|Nc1q6v(v9cl-R4Kl$xy;o;@?f9kzwW9$F? zoc`{^*FJuHuGHF)+D|?;$-~|^ZDkb?Y|c*2^3U^S_cLf6nt ze`9RlCLx^RxP)1ph!{8$061by*$JCFGYwoIh2?Y%yC(&K4G@8H zVZgSuR_$>*jN8qnXF$w}TO+_Rmp7-wkTZ&r!;>dZZ=c*vr!$VZj01$o&RwON<{0<; z{ln`wZK_QraSRyP0nGbar)kpG%XWi_o2dbah_nU(E;5W+L_$b0Mt8k?a?@IS_2HZN ze!jc8dwBWKq(6J|d>961c0wRpmMUFaljFlN4Mn7#`dFSm?yTRhA*3z#__8mtN!}`bH4qV`T3t_M&ZPtpKFDYnL%&_UK2&Lz(ny!6j#0dSC={XS$-dp0WXPq_;jBB&Tp<AO(;{ZnP?*8p>zW?TM%z1!7Po6&gxC(L@ zhmB2b6I14}-)sYgKF>|`>3i=C>axzeRGjTNox$lRe&Q#N=hLcPYoo~B?QL!TB`XO4 zZza8b$SK7l+O*eUvnzRbiBB!df($VhhsI1qbUK}KDy5VVBLkY*x~@*R*=!u$%)l^& zkkUnJ96|`Y{r+4RGh>Qi4sK&EAtxQn>*f6F;SfUF4m$*}E(XfLu2nlEqDHnsLa1+{F_D#9TI!Vn4xgjmrVLcyMIOK<)-$YVFzt>&lg{F5)R5 z2J*zg%vK6W2+MqF@mK4Dfy+3Y&i%N+e#FG2NDSDPo)X3wr5Pf`7*(}N&zw_=CJ8QX zXv5g$sDPZJYhPORg&Og9@1KFt;YnNv?e*R!%0j0f}-dFuyfw^Hp%LoOQSK6YJev{`=4(9 z^y`Q9`NQ}A)~xZ>Z~e4Cp6~8{V=aI8@h5*N{3O4#9s0~=+Zcv6*MJ+hbehSCige+^DpKoG*EJBOTvR&~P%a$fB~ss-^j^&_Z3|%-GGeInL5w*?B2u?Y zqu`WM@BLEaf5``nyL1haOBv0i*Lvwaz#M^)h}`kB+<@7?3`eLcn!%^#j0WCC5iz9L 
zYZDPbAfnbC0K}AtI0ga-vl!#LuGFJQJ2}v7NiG8ClKrE)EyNtlq_be^xOT-W*A&lTkpLW3dyl8H6s(z zsjaPWj>GZ(fz{W;41}|^y40NW7?OAmj+pa&gb+_N-Z5hhubUG1{h)@7bpd=hAj{`ED=g9>(n;qgBiJduR*bFb;5Brz> z^@r81hUs|j;LpGFf8XvlA3VwjfBOHB>pA=Jn4UxW38)`_KJ$VEF_MuwDL`N#t^V)^ zEYs;e)S1*EP$vL4bo-)IiGQ^DgIg2-&QRl}!sYk;ykcsAOBXg^#3qs)Q_8AeSU0LG zSH5t(Ye2$4?%sR5G#)UsK(T6T-n(~k0YkPBC@A~^=0X4$b3Ax!7YJ{)0+&4b<@>9@ z(3LA+2569kGi$RH~)=ETx&dj-NKt)8{tuHeo3Q^%mM76GlP)cbg zaFH~9`hdE08>>sJCeBn+3I;<=M)dIJ_4hvd$W6vFy!-S;FqGPmDaO=$D{+iOwN^yr zK+UB0K0VB>uS67MM8p{Ryi^s>3LdDWY_9jO@25O5FsQ9Vr`^usTgUCsESr59o^Ll# zV%D=%TTW~hJ$A~SX-kyadB%YwV=S&9B5kcU-G}pe*KA4H0|zzi3XH(`R?B+j%9XDu zlng|q5ut?`hHcvI*YoR{%BgBBqXE`sZmYce&bP5XoTo0WUFN)*X&338b0ENVUF+H@ zgcO)UAVLF=L&@5`RuOTq7#R?kWoEI<2qMBUg%I9`R}g!b)>_O=M7{TAS&ql!b{vTa z0dme(z4zW)iz#*V%`hy}hlbF?Z;;s+ub^_tuD$nH{HdDJ93$dj~`_ zTb89Qu&!&&5kjcC(8c8~h5&$wkM|F7>mqWP&LM=Du#BU+q>yTx_xszm_BKs_@A-f5 z?f2e$zAoMm$9{02!Ny{H4L!nx1kJf@>lyCfs7l(6-o6;K*DLz6%M#tt_dhH$qxg`pacyBmK9%b09pE~{jK zr8`Cky+$XlT)FaN;+D(dn0C)J#*-~@di;y?FMp?nU$rOy{`S=`Mg0eG`%mWnzkX^* zdjI>o$EUK9UTd4qrEc9iHNi^&9&xTr%gW*^wk+K6tt8t&=dGSlG!y6^Nj6{;-QZFED%oI3tDLt_V6R-PYIyI>j_dD-am|!F%y@ik6yew z58e?`p+E3_I2a(}x`8=5@rV4dRSihncU1xqBasBWE&ysp0#A;F-h@qBLU0NPrCi|@ zOS-nn(d@{v2O3xP?2T9f;(6L{iLY+VRILKp?7k)%anF1@u}`{oR#zFu@9t^Pkrym3 zpd5F7w%YS>YUnmjk9+*{hNYM`p5dOJ9GS!YI6w8_1yu)aBb&KuJ7k_Lf@kv&+R_9|qa? zzHivs)^fwX!-Ly90wgev>Q@B}1iB$Lc?tP1Mc=9)href}zxXv7O*sE+`!_RSGYIKp zOBfcfv>Y0YO~3hccy+>jkwz6KNFYR6i$PYPWl*t9`R%?;_2 zo-)$Q>t%CDIEADY@ zp1~>$bNb~q8~qHspbN?}VH0Z!NZ=}-C=&l(Ja#RTzXZ>?%1N{CE9X z`e<~JQ=0$1Xb*+L;kR`DQCylJ=#iX6L0yB4EU4V)Cj9J3lDdbRDWc&M6Ct4znG#Gp z$RE;FpO14t&oR5IU}5@_#V$wq@QYoFKgg3va7UgpG{v3P9{F4bK-@0IV)ox(M>aj! 
z^KIGMUHl_U+bws!D&hXPrzwFtsyoA*bXW~FtS3VK9yRmZc03e&k)mqmjB>UOW7r+|}n+{cZHZ9rK2)ala3@M^^V=g2t;7`{GC&wH$Vr)}{dv1ENXcCnqdap#>0GC61 zYECsv*r4k_DaTqpnMqr9b&GHFHs+J=_`8TYD}o*YiG^MNv{lXX?=OZ-6^$OBTn}~l zk%Zcmqgs}*v}0D>L~q2$V&sBaL`b~$;_^yQUC*J*x4)0nj_K$Jk`r-dQ)@v>mpuI| z)sK9$Oy@@K*}F-74FEqT@-I~+e$@?NEq11)as$YCX#oIuHXN(J&)JpS@4rs%c-oF$ z)Yu<_4DZ3m^zI(QZ8}ea=Grc)WK_eF2rE$8g`cx7C*f2OP}%cXa|jzR9*#&bmbFmm#ZVIK&qA{Wpc@}XYG76DBXtegt4#{r=Mw& zE<6UyjORD|Vb!XP3`151rX*Nmi8C$L`BPOY>gEh%lNT6siC|u~j>FHmxq9R7aYJME zDU)S2y)L}}Ry4JmF*Zh9xE~uj9WV{~{l?;%d@E`^IfDI~AFZ75hI5oC6oilz`skxP zM9@d7(bKTJo=;G(a;;bXufo!+|E8d}OZ2BB7cKz5;=lxEBqiizEM)aT$QduJSdCu8 z+q=kABPWN`t0Fn3*K>5IIZZ&xDdYa2XaZYAbCq>E7kTRqH-5#`{LS|~;Y3$+a)Zn5 zjLVXQOm8}&s=UY{FAL6U@}?zMofCoG^GzJi)k4*e9y3~5E5f+s1PtUKeH)!X^cEpFU5?EsG+xl zqzcAxmqE6+fk!8|lTJl5^&xw+4dOxnu9il2YLRCQ-R&+i`Z?$7lt_UpndP<6#TO(C zOx^gJthpi zwwaNE2m4aw4@E}c;%tC^h*QPNMirC-OaM#28UNZrRxg8i zxZ;q$L;QFeLeuO5%K?!WvvT}x!~MJ~TZk->JE04>VCYsUkmdkCB@M%@uC}KP;13A# zX&V!6FUH2!MANVD@(Y$Zb1sPrDXg|ADH%IAFTK~j*0P!Ln?QU82`Q%fQR=Gl@6Xd~ zpjBN6i60q9t4=VO8n($|GufxdEGYO#qt)XfHESr%Rfft(x^i9*5m15PL4I#>1yxTO z<1fB3uSSOB!&5sA>D$AeUO0p@XPiKG*`d70b`0%i+RiZW$=jer?D-BJ^b;*AuZ-ne z+-C{iJA^I$9b!v$$i|BisJH@tfOz{Yp@ab1p7l}AN91i)6-Yi-xU=PUNo`r?dM10R zhSX)R8sff0VL&YY|3Wh1?Xip=cnK0XJTLAIWBBm-d~T;@vxXE*zMRY>^AfotmQH}S{?>STRuuCi@Ey4`QDhQ1N~1@#3a8!EH> z({LD09cg1gnOGY4N(A66{LrbAJ%SpeDa$h$!)8>ttkz3m3?>+49DF}CoBmDxcb%r5 zISinnHGN&IBJp2$vjov|Ck1>r?+$~2XZg}t7NjN6PKip(M;kW<)dKm866TV`XXsCX zzf47PC}M@6`fsMw9WZs1Q<;M^P=3uXl}1jHY^>SYpLz$iat;eEWxn*7&nfhpK>48z zGsQM4qs^(`@hIG?D36L77eEvsyfyUsGs?%$OV5u}Q2vQrYE|+pCdeHi&s`N5`+0BO ztaFsbpp|fFaifn(AfCDkA=Q)jjeulYfs$x#qet^3-K!lj+x<(R_l2bvt*`GaVbqJB9e`GSA zrt^~+uOx7p9rB6v#}B$=F%j*Sk3ru0gYM{&W8UfYkju4c;g|oUZ_aKX1XHdaez}hP zcd@T`JJyQx^K=D(WP!Ff~ z_8LW&nY7#mm!sa5G7h~^_?gQ%NI=g;3`=nZC_xy(*M5-nH3-yc$B}fu#L||OjsVe0 z|0W)}yss`3CdA_{_Mm$9sVUlkGzqkxGpS!AgvJur(q4t$fEe&^N>Zcgl+4)`?JQe@;0?GjuWEoxt z*84U2D*<{lbwvv_wDBABk>j9z4jDP?mlhj0!DBKUVtKY3ad9sBxYOI))8KP*B+p}; 
zyLWB7AhI*XCdecA{5Q3BV@u@)Z93o6_g6L=?uqt)S^xwcDrl5HEqFM6()Ay+=Z!I) zU_s0sp2&aul-mWfAO8hQLM|%VJ~!j{^X-|2PG%<#CYSAOGObU5Ks-j_h2M5(h_Hl= zN1wI2%O7$Fr*QSEokaHp7`S=a=YQwGQAgceEXMimKgeNL_epnaZp}U1c=7%RHUI~LA9j}wckLbW zFf+q}lI@YZ@1dsQWG}%Fj{c^leb5irg^7SYC#GmY*M`U12Ux)<8HuRH)&^6{!ok|H z{9)JH`^mY-C=&#p%N^)c^8K6eouUZmc;KcIIe#zha^*n-djzOuA}+Eood{5*(=wWI zp)(pVx$Z7nbdO)4R{V6aj}but%msMoXRKc8D__n{Fa8@@x=xD$2-8D-NW||&Q$)-1 z1Uyl62H=BiH)UAYPjjT=oM@nG@?q@M+mpk8@H?Y$(8us(8YtmM_7{u*=u1-_{?xsV zv-M%UvCd1Mhr&b~N4u#4?%$%LF#*h;TZN7?VdJR~d`C4A%IjD^$;4|qR@m_@E{i|<_>x?Sqj1h@Eg-+IuAa^+qHF&L8y^`! zTSONbQ2{j0caWDy%a5SZzN@SyP=T>y4>2gFY5q&QFR>q=&bV?HI+swuYi|Uc-qN4D z@HQVeKR*v)k7$}DcT1?KZEp(+kf^^=3$C90S3--Ft2X@m#!@Hzi{w2>b?dy1MbxHJ zs5|)j?GRsK7p1w$^eoP6_ZwUE>F&nzvgrvBk)zAZi$g9|@9Jqpy|sYfwM^k5>VZ0mfGPs73fzNS~S(&h3!iviUa`w=op~vW_uja z!2A)Xju){=^6({tH?et3+W;1;VA#0W1sI%^St=1Btj@Vyaa~lvVjazNO-jlMzo#&9 zw^-2tWJo9jAOa3De8X#8 zSXcp2(^ML$rxfG(@3XU^y@KnQrQ7-?P;&A``$O)@7Ci>-uR{3PdXR{=%-t}WsQgS& zb!FNNdo-gylZ;Mzj?&`p1cjr|fa`mt%wK){&LJBJ0v+%94seEg-ac2m>RU3mva-)4W8_n-tE4PT#5Nx+LWT&tQ!bKksIlY+*hmtKcK?>YY^ zue^c(M@>un?bm4EuWFAm`E$DWY_98r@SNCw$gY*jmk84MqTkKgiey`+TxGIVNJc3IvJukOX*?Fa-Y z0UuGAx}d}E=+?a_o5XI~9`X}A+*srwxR;`Lp&kVRK=X0%Vq)8!Ih_#-UM&N^T@zl+ zDO7CqlXJoTqQpXLtFM6qE}-M&$>N?bs(p$Qu>Ui#vo%m4erx3BDf*!9P%4$PYatLL zwa!|1M~!R4nU;x-k5l%mlL^i@gRXoq<-Q&2`5+XlsPl#t0F5pSdTyI@8qZZc)Edc#Rgsm|8 ziYQY)iCU9s?AY0s>#?SYQ|l+u-*2S2SH8`EUfmL0x3IRAbu!^Olj85cadorh(&Twa z=F(o8s>~I=<@^i0fCj(D%4e6aLwYN9!^X0?sOgXS$m>OfjOIM^O9fLQM4+faKo@9a z{g<`^!s*~xB14M@OaW->wU-5QP&%S_!4n^Mg*n^=N*H`W)HJBp02yL=ZMl&QPmP!I z#JA6o$2KtB)~0F{ph&U(m)HOa5ls^{p%`1WS#C1VNY&w7{`QZwrv+3ine}CC;Zh!k zlgue+`lQyRDZm(qU&E+?-JaZ;CL(gA;YJ4-2im$sv+LI=@(kL0(SE0-)@I;YUbTn{LaS4 zSlY8&skQd1nO#;i?KehwjB0pym14@(-^+y($g*Z&wSc=n|W&Hg#d$nu%3E!uCI!FVQl_wkI&g0>~Lb)tsHS`4l zM$mVTt(zS^HU*K_2dfQihHj2^_YLc9vViW-hyoTVKC`oGyG?rEcR@3XX2)bj(#DjPEA6rM;Wb{p5Vyu0 zgxIKE{bG7r473*RJirh7++)+{++OjJ(77?twvMNgCu~NIJpwQjNXFr%!3BP8a<#C5 
z8!Je((VxB#@Ek>TV{r~98-`JQYPEOyUi$5}uR20qYA;(>-oJMDvsKh?Krk&dsG-~0 zBZ2C_*00N=1BnzKnM3__$%Fu!iY>Neco>8IE|kyIJIa<635m)X;fWUb$>H?1{e1oQ z))qbB=+v0OYnG)FTYSds#gM#xX<2q)uMXTtQu-Jmc3feU4&QWJ^;NUNFq+#(pBk7A z*FQQYuo26=uB?Zyi17}}LyAu4u52u7pJ^XRdZ=@uvQX;7&n!7?TL@?Ryk`^$l92jX zH1M}pR5_sJ+k{a6AX_hWa=CI&Ner^e%sw&YzJ0>};p2*K5gB)wx2%#X zI1Hgf{tC=%KtV|8&N=NQo&6L~a!N=__N?Z4!Ku`(`B@XW^4Q$ATbf9#B zDVF_oI&m`5jy%)z^<;^b@(;M50pdPb?jxv7XtSLr07)JZ_N%1|knh!jTxDTq_H8vm z#}MB8C_=01QrRv7Yt1Bl9#=TKj_0LRG2KB92I6FLJaq0ujni#H0*|w-;l-)=4dN67 ze=mf9wG_|7cT`h#GErvm9Nlag6{1eGk>P-fjZG@>7`2${IlRw0&-p!L2qfqKBray$ zLN1yd1A`Kjm9@eM2{OYpoK#}~BgPa+3BK2l%2yoKTArQ+sIisNlPGR=R7tD>g71*% zyQ!qm!krtU?AusjRFSg#^0Z_t=%B4`0Go|~z=L;uYTCAzlUwd(p=J5eTEg>tzE>N! zlYiaSBO*VaFOCjOcUBXU&AV{yh~!8tI(x->8Cf4S@W^+;n~5?rfmyoc=H|-}DoMfM z3;kGBF9fbwHOZt(3+~s-{G{BtXk=2Xk^qM`wsiRa@y;$$(Gb@wz#$OPdmOk-t+|79 zKR*%R9TOkKO)x&3=d%3m-77ld@VgG zk*ck*|8Ats#q|6gV{Tf|uI1Z{*@57YEdVqe4(2l!d-ycd;-bc2)ifZkXI^lu4>)78 zz_bw7X`F#3=Kvne{WiM#rKCb{qVC#o7GQ$whPAj^UXQw6>oWVs)wS9kJpZFKqmH+= zOyN>H_Gn~T`hw}a82#EAbonP|>3Sn)AUgF|&9i~k>dLoD<-3gsF435yBiE2~>p&Q` z;CqrBuoXLsW~Pb;5z+!&>u6?Cj4ChlolC(NDCTCVR$ZM(T9gp+ewV z!&XQ#I|;`cB+E*ZYzN!DaDT#5-3IdJpfCaHR7v^qBfp|8^OJWBd|`TkSQ+#h5;dyC z7BLrSlCI)NFD0bd)ZF}H90Efh3kbh^MQCb1so+ zR~v<=bjO9pLCBf#O)aZH1XHwaK9)hjv5LE7|G4GZF+cx7!>WR6NcBU{iEw9IH)hKA zY0&qP5sEMq$7_`#QjhYTmMZ6(=Jto4&2<-Z35oAFDqR|Fo^XNka0vMr z(35nJrYCQ)?bE)=UkE~xMYOHI-rK`}Z7&p1DP2D8)s;pr`-lpj%5ba1|pG1;4MvVQ`Ab8|e8&8Dv1C8Ps> zsi^yjG*(uHmFsbFy{q?VHA%m`J+heOF4^$+YF(#sIJo;pl+jr#GN;vxG1TwQO2$4( z$of*~|1}Z(QuB5KUdmiJM#y~2l;P6$oolL1Nf1(@S02m?GK-ztR4EDS{6k-0qD)K# zkOvUU*YM8zN=EPnUSQTQXb#$s`%6DG&M^bGCIuayKuBqx=d|vn_%(_&9^6ix-S$k2 z%!@}}_9QM{W6UnoJ_u{%yLRECkB@E-GesA$*gaRbj;nVE|NgoK;R>$(I-QLcJCCpH zMzcIrFK5r97^i(ARf|(^f?zF{}ZBMbTMyUwYH|Q!tLuKNV z9V;vS1ocF`mEgGC1KiXGB^2)damL33MyxA=chwf`N*N+Yj7Nd9jt~aa?SX8 zzi=Sa)m6EoVJg6Iv{{pj>vy>65OEt(65J=Af(_Mtr&M0P1^q^>)Mt)F0v5`ab?+nf zEAptWKL*18HR2)Od@vbH4o*!3Y_nuJwDmljIYI4TU&syBKhDm>(zR>zmc^3y2TLAX 
z2O+CDOIPpB9!7-`v;Nxb|qWTL{wwazhzP5y4% zNEN{$$%?-!emO!<`Dgq>^~aY%@Ii;{nr=R8RHtbNQzSc{NfR@j-QAZ6woC4GDk`I6~f2~KS>wF zrEs}vp&Vu5a!YPM$ecFGZKO7Qe)&6nDO6*kA^%96!A(I7AOd`D6rwjfd4AXm1TB{E zP9PAfkzHBgFhEmsVNY?^>3m@d?_cD91Mo}I?IA685o(>rTn4=L=JZKpK!ViD}cyVj2nVCRGqtWp!s_jREeVJ_e?RQXaW-612VGSK{h&4rh(|h;28qK zuvYEZ`Mq@EMimoFb&dV}NzAI}7xKe>@iA%tAnynEd#rJ2fSCV{Z=HgM${;aP-@A1Lp z-2JTo$k1g4`?<{7v0S|-aoP?ie-Q0K1|X96e_Gx7XMMF+aO{A5Avi41$NNy_s{$(^ z(8vcETHoYOFxQWPZDfk{b~v0q7DY*c@M2VKr) zF-FwL#gg=DnCy=}Fy%lr-1(>X{QKGKFNGT*kf>N^ zD`Q6A1_&k0yprD9gniftg4Q1kIM%6OBTk#pl~CXm%qIr|f0wPy35 z3auz~nNC*!QC48Wr#u#_#6$#buJ1h7BhQMGf5Tpa*TwN?QSk?+=cJB1jF}*hEZe)u z$X0vc)&@say9X;jF<(jJ=>gmhY_WmaVw8nMy$*dI?qu)l>SMFvu>iG<-dJmGRr~rq zw0(Mn55ISrEB_u~!hF#Gdu{mG6ILK~efsrZy|MJi>?!w$3tgHkbQ79=vjoe2bn_`8 zpLlm!!a_kLL^85PGV*Ix1*e*dH+l2ajZT+?|AD9?9MDEnG}yf_z1nl|xpUm(q$}J) zh*!|T0iGKC{k_3~&zPZBJRE$s-iA%n2@mugfAze7X3|r2-w=n-PSOhchb?t-`)W3Z z5fJ}1_t1jdBBFzlnK_2cQ=og%d(w3+0CMNsdB?0XnmpMm{%S;bsKfz!S1F(VwUHvu zG)K{4E|7|i4>o7y_tQa)Z3Sd0qv>8=Y5|eSpcsJgZM5O-Abk8_l%cagItXGDTokmZxmRh6gCO6 z)a>Qs9j?&hE8rsAQta8^z`EVBN%cufShGqvO>^yU4SHb4Ohp266nQjO{z>rb>9^j& z`dQ3g<@uYS8?nqZNCNH>dYiysRRajESlktE7bXqeOAj3#@cQZkAr%}!CjWf&cS1In z?Gd|v{nyI0kusMU{!&p0G1POMoBFjbjK^Xo)h+mVp5^ASj84NV@Mttet8YeI*~!T5 ze8TXvBYW|Z>D6B7+2UALqFetl8AjtqJb*{%He~n4;fw1|Iy33}4Hw&EAE&+jlk!+sC8!r8j>rAHuFI3REkI8g-?ar5WW)~Niw}w|ncx{Au zKP>;H&xDLw!*M0e!wWev_Tn8afTSMy`oT*l33xGVIS_JyPeaOJ!u`{ z;qdsYFcskBbK;U&p7rGU{D?0j=h@+M_dBfXaXa3+CLrK)PW-_1ed+4aMK>AC_4l!m zigiAJiU-#ob{$mHV@gv46N)We9Vt+8+|ve z#n71UR~F0j)L}*oD{0?_tN<_6fKU7zxXTAcW5msbZqBY)BreldHQH-iy^c12>zfd5 zEJ%zww3}uGY4Lx!^cN!Vd^qQkn|LLM5zLuHO^GGQ=@0^-*0tI9!l+^5I50-g32ZA& zq^VGnszt{}sM-0e(%*^SFZ)Tde^=%Tm)5lXR|02%)NCnc+*vA}E+-^c3?$~#qL-?>I)W<+@= zZC&mdF;ZptNcEj!4ts&eJCYI~N4#5QPfdoc+P-4zDRjZ1PJSz!X*W z%JqHE2fNTe$wA3sz*nycy3jiE3SBz1K-v-D1VDw(+)4H+$~-CxPovc6aLKMZ?R8nI zj!U@%x`ZSHfw&Kv_dXpXqby0y*TVhyty7lU5*y?@$Ip ze-uxB$f?o}ExON?h-!1p+FRK|*jv8K#L0)}uQu!qTvc*)SvcOaroQ9x7Wc(%&YO`n 
z1Jj2fcOmZ>ofQ|MU_WPq`X97Z?PV*HaKY5IdweL^SG5f|L(^tE&BBK z&uYbCzafW5oXnElMNs3#pA}(A_c&hKn^_**m+~?lK^bV7Va0N#_E{Ku=xl#SsPnL-PvdW7^B12{SfNis++ga5unW4V zIy2<`-9G_=z5zb=^RCK|HP{2shhDOn?SFlP<2}>UR_BizzPT+8-Y(s439kF-k0(U~nWT%r}DBwp4plq4(gTn#45)5D4FWl07t>cd9nUi_Q!l!(MyRzR~t zX@;7b9m#|usA-u@Nx7K~Id{AH8nREdaC~yOMe^Zz@w2)MIRc{4<(LnP9wq-9>~nrS zDlv97T;U-G508@k7;rET`6*u~;FY!znWj}9%5fJEF(?n+U@GGV_d!gYMPowOigp8< z+89eFs8)?hfN~fVirw7OaURl^#UGa#hSXvRxB4@RRIr}(El#z7{1g|IgN?;cce86F zXrcjH%H@$W86UKsD-^Bv!!w-5wX&LB<~?#Xjs<6FEk=0gkPr72XA2rap7@zLoGp2B zN5lfm!@nVA{#OB>-%7C(!T5+@XiX-?5|?%-zRHYUNBR0ocihCS-)fxy^j<`NppXSb zME9`acELF_K>OERDESCgTT_zk;M0?B&ss^N%Em+%>HLy%hwzw^RMMZ5jzss%-UvLY zPs-*DZd>v*89?I`1nti5-ErfRdN?o(Z_FfkCe^2iT^UWGhpe+yfj-=%r5fSNk>7C3}i8!M?EObQ|b87fpA0Nk{bV3$KlV&d20<8tKD%#a%H(jTf@~6ss%3 z2A)T~F3zDo*&HEHuY&*U2corLMEr8Bg^>iijM8IU+^0JKlmQ6%bmEGA1T z>TuIs**@h=+Gk(PQ3-wC#J_$sxeR95ShbkR>m0GVy!d>sJq>^GOeuc=DB+M#mcro<1uLkTTvIi65%d=4HLd=r~b~a&hxtbOEMm=m*Ku7}K-o zI|u;yAgE7Ud9_%NHZkTim-!5gIlyZO3Ki3Mxwxtq)OyDM;jh-c90AB59yPgu>uQaG z&4?}seygZ?RQ-P^f7ptaU=cC0gjk60!zx!MM{m6jmk&QPej9Da2|kkVO0_*)e5hF|)IZ~m)msY2{rScc^(mqZ z>zQLbNj+;aqe5jTEHq6Q;HQa9_1N3S@4|!jDTK)H-!b<6ZL$1xG@1c*~@fxHJVznVEP+hfBkb8bd$crp!OU&X2>gb}iDCSH|#u67N?Y zgL&YR_1fOcD z!Y5b$`}Qr6oJ=j*6bX%TU`@r{-FF0QT31ySC6wl`l<>w7p1?|{4FPeSZpCEHQ;T>L zOr6?6yL3X&`tJ`&S)dB(ir`2wtvvZ|?!95J>?~b5d3>y;3=FDhwP6Vgx6{@n`k3qV zJP!$>}?S9lciF)&r6#M4$ zPH;=x537|PzPu>%mid!=7Q-TY#GW$;dOJcen(FXLM;`y@vw;V>M)Bbi;@-?U10K)n z4%>^S7>#$c`BVicKKq3sHv*)X@Q>;nU*ajjNd1E#xq)fYQfzB9JBb)J#O5|WWcj$L5^{BK0)YUYD+1FGY? 
z{oa>R)2Ck7u11p^;6d7F{lHiY}b16TDnU`uzpDG0>$aDd_g{oB2+nY z19lUv%sx&%#^!ZTTVriOt@eYLOGr>2jBv$yx zCx$#)6jsh(rZOZ`T|MHAf?lw$McN{fw$MH#7wLiZGvTidE+*J=n_H3F>!pjVf?KH6JE%aHqwF1i&C+3J`X!4a8e^!@t3hK0wK_ww}> zVuM;nS@E`W%;T4jH@_mD_&=C|XM96#i;lW1i6{whK6vM)j)Jb(PL3SMlUnyC%+CnE zUCkZtn>H+kH}-yVQl>-T0dB5gpuH`EghC<(EVjA{VaiZl{QXjz-jHdaF)>uA@AQXWAw6xy;rL{eZGAj$*gi>HpyANQ5A&pH; z5NUzu`;UKmFs`5!N(41*Hway(b5O-i%Ce6+BMZ%CMkA3Ok6-c#%71wrAo~iYlw7m> z&N@^|P%0`wDIgzdxacAr-60Y{@y=PDSq+SJeUp|-WmBxp zuQv=OaqawhzlwmWs-wxDzgXsvjgDEy!*P`2xLjR+@Im{tDY&W{CW6CM+f!xB@=G)8 zci&pq`O!c+gnTZ}K)%F*A!^H;0rj5Na_M~j)HPGROF6c?uY8R&3_9KHe>u2J3oqqFKI+qc+WN5 zcqCCYJ{}ub?Qk*vamey-&yB9!?_lxys>E3Br~UJcfy7e3&cF4;!;&=gU9Ro(W+v!* z^rPZbt=KdaT1w1oEqsZE=RRu$ANf1_(C_XNX<8cru75L&bSt_S3h{2jHZLm>2e~Q^ zQHHcH>5o9a@HE!IsNVw9S&; ziz{?rDU$%SwDF)3C0al?KR;{Xx&XR&p6u(t?|yie!;N^zWynQoFx9Dmagf`59a{q~ zr}g6^+Xy10J!~J-VY>vt-u)FduCnf&_ z3vF}0`x=9bmtDPpG7hmZFMj=8JaDK%VD$G@8cm{)vq8~Z?Us?I%Xk5>-@erf@b z+vRf`@;4@u$zkAo)Y;>|%Er$6|7!~HX986iln+Y(k$`|Z=atX#?&v66=OiV}rKP-_ zWoP=|g~^h)z=OLc;;L4=^AaOUf*Wh|QK1ufM)o-cfYov-QdoHLYD~YAqD_@>{9>gwBtTP6YjYGn5h81D_ zwN{G#-#`8>k8l?P%aKs<2(aQK2a?60xEER@1{rjN@MuUYX~ym#_G_DzI$q>q>N0tH zyC3j)TIkk$kSzkys5^;i((6g3_Iz>IP)%~pcvw(R;OXNxsVzl<#x#KkJre3R4DIKD zcCU#Df1e{krbH$$Fm;8P&G+*6YK^V~s}7I>$U=}<36#fw+sKPnF4R~hzijGuKkp{^ zpp94S1vdy$j>F-k`kx_G<;anu6z_Dw$-!7&g@-xVDG<(>me2dG^OZ-?g7CT@^+jh( z{=SLH2+mH(PL2`N=8D=;vKgcSxOCW$Rg6mAvy68D6&3zV$Db@8i7Sn**(Pj0bw z?s1Idr)C4?&G~2zW)?~^fK0^k@o~dejInZgi88qw5an1{nHafta69)^Sk3ETu|3s^ zizH0WzCKotcTyIJD8{qP+E94&!}4DyC5q*d?Q$Fl`JaZzq8h6CaDXO8p;Fd7j0qOE z)=#EQl7a`607=g~$e_b(hW?5blaAQ0G82QbsU;4DaLW9NBIT1XM&7NKsVkjWHb@u& z3I+IPg(?E(55mf~Wl@j298sw9wW=Le9jCqN9n5bCS1QVj+rDl)yXw8UO#1m=Vvcki zR(VK2jWwGHAtOK7yk<=tnr}i&lM9Q;ko@rfJAvPt1gwfL3x!9Et)q;`z1#Otw--5f zZvQUbZU?SP6L07L_2MNqix2fTF8M4&`^CS;k}fnk-bcs&5PZ+p+qf9;5`ptrsQiOB zGZa6s5O$A0}; zR`Lun8vr%jwa6~{q^hmXA^5|AX6U;x3v=+#+worh5e5ACb2@T&hk<;bJilC z-l5fW3~quy)g_iBmn!G`Rfh%Nyo|UIiCpxNrgOTXHlw 
z!zc^=c=vQ2g4)g!f>#u56DU1BJv6y0s>QG}G<}%lGZ8f#D{)SLa=FQc%qSc-iQgR| z!4JccN|mI#%0u<>4z99;@CW6|cq3|h?k6jJd1!Q?qM)Ex0{trloBJpfTH;;FVqRhx zW1OyR9{&+R%UzazzxOe9m#rdlEhxLcG?690)5ETH5gn}}OPee)vdu2kF-oIpelS;3 zP1?)XvKdpV6Zs?pWTnFWr8$Y3NW07uF*IOr>i|)5@_w=kVOZ+-PZLlcf#;;7pt=qW zlF9ndA5VICPkOi$WVI@(i83YlE1!BZL|LGZ#WFe^20W};6dt7oG|mgHDZZ9nnTY_^ z->uyw)JxpE*asHjV^n~3$0Rz-Y&65GQ0{@2= z(pTUE0u1k?_@IQJ%N7NNdx;u`Zo*oWkx^uca#?dj;ZR)6DnC7iF4yJo1r;hH! z?|ieTfzqPs&KMZ_u>*@x7KxJ@v zWdhSzT0=bVGG4Vwuee8M=)&6%J4ha-CSmao;7P^r`LoXN_QI#;j9*NF7uu6UxFNv< zj+_>>%gXAewKbW+10denR@aT2n#G{M96L@v*)gFjD!9?irMg_upSnzmI?f_O7{A&9 zrCvmZ0g?&OjHb~~XyF(pRge-_=Bz)DzMb^RBO}BN0$v>HdgW982vli@aQwg$2gWW^A96%!wG zB){)@vFU5Lk;yHS17Sr*zs11Y~gvASws5|)IevAExZ8KSfMP;6xSLdK0%gD zxv|TFydy0D*to^`hcg=g6Q6zz@q9cRh=9uB4(X6Y;-QDP8*h#&{nI;AhONWcWFMVl z8G@$FADKfQEb|U-fIU*Z5z46 z`f5g3NZ~|?MVs+mE5FxD?=cC)J#GNWBDjx+8)4SjQwi{w4ZFC zg@i#c9a=>Wafg$k2^uO1U<^9z2`kB&s9om%d1!sdWn4o8ih$(pEj=v8RKdV3|4Zu) z{QJJNzNCHepBE>XOrlOc{6ZL!GQwyJdl5lOW_h^a%295>B2>&W4j4Zg6+6=Qg0)1_ zqG2O+ob0nl{yFs)CCAQpn&{4m+0yPNN2cr2U*Hn-;2-&Sf1tP&wb!`%&OioWj3$xN z)ntjD>L|9tRXsWsmksuOw0KVN{aXp_0pBQ`zr4->rw_8ubd-g9iw0}<%#>T>BK zKC0MyqnKOI!azZggVqT9L6Ywk^AzmYy5u`pk9goTQqtOr%l&5NR*@^~FIazQS^pv% zCJn!`KGWH(I{o*_sgbE;D2s{+E$c~GsL#1f30?T1f5A@W2ftTDW>oP%-Ssl_lPyMI zRd^{yGmI6x7}rU;!Qi%!U_t{c13*A9F;1B`I=S<(ly8dv88lV;&M6$9{>HY8uL>>4 z5le|#NFTxos%PI%8z@EgE-xmw%*(X3&Z8TUw*~6>iaqZYy z@s#--gP)QW=)-X-F@!HF-~8y2EbH%R#lj-)YC#S`uR@CdZ_`S|mpQ2jkz1z@uKE_UKoHieC z_TAXfOh{oELOYC!EKwwE?{i8ha^PnDN6>T2RY9yF@vFU%Eoqu`9#M0@PM_0{60c=` z1a?TT?Bv$MYQC?ze9o)LwOYx+ujCLhT^I(iZesvct4({?5h(#`0^4qlrW)w-{>G+_XTaNr4EGI4lBtu zr_u!9pTU~BQ7~zrJId@ZA3ec=1FIX+!8<7wGZigf7_*POJ8^kEymT`^+jSmxiDoudReZin^zUU!Uk{j zG~n3JoR&Jf`d>2vS9WOx`sx$`7kaz`iS4KDz6hmatKvzb?-lF|l>qY2#>3tSv_F+x z>t20``jBTYj7TU68jl?(E``td=M+VctT|7(y{|~l6cXp~BMMdZX!zv!ye{@!{rreP zewMv%xe5D6Rg|3dS2Xu}@TO@rW8?}JggkMc$#^Xj*tA~I?m``$7VINYIK1hC3K+Gb zhDpstnu%-`X7u%f29nG3G3(xg8%HaY@h(xslny=3&tFv(J~t9WU+thEXY3-rdgCn2 
z%Yje%E(32pfP6VHduf8xNPWg*9~ms+1Jp_*GK{6RHUMj}5y@mp?V@Uo!YL}}d@1C; zV1|rI;WCfg3JeSc=iXt|eG1p7muEYo-$V{|i%fQL?iQX-_TpK&5tG9GU|e2ul=-G_EWAevFFZ$d!54!zH| zFgwR;uN!XMGCSZeD1@SyhWG`A8uH5qH&udr4_CCClt7+?t0pEU0uLGK_!I7Q9g7ne z`Fax|$V|pdQm{7XNGL^TeOy}Q*ZR1Dd;#^Erj_GwPH>?r0|;|RM+Y9TVi5Y0yG3()@?p9X#dXHSc~$4(voYUJKN zJntk&8LS`iDRl6tvo(&~sjku^@v!AnMFwc_=sjeGcf8!hL)`iF z@o;|TENweVm*;@J$4KBUZD)nj1V~7-kSp9-klb+i@{+6pMi;X1RTne@qVX)L4NyIf z3cBEg(tZIHhaAb=o=WO^?QK1YL-;RC0>h{)spUBWW*Ub7c?U>+ckML3a$PZz3h6j8 zP-MpYW751N)gEl4s`u{Cw|}UW=Ya5MHU$4Lf3lasw<&|<@uvfSaXn@aTVX@b1>s@B z)N*uEwK|G1!mSr$h?`i;!mgX?y#>Tcy@imJ^w;))vQYPY!~N^;s37GHw$K}Xe!1Is zlaq=IVjlj(INgPn*LSrV2^zd6ML|W*T2cuy&(ange@emV)tp#~eGL;gBBmlcxM^fV z*>nmyrdCu(>Q?1}#}u~bua>WkUUl(fUxMBx!|>E;E!!bvq<@L;?)V6Gs5%5pF>vD& zKOXS>Q}VyQx52cr@SpfK%op_B1Hg_^tr~WhPHY(IYlRhN9^RVRMb zvJMC`%TFVZ!-crHOJ>vbpL?oK+R4gr4F&srOAJ#Cn#Yt^!@V!%&!J>eRyOw%#NKE? z{CAmD*-LUibrX`VrkKE%&Bh=<%8{j8Ur%%)^{^-y1X?+Q$5+9I>(>ejc8u$Z9)cRA z|NLp+uFRqu`Gxe84Sw#v=?!xW^0jaNfC-&HbnQGS^(u~$+f5|sSUft(mz6R#rMu~@ zM;#W?LN2#G$mqHIiFGgp&>$?)7tOBloT*`tquf>lVDy%yIl^e=8rspD3$1d@_0WJg zsf{%|%iHWuIIXqTLBC2_DnWeUIW+p#dw}8$U1zj!|MQ9kOFn?`vgXDJ60W8A;ymrO!Tm@Lfb!3I8sY8DwrS; zv(VtHWg_gz+i?>2J1P^Ze1X4}#y+Yx(cuZ-e@#S1@bHD20(ug|n)3qJXNJB|qx}hN zmBbjS)g{SD1AHeZ<%!;w|Gvrf+mE-4gW{NbR$&D}ra`gDow=D< zpF+{jI*b+B5dGcn)p<@WZXYIclq!BHIL{U!cJ;L?(*R{Rx?Y+>kuSrUesb9E)z{a1 z)`#N(T2LZ=T(*Bn&82HBFuA2zPlT%--I!n<1FJ$fpIuRY;=)8x9#<~gd7_-;otjr# z$CAs`+0K&{bxgoF*s8Z>BTKCyHYN zq~G(u0>r}rPLy!cgJ;TTXVahdGeuD7yW?T_4a0AuE@%p$I$EDk-s;pPKWQ;K5d0PW z{&D>RRuD91v2-jGbgrV*Rf2r_&lkW1iuLXWg3{9ds(-+V*E8ottMtK%(Q>LzkE;!s z+EE$#ujR0Vhd7Ik&OX+3qb;DYJcF%LZ{voe9EbP>{5NM!JUQUC^~|_+?p`hI0U6r; zL2?g;d-3R?tyl)}C=DA@WeE&dIG#xVDDt9@=^v+yxL~%9fHgWMkCa7z0acO>$^kJI zyMFu-r_<7$LF;pB`RVwhtBW~`RnZQinp#~&Fth4I{{Vo5XVV!>=cY5w+rPI_y;>4* zdB@_(OHE0dw?_kT;)U8kW{W{Uc5rL{XPa>C6rW$;?MA%DN`L7RbB0?G7|a~uRX6nC zK^7(h@Hz_+5^t^P?W+RAg4hDHJ_n}E@P99h0cK~`|>Y@Ckwm=E#h0|b9Ez@Kaq^SpbntwX8HrI-}uP5*LlNWA`|St zXkH!f12qQc{W-I=-nyQr^60w_@`R>JJD@2iIj*J_LI 
z<^zY&W664O#jnicyZZpw_k@hNfYUN~nYJTmuVZ1T)?qX7PjmC1rd!#Bho?V2#1u*` z$mL`fNLE7O7m>G^j1eNi2t)TTScU_^-ScNh8qCm!3Ar!>9sTyX^&dp zhKUTIs>;D#=mGg@!lHL`!w%gkq5Ai)(C-_-DG?8Hz~v0j-EI#8-x#fE0%?C(kb}0E zFx=}!lt`~!shdx)x&Hm%9=x*hMc0j56%>}Mx08t$KDyTK+`8w7=(*mXF4H{} z9|xCZM5P}iM)nnh@r)~s%%iF^zz`k?kxD~qSU96rk}HYLr=e zRj{z39EDoI2uE&t&;_~MBk!H0CJeIMtpt*~({}$@I&PHDj!c4 z^_(-bhmAT+6#S?!qbBTrX``*Wd#}mqm{~nHfgm?@Q1#y(j(O5DzwHy}dLE zDJMU51g~9SeBY6HV9*U)p6^vR>O^n;gun5~fIKP?)vT8|KjAMl3p-3|=)PNaUtP$> z$ac=Rl=7FFP{B44>#tTGvPiF7>X>RJteVSL%PlG0{*EgLpbm#$bVd8<@&cn>e&6#O zS}MqY)8+85_Dm-iztpTLuAp=wV6|gFFHs=2h3ma8@H#S~>TDm6{q@U( z?pOX_ClTTue>(Y1652g_AVqb~>d?P4{R%m*D@I0Up5IK-*8`r zd&_k{czgqwOz&4-5ibDCHonpI)%ZOH^z=x}V(}~ke(m~Sd^>Vm2t&q9nm2}u0d)L* z&th3D?e2GycONkTFN%Dmm#@wMr{zgFUgYWQZ(OZ?UMDx_eI-Gz%$)R6?AhhpiL^=9 zbQkSP@BJ`sVB54U}wh`wfZ}~mPDACne{wx;;cXD;_0XUFU;q<<eN^P#iGl!h3PtMz=Zcy42= zGys~50%P5%zS8v;iBww;sh=r@B4AuG+)jDWq86s;U}q$V{)T7EGR%HObbMVXLaiD_ z7}}+SU9ErkxgAbeJm0F6tu;rCleY-j;ba6)N-)^c(x_KIUb@fKPHx?=M#*W~=9ao? z$EuSG*pWtjg2pcT&>KvY?ss{8?3Q-R6)jTsN!(rVgYV9GNFC)>2sCH>+R}b4u~&Lw zy6{DJI6dC@vY$NG5hSOVKftEB2xMVIvk+t2x}>|M{H%IxTw588s!#s6mIp!iU!-{)!A<`(C-1~ zWkp3vG8d4^(s{e04|H~oI?8}4d(^uXd*mk%a3COvRx`lVlapx%{*U2C! zXpB6|vSu*CES9l-%adfiptL2+EPWa!hHG6xf=lXYK&ptTAoPh(dS#i?mIdGsr+bdL zt1(1VjZuZlCG>@dNW^`ll_{&mDn51b@t?b=e^(Sc>=>86az%TOG8W*+Jaxxu1yw-2 zFHWT_6BllxZ66P73~WMNMT6IV6}=`k6Mo|2TxL`*%;Wu&TGDa4jBMlWc{tn>*Tp?r z3^GrEU#$A9No57V53jCA!X8IbwobSI4gL4D_)!c6@eX}n4?^tZKdiBS@kD*{dZZs# zdLI)|U&+fn5sdmgEsf4OyduF8ra$v{M+6gP{o={#$C31f>r9s>0J? 
zg2r3)q}3VuUNb`^|NS>Z+GrpB>S8@_Gvjkk0D~u7Q56%d+p9i~kXvy6!UfbhXFLb3 z%0LyEy1@QPO$rP<(gqXQMG_6^Yqop1?n~WqXfsY>Hv~>l|4c{YFK3>ktS13*L!9## z0}xcNQ@wbVwF&8naCpbytv%c3*eNFWe@h~Wnxz$XEXb3+&KVD+@o}gFS@_E%S-t|aYhucYF#GH!zxkQqx&#iq0^CVRdOvH^wqMnQiYsnCs;I>p9Qd4p!BPF9iv}ydmnmMW9uoy|743 z?wRy=NrPdezmLyOH{xME&AjCuKNZ)7l!_dYoFX_zC8ge`!^xjz+pFNi zb%@)BjLhxfE!$~w=gk+KKL*|`E-yGk?rc?E#wdXPn1IrWLuhy+W(<^346YKWg!Z zz(UH#LsfvEH*Zz{nJ}qky{_Wd3?+>X@WI2iw?xQuH$qgCEONy~^a0s}5Ce92NbwZf zxNhWFaRyLVWYjQGV({mAed3gfv9cTQab0OOZCJRFDr4xr(dQyXIojZIs&JL-Uu;l+ zO)CYPYZRQ1SLaU!%Z29U;R%1fmNu`K7I}S`94QinUNVeHzIxscEw%?RR&wNO8f2+c)ge*8DF-5wXSWOIwUIS5a}D*~cs84VNg+o_ z+7N}9G}+lV9z0j+^YXoJL0mRB+&@+@p#!SW%!^(WA=eom@iWL1-zQ~q)y(dXS~$kAej^Ld`Cd5Xwzlh*0`tIL zpDi;+;72uW?XO_--(Bj`tdD=>n_m$L2eNYfH<=k(-t&a>^aIl!oOctzoEMMt0B3x@ zZ>yAz@s%iGkN(&VQ_`;d9LL?2SE*g}vDKc6imLbe7?Ew%c0l@04P2mAL0e$|)u;C$ zclYS%XiJ6op@$eql7`_F>|iJjQucNE`W83p%kp(M#HdabRd)EI>;o1t>ab^?IHEb@ zszpPmL&1fP&$hAs*hm}|Ht`63c!Hw$NoTjzbI~ebCq06_oL(kC7 zS#xSJgQdi=4==WA!rPmigGpMQr-8+mh37hH{kZ*6lvwX$Xapr0t*>>yXM)PJbL>aP zTO8gl8BC;}|MS+Z3E4o1egH{)y}T4hjz^tX-yN8LD)R2^yjkTZeY@W@Z=noTO#bFs z|BgAl7z~6FC_~XJoH+5qU1}$<+wIu^w*;U6QlOdXsTT5}C-kgNT)hSWgdx$+Y%W4y z53K=GACM%3n?jwNnwWgb`0Adxgj3WXPY=<?L;8=tgb+0YDU)7MOzC78FXcB5o|z19{cdonB= zg;5H!-VqAhMdH>w)4kB_m3Ur%W`0y?UCv(%AhG5fU_-P1YVhlYY48&LJ@}8YmRI`? zkhd_K@1dk;pA`Tw884?=la?EzDh%s6`XRGY0y6t$Hg&Zpb`fHZDOv?RrTK_(`;*qA z=Fb{?o-^?L>(#Rs7B1mLIY{;A;QBj*+2&rL+fyT3*iK#0N?%XblFy+;vgShJaDPxy zO>0dL!Z2QZ92NTv*%)b6h|+J$fZ@h4I%)IPkNUk@(Dr)Azqcx$?AogpuzXo#j8D=T zp7`F`tYLwpoO(-F4bjao(s(FgBbMles|nRoTx{3Hq#>_GUH9Y=n_VmZ%Iztd)C=A{ zsYI`8HB5+po!r~?2NTpuM{zd#+ofu+8Bd28N4YVV5ZSwGNQA)W^$An<%iGMy-QHys1`B(b6s;YL+AFHo_jCaFU42 z$#s?xH<}B+H5A}$-qIEWyxd&XMA9lCLd!5p|6;l_ov)rqiIb$&)ww0x2zT@E`HOIs zK@wCi0Rl5 z!A)=vkKkT@soq<|*<*F|qWO?2Oy<6y+=28kQf8X z$ove*b(V&4l&KnHN*8#2W>rL&vuj}8^3I(Zotgp7bEJD37PZ~iWrQFHo8@FlX1G(`ZG?6 z0Nhe$DBc2#V5yVp4-Id@=5qeKFRz{fRmCs|EBWCHMvIdQgLmxIq|%ZHqh6*05RuQ? 
zEy{8(9O}I#bNVr-CU#rx#jMK-3*CI+%usU)TEG+xD3lwHR^kOG`)pAcsfGJq-L&q3 zW7Ulz41r83db+z=N5+on91d+#=lpla8*Nt@gR>yn2lxpT3>^7SBc5 z)iStIO+4;{N!AvIXc(RCHg?zHKw&*K)sI1Y^-N8=Z|A4(r}DbgC?*V;DSxJ;&=8_j{J3s&j_!n+hk>{`7v_r^(+ zDMIK-ka~~9ZU2R}DzAH#bNH~L0yf@Cf2P(ltRX3!k``Pb&g zxIWjC{H>5eclE=cX5(q?XYQwJ)M?m!k;uYH&A~rp=AF>A;n8m#Kx<+c2?Q%u$&mDo z**ub}hMX9E(5TDl445~si)g)kKM=$avQM}Xyk+Xy6aI+;(7v!9JP=f*7xIiGmiseZ zrUJYF(hy?Xwr(O7s1d%6?Wi%u>iU%!KDndpZ_idHyp#Q4UQrdvoLfWX1dN z>g+tVro_t_Pyn{V01~Myqp9tXe6u6P(`@$71|!5mdW&9iVA@Q)Rd>y8?Z|!e!lcm7 zonSTt21woQA4$q6(b7iGW790p4HU6sCLWx|LZ8pY+MStx__Gi)bLDw%foa2w4Pyj^ zb`L?aaMjZLpb#yptWFI;@7BmGY)_-~+RPTE-E}D31^b&L)a~mkY{QQvYWl87(ECNat2fbA1%e`c3#Q;U{ z2v8co|32B>`H@+5bGdI<@AtmC4Ph5IBPsf(NYpa4?=sio0yPsSxW-P3L(HDrr(LX~ zKMA@lNErw3P6aA#+zb(VtW8`d3M;k}JW6jt zjgA{=LYyaV%)?x@%ICYP?kVfSQ9F?+PE7}q1^B0dnP1J#&8EG|rGjwo)|EPy8ofd< zIDNp&%RZv_>aGg;71i+sjwjY+3RNx1TXfbg_w`@XN5RR)o(`qE=m1TZJrf;wx@%6< z5H@GbQyIOlUa!cRn%oQ zE_1a$4pf$t-FBIs#wI=UN$xLyI&WtX`70ueOBp5z($#OZSO2S-l+Bn51ko({M3I!N zFFLhvxX*s5%_f}!EAYiYioSf$gOSfU{jK`k9yVnrt>%7pL9G2;UW9a74&&FA)E?OJ zz*QugYNTq^rReKxWTCDIkRkGyIEgLUKy|)w495^wI$q=cZjDH_F57`RsgDh>b^6KP zZhtQIbn6~U*H<6o2xP-HYWcTZYfM6Z<}S|u9;?d2bp|32U@zU45}3=q$S=JhK3O9M zN}(o2VXBcN+SyWNIqbZiyrzu630g{0p$h@g+$8EX6StjaSqlKo7h*np8fa0w1gm@vKFKDCHmB-d{yXis~1~q#a zjb2#qq7p3PO}RQMsNx``R=ZESz|B16;H9c%z+%1c^6vYi_wiw`MMgI3(~s5$ zY~==>1rTqeIIPHMzOX?c;x!a=4o~p<0s@ zzfwsXl2%Aa?}k3%nqxD@#bW>v-Cpn@h55c@v&Ks~?_Y;OTS{U-CFg950+&**CDEB6 z#d3*#eJ}HG#^K;6&+(H<;`fAIH+1p@-?HtLPYnrPcojBpIrQ`|z$M8H){a+r|3 zuC9Q|xFePL_5WsY9d-~}(B!YfC`iL=9nw(#!+w23Fq+^U4^z`KK1xN?`nZt9 zeP|dro@jTs_p^7G*;^fgGd9OwRlJ+aM}b&fUHVK;U0+Eu9(KgE7pag9t$p-6yZk{; zlWmkZja8(34g?%zq?b8=b!6QoiL4f!)^{dIau5ms_--{b9{L%D-lu!GFjYyO_Tb?7 z;!;X5I=&7jscPw{F!hmU{&z1r_@g1{oVRu$vkZ39$q**^_`dmSuOPJm%~J51OL!qQ zH=Wc`fqJ@x__v^84FTrpTs{Yv{`@!zJ2YJZ>4M<9N^PJVPjn3k5m7XVb@+Q0BN|;w zA^|x&7!u-QbiiG}XDs%^-Nqen^iX2y%sDIperqh{iAO6o=w`F-EU`*gzXD`)&$FT; zN8z(|?1R1>RZie43+zAK0^U|bH!CNHVRQq2uT4YwrwwUGjmpr#4D@8Pxz6=U)=~1l 
zxiJ-vHO3P|G%juSL$hLpv374OKwJ4<0{gl95^;oIIp-`4I)k3qJ+c8F%?Gw{AMtBb zdw?}BG7u8G`4!x{u4V%o(Wj7KAe?b9#%D)YgTvc>7NT|0 zHnJp|bYH!iH}?LCLUGB8ADMTq& zbT+lxDOCLN!h@n<{I1E#y&h%&(Y}Run%|wG`nsk43B!H;L=Cn1;_)K#cH93g+pWDj z^(pL_>&Sz#7pw(Mp-q5!Jc|#n@JPk%J7+wEn-_gc9J`27;-RbL#Mr~s;GtF7fP3I^ z%Y)xmfxcau=goRHNPKGwp@2Csl@#B{Vf-=27rrWcR9OWq z>RWM?Vof{PK#*OrT`vArL>Go2MV^mU3 z9?QAQuZKsZJR{@1C+D`fVARTGp6G4+Y~Uh}_tyjRZMEM72g@o{OUxURA8=s0f;v9H zNMh}jD9viu#Ja%m1DoFx$U7NJQDNr{K}*_CpGw)8tVEwx_M>{W_OxC>pzm+h zPpVJL{%Y_MJsqjF;}dv^GGVw^iG6{jK|(HBQhxGKc4*@$B2!N;bIq>HLQ2>j5(^r# z+?F?~2J-r+-gp>|u8s5eC!G3pJia<#rin2o?iVXdl)jF<&~*KzSoj^&z5VaeANxQ5 z!qBwWi1mTtrN-7!`L7*vt7=`VTP0Y^ zyMDFm#As+?u%c8F2EUK#2Q`jz#mNAzl0&XH29Eia`ZL0vqIlM|I&xpWWdi`8nK1P6 z!=u?j2M(-U<(Qw8-R;OonB@f_SW2-(1W`BTs3?Ecym5qM8qJmdZY_MoQetSoz-vIj z)$oSm(elxE<7S{lxUQys@Ea$KN^ymwCtL|%h;LmFCQ&v_-C_f=1)&e@oF#U9wHWNt zk_!UsQ=H116J2s3s^CQ~V-=IF zu!6jbB0D5414^Ju@wEv8629%HU1D>0wjD5@W}^*jlDUK2)Mp)i>p246efqD8Ug#;~ zxy3wv;loK4$Wl7s-;P~j7%tKw2S|e{oUu5rcSjQ@ZgJBc;L=&AZcqiH<%qNTk@HTF zdP)8YrdG04__}(+u(M&2U)Vf(WnyoENV!I*^K9#;v*Efd^L?!bH#Khaeaz~S3WqgU zef3t1fZYZD@H2=T{dO7{K@dP(vw%x`zxtolX zLnOLSS&8j$_RhAcq1qd6!ce5H>`~I}`O#?mx3&kf?^i}{4%yVv(MXHAnfuC1!>|`{ zraC#!nVOcTBJro!A&*tV_-&pbYQW*>%YQNFU*kWeIujn4jE+`%2IkR!of*6Nzkj?w zK#!LsGOwp3wm0f#kvkXg)9v|JuOf8|GVlgTJS^6A`kClND@Olicv+T0iocdlyB%;w zgy~`XWx;*hehC%Yf_ZWGC&SMl8O`vo7->p`6tK|c&}l{*T~`7MDRApULZq`4`*gU) zep`;((yksRNh|vNS|?wrm_IOiuNEETs|4EwQJ^PcJL$p%Mq29};`_#PDU=F;qs*#K zwb0V#!S{Lx%h`3%(Tg?r63RK_F^fBs!ZQzb4#mOkEU9RFvso1-MF?0Fgg*IGR@0_Y z?%_KcDOJSaA+$2v3$F2EdR#FcB_Gp${Tlq*A)KP5?4RSgs{=^i==&|C1(j-p4pM7s z8%SGx>&w;K%gqo{%wirPQG09meki*gKJ&3thWuaTh_PxkVUxZbwmg}fCwk0acuYlN z`tDec0wP|&ZWs^nH5spjt-dAKiQ#Y4F9mOf+*c!*i_whbFW;pdtbEM3SFXlZB?9;e zSby;Ble6Nq2!9_>k`@<3^1XO#H$x`5vFM^t>yU2Ih$10LWyUc#bu4^Ww;%-$p_3LB z{{7KK%s$&`DQInUPuSed`|!5wFMoW$u%WkApQ_6};r(siRgJSuNa4(_pY*qk>f`6N zFjP}}!*jy(f3go74~R#Gr#V@B?DhIr%RIk`xW{N1iDWo}FA$G^WqW&Wnu;Fru}B>_ zjtrlhW$%)nk>(F96Z!GtTvzI5QV(T+i=LK>9(ULbdr#NdAa<4U^)(I-I~A0#B=D6f 
zY7G+6){1WLk<@?q9?!-4&FU@zfa3cG+5PA3l-{$3rycXZrK1#jJYtsyx1dM-=N;MW z-aGt_-&@i3;a)GCNGio^)^rR=F&u4KC|co2ujfcd+n`$Kz?jl34Pq(oSR9gcPzU}DU#`?P42|W(?9pnsua#K zy6gw$9N5jdEgXW*_YbU6QM^T?&Vwa$jq>^BdSg%VWNWfz&0dab{Z7h58~|!CGpQ5V zyMsIV&^DvtJGd;uaytAro*Ea{pBj5_vz~Z_#^MY^^qg(+6-jcaCUnlVAh8}M*_xFB zPAk3~>2g!$-1O6)^DN;z-S&0>(3o6+^q7)EehLKf}Z`Jz>L2AdY9o{{;0s>7UhcSYevmYanHBPy3+UOQ6oY)!vb7f z?V)Y>&$EcIhv!>{r(h11zC0tIOHgWLO3m}Xu(Oe;^5;o0G=dMZ)KRb1v&)k5t)MN4h;1JzJU^DY!}%)+@(#TBsl<6-Rs@_dSUbANS4gL+KDG*&|kZ%s- z5Ouib43uTVlY-ft=%L;9p?>H2K=w=)0dJ47NJ;j7|2Y?YcNc{HpBdn>wv zcvT#w36CQv_Q#XNqa+HP{eS;)_=lWw*Veo*Ph!t~3(sGAFWUfLG|XCl#17o{KC*@F zhFv}1r)cQhXO0*}g`=8khzsp|4I6gM8)%k_vfJW9KR*7FrGI_mdqWKA9jc7(bX!e; zgYCNDB$5d6s{eLAa{=TAV^bIsR5`5dR*pqeDhK9d3SQQyy*%=*k$tRudbM%)rT4yn z19_$Uv+3;|_u-Yqo{3r<9RGdVPxvuhw5FZ`CA!R1<~7fqzyq3%-Qd2JGyk@3WEmTc ze>&fsn48ci4?qXE|NGdSZk*lzp(TN)@BBt;G6BQr^5YogtnpsHt{Tc~X0>vy&THv` z4*)1z?Q$m6SA+NHexJ|70;OJ0aEmiolc!lytC8B7rK6$f877H^(9AlcN=4vPAdT6> zzoI(43$|WSs;YqGX%Wq4vj*q7hnZ>cLTerD_ba!$)q~S%6+(^Dz@?(eDheV^*IiBu z2XRephLB+U&cKTnoT&M^4{nIP>SU(3X(oz;xFeQo9M%5?`VIHzE%DQpRc<|<8h4Jt zytD#%(*;ZPRHC+h6j>WUnYL1zUKBDb?_aIo|I{8Nar()CYCYMp-0-b=Xy|QS`j}D~ zqQ+5!rke>XN2g`doEco#M9FZF>2x7b{li?8Ur)@Rr{CresGv)O2$zP)r_ecqnSDC& zUrsO;IB8~RlO5a8mZ14#(9<8{$BPrTr*GPh&)ZSMk9$frO;l59vV7W%T;*;9I!-g* z7gFmhwa+=S_t!;8b=jiBQ%RYYFtG^r{>9+ax)kl*Xj)94_W;~JjBp|T8k%6n(o(Br z^3BwW>zMelE?kJSh1*+EI@6{Z#~4;vzb3_80BJ6amx^0n4!2f$h&&ZJF-`Gd6|*#Wm7Y3XN%G4njeD-wZ)c|(IT0S23Aplzi+vxcKGXDY|4Ad26~DcvX^JDv z^%9&MFPyYBEM`k>RMlg(IXx=+dM|U!r4-VY0?Kp-ehfREOwge`&9aXx6b<>66?T8t z20!lpDJhup(yxOtd+^wy1UixmkELfsm^sxR=#sK)mwHbY0rlVy`yJE{@btCLJ z0wu$pn9U^yWXa*Gqlp#E%{M7m5H)p#^?vnV9F)F#dk_&LE zBU7!r@?I6wT>Vr~>_18Ls`ZCsr-z!Z9V2LP|6#ta_S}7RWz86H$i0q3Dk`*H+y{dU zo2a45Arv4+cP$triY@cTU!;J#E3K$b%-!7b+z8Umzo8mv6^_QZ-x6}rNWsw~SyZko zxl4}Xs0K=0RpIggoE7k{=Jjr#?kGL6&mr$w%^Xb{ZIuB~9<&!}kN7e#gKmwTV+xMu z+zsbk#2>Ug_b?VY9D3SYW2+ zlJb*iazJRr8Xf<}|Iz1iS7O9jh?(=&n^7W?!oZNPH{NqH{)P@(n?;@|S@;SZBJC#j 
z1?4GbOQ{#aiRf%bM(DWf`4%vJ1ABKnF^3E==Fw-|bAuU7uL0s1{&G9-!R(xZWZ}?n zXn7d#S9?jrySB=m)Rb!DVRIm3C){KyI>j9Da78x9Z+?PyH4YYCg+XXiW56ZM_`Cnv z!229<_6;2Ys;bRXX>e-nuEUlRbj6I?(Stu6bJoW?GK6f&%buV8+xlEkM_;#_Pq4b(nLNzuTT5A8z7pCV>U4Hd6RpJYTx>J{#82%l&i?s2(`? z&8|yQpf{v8TSnd(1;!lB5TF4DeILjV*uYPXniAKP{SaxyDy=QL&h=~3{zQVe(~9%X z26L^hek*OHXP;GwW$(5>4}@O+E^s}eG`wzVXpsr=&KO^rKyIxKY`Ui!*Tp-~55KKJ zvmhi;gr$0ZsjJODynD=RdaQU7iM{+x7}O3Im;5nZa1z!Z#LNWR%SQn- z6Y{4s*~`DeAWHfq9@9y0gdkC{gJk))C864^;f82gjYLidvRFg*x9ELjVsw2+?WKk||v3o^xn9+MICHbA8es zbTi`^ia;GC|7(&c&>9do6O_F>eQ@i#+U*YV+jHdhqGZPUB~tPRH<|E2e>Z@e6ZCs< z(Z`gwa$xh|saveVa?-(cL8BQ!YvF2);?+e;>{%(uH}YI${91PVYWDTI&Jvj^wV~O5q3`=opH8 zK?s=E-_uR%_o45$8nXRtv?6T9U#KN8yw%A5PS@GI(U;w`O7xXI^UE}=Doh#b@;n;0 zLY^G5K|EDx>OtS9Wo$~#6hj=Qxp}NU!DOm+d{Wk0?1Rm@4pv9FqRtDg7TgCCT2-Zp zyO?^6-Mmse5axcc28Td%$rRxnT^MO5_lv5Xd@kB1GSY?bI$`d;MOOIL;=A1|taxr2 zh~5(!)rop2y-)HiaO+Ii`Yf9L!u$1!YaYE#8(!zY5KwO9WgF7eWv$w(t-Gy92zh?f z%VT{Y{~^Zl)dw3|wKOL{BvkWgc8s3@m4wLOTQCosx?EFX2oaw2Ivt;VndT-XR3T8# zVVgbYK3nQ^gEEbW_29}1yw7_U+f|g2iaNl6#Ltm#n= zXE?b0t8r(&^GdeYiM5^@aCeY75*t*yEm37>1C;QFYG5-KLr`lgEnQ|ZQncZHDx9qu zejo*kCXZfHRho@*#$l)|e8HJbA=5tlC)vAa|GZK3IH>>i4E~_nqhw{Pp|QQ_H~rgY z>qSLLsT{K)Hd-JW{>zZ>BozU|=qjNdv?Nuj;@vqHNTn%i9=-LV$P20#{s2s;(P=_g z9H5Ri0G4h|ki=9~p3!YF_I{8++5&*$z9{^875G}m7goy-J?a+wB46(ifZ9n@Fb`I2 zTW?%Q#fkLTSYFjpwd{L;oc^)Yqap^@k<1Om|k5P+$o&kIsQopFM3bG>I_5nM5X&sp z2E7aaVeApOy>Y*DT-5Q(;O2I^M9`7nPiSO-AO=o4%R0Cz4k{JIhsEaI_3%ZymHgzd z#0XIO_M+(`)v2fYZ^vMgBL%;*pp_CujnR2c>q)^^>{edPIM@KSrdTZxG&ZB z6#ZHuIiqVgFyno`-T*i6RG%1#CBCWLf_M7PUG=lcR!V8&Qe9l`F031wnn^$2es-k( z3o{i9AuEH0Ufz!RcA})w4D_n@@qkh2;*GOnSFq;`4HkSFFvBi~m~-!vD+v^enfNYI zVA!H~OV}T=Qva62G>c>QU?~o|tb4$1hs{~6nh0~kUVpG<;BJB923XnfbGN`~gkEI> zg+oUAwP!MP+f0I6!q z-ntfH0r3JA!oi-R8+T{29(T8I8r=^5-EJfQA5ZTY&i4C<{fDA9X>D4>s3>ahn$g-j zwi>Z#32N_EtM-gtEB3CfO6;IkZKbHaX({e}fB*abUynWya~#+4&h=jB`Fd%bKh?CR z!RFY4Fbg_&c^_WcaY!%y+U(v}j!PBoz$Sxz#n7%vbH|CUC@`9L0$qdIelOZ&I#7)Z z=o;SAED2Yn6}@DXJ7HwJ-p6ElL1!;&NC$gv=N}vt8>nH&Kg6i@NpLTC=CPM=ybfoT 
zq{g7ZAAJyP-u)`O0xEfyB6PsmKs2e_Y^nBY^n+C752KZwSsaOY;K)iap%$w!Th zm`*n)@9@R`czVS~6G{vvd#9 zNXqOw{N|!YE#&su!Q?PR?OF8Cxz-zCXj!$~CFKN{nVgg*JN;v2`-@g#Bii z7Esx+{$TKTTBxdZO=2{@phlbT@oK=Gr`qLG&sFXwM@i~W?{Zs5eoWCf&h`;nn&~Dd zScemim&r@3+bgNA@38%B$Co&8wuvLrVVrQWscJ+ut)hhls5_f9CW3`1L-Lohinj$P zHx^+R!~l}Aprh79%{GuQ1t)r-jLL8c`y&e*5R(y(Ja_mITJO+iDF3rhZ-tcid9|yl zbGPSr=lxDHW?_cVdL22YN2Q512OfRXl>41Okd;8Ju5!j_9d3#gAX*^`qXbg#$h~*< zo@CLzzpD%#P3LgTs*Qwua~4SP%tN0C@T)drTd?kgG}CeLioVempk$Nv@X)!I#i78J zXJQftzyvASKFC*p#H`~a6XX&ViW~>{7wY!%uzk8{y*k}_wa4hX zQ&S2)&~wIz%HWJXH#bFMSt?-EjbjI|Q)U)2pFKe;8(xGX=jcpKB(HVDb;Dpb$wo$x zqw$*4Pk(p0W@eXyao@^7aP+1)jiM1`XffddH&v%32KFJe#dIAq@Z{oWD!VzS34{>T zE~7s^{rh8Rehh!8gU`w0Ux#N4yNi@8w|~cj2syCTUYG;rYy9O0dy>SwK5J?p%3Z8$ z7GQTeG|@~mhzS;25Bcz{4++&yxl+!-Je)6)dcXegP^FHE`d{JbD42WMW;LANxf5bLKz;jog~(O+9W(mO6?3yM4PqkkK2m%;&|i{KPan4G-*!&%xDV8aV24RC?Jn0(GZ+^>lI7s>U?rJg2k*(fd`gV&; zNbEp8gahg!=<5DJ|2vlw@xQSHr?u_T6%dH!x(YeAn7yJVyioAwn{4a`KdTK+ z0zfGdUyiS=WD10sb&Oc%%wmQmK~O$X4T&j{Yb@u}(;N@nsLi?$xok=w ze;VUV&H_26LJetmrE#7BMP40wK2SYiS`dR{3#b`%mNp?Wrml`gzM1Uc&f>#>9_o8WjufLi<7jPU|VJ9q+(2)Om&(3EFHW=;A9`Q0JPH+KJlXt(#7GfKFwI?uO(9 z_B5|^qx+N(I>mLd-EE8sbpGX%l)RXk$0ySgCkcu4mznZta47g}j_H(Cs#y)2$JCN1 z)w;UWxCry9%+Kym&yK;T-5uJonzuhwJ>h=yizh(1V_fR6{@l=-8_ARi|4pC*M&jvR zhU&=PZr%LLuluoXTlP_z6gLh~2b2uQbJ7w<1&8vcFxzr5(KSs4wLK?;jQQ{jQ_$Hi zP(ERQ9KS6Y_Y5Y_T231(S zPb~z{Q7XWS{<;T{UUX)elkb=w3Zk`@x5B>A*)M;8#beP*z6d?ny>e3doBW!+ zYYSV4%Q(XDfLJE|0P*-Fe;Hn+68}gqMNeQ&cf9!I``*o+y{sBHGFw>>z2ctslys8= zTZ)`~*0hV!E4uyNt3fxM#AIb~1kh!z`;Ol1VO!$3%kTW+Z0_#>LiEv?R$N78i!#V-2^!DLZ& z8thlVyq>2#pZX?4&Ln1~T!p5}o}an_Eg_O1uTEQ-IpU`S%%Kw z@?bbK?n&*^w`Seb>%Jilx*XO}ZifLT-(tL(I6oz=LYf@{RSR>$1BP!e$c&$TvJB@6 z{%+u!NEuyKKe*8`5N0U_k!I>Eqi}-p3>NG#^ z-(~k*W386!xr^WmwvMo{dDWEe^0(I!!{^9Ml<{?HjTNp zyRcLlcQ(lGDOL%5==3H_@p>o7f@XfOy z{=Y?F(Vf>hIk^qX;yqtH1HWdKnJQcB@u{uQI<;>fL>>3|i8v@{t484XF%#)JJ+o*J z@`2U3l5)cVOj~_iaf!-jtG0E)Om7Am61vehn@3+k^~6`GUdtsE2i2a|P&WoFfA zExoMPZyct%GXR2vkOL>m&Lji(1h4GDijxYncMG~)*;N_WQvMg{j?~c`C!t5ibDs0+ 
zVkL+wB2P-hp0{V0-Ja%bGan3S9K(mlucH2Srgo*R@rWvEwAhbP58}BLNlr>a{Q1Ov z3gix**QmYmp<9Gn=RW+=hJfh(r+HSa4&OhxJWRL#yf%^61fCs~sa54nYz6*R@KIkU zf@6KBRo1FxuOB|hpMIW_UxfD58X^ecNKpe&1&=i@YsT8iU}2Ff(FzhE`GULs+e#ms zX!nlf(8W|ZNhq@t^Rv5ln+z6im*Q0mZL{`aLn5mh0b75^*!A!@28#G75S-Sqc;^G{ z(et*7t7&t*%IEyFm(9n)ZZt16)bpq9f0h|$T9>HVu!Lzt7#&nx%xoSIJQ9fRL5rUkxgyfca2GkLtR)ci}F*wz-)Fb z@)w1bkpZ9dsGUE+b@0_x&o0~rQ2*}l-a5~Y8SXnUuLrt6FauY_dv1T7j;r;^uPNrm zrY67T|4l$XVC3Y`arohP!8_q^f%DBgx;4jx6{@3G81>uc=WPGq+vRNPFIH}hUAw+} zt_V(mCooXyKE;SQVN{x6io?9#HyXAH0o$nHHyQP@F1}u+ zAsT@*d=+n+5GOX~g+N%RJ0DKIy|7#V_ph^(?yY&;N4GicIH)5xR-LSRHk?x%0U!0$ zoK0?kufGmu1-i9hudm!YwD`{PoLss7>mqYLd$9_`iZoISGaoKkM7)14SXv|iAlG6h zr@+GlRQ~##;-@HjCL|+$NgB@rn~^Bzu&tfgxx2TI3t4}u^y7;HV=S@iS7D0noeOv` zmN^zIK4gU6lDkC_H-3D9syK!{%)s3|*MJh_FID@P0!M~PPA}tGL;D8pg8y*MLqSU< zUE?gV-icS&@+Tb|TKw0PJBQ^$6*rposUvFAbS*tWJr9l_Cexg=_kRc#&Q^pQIja`) zXV9>fAQ}aP^=J3chTA5}nB52pB`n3SoVeqCfWCfkNpg`z3Y;{O7!2ac(NvZlW)NU* z07P^0s>tcts8{@g%+Y%oiB4>$DziIgF&G{q|0AUKi^uXl!HX4=J4x`TM5TTZ(JwS| z@iKO7LY_W*K>1xB^Hg(&uu~+hF0$5ePuOChkJ05Mg_ROu^L`(_lx~xH3jqGy!jn3h zQ8dld2p>90a;dU&GkGiM=R|BeKJ=G2*`#!4!ui)Kw`s`?j?)~CSfG>YUDh``CqA!_ z^GlyhPxZZEEO!eRJIgG7RGGg7e|S2_s4Mg{+JcfZ=vCLL>|)Tf=f|u^A2wG!qi^p8 z5th9O0VD^LJYGG7oiY#3SGYH~?}gERgT4u(wtR)MMRhTJL|~CU#vI${@;9(fQ56ro z{g<6e-ovY%vqTr2U#(a2d4HV5&VDa=sGn^>w>6&6M%^inc9V+#UD{`BQnXq$6xy*K z?{;@@lV=MOe=SD9>?EASr(S5k?V}0Y>|%!Dr;(?UpPpQ68^E#q*5jh>hViT76s-V? 
zl%f^n2K3p=5J+nH0HN31M+o{Ako+xv;^eX;Uv}jX?VQ7$Xnu0We8C-)*-(KicRF3D44&9=|+0`X0~s zZgqJ%h+L&~kwwcoG!8iBy+iX9aIqvN%2pP)AdW*_K$3VW}o)gTv~&=4Y}p)*kV z&p(iq*S9WHRz=EvYC8=^K4 ztddu^h(?eBzqxFz>gMunw^j8Jt)N^roQD*bAFT!3K)w@ofG3HTv?G!2Pw{vdb}T2G z$N9Y{+VFG4%37S06vbkh09#c#c>scM4Ts1wLbZ_&fKLt=YbD`Pa6xA-uMkuR4E87@~543 z&TSxc%RxfCC@F$E?1%Qb571B@N1#2eM zcLWKZCg;`aW3<>vSIR7x8b7Fx{t82^5GVG-pMi%Cm+v%6LIPo+a>r1t%TZ5;d>27>$BoFn>5 zvPs1I#WMRBGSh8}a^CncAKR#_n|H_4T6Iz*4a)K=ND4eM= z5S%UhrI1$v_d7v>>|dR2?{3}uW{A!?J6G)wijB7sW=04ozByf0Nmw^m z)^X+}MA01=KMqqMgtLb#(uf`IoOZoEhEZdTkQWm$gx1xy?#*hjJ(bd(R34&>e@wr_ z!VH?$oK8#=D>ir+_+HAD`_T0I?n-*7W%DmmTh3OYn|B{Y8;_&R_L$!07bKfN*sW?g z!*RJ%2B2JC0N(ldF+}(4^H9k-0N@D|AO@TAN&3G=Y;!ZZBCTPA&xs}J4k*E4&7|oG zBsGc}*C#kG8b_H>d(wgTV)-3ec|nn^L>RkcX|fT0I78og8^eh`0;m-h;Xr`DN?*(d z=9DbkxtpfZW7ByrY{0B;SZN;jQZ|3uXv`tTLPc?agUEL}Cp=vyfBGFuK08OT95`aA z;0gQ+&77)nyG(RYlto+J=k8;yg5%rEnvAR1y=JB1X2D&V2*CziVKWuTG~UkS%+p&4 z@yNfy8dK*NbL8ig%pV2ivjf65XgonlI<5ar%Qw2d&fN1V^= zKqDZ~p5~PH)J`i^o6ZafX_A-i+ny<_=)^;pke(Mv*H>YM09s}trq$swp z?8A*$%8n%zm~X-Txh2k}_YAnjPe0?;*8O0mQ$&-&iI=FB7y0sl#p4swRc-s1!Zp4{ z3_$U@g~-d`_KSb-h>aT9j3Kj=?KDrIx-I72R-5lY_LJGf04jV{8_>kwZ!zboIE7~h zvdsWP2yYmc&`Bp6E*-BkT=0vj&Q(<60w};Sm2n_y6-xyNFD&6P(+bO0s{Wnd7iB8f zfGt!PV8gFF#xE8<&T~FJC|&otnqAIk%M(tme^+ZaON{r8K`m`Jn<=ri{rjU)WP!_)U(PQ@g5cL*$ruh8Gi0pV?4Z!w z!*sz`)3D!z_H*~`Vy`WV`ibY1z-<)SJEk*ZTTNcp+4Qz7`+^GCa-&~04|Dipr5&A( zSA<>muJ4K;gV_=Uu!=c;eHY{45Nq_*i}10tod8~ zNNcC>^uv%rdEnXdYiI?C?t$&>h!pTvPS2UZrAfCboD~Wb${(MwC4y4$TF8~tVD<7z zkRkK^du&-kNC4J2mLz?lV#cy}ebillXaD*A|D82pQBipj9jjxt z5Y1GnZcI9c5WstmQNQ!iHSoFUDBs?#;;glBn@F9mO6*w1b&PxQe><`{@B3SQ>3bN@ z&1c9OrWoAIKQYZqx_{ZeFAmId?=j#a9sh#ikO6_?TV>bMs(t(I&B0^e=VU($!^mXR z=tHkfo&*CY-SPq`38fuBEpTyu+TPn^r}pz2vBM|z^CbZTK-i#4j+bz)^zP#o_lRgH zzSz!)j|zK<$B^IU>IC0&OM*Eg%>+JV1Be60S>IAxNzbDISwA`d7+L5_t+HiM>h9Ov zGO{#!A4ZlJPW1PsnNNV!-|8U#&WopZ#c`BWsLuF=R=EW);d1aI+Qi3 zlz2OK@gWbY-}ZyizqkQR8-=9C_=QNM1x;uz;C&tpj0=RLb$G}DxXf4 
zQCBmAAf9MIup)SC@-9*Zj62z&rL}JgW+RGfuUWGB^PU&gPqWtL-{VLJ80zCyTlfndBm8bcG>SuQ&4yvEMfPHxPphpl^?`3GrBWj%;BjCVLr@)c;7QJKLPPY;E@me>&bmiNws$N`vlta;syOCAr zar)XhQT*R{=Lc!YAi?wi7wxWgwe+*2(@S3I+%fQHj8Nel@4K{mqB#p=r2siM@00cW zJ^$Cl-*}1dQU?0hu`&JN^7Ot`ly?e%?ta5rq;Rr zeLv}iwdISCSGPI)`-cMqhH-9Fx}+E~JH~?mmjLA^)ug4)mZlxY$0zsJ^P^iv*@(94 zf)I3B!$*Wm&y*`nuaIcAq2e|mC|3t8HsGCkQBi~>2Li=*SW}pZtQ3@i^)ZH6)e^)4 z+deIG+NB6Mz5nonpY#MAymfHE1wOucPD$mgLZvf&E-B%PsNEL4-V|0p-&mGfAo*C& zH4!?!GCinKQz}dX-e&Clw{11M-vY+|PM5e)o;1Op)(d)8^P0y_{(X&Crr<>We1NSs z9wjr&Tr}=zdN2ShZ`yR=@!(1C)c?*a01z0bC<{>lq>s|pPS;48GU)m2wdd_D7#*8( zC}fwywxRQEJf7u|7Sa%3T#{1f>LypA#eJjJSyLLKGBwX&_vEeB;ez zNM&;bmg7sNTpdKipGAYPX;GG<@^~M-@0xzI%MsA>A^kr`-(CcS>YH`m?Nz!iPZ9;) zT>O*u+&F;I)OjYN1M6Tlo7SgUQ?$DMJ7UAPnE$v8h0}%nN4iY7EHWS_t^(ze z0z#&Os*P$_?^J<@Z!gc+-hE&5{|hSHrTdKMkX`y&7n-J+wUHUsP?bG~$cpFV2}hhG zlHoNj#?WOl`H#CNGXQz$3Z1)21F~j*--2!@ReKma8!oJ9ot~=w-mA@EMyX>aEbuI;%;)g2$N^&BH!ayi(+8|68m zP|RS(azz+}&D%%7_pq3$fDxJ&uo>Hhi>-#p^lYPXe%glYuG})>>s$XA?if&ffv^B= z7}gWnhYy<~(&pz0hED&S-H3{DeH`D@`4fTg;N@5()|UtCHd^;S4>zu3(*IujpV%%| zE_QtP7@ZVW%|^XIq+7;KvTrHZr8)C|pLr2m14Eav+F)uqW%SzZ(d{eA?p>4f5o6c3 ztwVn2JSqQ<9$)wUfsEV0=x<&NRlJuPYkoO!+880l{Y!1aW8tNlV+{?x_-5Eae7&dX zph;&*drfS4bb%0`4Y^E03sKE!7c20M?ym-VSfi9PdTwcdrP%Ms!K%dB=JgE$1_tc0T>{bL-K@t!CtB5pu5K5%7unfJG47k03A%!?BS^LQf@f)@hPD~(k zs-)13XQ9kD42?Zimywj3l(9@~vRor_M{P&rKE!00mj`q1K$=?k_*xLzkWnFF_=t6n zYxand3S{Z<>foRVlQL-3qV+0^zF&8kfPzI~DkHCY{+$K(^muwb-v_I~vtjSMYxbka z7`-a3%Q+o~{4$zFcP5ilpTIEiDS$b{ZDnPpb_^R#w<$Q7J>j<8a#%DUp7)_14o$`e zFoz#bHm0^$Ag0nHUOHIb?NFDWJkYbAuJDL$ z$6o5gim_SHuGZ1&N>w1;yPNfqLo_2y(I;##2|r*W zB1`mF*X~8R!GtL>; zx7rQ0+4XCN%_y4vXHN?4EPodW_jyk60ty>S=7##=*P#aPAzMHQ!39z9_}>AuVO-+Pzm2Zy=PbH+}$ zj;?NO&$w)rS5KFT{nM^1f{d2f$Lo*%U}Kh3-4}~0$NneNFUT1G{ban`Oc8k(>EZ)F zU!VIv5%#dY%aWl6u5aOcCp9C<&aPnVY~a0-jA%qN#n|xZpw*xir*3gF+J$a~R5(Db z^~~*LZTumDS#kokN_yI4VcfE@_hD{$GE2Guc-kl&ud%W0wwOW=+(d%P2-|X2xhBmOxMbe6F zpV>HCwEJ}%nJp&=ipcNo9!#w~y!Y|lo{~*1QgEiw(a#E(%Gr<}!InW8>hXY)6faN2 
zgmIn?>5nmmGU27-4;u;M{qZ@P^8Wt=8#|MhVkSfjTh7)FIbl)-2lJuGLz}NQHbAyK zC89ggTxIszQCq&38$_>6T)ooHKmQ1K@M|0f8%nZf)-?;KA6T;53aD^qW#P&|r!wzt zAhO5RoTTTeaH`Z)Ii~$;V81LsT~B$}rzbmZ+if+D3MXm`ev7VU?KRbKwfTf|MBDor z3%V{6ZA3E_AdU}h7z31(!Qxd{i8WsyTS4uN8c5J z*$Io{kC^4Bu`51K4FD5|X)JrOEtB&0$MfF4ecQvOjLpOmjVD7sS?)#(RD@VkNvRSI zCu>Ve$5NAA+)d_+eDdm;&b+eDpZ0i=f=by&5>_`?PCw-pHF4!~#An_f6Zbndv|uPh zz=yRnp=nuV_D@1uoMoH#F(R1+0f1AD>y2A9m|J5G-KI@wnDb?5(r@4C{hY*pB}20d zJ=S5miVS7a{@ojoqx2oizsleBwDW2<1v5HN{f`50P(3CO@5FC!9FM-+w!0CzN#`sv z!X9CM_uC#lWz?IP$4z!s=OO;fN594c)WIWqj0a?W>(vd&)Acw<~rBHa$7o6yeuL0UXFfcfNi@C~O$D(0+&# z8=xU^Xf+Wut>hYOdqK@G*L_N>V8>Zv_ayq%p7!>gs04lTNuUV+&N_owSH@3SPsgRZ zD_A%DxMG(?c0XOa$~(#qOSeGL4V0Em;pe7JUoJ*WaWf)5!2UhPg|^&BeMIrzIxSn zfA;QvnD-L@YWLgd>C|)#k_YdH$>aEza_DWU zD;?TQirm$IcoNCa9@$)fRqI@K<$U$3e?3N_kKg%uR4b3`9r~~Ut|vfD&}%6RAxf2M z%yVW9#*xDrGE}yZ0o`|Qa>4FiF_f2^>-<5Y z`3vJsc-5*^V%wUk4?n#8zvzUmYXb3mQHah$@$i;2lW6j+1#F}xVtdQo$IIOz&ZmW- zmBGQ04k$*!u1(c2b3kaCHmh~QGj-;hMz_9;(!(P~q0j*Nx|5xq z<_@q%PyT`5cADEIJCBLb)#;h<6oQHvo}SWZBR$Cd<;~>8Q)Mkj@@gv?Ft*&_p4@-Y zz{CQ%aYbLHLKv{=M;&77LXlK}@|MWD@Nu5j7t`#oS5s@b%@U5yRwx$COsmsXFtx=c z(yQ&AVefcg6EX%L!nYH!#>feIm3}BX5xdnFvA2iWbcVK~(XBaZb*0B2VlS1UwQD_1 zkQT!DX5{v+i*y+WU~X~1a%!$wHd!#vZ45Ha+de`I8!{61eqEaCB1-hffi7is)iKWJ zoTvNWx1(2Ovm)=BHAPhcL=xpCFx~b4Iv(>WSvlk$P0+kSM@Q6ECtWPAK#3i~d9lUN zW7Ut_{|BFWm-V)G41T$T^2e;|X2qk63yKCG@yk%&xbH2ojnlEq$7a5}X{Dz9K2F)} zD2_g;jvw z_)I#qv0tUWkYrWXf_}m9V*Jx78Wm1&S5Qm+S#3zF-|7u7w=LUL-TMoyjV$cN_AH%|Jbk*5#~-!YmA_v4TNQk5slVTsqCY)3J)~ICZW-Oug-uQi59K~ z8;KU536ij}eJZV96nmSM!r4ej2Wp7H`-t;{))sfM-Mvs7s%O3JmS-e?8+Zbhha|@~ zd!g+aY$2T(l(1#}??H=fb}8M2XF7S)Ga7_kPI{HrC!VuIht#9o#T&~_3thLXOB5o{ z%|F~OF;@w)s#lC^#Ja`UDSvd3%0WK@P$@xZ!Zlz2d>g-aaz1k3kS2f|tu;lk#rjdS zsJ!sxZ)btoc*lLG+5fGPsR@A0jEhejIk8Tj%c(E7RC8u%Q^iI!>rrkZ)Ya!oC;;W1 zuG!nCn}dBm2ZIc>z!2U+{!$7Ruhn9q*&^PTc9O!H z{cQ5E^RIu~>R29SVNIpHq`JNhh&%oDt6yv$gK}u~Ir}v{d^2PGvIwP#pN1r~+uzSD zW!FVw&>-aT*Y5~SPY8D0IvxQqmKq6J9h&-3ZV52(m8bV-Z`U66N{RJKi{zTNO^*}9 
zxR;I75U!B8fka>B;>=KUH90(8+jw@-$j1o@34yXvHjf^M0`LJK#l%KFRI<<;Lj?s0 zPI#q)DNp_%4_E`O4^8A&3lR!q=t#yWeTB{?B0%8*7bG@glznH3KvJYOQ=jYhU(@y3I5*TiquScqrf7@vGo7>5Sck^n>iYYbNb!n|XuP^3lj2~jt?Ig7 z@8&}-O!BmAiZGe9I?sCw4PPeaUr4OJTX6fKcx!sQ_hFCvC~E$7!<4pxfKdf;8nGm9 zO+YXFm6brjQzy;I3Y*McP5_%Qi+wzTL z(&qVg(w@H8ezO=M@}=u8_kQqZ&~?ps>)=(lf0^^#b^h`w5<2~-G|z?oBACyUqe4{| zy1MSts$});Ojnj$!@0FvnIZ(!mxSdNo$J*#IU8#qeek39r==j~&Y3gXui`EVw#5HR zbHVJv!T-xg-D@`DNU%zeYHZs<;+yrZf-#9i9{E=|HedQ^GXu_;B|f2WWgc@)c#043NxuKDJ>qc@D;B4#7*bVl7ZI1ddz&X# zt5^B$ng*6W$_7f;#`tURHFdOiGn9o8Ikg&N!{}n8nDI75)r2fIzVozWRM&0o%4`~L zdQfZ}Eo?}H`JILHiXQ2<5?0Jexgj7v_ysnT_3FeUYNQhfi09A#-CTm-@oOeR?DBW6 zB8Ubu_EI@M_vcxLgii~wSaHSw>3=0xlP67k3PZp{>4oC=|EIBCk}`HbZBa=2WxK7z6F~Yux?L{UT@A=xgV%CW1WoWN34= ze+i1J+TWDuxQqr!_bEP>etU?jlP0CY`(<7L`kx8e{C_b^OS-=yt?y$-LDq$zIRqqs z=KcUA*CO!Cc3v6zC3K7dbIcm=T;*POA9rl;uiaoa(@HP?oNjI*ABBSxtg~|~BP~Ro za7V>jo&DV1`PHU*H6W}=89RmwJLO?o33d4qzOC{I*-RC~8Bb*;LkGqg10DXS2f~=v zYp#c9D}zgo+_YqOptk4X=Kd%GL&g?b>++OH5LF?zUS?jM7wOS?Amvtfg(-uNnw+@> zwFIz)@@Eh3|2K0GU=Q@$)p#UV5MyWbF6el}!!jaM=c!z)5f4>tdku*>*t&6nhB}U~ zpudEvjt_5&chsc=W9>08Oa!%aS#kfA*LjRMN3C6a8y^11hoadGus-}DIKHru zGVUcXaXv7hu+-8K`0#r-MHIt{c~Rp&E%@5!>~rd8F3iVbJmqo4=NFxSp59!2TGWqx z|C1My_AUfx@}+?)6TogpC{0U&OO7qX^B}PT-^#hy=FtF+`HTr+bYe^idrDt_A`YE= zLb;6=7yP~6Yv(8|zG}y}=gZfIOO%$UN%fVz>~WXZ>KI`7>%XV1L1$%V6?a8OJaaGc z4x;Sq#lc;Wyvk{meRaU|a&Oo%mqz-iG1<*W8M(GR?faiwvzEJbLo0uu*-oe0k}t11 z0eX!b!#+T6-Dt{w6T?dQ*mxEUl1(h*615p@_FtY#A*#Om#k6;I(nfXvV!IIlQEV>{ zS|F0kTi-wsjf4;g@d)RLrl!Iu!&@ls6ha3I{W{69c>Ox`Wl5@A)lz9He?oC{_pu+Y zOuq$997YYPqt{$*zFc;U6c={=u)+@7;s0 zC?^E0d+45PY~)e_S?f^bihrGI;5Pf*1*v@_%5#sH1fB*+(Gpv%w*UH(X{C1YmBYZ? 
z7?e4I%=}(k4DPOV%HYut2g^|NsnE-#RH^=0tuhJJxv>R zV@l*Z>w24E+j^W=)Sv0Kq~)8-?xOdydO7up3kD&z?ZaL~j?GCkN2oe0h*>Q@?=<4N z?Y@SZ+fb=9=)Z8}UC`gQSuTHkB6uagxq_HxnvPpwvyDqy+(txMyedFtBAp|}5_E`B zZdmnjQ*P~H^A-1iz~j}|<9Eiccf#Uqv9~@||M<(m9`PcVqK8vUN+W7LIl(WFZ|hr8 zf`uClgA5;s{Cbv-Zo18QGCWQFj^XHpvEgAPGMfhWbk9DUQwSwm$0HbMnB(2}<9YyZ zByd7oSnN|j?LH#R82aegxgKolmJ--6vnOc&n*-G%eQeN zdO3KpS^i?j^+&(K!f&=x`A)uQ&Z6@~Kb%s@^Uwaynf{uj+Ws6B>Ce64u#0&8&BfTvf`UrjsBcVk^NURoB4)1;Ns${@*SLquSacvzJ}(em_ubE{EgTC zZr>>JW7ElA%>~yE+$4(@F!xCvMhFVPVEAhdUKSv}({FTmC*` zc*9;6Ubptr$5+?;5{zDJ{?eUiIQj%mB^a7FqA~i70bI|56=rc^GWA(WWeoC$f(l>5Um6P?OWvs*u>P*D*_zsV5F5}I6wdZn3)=rE|=_EGhD^D?VZ{uAL`q3fh?>xu>h<3n7zw!7ro z6L9OF6lQPL>G!=W=rm*1>|x>lqF>p8WV24CRKVy?!v?u+X@#?feE5ZuFW*5 zk=6OgkQ@w<`TaGR15kd8uzUnC_f8X!7D)=gd}8&%Hj&F>G=8SXUP8&JTu48{7J6UX%todpYeQdvbjp`15WSke<1aa+T+!u zc8QGJcwT*|^^y?{$HadIloKv%0mpu80lUrm$I@S-2x|yq6)H_ymec@eP+oMp_IFW` zB@{a0eh%|Mm!!t=ed}&r1y21Pz@<}W1A(yI(aL+6@ksRVs>@ZDqPlYY=ooxMd5EC_ zw7OmAh^7FBu|mqR0a4uefZ*u&PHO&`{D)RnvdQi(9+RfLIe?tI zyuLD4nq&Idq(?&aw$baf8e?vH-f$EZD1Bi|-AY0WVb~$ic4_yLCY>{q$3cQDre%YZ}n_<_F2yYeb8w@%5fAhx|;&JvsasWPNJW3AK|9vf4 zv}GyG+e&sf8m_LPNSG7__D^=@~Y z?Nb|xo5hvauSI5XB1>OObcnuP>qq2C`sj*DlP%4^dj}kkZ6QR-HJ-&Ei5Szha(^A5 zca;k4Y+Eq%Dn#$zd``DNrGFrM$jPxD_!Vt&afek_^ZxmeA*<)SSUZfP%fZD%IZjFYzq^*jiLpkJbac z#jEbkIxd5}Ew9Of#Pw+z9@)=M5jAb!*1(!e<2)e$1!UW{=yzML$6ViNoBl z=D0Bllym6$+~PR-NBrt~^Z-Jc#|qJWb$7iMbay#lC6rb{dP@Wx8(y$aAW(2!X-^w@ z3ac~qU}BiQe|+s~)Kjg$cJp)eVYql;rH0jVlkA+=GF1@G0uwqp@=L6NDNYu z6GP7F(oauqRN#k%M{>R_Am#)`;bRtcV!wt4XJOLInKZN7`N}!pV-@f>Uft$vD&nlW zx^t>T4%BZX`Q8XkSR}D|3=lv*<>wkVEJhE*kgWLCbm|I4si5o@+!%>gNUE}h?Z3(6 z!FT88Cb+W%OPtF+JpsFwJ(pMC>}zMkQFsCw3t7SVk@#3xngp}S2BUZ%hd{%HbVKK+ zfHO=Q43`TgEGMJ_0w^aF8#cx#=bP>eyZ%hn9^IvFtuERWVMpKwN4vD-%pY}w`oL+T zq%z$^)zNh^jp_gjSsa5lfs~oUs=09kBE5nxWe=3yup0$hY*2ed@=?Jll;%fpX5*q3 z^!b^Kv@%5*-nTD8;-Sp~EjZm^9ZYZ?P(OL)cKWJst-YpuVRn0lj!fmP{{P(}s1?$Z z^G=r``>ZIxOrzqaZm`?S*N%g7$FO+SrY{2W`Cn?5XB#w3k;{=lWK3e17Ra$154>(y 
zE=vvb$4ffl$6?z|ZK7?J&_15rEFRJhNz4jb#%PO_Sv8k7tmM^jg_}ulup8BDPg762 z?9;O@H{Gx%+7?%ym8Q-=cPHDoz7}^*x>X5j9KedxP@WW_K0EwQN;%drWrIW-;4?`g z)Wb_%AUWBwLDE|egFsN zSl`Z5%@W^&N%k-Qk~(jG6I5HuiNEeXTctsi*+;{{AS(cK7zC`S2PJfn9z2(LMPqy0 z%@`Tt|GPCO8J>p$^*LmWp6;d*QNE09$MI{ULUshbk=!= zD?mhoiM!`Jt@N)35uWofc9P)lf)n~Df1D+u1w9x3eKpPijO+)mB9UU~*d6x#gp z2v3HMN9OXjENJ`e-#BH4a?=N0`F=4V{zs{tFB?79xn@vYtdG6qOxTrAHmGek-V|{9 z8%&B2;w$5k2mYL&K;mfu$dhCY@^McP+211Wv_@E%iUP z03c+9FLO2)&t^w8{_OuS_0|tfzj535Ku}89kQj)>2muioJvs)`J-TCbmo(C;bhqRv z0Rd?d7>#sF3n)kk()H|n-S_o8&;Ee@uzfz~`Htgw9r`Y9g$9yVC?w%AB&ZVXXJ zh7D-&BO_wwzz{wu^LHWXUw;~Xl3eC@T1M?n-(i@uJH_Gf)T~K$gf1$Do5uUmPfa3F zQsuq@oQINjB!NQ;Vp=*)Kf(%TGo29dCmYn3X6;OIGhNlN*3PV z<-;&1rfAw&((vGdy{hs%GcB{!D*S)tR1{iy;$}$>59m}9s!c6)%gJUz6;8Qt_@%#v zuM-J7yw-QvND=VbD?dbt{im^ts4NZ* z$i7}fc^I!{wX=fscX!(iNQ8V%-pP^zB_Ya%OkJGeaNIX5@f!T-xHe(^&ztJN6C;3p zEOU|UaT7@)2Sm`C&w}=8@8<)h?(VGe7gh|NUhZ$=T%OG~n7<_`a;gqq{1VK>6(n)n zUw+naC&o$RJ1RGg^D5+EH_-M2W*qQdyvLkAE}Gz9qjY6>v-pMy`zBPNxV_Ff==!AU zTrJ42$$%pV^}d%|W!gw^947NVIv$E`7SF3A3kMGAKMzo)nJxsYrti8fneS5Ya>eS# zsx^NY5nT=Z+k6o;DPTu4C>^g3Sn78lw$5h`M=Wy?c%>^lEsw{@}o;Lxq>I7gqIdCfKn-WaoQh6;}J z6gsef^3Ft#FVeTJyW|y|rwO79Ls95lB{up7x~R;QMkz9K4CMLLNU;**FpV@{aI9Ny z#xS;K%pAMvN{*3@Aa}dy2rMDcPZ8wO+I>xxNcU@cRkL|5kH>QIPIl4nA7i(R8u}NyH8$oq>Zmo3eFQ!EJ^H&UgV&a0`3s=!horC9H(=QF<@pt8# zv9XEwj-KAXT8K;&$58*Z%J&o-0Js~Ad7~|_p zJhy@$*!>_nyoCAwH@n=A#{2m(;Sp<8pD$^@Zx_~|rTrt_jn4@m?o;COFGx0U+Ksn5 zJ>Zf$pvwa&-6Q@H?~Mt zQ7m*Om3{FY*=w*)mw&xHbX?1g6C9V5I$Dzv)Z2NxRW;zf^g7+;&aCZe?T)Xe0X;P& zOc4;OO%&5aEq9w7NF z3Z+ayFHaC+i}NnAi4{tERh-1CGcHs;?m>NU;-q)jFVK;9!W{tI*B#v~>5(?}Z=n0; z)m`8G!+5hHZn^UM_xT*pKi!z(}!y|+IQtBb?3mLAF|Mf4?#b{C)2(1HII zAd%7KHA7J`A~m%Yz_^mu&`M2aD=`b{oq-RMa0)ql$3K z7MGT|T0>4wm-D%vs=5wQr_;l|=FEC~91w83J%S zBZUho22>0eU3TGB&=SZ5GE|^q30}kozQblMjs@IVY&?C5XQoY_@5J!6w+F-onD}7N z8|cXuF#cv2O#7c^SNQtdWglKo9ghG2P~)=mg>nr4_oQQY>CtT5++LoTW10Da|HI)8 zo$~ZT^8rC+$<@%uD7SoaihMA)5^G;l&I@Pu|w$gYvaV7}IzV}Y9yFej9sz+8CekI@8 
zl)ouBuYMS4^Bzh&cb&C@q`a}yP9?-Rr>TNLNN8@t@54`Lzw8~?%_PTww%9_Zu8i5qqcXQkz!nu>n! zTI9r5V=>b57haPY$Epqt(Fhoei|j{VwtgtZTw?jWQOx*~03ewpUttJC1P9YO>MaG=X%LixP zjow^8{Tly#a`*nxpCf+Yap<&q010tb?J?Z_(NW-Y%0Z-i$b+tmt=?n@6})!)t;!X3 zy|OjYL`S-vcX3inYaub$=0(hODOpP)}!>%Sz6y?iOFzK8-K zx#?b>-&QAq0gJt)dyRoTQ9FTHQ$)f5-A7D}tClgk620;uMsBe1Ha zP?C+WQWV_hc9kz$D(}sN+tb`fT*bzWEVelPI+FuhKa;sGT(r}@349sh-xzpvzCRy) z`6T#;{DHFTeClc1O0^9V`VE}BxVxw~I+}K|;y0@nxKk|Og^B?HC`PO?+ou#wi?o$7 zzyCtq5-FVS1uEjo!|vG2-8+Fxi|z8d4D7k>_ZhYbuVBme|%mIf?fe?$%5I*MP~ ztzGZqKdodh20~r|@QNl)zf7+*)<}j&*-~I*fz{W01A}Z!ixde^L&-m3SACjVqv7?qqYJQ7x2kbY2N4$COE#H3h6wic z2*fMIq#dJN?Nued_#Po|7&2CiU+R@;*G4{JlwihG3ptut{VDyMB@P)iE%YOMUf<66 zzLV6Vn@R-LIK}zu?mG|mdQb*huDII!t?W+Z6iY%vHFLWApFiQn-@Hf6mP<;@Tia_m6lk3B9rUUBzwcF7m*r16=*NE6yPkf2<;{#kW*-}~}WM_^X@b;5JKAB8!O zqq^>?O)zeT{;eA&@JG`R3(mv8f6bfL)SgLo>(zEYTfOZ0ty?9%SiWRrl}}idT+HwY z4JoF!((X1jf?mqg&v#$D8fiKt80U0Mch z#QDXA&%CQ%i(2xLIoIA&;f%5Tg z^&UL>uSF{YFR-8gi^!_Qr+O6&w*``X0bvRnB2TKv;n0yIsGyDXt#mNxofHRWM%uIgY`ROW}C z17+2AP`7EFT6UWlM_%0Em$jOC2kG@(rxl``Now6_7#b4r3)mmiW40Z@R*DDICskn6 zS7N7@Hrq+gismU->3*YBRGaI8Nf6YF`gF|C*5}5~K4*aC6{@e7B}JwY#}h4Nd497> zZF(gK=dp2k&GzE@3C-i5D$_Xp`X}BZqjsBjZ@q(v0i%mlBcr9$=VN`?!W;*(ThO(v z_PI;YWzN6jCxSKcYLOUW%9N51rY1Fkd%OK<+i?u!v7_>^GKVW~eWH5;cg!xEbKE<7 zY8qhXyjEqh%G1bwq`TR~e$LDv`!|ssofcf>_ zIqi(E_*vbxmjGXX(_mC9JN*R_(02zge&Oc?7IKqM;hJs{Gfqf{VUs4^%B0c5xaYtQ^hG`n|$y5 z>phO=yGgs@7t@H7u8Xq;{0LY1?;jycMCtB#xU-CXQJe*1-H~)7x<|dlSU{<>i7hG6B>iBy(RVuLfmc`zg~cJ*(LoyC zKr(WmIW7VMk^)NiD(_JGKf*--XNnWeOTMwYFK`wHVJarU4(Tfc!) zc!stAPi4MVZ97~iq8N*|wDECB%YpB_C1 zNDjYxKmE_}82QWeM*hl{vd6tG<^UgCduw|lzq2!VL&D#)rPcSJ zL~)g_DrPC8zUUGlAf}*$ID6UT0Ui`2@B)<$0lBv+{XmL^v#u@!;D627H$km9=Ra!#e#B_AxJZ`C4Z! z1{VJC?|$exc}DGtiVOfSjP;XyJ(A5pk{n-!P-VFaQp||cx5Rjw9N&z`PXUxjf>NN! 
zCQB8+az97r5LAXV&~pp%;Yb!17XEW{(S>pRd&pV-O2V%PAeKEmvoX~PDe(Gs_tt|> z7@J{|USCqr$!)c`+ij$f8V-t1NH&K6n63G1mojF)oW}LsH;(DvZ=K%MWSIMD%C7YP z3ocNjH3`_7x$#?lFM3TGbVK{+;g;~?dZnr0{2}V7ZlavAvQu)aW@WMntXvOsgw&SN z)gMDbT69`4Dr{y;Fa4GS-`PV4mF*P?-+%7~jykV=E;@)HUg&5MA6jCTvKE86ckZ(p2b+6i&J zje4Xn;~~@EuBthjrxQF=Xd8wUL@EXGU{7h%=#SXq1sR7!8~#>t)x4SSpWlD`e`R6# zKX2WwWw1%PD1c6q^;`mL1gH5H&53*b8Ld$j923PUQOH{I*M1nxT)wRt-| zx!>N_k2X(r1ZCHDDzbS{JdVK!R8(>=RM*XsEb7}+j||xMYRh(n3rzM` zT4B3rO5TsLWkhoXS=Cm#gyX`T1s7Km|J`%*VZrO;t0DI8@ zZJ8%&y3X&3gI&*$?tcCBLHV9eeUuOl%2zSzpH@Q0Ymdv9*K)0Fz0ABQbvX1_L{I z1b7a_VTNmJ(}O6mBjJ5VQ-tT+yI!q_^SVZjd=B7^6c3jQ&70_AnyrsMOMbPE9&ZCSt4#eG) zi^JQD55F;Hf&24$(~iX-^tSIy$pq#yd^Kl*dq#>{^RV5Nl$0`ucWx~QfrzW*&I?IA zoh7e|2`7P^(mFM@`ER6_xx0&xFaRoUvw1HSYZ<()Ov`vQ7|9tuI!Y9cSwems84dr{ zIgmSKpx7Uk_(jhFh72oVMQAY;E8;Wsfj!1070((Byt!&j-JPy}-;g*+IIi9n^wo73 zd>E31vR6bo`~{|8h5o#KxMmRJqgDTCFAGqgLE+|ZXy2UO_WoPXd*gEZnof0=gpW9( z!GcXzzowAY&#Qt@!~58yW!L68`Q^g@_sS5b#o-hr#FLyIf|68Bio2g!52?A#J81g4 zdsR-0X8dV*9w`Rs^t?F~#N-N*tUb_whw<<3jqKIWy_%?|uJ3`^ECrfyd>#9ZJ`)zVDXg z>X5GbeyVqyoprY@Fp5IT6VGl*ZU);IuJ@gzWjpoN7T3S?}>79mYwalsF?)a zt*))TcW=tF7!4%*{PxDNMk~19L>8<&`^qIhzth(Sf%oIA_vMG?HMhElp8hq@+&j|d z)XePk+zSNrr`y!{Tx|EZw1Gy$sG;db+$Rsk3Msj+c_4vzb!AuAq#@t$bSL$-i z3mJ+`fyU-u(R=nGP~R!O~vYg6?!Z{;RlgFw0FhCHb6kUbXpeZV^Tca zn|fnB=j6Qd{%_eWm$y*L)8?}=`ZS+$>{NVk_$%Bh;9t`NgWJExSBvSI|3@PKKO44< zR{yufDcI`_F4#()ZNFpi#|74o7P}l}b1SRL*e4&0C~x`sVx+XQ(<4IYt|PATS0E`S z#CZ=D1S#-13c8(3@*);3#Ea1i0kYd(L)wd;gkMUE^sC1O(Y&9zCg-bLl{8DN7mB{}zaoa*1T~lvn zdEWu0l-W9m(rn(0t17}0g3fxbj^rv$t~MT88%w2WPQ?kP<9{!n~=vaq%e zUVCbKHJR7lA#y`az=UyzJ$^-L^HoZNeaPwd=dmQ({Vmn8hf?knI+96+o{D5ZOuCHQ zm5o0TW2~R5E6i>qU}eR|!ea=>b8}tCk6pMdHC=V00@-tYtAyiR&Y_*AW^T=IUHpur~J@SYqy{h`EGDP$uF1Kcn?!A34pC3J#-dmmJeEIUCz0UfLNc*$jdExKB zxTR2J?Bce*{W5{|b88`F(_`FX(hfBQ;TTs>6j3;v>{cF>rKAEiw`AM{c*+7| zk?Z9tGY5MIJK>pUxTEKQpl92xkcJV?uvBl;bPw7{EXfHS$-Pc5IF#6jR!j)mY`%*( zPtvY$e4w^F*D6FDY4&2^rYcUPygZ=$_kgRrpZB4-Qs2msBeLia_DJG4>jWRi`^jrO 
zq;@|UPDHfPc4$aF#U!NBW1QC_8iy_Ht|S;IwwU!E7#?5Dm^||JQ`WAWa+J29M~tQM zcVcP9HQM*4DU!eJyNzb%pPJH#WATC*Rq9_HAZa{bRhYeo2}E*R%afAxv4cP$UY;)Ya4S4ht#_iK{L{(UB=0S1O3PMzT*vFhV-0qojm zU9ryQTKn8+wD=%I5d79R!(a3{sG+Ls=mR+z?QNE2C&5ps37y1QG{hzSWL5?K;0 zcb%@1$g~`^85>@46SaSnB!-3qF?Jbt{64VVJV!KM$1-=i(>!)z-OKNCWXLp*tG^!y z(!8dJ`fmK%S0c$4!F(OXp0>Wt5_$RYPe;VQM9ga6G>yeDfmpvDmS6==xwT3xV%CZR zUJV3*2A0QN^txMfDVV&?wcFN%hr3~M^4Ys6X z3z+ig9C;gA|=pq>u`Otl-n&a(R(uoZosAsbmAXWaVIR z&H$vqhJ|N|p&M`K`Z$L7l>=p;WwTf18dswaK6SSUIc9^IJndGVs&O{w%-T^%AD!o5 z*?f=KyUzQmJ^`Gb@sn6SCdQ$jWyeb4!PVv~@32}U5O`)kv*-K;_-po(eqsec zEeTjBQErwYBZUC8@64lOk_Z5-q!rj*-5&P5gx2qrD_h(oi7K<~_+K1PY_!flo$UJH z8ooR}@AJNELcqUqZ*nG5$@Yf9mEimLmHWJ4)Yyrb@V&p!Z<4vx@+UUNhOb?Y!~z04 z7XhbM!Yavvf&1|_@m`2hdN1Sh)Sb9(E1tirg3~dm;wevIW?V_BS@721dvVZ#-vDUR zzm6U9>0l}M)B;L3P)INi^G^2&@=GK{rWhc6w%_HR0?|les;K))Y&=&Iv%@6{|W0Zc*l3+)oFb4Cehh&~G{j{%v(qI6lBzK)cBakWwi6g)4D|y>RkrP-_G%Hi_2cL8c%qb8Y%mExK zw19A1fO%qCzLpyefUEGdY@#C>3aHH92%Kk#DL>UoZ(C@gU$nG$XGt>t^f8(~scEi_ zO%rjNj)(8&vr&gCEX)qkBf}xX{QG#zqZO&~C{#r>9^l~d80fFFsD=jSzDTiX*|_O3 zl#jqQ*r!iG{>T>6dkS+Yo(u7Xiu@t>Sb=NR*D&=_0Q3!S5RZwGk|IL>1ok{%e!Ir} zWcCG+X-cs`daL*JA);~g_aVwhYtV^{XHJlZBHoej@A(=#OnCv)vT1Y*mDFsHCT1B9 zs-_P^N)n${Vy9rGr!u_x(~_7nUQ!w{RsD@+OZe@uZ?Q&a*#bv%wmWf0E_ym@`tQ)1 z^ZlHdi<}=c>ha_zSDfc${X9i>t<{7w3?%^6se?{3o%k)PQSZLyO<(op#)z%&Sowze5AkHF57QP6k{{nf8%iA*SPqCvT?2~741fr1jGhm0kA5Ip>WK? 
z75wpn79cGjot268Y2UBeIl9N-xjT5^?~pxzb6%6PxM4|V!|<0y$M*B>orswh%4NdV z`{6~K`xioQVG23s)3$N|sim|C_&cfNEs64byCf-n5IGqG4*>7gQhb@eXm((xPUzJT z@;TQHbl}SU=nE~^b?}^N(94qcXTE=7};6;naP zlC+FK9!mxai|}F*e^~v|{;+ISluu{U?&i3g^0GW5&9orTWueXVep{kjFvpI_=7Z|D ziKEv@t9Wypn*AGRa2OtV3pJcuklzo56Zc_C{_612Kk?*AIP&n1Kiz0>-G_(#CD56g z7RaN{8Y8?O-C1u(A97p_k@M5AP-Hbv4f|1NY@v~{3+po*RW=X|nZj=)g%og%#>+}% zdx8aMWIqdbbG@CUdnpZ3pr?X{PE-nXwMI?r#3cYn&cQe8ImtG)nQO4!7O5 z9-2~G!$#~dhHneZq@OiI(X?TND^MeN{#Wbt-kY~(JL%s;G$qp~7aI#bg(>nStAQ%_jXSpEr_En?#_3c}yv(JYb-+8qoL%&jy$z!FduW^yk7qbGS>MO19mhl96O32)Gp*EeK@>EuI^Xx7tx7 zy~nXcosI_SA;yB3S&#`vnmOuy(WREIIUGAdB7>Rt`|Ka={rvg!KBe(_)EkY@biMAv z5+Wl0|1PasyLG9k1zz`qNHE5szYc_CYvP_Whv&II&_#W6^yq<8 zq%j1^$ebRryqq$u zkTGP6CX!`s2807oX05FMLj`wef_fVZR+htd-9*NBJ#c7bM6zb_PyO15lb>o}3k)XI z;Xxn8LwOtB5b=|VvOF?$($VliOvHx#A3Xjz27x9)Wdng!J4A}A*xy(fKcvgMCS!fw z1Q_!mH;rW+lK~UXaJJoBC|wlhthhXK;4?SnS}C~|B& zVhHlmro1ITc;nAzarWVg-?j(n*sw0ppQk2J?D>}27s!yDhNZBB;B=Y7vTYi&>(G)f zSBsXeKpZA4N;Wa9YrJgWU^%>M;Gp3Cm|WTnF=xbEqzFE>;>l7jU%NeTITG{h@VWcd z)UtMh@UnbZ>eGhBlhFN`eJ)6wD=z=IML6hYs6XRE{-XQx_>z*PdH-Wp&$_hy%%6NQ zL=ypk2q5v3^m~Cd1(um0AntZLJvWKr-J>iH^t5`y7niwnmyVXmkJAsE__X1}0l#)bgHE`sjjk5XA6=(0M-zEPbJuH->qRCOz>2ghGDr#|`bBH5Jri9@8IeBe$S zj*$6ge{|yiMc4}<_<`7Ylro5?HGZGvT5wG~@6x_5yW?gqhh#PIVvg|pvz~2h=j<|< zb}|d&n?=WFJ&vUVmi}mhn;RvF3>;!n<28O@5x!icE6t+}kL0dGv17$s(xE{O^!p2E z5bP8-==Ysd4h^X+-*^r3*U7vBQD7*K2Lb`$Ch7%;H3@up7oAha@Qu}&f&m0r;Af7u zP=zGNGmtZq#*iyuk!4D{D=KKp^>yDRv@A0;ztBCS2%2qj>w65WJINvkg6e;okz;Bd8TB4lO|wwrk#wZgN5xQGnV)bC zj2{EW2!>ntj-r8#i70!{^3(tMnWA3uFpsu}V-IIBXXDYFwz_$U)g}CPm2!EQJMF#+ zz7Z9_9^DNt%Ho=K#YbMXeA^leyCJFKx=q;%zH#^E6YFm25cRdZ`nKZmaKHBDvc`1W zg-K8PSCze=o3F2%o15p|lDoKZjX?L^SzReKvEJE)YjmVmMm^Fg<< z|GfFu1Bit^VrCZkK7F^*T3|ZcTN76Yn-Yi`>AedQKe%+pa|Sg;U_Jg*?vXMdwh}CK zmphcDd3@p~Zdxj;{$KM-Cr7408v2ALj9r2I5VLpUT)PY73f?IfKiyN2rp9eP@B{yH z@AN%v-F<&S%sTwpPIGPT?V+6QG76w9|K6INyiq%RG}b6PG7hg;OVEH=*kbkVgh ztn1s1_Uk5m%#9bg_ULPERI0ZYss=HOLIK@)9V5Y2XtivrGcnA)4>JD>wp?~5TSU+T 
zEL3y64Mn`0i;Vv=6i2iE%x!E^CQ1fK`aG)k85^J?dxrzi7%D_Q^N381vWGgVQtt*B z>%QS~)^Dsd`>9O3_>xZnKOSo~H-advo;?))wHj8Q>%GPQa?X%R2Y0d5Q7q5UK&Kp2 zorD9`<=p5WGhxe?FeL)mUID1B$+Z`maSDss>8ZD>ts6(oDFIR(lfnOUQ^}-)!iA~; zto8zI1}cvi5KjbZn3+FkrgJ%&+$tRrf0zr{5BRb{8{Ani-W7DQ% zk_G?J`!Q-gXysNh{CM_Yh;iM)zoWZFFmjbDe`dDNK%v!r$FV72SCvdmt7V0|(R}aP z1gkf50n9|IFsf&mBUN`mhXHR&ru8~JNg0ZN{YJqOyqMi}6vL`Z-iTSg8W1Y)&tllE z7Oe|5?+C8Y76oX>yfJpJ9|5&4?fn)EaG(5KQCRmE-;Zjx^DRLzUBVyYo?ERS{pd+} z0;bd^%dsJ(vdZ{Rw0~(Lj5$q=bjNJcp9}uz45pxVu2M-E73x=1XzjwV%p9i3BbXR= z4lpn~2nn6doc8~=c*pWoP^G;d5;ojkY+;FDe>d*GO@u4Mu8p)$8k{{qOKv5pq8@+3 zq&&y{_S-&FN3Ih|rT2S22Be^zmnlH6_Q?^58f(iRobIt%2>z<~P>6@Tn=S*`(rkx@ z`3*?B-hL2U@_oZdQFS%%sAYU&|9n$#Ra&w2N7Nm73ydM+ZRIj*f?}XN$A`fLim2FV zOjq!x44-#57oLj8iys1@2LclhLs5y{0hk<$&-*PtTy*;PM$*$4MXEZxN}o9yt+BAR z1){SO6bAb`;%F$yrT>ldt7tJ6u?})xR6D-M97E6WrK;H_k&+loF&V3rg%x&h1&l>% zEE`7_R}Au;53eG$I3!fh1na+<#b%Uk2HU__JW0I5Br^GW;+iR@ZRm%IS5=nR_U^iC}*44*Z*UvphC=*86fS4*po< zbkJH^o`Qj}OYowNjEvYrRP3hTNI`ZBj7(NLe5d(UMBX0={ZTroaq5^R(~5?WqOGc1 zh?$AR1eaWx2_U@GRhIfux3>SZsbqUcVOQp_Ag^;?j|J=AFR5KTw60TcCQxp4(;C2B z?nA>pmU3yj&-O_qF6eqTk0Zr@m!Lka&3lCm4ZN*W^ez0a8xqHnA^4gpqvRjrMDghW z-E7?dv^bjb*nL=o@@IL$#T!`Cb8@3DAhq@b(aS8d?#J zs$fSEK(O}-d24IMS`>1o*MeBtZg%Lmz>6q1i(G709%@3#=*J6x1iF~wqUq(gs!*uh z`c$9Ol>Ec@8=R&@iszCVWD@2Wt`6u0ULvU^=G>5ElKhs5k5B-KHZby`aAIMEc<<8Z zJ1|d6@nz9wNuN9cUU;p?WqJxfrL=4KS81dFW+3vKoqe~o zY<#HsBAS?Z>$)W9-t$_(@oL-(5I2IlX22GIfvH~c-dNtXyKy+p>vtNvgs=@ zFN8EHgy0ug#`0A!jU^ezi3>s!GkkX1-$P;NL_)*OQu!aHz+f z!1dzKkbP73c!oKcq|S6Xp`KZwwzk?Q0s1Cx31|~DwonhZ(v(WWYtKh-MsdVoL+Y)$ zRUeh7Vc9IMMMTV+c0GNl{&=e3XhQSQ6VqPyFZPe=gY)gH6hEE*`yU9V!B1dy`RV`P z^ARpj+7JsI3fa(qSg1Qh;V|{4HKsWYBwTnK{TB8fX!JF{0Y8OJtc;(Hzf#D#?<3Xi4_=We-K& z-e5S9N7{=-6w-zRMgkyBX+g||N3$wcV6R7G71H_bXJvw-L1biDSczC#v7^ye+Kr(N z{2_@`71)>vnqpQgq&ggljysE?%gcIh=?f-f@v1lo(Mk#rk$23D!Aq27QjlXAV=IM6 zr!}e=!vwPHgqbkBfm-`V)*q@d)i@t^tYZ5n3&>zl8R)LlLl z4-UD`3j)SqmgkCqyKTCe0)LmQ6Z3hh0Vukbkgc@VsU;&rW%{%6+J$lS%GlQad>x$% 
zOA3v}c+z57>b09>U#eB7$;U&({c$cG#V#vTs@QCxkihwihqR%7zpG21hDG9K{k0G7z00Q;Cd744k;2Ce`pG+A zQ_71T?TTY+s{h+7GLX_^$$rjmCvtZ<>-;_B&={+##>H1CA!MuN@GfdrQm2Y(zsp$l zWs~JmZWBjM3Gjkqq(7*!rKRQfl7pc0`IcEcNS#Rrg$eGJgCoXZB|qUz5w`3)Ae;DY zV@YUvs`f0nceMR+=n53bB2wp+4CdRRX~eiI<#iO;Fh`6ek1bxLlkt3*oBkunD`1F` zQ-9SbY!CwkyCO6?krV*2Ai@I3$HZtOWN~>2+!1xl-UjotC3~Q-KIdOUGmfw-AU70+ z1W1u1;C}pg-r28v}2wSU?{d^e24!FM%<85nCh%t z1RxO@GYRpPE|48lRN=KC5x~v}0u)gi|RYk<+<)csJ#$I0HqBFw3IDSZToGhQ)dow!+P*PZNx8khWcd{6)YB( zmzCHK54cS3epDHHEr=GJ{-Td6*&M{iWBl1be5Jw7n#y0ql1vn8!Oc*ds1tg;^Jnd{ zBlk~U(DavGq{7v0yV^pZ+4<_de@0`0Sm$3C2ey*5XZN4%~Dkw7&oC&k` zO0z*)8N%LMa%Fk*hQh<6b9v{MQj@Eya(Q`Qg<`^FBY~0T>X_(A-T^R1U!hZ#rp33K z_(twg_Cz)l22_G*bpxZ_as?V=>M&^1{mCyHk-7YR94G*ssBjdg{hcgq*Eoy5PpTg* zQxZv*83T}IvQvezNxyb{*TEaKS4}FA-77TP!JNYU%8GoLeN=ulI+K~yY^zE^%MeFJ zpP6bCUeV6m7~Sr$ekeW5&R{n@N2!PXH~>UvI)2^~**fGH zKuphOfKKqj;rP@u=DChE=8xKF%9tm~w&snLoYOwT$%;P<|6=0gp%^V3nNasu^QzgH z#L4G$N2L!FkErDdZ(MZb$?}yLf?_p5J$-#!P3k(UGDb?-OE62{0ZN+>^Hl@oHZpk4 zwe9A)qh$>7O?BC2`l>b~SH)|Nz1-tDvKc267dIog*4am|uf&}M_FGniPI*o5tAgC) zHlC6yn0*CPrg?D~_DN8KXsSj~i+GQUxLK2biUqUWN!h&gIe!qY3*NOkZwp%#ydk_E zKhCnI=)O8M5p!8FUCHbitYO5Lq}%Fb{~m}LNNEH_N59Tti!bjOcc00vhEDtgzHG_9 z>O6OB?>d$JBCB$Dd9u=9qQ5;`EdHgw!x@ft;!@a&n#Ne@7D%Q)DlB`ADx=u?>Ydu} z19-ABmr!`n=Q~60ut;tq8RPkp?N$4xlhs$Du_eHvPc3smPvd!#=ODLnP+T;}MH%!7 zz|0~gPgdgn$BznhIw%3=niWSE?@W(BQ96aQjvC(AdTbQ~kuq$B^q` zEaR*|JX^@W5J?;YDxByLnhA& z$Wf<0!*D<;^an?V@l>3X8Z|d^w@NqP|9<^CsX+N5?qU9+-LOTPgHg{xCAOgT>UMIq zXXv*tjTU5cv1HM4T=VUZJP9Aye@~7&&Q`yx4I>>glyswX2}5_MfPh1ZqzozD(hbtm($e4D&-2{x zx7M|0@oRo?&EDs^k9~xoF z>{`*eaP#!%n|OWO)0u(RNpsd1=^~c#+bY@`gqeV$*JjXc;go78l~2yBf#U^vc`wuN z0w_p({7!NO`f$5{Hohc;AUEtHIciX(Z6*W)ne$p|svtVH6KIGoSJszR_ z0^*o$Bc3Pjo$YR_S!oUU-RDfv!o^_+BD6?l=fDo8-^PUxQKo{l-!;AKl?ld71-r*_ z*sJ2E>rSV}vItQ$d=d8Pe)QY)Sh=711y5Amr5aSV(u0I}PelV{A%wMO=~*bh-hudj z&F~g$-ez$}Colrg0pa@6-(q`qHYMP?$jZE@m?W?YkvOm;xi=WMCsR1xjPuY#f0LdX zBFcmf0w5R1mWhhc8DXSVr~snwNkCdMzuu)JXUHi8+0uX{y0WTipHA*K2aCH6)E88!b!L7$9&5kz 
zTR`xB`x3#hP`ZB?_)+hCQh(N02^`Z~^t;}&`SIbqi~<|B9}L6f=&re z4VIkTvEb>w6j6EX?YN!g@!zf+m+lB^Y@vxze`Ps_T}9Qeo55={=aU!9#KVUMB}?bx zl%eD*WIK}DbNQPPWy)ZnEc-U&b7U|tBN_lD8TAFyJEJHxumG=6ze_jh5U&?CsXpEI z*Sg#7YJlfu*}s@GFrV*)fQqxSv6GyyN2TreEYHa(>+0L|TH$1bC^8=Q zogSr)T=w2WPqK4kQRj~>`^GBs+X-i;wV zcXHKTEVkH#JN##Y{B}kANxvHy?9v)Mzeq%Am6RGhpe8~1Idwy{9PT9%Q4Tk(lOYS| z!!^WXLhk&@k}`K}N3=Nl@^VS2HKxx>~u9W&h|&_Of9*^?Mj*Yl*Bz z-E)thJqh{J!#V_n!*IYwIDlCShlOwa!EnaljhWL|ZPCx!qv>=Je1;{-K5@M8@*%B8 z7_!wq#`?|j1;YUBe?8lDhPo9_za-(F;0qASV9G%B00YUW46k*ViX3;x8lzT~=jgt7 z+|ZqM#_Ub zi=foNBK#h3=PlXecYV_8-)hcqYjf*i{?I#jvJ&L^w9;SE=@{!3K<1rxnZ{>>SZQ1~ zDM0V5cN;yCHqh_hPjl!mBhOnn7@J!Hv}B$M5**B2eG^v}k*X1qiVUs$du`;gYr6Z^yU|!H0vErfm zn_}nF*Znz9rRpmYN7L$6Dch^zb8C_sPaSMKwtejjhc90_aNr4hb{$E7aZwemtC%Y# zcw7bBd_}UR;-u$(fG8dkldedF>D>Mg`3dKBZizw5qoG@FpO*4GQk!5@Ej{)}h?rY+ zXMEC3jA}UI7Fogdw`TrKvNNopvmQZ>wBqgO+hR{Dvi5lDLbDWJ9o_W+*U$cAG5Y&% z0-2UoYt^{qw02#0qXz~}P^2tcWpcf;y|0c00B8`VYHkoAH{6v*Tg8)VXLJ&f&}?Q8 zl%%omJ7;*8UxGadCF!LhI*Jvz-m%qdfIbVHT!IeBt`l;U!7$bGD$G9&`}*|C)MJq@ z)1G_sy9qH>l0^mPO!D!pdU-Y^Rr#gvCRYidut=3X96cN!T_l6V(eKgeNl|o?D)x|j zZ<5}!&T8e#@V^((0qeht+Wtm`^D9?XhmSW?Gtxm&Wr)XEZm6~~_3nge^Ipo=zv@4q z1{tKT$3zy@%tbS~KQym-n)}_27Aznp)OMZb7ggD&;91S*RaUTQ8LLVk3H$^)>2~k; zU>t$cj%LSSl4Xl69&THFnV-tZT7aB?&+T7E zj7oK=BqW6)R>0hv6wK^Ux$pcpa7s;obK9ojNL?Q?bNSF+siRJ%`vIU84w55*Bmwxt zyKrW_OobY*ohc#$Y23#+KC^$|&q8+h_7jhh0T2|IK@`g7C7;591d%C)`q*JALc6hQ zW4C5z*GsfsP1?z_tOfWsTQd?p*0r1y zafMJL|C$OvpNRq~V1OCWk&|PpR&t^XKv|wbM?R$OT=%x^vC}dCsiAFDR`Nc^#UCnW z{O0^7ERs#c%V2}wwPnC#*u%w%T>M#V1T=c4L`+;fThlfT>2LWO>^jyKu=-F8X^7i% zY4=q>Zae*Z@ngT)@%-VNq5w(bo#D8Mj_f-}L+ZC6mrB+v=IJ>+v}(Vgfr4HSFLn+L_$OnY-6 z7$Gz}Bil;gZ$cv7nEy~HmBU|eJejdBhio=MjYnoz7$^X#d673oQ8633;pq8sRJw(& z&x%8H1ytf^U-0j8*=eOj?fMyxI-bSt75F)L-FXl<|GhEKsWK~m^>pDobof_=NMI;V z!TLr2<~H--2UM#eASLtXddE_Zh;2__iwkq)RK#R(U*Q@j@3*_LvNTuI@&5hAq%A5t zTIO&J(k*2$SAKt%2*_M$9(#Sv?~AfVVm);$$m&yq=__djqHI;) zF}BT=;#8%5VjJlpr9k1;YQd@o9Y$b>AcZxFgIfYw6BHn8$e6u0F6sey+xx%mkAc$_ 
zvnYC6=qMw=P+lOLC6CzWDuK~wDZ?bV^!VYtpMew3@B8XRg?d(s?=r^jL!6Etdz-5H zV#S=qKb5+h6j!K=O7k^&-IaK=-eH61Hh(z$h5dhEk@n-Od>dN)1PwZYm7~HyclWi` zqMW-OyGAGhd@FajrPSp@2lMy=eZm>{I-4RGnu9> z9+ROk!^c4@Vch398t^YHSO5lm)7V+P29MWg;Aa{|pAb1s~2XK0Yn%E9ecY^%VWRfSKu$f>lt^-(~2<{!V;pSyTE< zR@-AH5ud_O;g#5ezjGH`3Uv)Mue6Jd5n|4}O=1Tzdwx7z{Rfket4&SAQGV+;HZ+k+Y`bX%%PnqI zbco1nWccVwp1J?t_PPG!+4a`PR}(pK7XvFAr&NoKkdT+6{JDxT6e>w0H0~)%se#O+ zhq$jadRPp%(Y*1{0rWDctPIbna8UuMg}_0^jpLWHS50f?A1rnsUOz*Z1(hTTvKsxe z`kFwbcE3+RYZ~h})1wE_dQ zw-x`ux1%N^mlZl5^8CfXm63h&PIK>5{`!=mWz`oEapzD?O&@S`FBlu)VZ--{isdZI8)Gd^Yz=Gp!?LI9i>Rip(44Rx#!9{`ZF0TA#y;y1@cpxyzT+gcwQ#vj-! zc*P?3uOWfZL%-!?5%Lbf$_|na4!*AUkH5YJ9asuYT<)*;Uc<+)JN|S?rkIDy|tuZ5Ql&({0nMicZ`Fd+lPkJq?^i$vl7*_xxB145p@E z?sQYHRCKrxY|7`Q{LR7);p?RJw+d7Kcir9zAB*Cg$j^ToSw&#+B5EtYg~j;U?%MTi z_|njsJM$Z)(fAJjGifmDAe98r*t^KPvG2S&h{iI$5Voq$H&@^2R!y4B?drcBZ)_ai zULGus_8CzNvMP6yxL@?4%1U2FWM;|Zq<4wg#x87=`XQMH;a`u%!6Zq!b^L@Ayq;pa zSsGC$jT6bD#yJ!o%CJQUB|l^7mQFZ0S&WeJ2avZXx@cIUTso?*-`&5X$))=en1~zp zrL$at*MQL2hM~oXuL4dV4G$(tg4spGl)+#0>#%ak*nejVx}OgWYFI6mg_&t zir3{OGQrFX3gAISoj(7X57;k--F~%F;<8GKhf#OD{c+;r(lOv-(s5taq)EFwbKUs( zF(uh+Gluw6@-d;ZQpSif71XoXpm#l%*6KX^>_DJqW0t#4w-QtZq z{+FTh%upNTat@_%Wi2%M#*l|fhLtxk8C^FWmqO^B4kM!+g!w>7p8Tg=xkFCGQ7;;E z_Qt$qt3*!Q{nUs}!AWjAF^BBT?yA>Bv@MiN%W9bR;B@`eTw=RGdVT44Y#5%msUG%> zR)Ce)|60w4yxw3!yi$}8T;J^X)1_2NUyR-Inlz}M9P0D5!3Y$FOqo|s^8#pSX_L!w zFu)<>tf%K}Vm_)WH zBhkOy9W0WkojyfJuuf}H^$SXvwV!zh?qLQtUZ0pAv?E#`<~&at@z`)HrxUn+v~+jG zz)snb?KeT$1veWD(zhmm$COXolzbw2o~jTta6Z<~}l`n@wNL6KwsW0yEBLe(%}lp;RT4fXyvk<7mxfR}*) zlc;O^@ao~QOp1vhzjAIWI@d#fsOs0oY?a^1a(AuYX)n1*8986yrMGU3Qj6D|4U#7&!;(zOGT6Bjs3o@Jn?}pV%jk6o;d$Qq zH*B@5st*gJ9yJ#{4^v0a$V)Z$rx_jcvps*kVCnB8Y%%4}kbRqyWI=+d<1dVgamH;V zlKPE-BNO}vl_R}nVJXHz#LxI814ygLa z44!QzV`N}43BR88`gnQu`{U!E-CQ%UzGfrRp(%+X6!<=L$o@pVpmzV=!q)NQ*6s;+ zU`y0WQmfEUP19)}+D%qmh7VTdvB7d95vfk<#3})nEQ};2lj9F%5d-v;Ne~p2O69)t zM7Um=8Y5~bphPt`A}TT$!*CFljTl7+cTt&pHy@;hk|fJP%-o{}Dq;=c!a)azH_k4>X 
z=)<2a%cU!ouLms(0?*}f7|In{&X9}=_15clIIY*_%elXd-M;&f%zt+L zLO_dg2*`jm20*pahS-pWq)zTOb*PYcG77x>3OVWThV+HEht}z-#MikpWwgK zuIQsULysf2%UbBm`pvH`PZTmJZXP1!jQ{Rgbg~#R%LFs;4h`rfBRxDix)3M0up9;h z3i$5Q2%r{PX=7lfx@(&v>zt%lGh*f2_B)kda5;cAuaY*&(~1Go_|brnq?!{wnpZu| z;)IAVC*Eq3AV!-zy>+s9sM>Z*aR^FhpY%46e#v7-JgM+PLk`eG#r zq#5PuC?)1@#VYXqgAIO$&s;zgiois7{c@!a1-DuLoVB1ruooB?xErJ_6ou>TC87Lp7)pYJ*L zUp8?R<~PI-C{%xyBix6K!Q@Uxu4-)SRAxkXBN%W`1$#0V#_Bh09&6sPx#UXvumAqL zYZh?&_PXRC!nBo_g6gi~n>?>~)rDy0)I`Ie&dRGvn&Lmq6=+&<3~U$gwn;N{(XcIMnvrpP zvH4Y;{9m=9AW)bbcqI8bJrR3!C}(*&XUPD1d>kJpL`4B5QIGVii57Wp!Qydf$$bt7 zlDbeLaX|WhvEx!HB^pVed}JR5;uaz5)?<+vhqVS1W?^h$tSW1$@=z(}kW9`f{JieQ zlMf%jb~46ZV8l#MLYLZQd6LXZV6gOl-D#}|I|BMUwMhc($6NeC?9V`5I! zCN&8rgED$Hi9*AO^4(Mmybx%{63hYMF!)UxyM;cHWT5c12yW?aqs|q=j22v^K+1%j z7{boL%1^1TNMa432WB%#J>22~?O-EK`$12=?`};#{Ln;an(H!Gmpi`nCTIWGpWCCby`n7cY*sJ%rACRo z;~(L>eSI|Hb0XB=x8upGr6{@$=i~+O7z`YA2Y_HOpW^VC9vpr!%KFh@&MP@r$KJnJ zJ^t3Rv^&l#oa!BER z_p#?tHE{=*Ld*u1_Sq(KyCNkP)Jm(xiH>aRBr_|99tvPGIS_ymAb`Kn8!}(oBBx_d zT(W1w>wMW9Nb0~B{C&;!F!1!S;AVYLI`BBi<{+0NTp1#)`&&_)&+h9gCip-{X||DB zZOmHT3cn?S*|p}c$+ZjN@L>y2IvC2$io%;b3H}~ zH_GiO5|e<;!3{)?wET9gkv%0AENm=VBt@~Qpt`ea4*<(H2|b&lUU;eiee(95d+XWL z;K{*zzefV|=Z{NpClToCwC%yU_$ce$IAV$ybUOI)`H|MlIc=Fuvb0EhfQ_`@+x>SY zSRPQRD2>@<^b!W6yJlhUs4*9;TDqIU*Z<+g{?*f!4KrlGut0};SjA1W6x*$7NWSx5 zM+DhYjnnn;w&$1|wRno&t(>eTI0f!yo(x(^NQ?cP_aaZ}w%G}O3uar*X1EfIsmbPL zg$S{GEXDFS@|DK?BkW0s>EQH%_z6+?BSE);NcU0lsrD0j8l>l#${6+sn=L_GJ`eKq z5pg%2gl-F&PAwZ%!wOgrrBh?WLLGbiD*ki?{<-g4c!2Kp-jC^+*TOR63g(pYg%pWD znpw+X1Kk#I@oy^7g0Gl*AmQ0yWJ8H&6#FOg~F*t52(TU^H@$> zK`6sW0gxF576C%ovpd76%CVs0>74jpW1QbUXQ50T@Y>t)7FFqWUgb5AoQKi;$-Agn zB7dBGeYP`tm9rExR|?XAcGtQ~ZEN&8xvb>t5fP#e3>sg2CZ*wqLxje1XhLm@nE-$* zI9Y5Ql?D18n`!L-Fl&HB9PMI22|)5ffZ8SRfjH=MRiKhFpaRO2QM>=GAB#qVbGpw^aOO z7#!5o;uMr&Rs-)(d|uNtT3Wqj{andt$j4T+f}?^i<1r*wmI~Hw3`S4fwkJvE>)94m z{pYIwpHsZRqFzdY*%k|;fku7KZEAdH&uuK^Jgp++Y`@o)6ntdGuZt39QG)ujoV{|Z zhrZ!JQUIS03@9{a*c$gn}X=rtEA z;-v@6F>)9SE{YPqfV9{%5BC 
z&vPmzFBxZh)Zt**8v9ouHZ;ydpE{gLblO%@&-HpV~w$GxSe3H#_0s?18$#*>WA8wN?u#FDv zD*2bOQ+ls2^fiz%W7R`>k<*q|ankb-Jlp?B7>nlH@?!R#*>Z_2(&TF-uO2L)b}IT~ z^lkh#&I+k(y8p(x+-F4I!dAK(9sERoE=Og3E(c5}XhXrDt4({Er}hM+tRN5tu=!k% zL|-{n{Qf_))>-FQDTp zMXfaS`%C;7#n;zsF{S5P=UZ{ub$9lnH?NoIGRXSKm9PheK&=x3rh{339~{1 z96`a6TK}0%ddpUv&wWM6un1B3IKuk`Gq3|TRKD>$`o0$|UL7rbHs|0C^$@LEU67JG z+K73r3Jo6moT=T#h{C`qzPsOo?*?=mK&FAkRza!cfm3^)b1N!fcs6N19{plKA)Mp$ z3i+Wg(CFQ)Q|84r9|XyzwQNy7^~)jd`mpJUIVeUXtA}D`ERqY$yxO;U<%}u%{@a&i z!L*7r>`p7viCA{@N?0L!G@`%VcuC^H0OY3syg9oSJs38^6}l;k6Y5cYHcOaS z{eQ)Ql)}aFhiX&b_Kk&FM=#7rX=P04*k`!;RVIwRpKyN(!#gkZD8`27@Ugk1N|xF+ zNRAALjZ?EFKYI&}72hIR6J&wx9M3Lh zmBLl4>3W3_IeD--s`+?3yKrH!U{c@OUl!K!HbN$tR%~|~PPl3BRN=pfi8Q)89X0mb z{Mqbf%ldxyo?x}O;Q5P|FQpUx40N^HjMrUS;mFXn|9H?8I7h=(=@BAsELxq+^Xra} ze_nk$XiE9;qP%ERSnyrL;@yH%Thrs*(;9iG1{^zl3iq-CtKZHNvnsQ#Zq^+F8H4ay zL2(66FwvuwfefFwa*eCrfUV-Nf(0W$NQu}4ca{toV9~_k5{nYtdo^eg;B+_Fq`R_1 z9w3_Op%y^bdtgPc+;xwmzWIfg6$rGlLJS5sBCR_NWn`mdLV(lMNHGhi>YGl22m=4FUTH(JY5#P9e;ynF z;E!0e5|wUBB$)IQ)oYcJOu3Q3(j4;0Ip>KK0m%=M2~BkwTG+~rY_g0OMY)CQ6iUyH za>YahSGq;7#uIGS{+RmIp<=Sir(?*X72$-YABH3>)y<;F&gY5g0c4slgjwn>e##_f z>g0M7ux7Yunm-|kBSz!Q1NbM!G_R>uX10E4;L`?8ja(V~Yk2;cPcRrYj6Xj(y~y*) zx08HE`&}CG`}Xqr?RS@>Ac&(-oy0g=={Agr_iN^T*qfmLJ896hZY`wgL1nos+ug;8 zj%^CES#CB9FH!1!CD%INi<+a9FFvCyM>qA8QQrfRiRCY)+r3JA&T3wbN>XurkPJEvs?U~eIuk*i^%#+=?Bk0% z#j3b!3;+rRy*n4`*fXLEcoK#LT^{W|^7{z^bBUN`U&~PFzPoz-G_N4d6e^{D_PTT^ zt1{W<5be)ABC=-g3#ZLl^p<2+*U~|Zu7Q)*Wz)1_q_gVYaX;7box2yA?VkARm@K53GJ%?V=o&UThs2t ztE*Yr4#N=}oKDOI(Imn6HoNFeAOOj=g^(g4_V*_!g$FIU0x4d|6j2ag1dSYTy0`5O zGV>O$PKq_t?zF??sx`u&6D z8858_^OE@4@ogoXG88>s0>~hPIttI~qOWti;*2*C>LZcSV?+%>39mrKJUezkM}e`2 z898tYDiW1ca(3cq$zlHY%^^P@(ed3!3>V~E%fH39e@9pK?Ab}0bgX(@!OLegT}*7z zK1$&^donC*;@QujyCe5S*e;$I&lrnz6ZaCUFdEtm#1OG82BtT{LQ(4Sz|6W=5>X`%krS8F-Uq1|k?tU$1ZI!i6g3-qbo&2k7i^p$!li>)q zx;^{b*X9n0pbuhi9zMW}gKX8u5*m~&vqhn!xa_-9{arGUSF}znZi%9j@ z8H&ETCtLkwRYeza1z=!~CK6(&lF)E$B_iGixq&$gwPn+iyw%Zyn)MrSNjRhF^!N5T 
zWk#pq`GBD*Q2KvsawGDEES!X%Ks6;Cz;VoE<-h#HQ*cSb$(gQI=kn%puvjZ)r_GPN z)P9`G=jQXy5_Go1dpE0Qvv{{5nvc_RX!39piw)BdCI%jR=A`5w zKHXm2&~maU{&?lQ3aIJl(Kj^Td#F%YoDUCu0~WsN8_5UxI|Pbe-exSWy?2#re^_); z%c;-vrF3$Xl2&W?EMGa@Tx<5fhzq$k|FCD*Cp&j=9w&x;p;Jh=1w9aO7wg`3XB6-_JJ>sW@TnWS zr&TIoU=|8x>II<^LK>8UDWGG9Djs9QdzKk55_RmC(zDvG5wem9YF$+^+ivF!oz4+P z#%wu&!_l!K828fhU1!T+jajidx#)l!$tygxrKUD@J173%6Vqb`dO1YmW##Z}%0xU+ z3}2Fg5DFnc8IwiZLTgmaT%8})ayieJml<5bfpOA?`YF^}4VnnDKfxXv`u4wGiQImb zyr22e9tNmH0F{dx0f?$|@9t0aHlX4Xl>je>V z?LBjaAk}wCVzeoX_acPI|4;DSImVj8ZNOYVE+L_-h?z1VxVrRG_;Qsqx7w0w-9q>v zUbCaE?Rr#&;sZZqSf>7nXU*^Eq@d~*+QR;+=835X)?Cot-!-vA6aFJUVvp8rz*V&ZP!N)`SjVXXr8G)2 znjZUBi@hKJK99V%n}6D|eeCbvwTXN>lwRrSSKB3hkLIkS9U3#0M*H=7MAv&}@UTrw zCXx11<7Os=gM}g@mFT@WAHt1B9;iMw-9Ub7I@b&G5%6~(!^uj_^;o@IIb|!b1NCch;s9XpcdeMIrEs5kzVA zo{Zuvb=JU0u$m0)TLtn4^2ttv2n}$!!-N8mGc+gr_d4hwgpMNh^Ltcb2@r5T?E}ek zy}DiH$x6Atx}bjR>eL|Gv0OAzZYZR&&kDq*D7k&#VIvrR)73nbw zeoV*D!c4CR(MAexQ0Y08rUoo37TFjt#>Mo@w)u5Ce`9`AiaFXKRvfU=#CvQ+at$)g%b3WVuL{Uf46ZX zMGG(p3=W~ulf?@SxIA84t)Hqq@VGwvoORTHDQ-^Fdd=$TLv0_w!;>fVp(kGskns51 zP8+cZB2JRE>{|gB@QU}~p{hH$+ zGVak``;YhUsBI6e>qU+8iS)TBBKWT1j+M1(?}V2ixO|%R`4O4uT5(vIHe^;>i!Til zN#an30AQqH2U@i97-cxgS^;+v()Ba9jzw#bBVKL#@8UPY4?$TfM&j82|0JsSu0jh% z#BeZvTq&Pbw)LpNzI&T_IeAGQNtf~0S+ijIyVpE6>6H1EE;x%nF2 z;WT9yGpE|&=WK>;Gve1$&vqTuvuLnwjlBYLn%e72`w^;8~zfR zP$&NHFhtFij2aA-0jV*9EjJ3u$(y!C`uDGVZ(`W`pJt&n{;L|t?F|~90sK$<{iK(sas$0P2I`R)akJn0n%ftsDtAtuyfRe~tW zwiH~{Ctc$7iWp=HA91rN6@*ZT%wBR{gvpRzJjA|b0jH5bk`*f3d}gD!Y}rha?4JK? 
zOm}CFl|Sonx{lMWzDzPueBqF8y-Z_mYFa-TxoaP8q*Icgo~9jKfUiPnBi0+{)=anv z0{7FPCg!a^EmNiD)zS1bjd^I&>< zwQl_Z5)um)pJs+7X^-|V_nH5%4yaIyae}SFxnjQL7&C+O}J3mQh%)HH~ zb0HHMH~afyPWY%8QL`fU;ciA>`n(P61blSMEiS;lVqKG+0}9 zCH40X6Ssv3ZH0%?4^&o{dd(0_JBe&n&)tphttc&SyR1DvJ4#8ZcrheB$u09)=Fh1v zL`Me9po(KCOX??4xlo39Qf|&}YhA<3VcsvvGHlU)&Sk%(_Wp-Zs0AtaP zswu6IlDHY4-HUr4&=%lwHF@=M@4bV^jZh||X+^7*S#1vcTTHaz;F1zRdGtsu7kdvT zAwU4mX_|%!A|#c8_y|Rr{t4!bU=a8uK&IexgkNxRj$BIr4?s(aCLsxyGGGA6V5kz# zd(8f9wy|@f?G8R*zdEagv%wd4L^=ENuUPv<;_;h8T1^~9*7>u)t4}R>xIBNF4DGn9 z_XB9XHd`hjl#&!x0!M?Gl`u#Y4UP&r_TD5M+%O1w-ThfUvjeH|F5syZ3&bR<&DCgp z4yfC{#t($#EH$}7zu5!Sa*$}dArS>g`%oY$6iQS`m>YHq^(Xl+QNSOrjj4d5BMS`8 z{rQXi$dJ?@ZuND_XE|f%{pLg*brKszDR#9y@$uz}X-BS0BRPlnaNCzEHom?^KV)Ai zOVv?@%?qmQs*jD2%L9@6UGcmKQ0q0H;w~=X(bd8~x!fQq7%Xpsgbc*`2mx?-G@o;m zacx{3R)a*m@$v0N-0>Uyhad^RgV}S}U(4GA0<|KeKO=;bzmzGddcA>!s|CaA4+xvZ zhrb%r5doMjUGDk3ZlE_VM_P>kdEf?lHjluub z4GvhTulb~Kpur*NB#c%sI&n(Mm9qe($|3p3Oh}VkMG$!TxxliQ<{|HHst8)!(BAR# zNCZhWMY{T22=Fc5^%8CRtcrUY*MHIuw_v5yKK|aTA0-yW2t0C#|NCJ1guE% zNb_)&^V>hGPf6_pIU0l0?&~YRPMG`}hckJsU(XsGyre=Qm6g?DL@NUKEihAB3d&`D zGXR6*RDNbAG9}IAic$?jRMC;tTpSA+7^QF;GkSw9CuHT&Pz|7Y@DazF*6F{?g9OEv zXm>Q7zrRi&?f7lpfrP|=&1>%8Nncjf+#f%2Krh4HV;|Z5r1?y7UD_VG{ZVd5{l$9q zUEXL4d!D#owyC#uN&}C}+M-H| ziWyodRESS-6FIBcvoHo%6eQX4qRH#LrhFQBSbB~{g9qqzJ_+z)NWg&)$7?l|yI+$`m8~wijt%M*&|y&7V;}oIMb6Jt4jSp&xmnQ%QSiThk`D z-WPZ9kcXtArS>P3e2caaCrMkvKr|5F#a^XCnB=^_et6>h|1A0eI+vBsbO~2wz-@7 zY}>%Y3!?8MbuuDN?b+33uGKQ zMb;Xq%1K06Aw4po6xn#*#k>3P>gw;DyZ%$p$G%=04I|y|;iy{Jo+t}x5T0uu{%@SC zemAS%ua|wsrTo+hmVhm zeP0hg#7Trc|FKW=a(YN;Dk(gqh?mtXb@TD?)WiRQy#7un?Z9Ib;ZcxU{0-d3G*JTVHLeVH`Prw0gV2n^kx2 zy;RbDfb|eqKE}6{pJ1BNexJ7Hdw9y`)g~?_WrdHhwW)>A9(bi%i1~RFG8Dn#%$NYv zxvYhSI@^z#Vu1OzHS_AZrSq2p)cv*ZvTyvkiBKGku;avtld_zCpg|y?7EG)AX3j33 z4@vXq1g@o;Z#}Fv8$Rrxes@eUQ5SN5Nj8W*Fw2V$2Bh$+Y|%4LM9r@ZE-$a6zseH% za}^^*DWESaM*IO3=-) z7?7t;mj7EK<9D!A^&&NbK=R&h_EOpO zx$T6PgTwBS{S|dg-CAl{UzJCcI2&2rFO;6XN-(C)=9m)W;__mchRy@P6W@o85tW#* 
zOC!8l%);GKN&^f)1@1R;Z~|6u2>|r>$Ko5K4j*Y0@jDG^2Eq@IcU}z5*HbgPmyVLZ z;teG;anL}mmm(Tg9;lW>4Iadh`;)>9p3`YHK^~B;boDEE*R<)VIcH$U29L{UQw~q= z4R?R6nj1PUr~mxX9P^<~>z*sCd5CDe-S!Rqk~ezg^4wfP;B~jXlr+|06aLi{+~J@U zU;7t8xH#aSq(ZXhf zo-uLMk<-@Z^ZgFoQoycr1ROqsN0!B+m^6a4sJ-fx`NI6~EK>CM`0ZCixnC4apREos z^gRN9ZSwrQ=zajFq8vN_iRud5a^w=e6|X4aB`VUgkYmO_CZt{N!RzDR7Bc+j zbrWc*T^`pS;y1*7II4RlLVJH2c#xFZEP3zyv@TtH|1;qY+QOy&5p{>_V~B^}D}qM9 z6rTGXk!phBy~^X3c+mtU7d5dfM1T+;{Z?+jo1j1#+RxPDaP^>1Gw^2&NI9g(E;qn2 zqBIN8XvRGn3zLh@sqJG#wNTzv(Fx|}QQdUAKF7KtuV(wM^$c(6{}J^STv31F)*whI z4bq(gA~AF~f^>H~gmiaEH%Le$(nB+JcMc^TLw9$_JO1yz>-_+;X06{jXFs|3)-!Y@ zm1QX>3dBxga;B$2G=Vdw=6Hh#T!1b>294{9FnT2sO|gR1XEGO+L+GWo#_MJI<^`4L zxc`%({4e4eNVUuMYl=cbYVA=-Eubqj~`0g?;XsFOK|2 zM+p3{!d`6Tz~g?WMMh6MU&lKhm~FEt{MK>}x06YJVLu~_T&MX=bH&`Au+HpOY(p7* z^@|!odL=EWx;)fBaIb?66(Y3nXd_-nm;I;_M3eu***$ay$pkYW2Nq~Y3WrWGzg9+hc zy~fOpH60Gi_O)q~w_9A=s0|^~xb6%rQGliXR#OiY0ljSB_1r}S1((2b_*R|F3+kI=~Xk^Jz5o-GY=6t z`jwTp1lbs~yM*)_03v*aIUTIEay#*(0+%&Rx{QQxCeEg3w9d(-L_G`kPw3wF9SgdZ zuTQTRtf%@<6H%*o+pMpb9bi}ze=7>DzkKc~&H7C4wf7`{Q~y(mVZAxdd-LFNYu0@C z9MA1xWCLB)|MVC1 z-Jh+U))%hM0<*I2ni+Pii@3)EQN^oG+l9nM--K9JNQ&DRzHRIw#QvpL@N-O9D#i4Fnr%{2&@NMK z`Z~h4Z&2iIk*=IrVX&h{#_Z5H_2TZk+n~hsd4?mD&2&s`22EjuzJL_FZq-1MnVl2D_PwEkoLnDsKW}N&%XKHO z^3`3*@U_KA;I;A@_?Rf`NfMREg{kaV8u7!^R+yTurhgDs5)rz*mg7Ps$HyKWLD~?6 z*ehSLW&cL>4HCeaPJAaz{t%P)OlJ#?g1#v$4iojMu+PlzTp)x-^d1U!^-{5 zI57vKL9?lAY?v&&@~P4o55IYQuE6fFV(;)t@4JsxB~-V6k$F{L_yWP>Lg`?%bc*$5wEQJTuw)dB!8ePAVKH{v=Ji}q4kJ)} zHedrM;Vg%ECW3QRhl9FXZI`aSgt8PhT$}W>g}mL`3~?u|>r@rY4gbmbfSTLd+76nB z%zvsLgl32L!#t!bY?pkaFMZ7Ftue+$CMq#4RD2|J7|-}=EH!^BOX2KDPQ$hWsi~Dlnxx&fvLX$Cni z9gFuRBS{|~@=#e=xh;d65+t)(3InV>UNo>uT5gawv6S9yABq+Ki?7Pxff(2pBs3nE zoo4uw+3~>o&2xGhd}$#fyd-o9(emGX%C8}1^?3@zOsV`FX0Pd6j@Mve;i3Q6qcJ~M z$)+%N-&^+*r)xA|j^Nre^eua)$XH&8x{uhGI(3_Xu24%eVdCt`X~ zAMhnBGa|YV)tH_GAPQSF;qP!E%`P4DiwhOp4@(V8f0!{Bm_UKc@I-zmif4?lVzCgu zMV+maE|y<3IokWCddYWIoeF?+Z}|@25ku0BoT}&X$OkGEI0%kDQ2(xsiJ()!cllgL 
zmh#qw)1AYiz}VQhQ~m_}ec+GNrhE`(Y_9^GwvN>5yUgs*v3r~~-VXTZ5WQjhvtpRw z^|gPIBx{P=z*s>AJWzUa|;X?@N5^{@R!nymLGbDYpac-_)! z>r1zM_xcMp*XH-f%53OD_B;ofT;!D7_xC@NR8hL1YI3g8!0gc6Y)LC)MyZO27eWxW zD`F)+)JHfBTU22tU3XKXNWv-0UP+92EoRzNRHR?V;$cv|B33qEH=q@CP<+rWDVcvJ zhPs6qQ-RX;o6iE?79XAVw}K!1OyEM$u9-4)*_YYb_FlG=-RaaI-c(B50Z4OSsOv;+8Za~@>B2b z51xfdAnTto(gv6L`>D@^%>yo~5+$DGL5@SzCa^TwUEe5&^((WjM#G%$8*I%0479FA_$ zQBbt$x=TB`q(u1NUDu?ccmjo~Xg;Px-`fXdS$gWLjUZk=$q2&J25YMVSunH~6YQdDnURlH#Eqx!z}_g7t9(7Mn-rW(hzOS6+e{2H8({ zJX2vtDN&va>mCu5oZd(HB!6V&h*mrXW9uVT#A%f{&1J9_FRb&o=u?}Yo zJty=W;W}4Ke!U*Y)!I-VnK>K`u!%oyah>lR!H*MZn47+ZD7HItIyr?RS2e$@?poJp zaQ;yv0OtP&a8W22q61+&kC&`D9PNyCYMx>D#zHFUCNsuR^1V zGEgF;M$kXt)pq^;5!NOzM}D>)f^?6mnBC z+Vn*Ja8%=fJ>e1JY~3=aGbW78L_Ta@tMIO4*r}0z%E+2PoKX-U#gDyt@s4U2I5}%f zX+`ME-&sqD_Vt`UHY(6o=(y(rt_RLEh5fw`(`tLXs%9mPN$k-wC>;>FO7LrfISO>R z-gN6=yVXFU$DYMzTaFM#`UF(Cl7vv@a{LjhYc6DS4;LKruytN{?@`0l0V%_p^P3wt zpb}%{C8)$&=W!lAuTbSOKN~F!O3qRWv|x9GIslWFwG^UuptQ3}Y5A;$9sn!qG5 zu;EXvLDoBgeWZMJVVW9dY3cwq2P!LV##;~(ZNlNiQ_Bb6#*gigY@zWUDqm@2`z5I#G_5dn%$7~k-6*MbU0(EF2|0#|HNFI zC!{ICR0M*r4&QPsP$dbeQJJcnhJty^{g6mys;5veYY2Rl*xt=X1j&!DK zV$(7f?>>#ub3gCrxM-AWDwv==*e2YBHi{y?p>3~zsLrjbiG;iS8~XfKQsP~qaHps- zC3O8slYHmc=g5dIl4{mhH?;YHYp&?(+IWrXtk&9x5ZO_~o<*Bm5}q_)5s^P4ro}ty zYmb){X}GmsWl^H+I~RvblJmKjjYQj-FqwK*p=N@@%JwTr2-y4ZnZ(WSdrKD2(v+iM zc0cM=CVSZD>~_)aOM-E~3!V$JIfA9SFfT#i7s?Ox!Ns2rXemP*^TY^@yI4erq9ce* zqu*)Z567b+QNl#^Xg1y5+tt0EK!ULRWSnzHQ6#Ebh^=-Khu#dId9*h)vsJB)I&9b9Y zAJ$s?x>aHx3nf1y+~mFkC4oWW`X42={rT+^GiPx zrT9#Bry^60tsIfIIr@2=e2K5m#%UNjpONCY{N>%!K46i;ZQmzyUVVD_D+<=iR6LN? 
z&=JTYXN{8(pnz?jvnL?pV`F@xmOL)cm|JQ#s0b6hY4P60H|QV^JFd44ciYjUu1OP zr{r>X2c#ynAX10#$j5g6IkQ>mefI|kbzvG04b(4PNh#*c9 zro4?h#hIe~)QjOfu{Mo+4LNZ0FZb8>a!p8uYC&+p<_>pzhR{Qa$BOXe{fV!1o=1P@ zZ9@p*PkvEc9ZkA>KXscaB02sXwkofu9Xq;*-SmdZUhk0s>#q^=T%5ONBMUIB$zUrWiv; zX-3cCs#&}*^Z*kuR(8W*CGlb4LX&tG{a*r-I2LGZWu5fjMecCYVv z*CcC8X!jg(HGY5b?MQKpjJ)pi=YRcc#Bb6vEim^#68|8EM!iRvK zH!T9Yosw+aP{d%tsHTi_vPhwmByM)pnRsLvINI+ZVHYm7T*AZk-2uh@u~0uz)m#=r zI^o#D9dUkA2O>CBQQ;sSbMQG`s#$)7AH6U_Xkl^wB5se9IkcrpOmV4TIETALiZc0+6z&Dw*|0q|?`P7Z`L} zd4>VJsn7Ymzb`1DS5xt4^?h?UO=TZv`RZUoCx$}5>GfOf;lU!qYgiFaK3d)eSR%IZ zczm5B#{W6JJnDYiBE#PIvzPOxUFLov$N6^qp ztg*PH{LxMl0!@n!IW#hYcF{@%FkyYlv7f zWaL`phxNjnUQj^c^%xV`{#r8U=|udCdN{Z3#~rR-Yn2q`xF(Z~nDx>1Q6KNb%;h%n zLw1GqOpm`es${;~r?wqeUvnO|Fi(v<*MIpRMtKXgRJLAxv$LvcY2y(B&`i*3rZCw3i&`;YgsNkqz>V9g4}eL5F)^6}Mwnexocky0M?&xA& z3Y5DXQDa7>2{kz^@=p~T`~u52z=)ini2jFghc^>o_0D(kj zIulD1vV&PzcaylrB%&3BD`iqM1kR{ znMxe*jWJxW6*|zc;bRKrK7Yc`>a4adBi2e^NY~uyGG=^Rk{?T%pE<>N+%lKLTKW;5P;Y-AmKxxVs5&vlA7mk(=(opIVV$NrEHRb~LQrsWkKPgs zhS#DCNRkxiFKCbz1GjX?1n1*u2XuZ~wLhfbwx@UHsmOnq6&onTw!B{}jEMt3YHEeU ze7PJWk*Yp0sd%{AhRcnfz&lYzE{KH~uw7Qt+>wnOlg*4aVN-%i1+W?xFTwl=dty`> zAc0^)6vCj2t#WdlJDJ;G5JJ|Mfhy^ z_c`$J-i1xcc6Odr#g1~Zs1Dc;=3#8g8H=N+XcpBrk)l6qlFjjl780qR>XS{ucGIsk zxsXhzY>5;XEx(Oy9|dOoGRpMX@9d+@mL+&?$G4> z`F|*Qr(o__RC{t$@V6{)uj}5#y{(S9eEh=FWN}y~uE|;aMm}Bk18{+>6B(3BG%p#* zo#no!NW&~Jub_xA54iji@4kSih%0XCJ&04g$cYy=GnR4XOFT*gi9nUbetqh=-F?{^ zf4&xZp?J=DdG>!Ua%@p|o&p8Xc)W`A+3CQznDuJj1Vg2o%2CYXyRth$4Mx}YRXB@+PvzcF8U@608b;o zWr{;=qa6#O9uaY+qV~oOCj|eYR;in~Y`x`qF@=A*CvQ12=;KF)zYqVior6vAjEK^1 zp{nbM9vGM|mQAP8JOg`SFqR{LA{Bw?>xcFt|4qG_8>X2{AOEb|X&aWgC>e>?54%C+ zbwc#@9mI3_<-CDWX8FOb5yCIlfeE@RMQT_nL3RwBfTrj@u=;x zLx{FSz7WAIvman2dE1-{#m2AsaVR|@9B#=c1vRgx(aMEbztM1Mqa>Ypq$B$vY)5TN z&?UvJqILFn;<*cOl3cM6fD07hKLP5km$)1gdG`MG0u^1JuM9O?^>aNB9zAGX`4kP4 zT8qiF)(K}Vn3Kukfav0Ac&I{UogEI(0M{l2<84*^vDbq1mTV!?1Vx=*{9u&1QAD|6 z;{3(-Ymgp)^V#~e>k>aKSMyea@ql%=7lOJT1xFKiYITd%%HS 
z>7Rs|dkSM;QSHHEs*z?QeTgPc{(*7n%U%RO_sZ&#(yrTCmJ2(Z7IbflTjV&Z@~(-H zmL;~3BK~N<z_hgT^1}n%JDHz9?bT zeHT0A)uD8WVg}#(f%I+OSS_DQ)4h3xqoC>+EeyEj)kQ;JZ;~_~J*oTSxRES!t^9$_ zvka@hv$#U!J9;sNnVd=&`})X*UUp66cr|*_-TZ!K&F324rhivysTmrv;TTrTJ4ZhH z3V~mk$b3sTltTb1@(D%3!o4AjP5b#o0#Oyo)8Xl%I`eK)MfzzLXz^XJH~ftiIp}< zPr~Kuqk#YR?hEVd(CgOtv(oG3>wpnFHI6d8sPT`YGU~uCQ52K5laA|Uo>%*h`-H?i z&!u|dNj-x=?q8U~e8vo@HvNT)Kj{eHWJI=|I9*)Z`_DN(*y7f%Vp3a>Nn_~giChf6 zo_=M;Y!k@cX`P)~@V-@i8CZ3GY3kVq{Ic)TS2tjy3yG610wWV;%*90actJQTi*kHj zNV5H}x4+*f#pbu*Uf`8@KFL4j#htwDcH9(bTI2Ls%AQoty&By`#68a#z0%jn7E}o9 z+F8wB!ii)xpu=MfOa7the4%(+!+jkER6_MV_zPeQ{lln^N(dn$&u2FyZiia-1wQu& zEAL-@`P*IFo14PU%HQV@VOA)`8Qot=#0hpZ){#rE3zlaPC!{foE>m>4IJVdp&18l{ z)HQ;>pfHs3oX!QM%ca`bp4y#~uC$8DGH4_h!T2tlydZ0Xfdv~k506IOa#Jb!^}+NU z|C9d1M44YRI>gWN#;Yqj6_pA;Ay{-1sWC-!88f5ois%>?)IZMXA;u0DiOg5Ps=&-> z28X%Kl1Rkw%AOxe_N|`xp$kQb9eHW z=`gp~C|18I&uKOO@9T=exHeud1LXH39WORA_$p~}g->=e2ENLiYjxYw(Ip{PmBhTU zIn8GotgAjRdjZ|m(dkmqWc6s2ix)-v`=EE(L$uC7jHwLPp1&93e{+jG_)E7>f=5N% ztacL#4muu_3AF42S+J(HMJRQGPw}y3RpTTSrc`9$)CZ7-A17b$jQlQPU`jEZH8z{2 z3BYE`CG!0p`}!>6bbjjd_)xA_nPJiLRb%t?#^dCJ`zs))@oqT=Hjwtsf>NSgWtL9q zO3D22`a)rr`f~*pgU072|L?H>0(^)dC%zK32@-4JEW_$>)C7Vwzk`b` zZ9^TXFR6mz3eAfyeMMD&hOSeEe`?uYv3WT4dFVEJh_Of69__Dhc?4GKrSe?~j&@wX zu6XTJoZ8Nf+^p+`aj*MX{1WzAZ*_kBdojyPDkyAqGnQo_6B|Jx+>-HVEwlrqPif&X z^sq&>qG&t2-#itXV4gWDQ;@Q#o>aaCd7{)e_3ez_8L{CeG)FxyZ3tz&^6`~CEsa-p zp`)DEfA?2ZZ5VHxAS>FdM~{4$=uk6G0jG>2ex&Sd2BgkU!qEH*mvIp}NYh=wf<)6Nh0pU}%};Q=;B3 zgoBbK=3-<<^3&I1_abK4kp!}q5RY~aY0Vc(H97?4^b`z?L=06LJL7Z=eHka$+7)M9HU9{`X;$}6+ITq_e zn6|M}RL}EBCHCYvOxym+u_IGK%f!7QTTM;_+Ag@|F_B1Xcle#zmbSCo6l#FxDU`3U z5E$-&1=I{mV^263DoWLnBt)#q1cZt1SV1NsE>%pv<`%oD`@>$0QJbnSh|`aNuVuzxq(ZTn?PaV=fZqOLb9esh-p zI#u7O0@oSK0-PT`wWzNiz}^DP4@s=Unat1@)b_6$f6S`cI^j%b)#e_ii78h=ibXy= z?>`4sY_3rp?XE*{X8gAN@`GlKfyIO?-cI}lm9N?TXYw(4f}+x4~$$H>puuXZ|w z=6J(=s_)eY42a_~kZ3Z;Tsj4YxpRj`g=KFBYp!x_O;4m#rkY-zzar!8d}-DXvxHCL zA|*$^T{>L-mM>q%_f1v@BSoS+9PJ@s`E+UMCEE5cBxWU(-{Gfg`_N~G0U*Kc=!rx} 
z4`|AjC8`ccLQ+fPbl?>!cH~kRf#OnNke91U>TM0O3K{BR`X(#6keUfT^(JkhN$?u|w*Qx6P3bsBWt8AX{;+1$s*B*Wo;NUTf3ePnZ_=Q2K9Se}IJn*%~1Lwt?#&eW#(Z_tmxl57$h z<RWJM3z3AYR16Bc2-5llfL9l|E}FM@qF{ZfX)QRPSqvzLG{Z7Cgjq= z+v3_SVeYhJ@?k7)#6W^BkHLZTr%F=5?;VOH7(fuG=|+j}QR^nIaCFtsjwYhI_ao*y z>RV?j3z6KiF3tJvuQv*UTfiM-{E8I5%H&=FRl<+`@J6G^aajv#JOtUR9Ykuv9Re+p zY0Hf`^*Kkev17GIw(>peX|hl6p4Si!KA1XSrdu3xF_)*|D&=MATSN?sWzZu3IhzHK z!&MOp!oiust;j9VxObwu%Wt$uEC%z7o>>y*d0_@%4SHN|Dq2|o=MeLRk-u?5Rh1aS z7pP6k3tF~90m|R+&E9}J*!-VIvJA;A2-Vz~ z7NuGhP+qxpW3-w&*Vrj`jVvYNxS9N&W5}8Hm(-s-oLA2>6#hTLsiV@cr;G>G=Xfw4 zP4s&n&RqPvaF{RA~FT2=7S`PKH7l%?XU_)%p5gS=z zi?~Gx&(c5iixl^hBG=tBbK{&8de`IR<(13Y6DwPjOl0<2*{*_xv&-KPe|$_c3#i_u zlsTjWoN`X>&kF96n(0^00aF65aY|>j6;NmW?>!FF9GNqhDlR@!@($W-MdR>^zTriX zgg}l?GEH_cmMbvV67Qw9GE{mqklXJ+!7WO@3+5q0KC=IY&%64v(QwO|e@e?x@NX&P zg_2w9T%J}jBf?#7M{?c3ZVmVS?>`6Jghk7U5R08^K62yX6Ep@BT-8Ff;Ff90A6B?C z4h)*WZ zGL(}#)v^72_-bZ#UKng)SwU(<0YLY)Uz*v12uAxH9vqsia9-bpWR(NeZ+kL)+s_~V zq~$aaR;O3d9vwOGm@(5P5(FG>1g7cbvP?kIM#7W4di7qlJZA05}*f@mMO@@b6 zzpS8n9#YErdA5DZ%Nh|UH?Qb3&ChX=-yR6-kPx58u6AnQx=pt%Wx*zPnXJgO)L?x2 z=-L04LSUuB&Gv0@qJifdTt0-Oj4XdqvMmr0$vyS`p#APk%i_`_Ye$=l@#WiAMp6eF zw|QCgEx>B>QeE z74stmVDVUCa4mB>W z3SDmDD)}Al?X6w^!eq_txVpI3viLCbj5So;-Kd|leQYKo|M#=18XeMt_P23e=s}w; z$DmU-=kb`&VV{-}`xJYTUzNo z&~~GCev`VO1A)}Q-)Me>>ns$)UP;&9cA);j)=PABWV02}J#+LdLI;yT|HG^BhlsC7 zNqj@Hw~*4iVI5x8Sqz_gs!=!cqioh<V;F4{+N$p`Obo8RebTZLGtSI z-TwW)^`Yd@pqde4HG^Tf`HL z#x$bLw1}Z0p7Ft*G|$r3PBA}sXXbyzlZ`AX+_xnyL0PhEi1!G+An&bg#+qp&N5YTH zmtV5-iFC*peo`U3Q=2k`-Wd>!VAbGai%REj5qI`>r&9CN0AfBNq(ZI30OqaO>9Vek zB4XhJmk*2E=lhsirO%`mw-5g*IraI<#7GW?bE*%0BNfQb)DgOndG1oGEJ!)Q-#(ex zDWj;Up3!)Ec-Yf-cD1k6NZZ~F3L=Rj;V1eeQlHhX|Dsb5+)N??*6xA+2`ApKxzY|noZHsXI6ZiGs^J47Abs5_jy=-j|b`-t+qJ$ zp>QAGz@Y!U^t#w_ZvO)5aCJUE5P6b$736@3^1h*@-cbX10CgZ6 zVxu=WB1;S-0J{8fXKMp7J#GY>9|@?w!QKd$nU;Lxq=CLPr9V`~mF$|_of#5vVwm#} zLH;zJ_P%Sga&xCC0g(~Wv4Uq?X37Re@auQ3ZL%&7UuuPK=ADj2Jhv~~Nj;Yv(k)bK z$}2wEW*P|2zg(5BCoX1UUZ-_74QZu+kTpb3?E*9f^il30@f6-#%=rCpoIo9cwIXQQ 
zKwrMhX{~ykOEk3mI@|!}Sf5K=&fb^O^5@viFe)x#&N#Wfw82~`*Ze;Of_LlvTwktN zy-r|46BUUQl-feS7gQiA zn2q5`beyDq#44!I-C7Rsy%7XC??3o^?BBD-!Fqm&7K08oarU1f;`*}2rr*Y_)HE`?V1?dJD3QYs*6{zFhZ3JLr1<+Z%Rr~+ z^w=lN4=V<&FZkl2sm8(We>;rj5?Xi1}`tFuM)ZjjMH7hHWc5E_alX<|iHh3*mjuj>(dlvFzt#zaY09k zw0sR4uF2Wm?}1B#t4yxKWWj#AS&`-6Dh zcRz7SfPb5aFJvKcAYkjGh)KQOaF_=+sygH+%7-VW1>g7yJ^Z^0cZw?L zGGw2&x!%f=HZPwDKS+$$v4$4;H(P}~|h`5h+aJWMnjX*xPp z`NLM@>_x7p)jA$R#_eApUWF?A&wD5Y4AH^I1B7^@UehoieTWqMc53=m7d#xy-;T(i^$r1hX#^Gk4=e3zX)y;P)xsX34fFk(!tJeG&0M=nKwy!JgG zW@=TWfMnvrmGXN*-#Fz>!(c$thyOCkTUT0z7FWmn?u)OihA;d6&flT(1I*vwpK|aA zVE1F7!%cuhO#%W25<7Xs;IZIVA;(Q$f7LCLX4v;5#`nO5GZUgoyBRP01h!z+531ym zvq=XZVR2};&M~aF3?_>*KvM!0MrksK4D!UL!Fm-;(=0mE=3HJuEeQ#M(a`k>{i3b5 zme5qXLY0tWdhDS4k4SHlj03xR%BJ5nt9C`rwsq1zMbc8=1O|)4!w2HwBa3Q1)jRI&opKx{1G-nKXhBqX)TGJs4Gcuc*Qsu+L zp1(8WY~?Ygim~3xz+r_-SID^0X*M=G4m%+5Iiv@D{`X*^;ut>?87GGjg`JVbtb0!` zyxfFCp>6gr%O}5HLGI0huV$P}W>$j;*r=k2A51r8jX7eiTbus!tzk4#ZTL*5g>$gbsj4Hv200@vbpK=%Gb7E)>3d5;FaPa)*)%PFZ6rI`OY4 zJ@(F;{0SZQe{oDn`jUn$J ze2v9d<6^^1x5qi`-BOjrxG$*J>hz=e z6_)x57H%O7^T4mB)e&`faKPuNP%SZuPFm*QNfO z_RmKa!j}j<1anrZdNaC0A~itw#aQ1CuAG+XtU2#rpY@l&hnm4WW=WmLJog?IGAV;a zB{Rle%oI0yo4v>G_x3)bZ(ew)Xw1-i=I}L^g22E3x|PpxSjWLbtqn^gFzMq?YDbg8 zKEwh-EUVpONu_vqZII$Q0OzukaMc2VX;~}pVj5dOKe65d6DF!Wj^#ny2ZsMj_abTY zoY}u!qS8SPRA~9hDaTN?VE9yL{BwmOs#_g$wdl{uUbG+B-_*2lS<`k?0jq;mBBEo` zYDZ|%FamG`r)LzP(qu9OG3Z>lqWa>*d=H}Twl^;SYW4IXpj{OL9N+Q~p`4{t4qbS2 z&i%x}5slnW15oRYM@r*|eO`VYj07sQuT-w)J1L%14LQN7Sws&|d)g~SCl1Um$!I#< zw8z5efHZ(0`-)qZPpSE3LmagWlCd!jVW5};gGJ+yl}MI<6;~f}tZnzQqWRN~Ia!c* zyCbmrTUe!fgva^f{jR^;Eav0d!*JBN|MMlQQTxTwoUqsa)5)dL>&B#Yo4~^J{H@O_ zg2k9nYr|zS>y!1U3_Gh?hx;89>y)c|B?x4@bnlV^GYu=XHkgBh$g6RK3$4QU_Vz5* zc$77g!?!Y}V9tiFF`u2VP-X;iTn+e7p7l!8m-efKa<$Vy49Q0kO=MLmu-7pf5h4}5 z3>~du1Y81P&KehMc0e>LHwJdqpPg6;t34Y7|Iq3s-=Mq9WJ*0DG(yIzMG)hlTK}ga zg1EUw0B5ki1|K3du)x(lKAk>Ht_xoMYjNt#z^8tnzmXqnqWJpJ<{U{Qf|}Ea03^|B z)aJ>ONMl5DO{izwjk-9}?v)f?u#-85uUgo4({c+93XcimxNsS$k*3{@;8|KeR;nu+ 
z`pid_FhMC!51|}JO}8WsP=FT+N0iSXIrFYdXRp9wnl7)QojNk+p?4bD+40!jd}lAP zqA(FarfiBuLdp<`N56Vy-fU5a4LcO6^RViYU^7!2uOkPz_sAI1*Y97SUHpJe#M?2h z5XIeeaM-|1UL32q-&$T=o6B>7o{YWCxO=w85w5|m!o1h@#RzMM2lGz5&ba2|W^=o6 zq=*xYm^cIp%`Gj|%Yga;VEUUUV~`9Kc)&q(#i18}R^&fj&LmaUYeJg9hHuQV13g}% zAO?j}Tz#lfM|t=gO0oBfj7{pTu`Gn-fOpdj3?#)MN+*(eN;Ejs? z50)s5F&bN%x(p;cmo4YYKYH@S8hjmmVMBJ_qW(a!(dy%wTLa>(G4x6vNY(qz< z;o(EDxZ8ZiQ8IQ!a3qMOH57W*V2*ULX3>*St7$2+;_ds!_V#Xj^3Atd-;-Q|%hisHuVU)NUS5fF2+vd_r29D#zQ6HesoM z_tiI8CT2s_mdP`lB`CRs0{alQQPT&-N+QdIE>LH5S}RMH^>Y@RZ;JAn2IKGs4+cq9 zsjzXmx~n-^SoHoznn7~~YSL7s7uqJylEQ1s`VxlnTZp^W)MUe4E|897CIcF%p$)jB6L}-QRSHdSTGDfR9!-L$z7nMGD-MP&-YN-R zl^8t}bkqpsJ`$BYeYm?_LkMpiACFVG12u%;U(IilED-J94 zY}21o7e3WTBGdsL?0Xzz9h3i_l$HKHsUV8aIi~M$alSEq3>J1_tR8*L-vH5YWMuTP z7z=PaT$g3Xz`5~^C`{-7BSe28STujk?=Tf_7oK-jeDDTu4PIV1{w)meO;Y+HSOZssy z9o{&#boW2#2iZZyh)1@C1d!w{e>P>E%vrBTKzuBO;S7b=}t!Y+5b-D#X|nmyRXki zcOtL$m8=hWFBcup7Oy-Fc7`u~MlTCcCEr)tY|rOydqF1?;i|C=$YDj6bDfX@#cfny zyVmj8i6M#!Sj}ke8n_~=q49ihrb)*)MH|+&OJodccAa1B>+q<%FW&^rEh6JvFwho1 zN^#|XxVwp_VSj^di-cjn!)%{I3S%>>24VT1X^1hCvX;@T(dg+h&T6wP{n5%%M^l+>l$`A7>sXqD@H6!c zE8`3vH8*OS=jD;=LsjWV>xokK9Oxg9S+M?nppDylq8e}2ANKDyh#}oL-0rJez_MOM zdpcmuP*19MVj_Pc62^U22N*jQwby2>m(3kH5W)ekXv{z#Kp2p?(ibZPi-Cw{uE26Q zCkE<;D_nje(YvLj{~f%*)C(92)>IkBiDWZaCO$to0-%X4YK0AGxR$}cxdzfL~9aF5SZ6V}E z*ZsP@Xh2@XiDJq;<+*xGg$Q$rrC(ZFI%(Jc3=`6#p;Axws!9Hy{*|^)E9H5$*=zmX zm|>0~UIJrW8oVmI|0dN{^!oe@>2IB=cE#6vu&ia?gemX`gYeA7}1 zIx(h;|LG4(sVoa25d?(4>6+n;`}vcvi8(ADLBJUa48V=ZU`a9fO?2LwEA$ddp?-c& zK@wLv>zb%eaI%CDaTMy|^Z#hNs-U>GB|5kUCqVFlU?I2$clY4#7TjHfL$F|j1_A_k zcXxMpXK=SSx%WOk=7*|XySh)W-LkqHM_6FhQhm6BB(R7zE)^q6s{19mvqleCfHv-0x`pQRVIG0QKf}W$sqx$8T_LdHd^7YK8=n?`~i8s+Ly< zcq}cWbyfbhEoYzcb9u(Go$RA?b@SYb?EdCAaM0K8d{2W$9fss#Iq~+eV7j^_r)2n1 zzNTG`)vAngw^`1#g5rlf@u5hNF=rK|Tptyn!MO>wK7)pl3JFn6I-%g~l?4UK3Q8#C zZHK=jg3HoWXGNG)$*t0)u#G|}s4t#FJ5ej4!LUC0XS;FRv5cuFB^5ImIQyGnHkE>c!uY#atS8H^^y) zWCv!3;UUYg#4a+(fE*hpuM7ihia$N!5o2Vo=k8v0 
zXJ^1vNegV40Z}wAR!pIk@Ku%(f*r+lR6rX~H}cvM>4t$*2(+|lu3$P`s=O~o`)B$8 zGRrSHLM-1si$qzy_L820Rzdn7iU|~5xC!s4zF$SQ5G6kqP7~VcJdD2%zm2zFmcL${ z`CP_jKXflI>o>VX%h?@(v46ZEqP6*`uE&nBe>inNLQh$i4Sp29?GtjqVL3JuJ&%s2hcrOo+j{Km$jRS>hyhY5Z=5i zM8IX=Uy_;N{jQBQC5rNUP>mbxmRSFZz}##TO4}P1q59wj!)vrtndEYN7J>)TN1GV# zD7@MmXQ{B(YEItx?0!+e0hKOM@m1it0|H@<0;dNudHg%_Y6yOKCzEdE+hKJs2<=R! zq8gnMjnQoWU2U<|mfeGjm4sOab|F#Y<>q?3P9CL)0)!P|l>dv0NDdkdDb-4Kx6xqE^ek{?)2+l}dOF>=S zw@Fbqj#{zNPnQ?2)u500@k2~hG%X*AJZgJE-Nl)mn7Dhxd4oq{7G_TL_y;%PZ=!VB zxX#81mW)fs+Qk=YAO@7d9tJdXb@wV!%ODD%bCqIvFbay*&D;zVi9Uv{^plaxaTP&t zNb2gajmtl>cS9#gUtL86GqJ3IK^sZj=$R5l8z)wfa0rP}4HcDnpY&HxpYGv1I6}vikZBaSXL#yVB zGxPb7wSV1rqIEIzD}wLUp4``2j$dbE?d}hs5Z+exUnd1F9d?iAHn)0{+eA;UGOAS$ z=bsqegi2%fU5_7JXYaQ4SQSQ}Emmu>iZSqpgNC1m>9TF9i>B&Mzj0t=LWX5I)M}*? zgsPetPW!rCMSpdBhvK{>@J);kaR92CT>dVnxyo;Wf-KD?qyrVq47O z1%*L#BFXzU)nH+E8cAVeJy3d_I)fq%OU%<~Jo8W(X}i{>Q*W+#CjLQci^)|Ux@ha$7t4Ir z^WF(g<}^)6v~3ins@jVW%3s84rLEl=X(iryC~k?i0(LvXS`hX20fve2nbQoyjD0Qz zQx*^MCS$|ym%qR_J(-PkfU-=@SXrw(x<*Eb25C|~&;nAsm-s+D!>#-Rf1MzubSiiz zOfe=vr5p(6hyofDhvQaFYL*QdRfNCjfLArL(raf5ySZE79{nv>!0FWBHC!SyG1rcm z(UyD$-EG|WwDWEAHAvx1yzt~h;5=4efNWyM#ukITX^JxB_=^npCmI7_;3w{s*bgi& zqujAlY@y~<)IS3{^8$#OSfNFAS8mYUri_Wh2db?({0S%_qv82f=}2h&VML%RR!0YJ zrKTx6kYtTa_P}FXJ7mMK zyvENvX;!xnetLQWRvkS-5}nc>^&EAj8_RKMpUdGvaAVyUs=LfPtzYQ&4vsbXcutxp zu}XQ&ILZLAjonz>vpYv1HQ~KfLEF8fB2A~VVl^miL&Nc)0wKi0I@!%VSsiRK4QJD< zI8A2gP8RHanmC-m7@th0zyf+=iiE+ARwv@YwUHtwJ|vk2FJwwI=5sS5=mjKc$}+zJ zR@Hw%9^pGYJS+*ojPn9@+&0$<7Ld{jSU8nKyiCyjIYh8((Ih-uH+{%8u^BsYbTiBO z_IN%+Ncxh?_%)&2&3J_`U5|G`7rd^hrIwJr_&aBXdh_T!rD=QPL(LEoR)OfBsv)Ey zra~y;9(w?kA!`c~2K<0R5;NpW>3yM12-^1capzsej0zgR@~(1?>}G0kU?0zfVZue) z7Nq5#6FtdB#Wr(@A(1?l--Oy}AmD`zn(q-c35Hr(l3YDV5?yhmk{}CcbIExYQ4R;U z{Xrue=1PU9^zVG9NCrh<4-=XD^+Y6u2sFunZsT0gzzqS5wtr@`g^ zvc64K-_x&X8!lLbL+HdEgKdY%}O8>S0Z6i|u{^p0k!$PEfyE5b5!|b}_c2jEp z2I5u9GjU?L5y5*IDx5n(iP6Lh%wW&br2 z2wlZkldhaDvNoQnkfqo2=e)&P3ndw_0cOuCDFM5A7wP*_h_O``$oqOXWDLEy)z_u? 
zPZ%kGW|npqF=AX=skjxnTzvi@CEWW$Zr+4|)Zkua_%Qy&^a}vMA0Q=|nM5I<*zT2XM$TlG(CkU;H@B#Smj|{_!1Tl550_6f82wD14~D50D+w z>^Ha~xtjh{)87q^U0p~>)C6)dn2a7wapU25cn+7YJ@g7oZD-EbR>R4&<(9(#^I~|y zR)~=SodbLkk}~s|?e6Elo!pO(<|Oo%A7&oBw>Ni(nb&_sX1%Qa@Np$f6?xp^jyj6v3_~JINx((&_N0 zf*enmbZ>6{bL0%9Mhg8@bx;6Q`}X6@9wHRhnk@C+a$ea;hgs5WwHYVtEU)r3eh@9! z3hv_1&z-F@BFFL-4kxP!Kb5|st2?BO&c7IF#l731%qF7cq8+A^-UN5i19O{;m6*Er z=CZ=@nZ(TircpwFRkp&}Lo!K#cy`~xca*n>Nm?<5WS=SVL5^Is73k-eGfcz4bIkQn9vz>1g0M?O9CoFaq ziicqCz;>rkk?tSY>nYJ;)w=M{ab@ISqV-Xd?9Y_uRIwkp)k0A^Fby%5IeF6Z!bW-E zM0_l`G8T@7Z!aEE);zB zZbJPOsm%L)(?6HnZi4o&)jr}YE^_51uASWfkmDq&wj@r9WQ#!B-*op$*gkG?v#)MV z%ANYH&eXW;B!%+Fn~cd0?u?_;f7ETk`O?A7cbVe_L<*aoHYzI!n8D%(a8Lu8FjEBc zz-2q~WzzKEy`nsL*7raxe<52lD>8G=A#iw=h2SW(qx$%fFnG(#idI?80uUt#3*Fh7FNvIgQoJK)s%mW(P;1-457~HH zk8vEE_|#eKiytRW@TWAXKObf!WpQ%{+{dH{$LhqRDFj8v6ox;92Bo1oKtN``(yA2u z7ZedtisvW!_PKN-r%UOt#A7t;yoQh7ucSg_e-hE8gtZ>+518s0IcXs<^)8r#0+6#{ z5(AMqI0!tK0C0ozanj(Ha#)tA()nujqEqR<$Dl>vvUYBaN&YNYG0cqC{fSO>R# zSV!YL^5U4x5jGTaqmNF~ZHFT$hWSG{klKnedXmP#=m<8C3~*W$oCK4S5*Q?i>Lzr! zq5T*5?(ocLx}k~4d3I+)NhS?vdF)(TxF^CmKzJatVKaVd9us6aeFYUYs#Z@=0l**6) zqo(M>v};<~>1QLpwb))c+u{go7-}qgGJ97SBg0La+1t4WovwHJ(7=K`v=xW*%tug$ zn?e!T$7yprVUlYHnvAQtaih$1G44=GE2q`PYph+rR_Fg_L`~%gpK7f}nurrXStl`G zUa;TYDB)OFT;s*O*myA)QEy)TX!kZH;JrC?k+r<|3d#Gb30zE<`@Uou>J*&!*w=hW z;-pFl4=u8)Ep03nHPKSV_2=w{DWU@sQ5vVruF1Q$HKg+=Fuq$xSMQFC**VZ2S5#4- z{Mzhk_^YITJ9waD3+@h=A%t^0@Og$^DzAt|5tk5P&%vtty|KADjmiBh>yHV^ z%nejLXv;Y^Psy+ode8$5lPfV)pxM-jeA!~P^hWw8Nqa|XL->(VO<9?XzcdY{6@d^U zsET0x9wF&(P(sipZ+vgWcKO6{*qs32AHyw=UT3DOib6A(qlKr`XLz|fQ;g#;AD+@? 
zM!xVMQo<@5Ibu|SK(PCW((q7%vOZ8x%7rSTy$t= zFgIZF*77)J2|pe#ODoERNyr0G88$)i&l{k%7xQp-vj@P8b?hlR81me1k6~6&{o6{f(1m7 zQj{!632LJP&6r)8q$L4L`%_1fQoo?_*+5}OW+#}w<3J70JQCgvrm?A-#k$1my#98pj9*E6@ElfoKzB9?8WdE!8f+)=24#c-@FZJf-uUoIwZ}Zg{2s}VsCM86Z zy`q5d&pCvbk11>ksm&i9jRLE)MsR%*VBWw>o-i2l$nzS+1{E+yVEG0f%wtaMcaXNp zgF73xueRHNvH1zQ5~BVO(EW;kI-;wNMW&5t*Ca!1gpNP{#=JA+aDR3b!LgBo^@C) znA~U6eY{ojDXVm$euqBOWRA=&|Jj;Eoqp8J8OpXE9a>P7_M_Qa9v#z_Sr-&53bTJA ze2SE+hz2p)kcgw3N(vPRg@gexNr#!B+Mn2#TO<`f4&OFOE(XPvxUR@cy=bzSRzP((-^f%yb<~R(Joc)#I6`j6{@Yw2!=h zhup5Eef9?^b_{9&+8$>+t5Dbc**2{cQv4MtK24hIA`K%xhi3qmhL3Ibl#vt1Vwg^2nclf-WY~T&rR(piUiBal@b*@ z3yA$HnwYR#v-TAiF$`d24A_y+mp9pnQBI%hHobQ(%V(dPbVID=F!$aEs?S=bD1=(e zT1w%`ec1HP#OsnH6^=F~wkA#?(^8ljU8NvO9K>pD6blkKe4i z7Z*tI@t27)j-g@*HG)5&s6%rhLoOqX9T)+{O}wtw)%Pew*%KwT>kL5z|)F)7NnDC&sy8& zSB00odoIDFR?HwzvUdOQO--2@Z|YX5eac^LVM1J>9uajPsVJ`YwUuwy(n!y$QPDmVf99D_C-`eVN_Q2mF0}l?_rpC zf6Ym>Cp=?y1r7|0@TVyCflkaoG?wQPd*SdBj-3K1kXqa^0&CtCP>((0&35?2f7-g4 z&aSuKkdbsaSm5{2{vm(*WwakJncu8zyyZb(L40}Yv~|h1ri8C$cW%;RkzZ}Drr(=h z_y|n0VKTHQDu1~)FIxg(jTaRK7HbO`VgOiJQ?Q9*?BE1eoG8Qd6g_2uCG|XZkHG)&lO54$m2L6mk zVYpFxWzotp?vNdovFDwen1fpb6uiA6Oc$Bs^Yb70U;e6hysXeGF1UBpXlN-Ov4Wxn zH(_dFVGCg!)GqL=gR3Jxy6=beBBDZ>vZOuP+Phx+>U_W|*4>23ccJG=W9^s69|gWA z^ONXV4$sct?Ss12`)KjP#})W}jwc~J3er%{kdNJgngFRmRD{JNy+42TP;}HPRMn&a zgsJIB%pe*AYsf`tteyY>3z*A0SXEjMDP1E5$k=rwMLVrI6vos*!{CAHl5GcqT3KSo zTUKrv3O*Vd10yc*FH{cb$`0`Sggj(!fD2>PLipy`m!8f*>yeLpzo@y*0ccKCK`}rEvElW;ZNO6k8z%l*T%uMkm~a&?sd*IXA}P^o)LCRcYv6 zt>(eT5bh{)SUd9g3S_l*6BRY7)54=ct*GD`2=h>#JHIFQ_$jXlFhE#=G{JH;=4!e| z*J+|i&ljFDqH1M6PLS!8@qc;T>YNjh@-x|an0%07s0{WsCRWj<+rDK9c`yh~%C#8; zV^RhTVs36RsT->bD(=rgB&Mo*lS!_{&ceacZ&eq{qh!!ul(%`&BTMbr(}|47NrN5e zg=70GuO@EMhQ69Nv*U4F5-wX#`qNLn0cuGpEXTUE6b~ zcyX|^PE}crQUhUrmRQtgNpE6NabLThM^EmCYr0-%pOopTDKud9axc|C5@NGrkBqvj z(j$^Nni-mvG`*#6){Mq24tB<;4N2ktRwej|h{*&-D{DP>%ij(vw|nh8=JT6d_huu; z9U6Yv!dv&LF6|82r>Y|!ZYAt1y{ql9EBw{jrw_Ug3MV_dSD47xMP*$Y3wVfu+@s!ukWP@ 
zpxYG)Z)UF{bO2Qs=5wC5@WIM7)An@qWhM_%{>1agK9g;xcCDJTq~sd+A)e=hQnA-P zy!P9PC2UUD^{!{d{jX^`k*0nJ<`oyQqbVO;&73}c1Zg4$a5pU~KK_FF#)iX#I0cd* zDyn73MP3>5oJRvBO{Y77<977V^v}21g1L+6+ZlhhN7Z1 z8ETs(qW*gOK(~BYH5<}ZK5CBlaUmZooA-r9HgiUlDIl#`NG0~_>c;&>=~IC!$sVb+ zjm%C7Lu{V&MIS&yF15Ef26tM*l@OpwKmP@gluSuC*g2vR^)Gq9%NpnZ4g&_Nr-iP- znn)%rbpbW++|~eT2g6(iogoe;R3pBtdOsk}<0omu50s0!e^o)A{ta7cM+yz z3i2yiD!WN>H=i@JCYBpRD-mkY=OgWsnm*PBNNwVIqH7ZVN1oo1#U+R3`} zqtx9L>I{rYD5`R_y)zBR-po#SNXiiowwqyjgtm6Qt#ZfnZtah|dp~Ln6T8x$OlSts z@UTL@qzj4$kb%{{Fb=7u|LX1(RuVLET`3ndvVT*1+nnAuHFE7B5{Gwl`sgaWPE2IO z>oM_qOuF_I9StUgZ0WnUvw7|Oq5l|j%B7>374&dJcx`sqBiA{+ZGLcs?d+0kO`>gG z*YBYe@;!-Ovr}X`P<;@ArK1DN0N=_p`yXZ8eP@*iv!*AK!8*E8ltj>_iG&7XXnsHw z3X_6P4v<6tNhL=?6o926UTI?t3^lL52+r8f_TZZErTv1NIpBWOZQvu3?~jGaqvnG) z=85z7%)2N|L<|@~071*9qS~m9+H82hwnku_8O@)FcbMV^iuRWPM{FS4AaqbJmW;{! zsvH$Jy1jnW^O3W8#<>5U^axe_)lKJ>P{0UpZVfYpg)kX3s^DGy(pTHc=(9a!PIA#J zzIa9vZpcccyVg>*{4}d10D?mnOG$v@E@lXCPMKReT*6imcz_T;&rVLlDw=oGS4%C^ z7Ve!r-3#joB|!Ku0$>>mjLLax-HN$M|e{q=Y)d@x&_@vS{&K3{jc2 zgGTFl>F}8|7~KJ4Qq;tsoO3K@dhnkaz@o5KPjxh+e}SevWf9T7zzFcGTdD&GfvFJC zXYUFhfCNtt#q&L6X|A7wQ$n8-RL^|3D6|q08aq4tu7^B&fcPU1OC0t=+qWYsyjS)_HjmZQOyV=))E3ytER!yq->>r7l#6D-p#!LLbo3 zFh|_5dq%%<%LrQa1mUx~h}&~g0o?{e;#&)A-yspk!G*#m0|MXME#iDg6nB(SexGTT zei8*jH{^1Eev$~-lchibJ5|C?6Jv(u_d<1|V)@|Xk!zVVaM=cXuNa1Bx9=|_n>!ZF zQoe)7#LTLBy>eR<&q&0(&*rWQfXYt@!hw$KZb`w43!|AC=@1nyIcv-M=AE9e_43w& zY~Ik<<@?12uDD52O`Gqp9A~BCH^y)y&;qbw#6?Bn-5RiH%5vkBqLxOT1hM;8e`%l| zV^0Z_$(*v`!ngoIl4TC>lUPv~dl#?_z(rRX$}wRyvF8@Tdqq}aqq0Zyj*t@@H^4P% zBjY>$KKt(k-5fKhOTHYJBHtS^+3&X%pUsUerxMZh?cDSv zFE(@c+nP1O^h-CZ55>+;yY>3sSG9!ouPgfZKQiAQuh#Et8DFMa1|43GnsoR)J!;%6 z65b`>?~NvJFZGFlgp!_r^R~8r3(`Zgj#M{G_>|MU_o#7h=>BoVQ#(ZL(?#5=-8cQG zAV!iX{ZVz$pD$%97T!&*F+nYg82AAlU?w9sm`X?Mt_{I4W;|s@8XO~nsNnFP6}iYX zg!h)l19{?NilP9i*upOGn*U-7WCJ-bexu4XPoTR$3U&~p@Cf48Mtla*4lJIapgZZ_ zSm`4L6SD;_G?yNFK!Ue_){PDdvM@5M8_}INhf1ipttq>)QfHsAu)OG3#Wfe6zWxAI 
z0QktZM^sd)`{?%CJ&jT=l4Zkr{e9s-5z-S1%;LVaC2LH)7ZGm5}l)9o#ExjAVLmQa{NgyFr)-tv|faMpw_z<31gsaeD zOf3X@oNNL|o{ZB`!rlQ-2yti+))l$<>7AlsXtuea4w2`QSjwiXW@t&!2R!i#4R-*F zVBE}V>Nng*GZqKngcok9{!bK*-X4NtTZQ7S80R!{Kn2|g`;h%`LMY#KV$ISNgNEFC zVNu~70CTtDo|*K;YW;LBMp*dZ>SnRBISGT_Q*NK3khNwp?GYJrHzZ*+Cr)=V7OvWN2vC0#w3R+t9fgv!cMa-3 zehm3&2~TUt&UrF2^RE++B$L-M;uG*sdBfYak?5VDk|U+=Ma+41K1@%*$@;6jVl$C>U+%;Fxkc%J$XJ>*yI(E7vghj+yC0fffloGLs%a;fy~R1p!Ur8Z*W zqvA)Y&pM;b8FScN+o%CW ztgNh^E2Aj5(A>kK?LkaA41V`!Y1gpImWI4Z@}n z8&S>Rb|orDvZtQdn?U_@0qH28xEVfM(|U9B-FWhQ<@CLH^GYG3H`y60%7gg{)o9OP zCSI3Tupw?A$epsN9`5bN!t<&oa9j)Fqo~em4o3BfSQC$<>#C$uCi|;>&20`MBl0)k z#}JgnR9gs@X+wJ1e!dPWw59je z+r4_b9d=jMH@h68C>(b}*_Hm-q4>e?Vt>51?zNkB!dy_C#S^B^?5xo8Fs=T?nFh8e zxi)a}-X5wJGbXVW$g`s#KJRm^+oS;H*P6Y>9WD{&_BvV!7Z_f2S!lsCN?7?MIrgy9 z4$FhYw+3`aN5U5j?5SP3!mku3cJ(%;c=Ir@WSlVstB~hdp_p7Tz8eBqk(|iP-iOAD z?RV_Z@9@VWee&XPjbDiW5hkdNh`luy4hYGFE!Gtz>-N$;>Kh#BEUtyFOZy|?Dxpc< zOd(<)l9Kw;P+p6jZes7Gr~?0H6)@C8a7Jm#%qJ0)L) zG&l&zxXJnFwevY;6WIU@sU)j}O>0*6=DqYD^LPo|5b_j7=7(|8W0A!-X^3xaUz1`?|84At(m`Iy@zBC7HeFoRt?7wX|;g+|uPl9KsQE*ag_s4yO^JV?eX!r~#x z_HDaYg6j2%q3zD#dZN_v^s*FBfb~f$5!49MmDd!Oy!~#u%y2mEH9bCBN!+@LQL@g7 ztQbVFaGJNrj~PeUEfp+6V{?hmih^r2*MTL>#Hy;wi4{j91jH2E0$@_4U9g*YVjm9!?0ryv)=0MnK# z!}_Aon5{fox&AQZ1UzM9uq|<8FD$7tXTj87jo3R&21OM6l!W)tK$$g9x@OvtDYq#` zk#-D1=$f-gZk>z_%@nQ%?q@a;AC7;+kfw>5e%5V6{1#`Emcg2++~sZar3`Kpx^MO9 zYm$(0dZ#wEt!MYIr1yKtkQvcDIs#LJbN8286=LR%1WDU#>-+=j_$15iI58Y(?YG*= zued#J?9F*k3Zr`7QSA%2oM_V9+|DG*((`GjiD=j8_Wlt z`Bf62S^Cc2h?m9QNK3;Ca6us8Sx6*8o$zYY z%l-SRlX$x|e`-j~)S@?=%Cptb5LmbP_In1l>ViglL}XSN3m4PxuNd*>Yl_i70yL7a8)@?oBT+#x{9;Ez`FP zO|RMUD*5?zdG~yFpZ_|w{c-;F*6(Wlxwid5!S~|9UbP?@Yt0*)C#K$^!o%ss>6qo4 z??b!pv1^yP-$~{C>)P&Oq%n8_Pkm0phNvIIxinf76ow#|=s_^5|8%`G*Lgj(eD%q0 zx7Z7$UR?{&2o6Kcvj+ly)ehDn{~VCUiKw=QX6s0SBsQuptQ!5X8=Jqo8ZgGhbeffk z2{H$peqtEXOpHLoKtUr+|7oDufg%|lfuDehd3}AcEUyA5UZ61i*0p^WqdI6%0mTp? 
zEW)hOZt^8l)9(>vodI4eyZcl#%ZcH6HNR|<-?`RrI$$CN zh^-iQ$3kJo#d=1Fb6Iw8eX2Xem)6{EYCx69`PZO-zyzJXnJ zzte)(wRMfBRKHu+?r7~Sg4UVaQ9=(N^Or-%o4AMEabn*Cryp-yZ)R7g`%i;QeQWAT zn>j6aKj0U&*hVKY%5^;?#gv7thUL~#5R4;_)$?-%(x$uU5}Ux=Vl z(;Mh>JP;FEoP8|50I4Ff4fezV*qsRknB3m8@(RjZR7!|}g`I;5 z2|vM-UgjoIOMx2hBCSy#DR6B3$};cakq1|E_R3^yJb705ezr9tnQYTlV7Q^Uz~O zih@ik!+V{iv^>SY{GQfE9>bd$*02%7wj%*0)w9xRQDRo?xW$WK?;YW@@-BAnIXfv` zCS@SLe?XWHGAiW?o;fUG5qhtqt3@DEy1Tk7VicV^|VMY{4nDBh0Y*__SqPO8|hcIRSw{E`5`d zg~sIB`75Y<3*%HgYiG3K#_ws+kL}CKq#26}87cejTtF=fAOZ4bK{0iE1O~UXR(>6IeryZ=<)8QvZ zUhl)|d54=i-j;Lbcd2YDY~dngJDP*LV91IMC|mK`{nn_90Z5NgE?$%3q-n z5_qDr8OpxZAcKW8$b{2|6hl@RllXL7i{$3_$58n;;$!;~Bi!Hsu%dZgu2Yc)%nG2{uco8wh=WdKAU~UFY!&JNaaq+VGo4Q0aa}6Ypkb9f!=HmjDvp955 z(9zvo*FGrh4BlM0$(4J9FA72n>;zicQk@qx$6lwio)_^X&<&6O5Y45Ta>9HvL^K4d zpsCd)i~m8~+(DaJIphkw@JH3Y@yCptPV`SLO}b>dV!{as3u`Vuw)&IsC*Z!s?H{~= zxI6$@JD`ggm4_mYlOF1J&!|#q<)cv1zhDg08>079D*ydPSvWz6ZG+QX^>z#ki#Z!X z)8xmluz8N=i-QUe*nn6U`O)8m?O#&QE-nw=i}Ng0RVZ~CTDd#D5i$~$=r=oB)?J(| zf%JXoqHNpl>(@_iU8%ALH0SFsm!3;qmbD{&YgVw)r1Et=YTZWoV3$v@dku4*>o^5G5=fWtECtp`rnCgEV}t_P$P4p22dF-8X&B^4_12^WhKG}*nyZVt0--Cr zkWkV3lm*|#5ss(|K_QZ3T7MHyGtbi{LzxinLhW#*JuowH#6lCs;qDLu&__cR##y_5 z3LjZr0XG`2b1|w9--ZLI%v}{5?@rTyy3Up_ZtVCoharw6D)?^JP)*ULAJu3_g-3u! 
zk~gi3^jm$NsLR`DWl3LOC2r9DERLrY1Rk@VH_sk-_uF5c5cHqeTfa`#b!anbx1X4a zh3igKqjZ?qcUdA)X_ew}ACvzX%TX;m9!ua3#t>d;@iuicH($Jyn-&c#!crou^d$Kk zt3&?UQ5uhp9ThBo6Uo(e<-E)MLmyNFF+n^@IrP63h_lG)Khx`aGe3i8K2}H$y=3d= z)5Hp%J$KuUw(#dVUIk2W_tI3R(wm-C6Y_c)$3Nvt2yhUTGom{phAM2$Q6->Vp3I{2 z-3dIRgdR^9?3U`kM9*ivR%YE#f$aFJVk9K4>wO>V+wQg%z{xLJ53{ydq-*%wkOm3c z;ftHW?M$~m@{?u_#+3z6*~ne~#vit=ORiVc^;w=JJ_kvB)!NGDU%A|P2WOfXg78Re z6s;O}unN?Yz)lBPJ6$#rDoF2|ekXLSYNQjHoI@ulExTs&6{jx8N>OxBmUu`_bcFSM zmmENtHPnb{NeLL(u&)T{M5Q7DhNZV)&;tU3Yr+(+T0h4`LBoxpiju{O{2;{Q#TRo_ zHu&D$+{iNmCBP6X*062D|9J$m7D;`TNGK;Xa&C*PO~teQTBDCl29dS{{-_4A{nSwM^6n9t2!8wqPL)KKHbA=Q6+=asahutNsO74l`6I8{Wp zylbj&7hZo}->+T0aJIWTCEl|7S-y>0BfQN{Pn(%bkGH?}l?Jt*J(Z1n&)+?7s3h0F z{w94kmeXlwUw+$>;CHfdIz4;6R#4Jk$_2exxYkIr4QJ4+Aj>USG<#f-Q^fvWe2rUu z6xNPmP^c@n;s5Zl1-#9hH9nuo@BS$9SS!)GoczLRQ0L*UJ8gnrx^E^&OOCxup2K<+ zHXxO*5l$paIhMGZ9+6BF5Y!B!i`WYQy%QbIFXQJzig$GUKp4)0%he%hk0%bxwGbcH z5jb0NgMIg9%n4Q0StqjdAU^x3@0SVBvW=a!A!?1+|-9yAg2p2K6 z!*b-gv{MH9jJNx9vGncwd{ly?{UKK;j>%S4V<%9 zj7*0=!Sgq5PW634)@@3@>>{%yCyr`7J@-yW&7UMJvs92F3?w02F6{3A*qiDN4|e!T z+7L#}BO6$IT5u<>HvJ}3m-63gXDcK4{U4mTmHpNXA388BGrZ)W3p)>Pwra^u2~5To z$3g@zTJb{=7rGj+3%j1^eG^Ybj|w)#OYejI30{PS`vvq{7p_d>(0z8 zwQ^+B@ED!x8fmU?hm9|tG|7fDuSZhiTg^+;Xcex~A4xm>^V zoo6)nMoJ@hD%l@?n2+(u7LD4d18`s&bXMf4u-f?HC08#NJOEJwSK)Px;GF=8&2p3BM09V6IeyQH!3eZ5*ytrGs@|oI4fcJWn|6sNb z$!^{ESvhR?9cZ=bRK2~$4%>U%X`KJh!b|zvk>3jAo-Pil9ItFiPFg)H8-)W@8Z#uC=WH#TA;=K8hywUg)yL} zQr0wPH=wx*))X`1Uaj6-?_EK2p9S1e#1<;y2HY>jB;Ty^IuAEf5Bx`dGYUV3{p z!n%Q#IJQy$HH16W`Jh=T8QAh+`{KJbONSg)C#Se=Q;}^9$FVOam~l9xBdavC`;q^` z;E|h{=;MYd3%_GzcS{8~^%lHZmNh@u&21~>|9`zg{&(=}DOPDX$R8F`B`4wyW=KMF zMsxPSwsnugtc!-$bKVC$(n3ZQ#5cdQ!~GAUtqu7mXpD_{O-XD;vQkU^jvi{yB!AGC zpS3m3+UV`As*^s&$zH~(zg}cLhWiz-wp}auJ(~Ny?)#eI-;!ivgH;*{Axkv`d&=H}86w(=9W8V(Y^-Jy4fvHDwZ2wN`Rt%cfk(tN9Ce$a+{AEiE;v zBA8Xjzp@!SiXK}B6UslGxH#>#7+kC8X@qSKjW2BiltfJyr+r^9d4v%ZB#bC$S0z0= zU}YKF*xc=+^M&0BiO48``>V$+=x%Ka#=+hRp%JRM48A!09345JLzjpN4+sOD5Jc2p 
zY#4qpR#&#d7Ubn>m&Vm0nPSY9HvWwbsjq}XnnHE>NNysZi1M3j=+%sC_>>VjjAi$i zaE@&DS>8m1LIshV4P^3^Ma%s2YfpY>pKn;0Kb2(_9vtG&T5I}%yoz9&VF!W!6Zd&|geW+jJOKu3 z5#)nG2kdlrcBr?V>!S3L4eEx4V}tqit|8%e9+twy5Y$fabZ< z3yHV3^_HFPIK^_&3w$X{n1Ajb{J-{6wEoc*#5J^q0oyYP0IbcebRZP}=P}GwnX>1a zhPD-o8ty3(>6D(S!+lzb0vJ*OsT@`OCB0RP;Q$HP0q0=WeDzA1^kBxb^oEyA(&57% z8^?At*575Ewm7{jka4dP{h{jo!jYu>T%KM&4S$-hqvZQ|UQbHF9CiY?_rreX_CRi!mYtDxycE3IJVJ2<^D3D1_vev%m|9^ zAscO}^0_g8=OOL~j#CjmoLa(PZPQ(G;8|+MhgulaF|iw=AYki6^%gS)u*nWdl zGTZ%JXM^Q=rq>qLHZzm={(DFux4aAe|1DRNJ5X7uSsmv)o1)6FrDeT!Nc~iW5t&$} zA-g?iMv2T8ztugvtxz5_#lKhh;(xl2T`Fku^2gwci zwTv5W^$aK|W@dm>5R8@c3c}Vm;3fC|ED#=094K6jKL1qtrQpAB1`^M;|4aM139zEU z%GID{LW`HPtiVKY(*0QvpMKD=NXLo+!?Q99MG*_rlGgFefa&)Oo$cFWDJs;2hH(i& zwPMMQBF$LZ&4W8=yDWxEgXrRaOnr4!Tix<@aCeGp@KW4e3lxVIcS@1s?pD0Glj2aM zxI=K)V5PVOcPYgQ^6|dCzk9#^N7hPK&N-9Wv-h6NJo8M?iN;sDrlfk`%;Lh>J5{J? zL;d6Q`O;Ry)1`w*?wS5JB=J3=2Qj5=ghd+aqh6QuEQschR<$KifXA=Ac#br5E2`9n zY<4UDu&j6rmgxH*zxYLDagx9Olz)u^Doq_TzRa5<5*k{YgLs6Ta!B{?{MP7YY2c_3l0*!)l@JlxE8m%Hb!bJwh{8q4{d;=S7j{u*R4?w(A$~~<|C5l4^FI>i`Dke~!DA@06mw1`V8Aa^;;DJ|y{@Px4a$VWwSlaD7&knv zS1m<;h0%i`Dwwa%F9JBQSOAh|XhIDqWSW3L6}lo~0?OJwhJrY%n@*<8j}QCb4I|Rj zKYWN*tLNeJ*!IgqtwERw9wnp1@n-XYXrxXH7 zDNQeB5IDId=(Eo{aFYaL=l(it7a1^x(&Du~@!lV7h(B%33oLjk04d`jlrFsr`#)LB zsAT_7eS~6MrZnmWsFEF$xK4Ex>+}x+5A~`GF2Q>vgD(E7`Qy`_$7|m`Tnq&x!y>at zf_jT?DL+N1Vqk4Jh@r{$*Aq-jVlNCYM7xj7I7Sb`C~23VRh<1aoB#%OJ^N*vKFv>v zfJI6LW;7&ZaummE{MNygv$Y{=(@<5V5z^ZmCV>f3=UI{x>>s!-r7m zXj~h!cw*47&_>7-m-%vfBy$Rj98)4ionldnI8+?2Z~3tJwQB?fGRl?AIK8GMPHLX< zQPgtp8WkEMS?x%vJCspZ{L%4mBKzM5P^tf&+SU2qwUdDj=|nkt2(9PX#3$?drJX<9 zI?4c%amXR=6QFghIzL+;#!BF9%*$=l^REH*X=nJ3sX?cx!+}Pzx^o%=# z$58v@tYz?)Wla~G%x|iit#1n3vgJBy`Z-0dXQ`Si*&q-&1)ipe7gNUQU&!j;+td6f z)YWNeR`&g^FD=eQQ1mj{57O$-H@W=Gj2^f>XZF_}ZMj4Y`j0Q10FsKQ(|NHz8Ku5&L;b^ka1@7O!FJynv1^9<(= zO>4)3+XX3tw>&8c&rk->f!$QE@UtC(6orapUJ+_X8->ETWTv8CQta=%iQGW9doYYihh1` zD@K_Qj+zTCDPg|Xn{pHEW_T@3Nz!GcoZW<5_G)OG9_-(DPcwQXX$N}|G zAI(H&U`gtV8b0yaViG*JHv1+gVkC0Cbg=rslN&Mm>$pwRT@sQi;WtRFqogUC 
zkqWZfte6w`H2<~SGN{zQljIK7LkcB*uy>v1ublU7<7u#zr=&ygY^^%}$x!|svqt|Oz_X7#4M|UWXTy)Z zU%Bfm^szBwv{k;WRxm4*o>nMZ1M5lt)MZ5ME0Z1;11(O4tU?{*YpTDTfjR7YC(L6C z9A4*W26<}-W#U;-FoSIcB&_w#1=lUmg8&{8A$O;h(V$MPj7aO$EiO;=Iz^rY@aLDD z^I28}xoNjH%45zvUs)Nn9#@(rY*qJ5(smYc^gtPHPxB4@533g&`H!If)&KgFryS)O zWISD-I~G)SZ@V!W67Ys2wOx~yOj&ki`-8_-#tFC8!5(~dyx@Jm9M}Y+Qqv~C*81un z_Sfd7>FdD^$eIbk!?VaT7;&PYn2+Us+l7RnMvQf-t{<^ZDLkqKW=f(_dp;wEqn|zV-UPNF6+Ij zl-%Ow;8Pu%StXXv+`ypVw}`g_Y=*Z625qje@ukqTB2H2Cc(U_SjBK1S8nagaAIVK5 zKQh|{RO%^Ot*Jr{y&5S)+F%k^S3$l0vtMqv1+*QT(<%TU9y}hN>ZVUcS#RhBbK;Gx z%ye$>v^se9B|ud}-4}O;S~{%A+S+=xt>ORM@Wplh-SBTPJcr&fR}TJCee22N%K4ri zcim!;-U!!TjsEtyMk(F8}i3$=9F3_toW!EC51diqERD zM0A^>@|m=urE+7RPIqM!ex>bjhcW_+2;IZrP18#6PshaK(y=RC(3swrtr}&h;lCQP zM~cw~MZHqA;Wj?QqGiS^bmO3R5fZ))p)o#98~Kwy?eE#$G*!qm{XKop z5FIRw?Ro8Xv-?37d$RtN| z-7Ph{VxLM&6BHL{(q2VddWukXzv4QHHyxYVym#?JBxgCttHQF=Ai~x8Sofc(|I0#M zE%_p;j@*x!K`u{^2De6TRKXeAf|TLI6Z*?_j$3)@9gF>x$GsT&w=Xmlys87wj2VlzV2^+sb6d~Wil~&i-hTH0C8|2EG-g|TyiFYO@Vu`C3 zT?)p&w#q!Q#&`B=kpTaxc{kk8|IJj0w}?i8<1~jBm8PYS5;1{PSZ~3|q`@(#M z`wYy6{7p5Dwkh*H2EzDF>+148@rlI;`h`5_fRt%CQy$>tV*(ryNV~JOF9CBdf%CNL zR*6?a^`^q1H{Ar@-pVZk>z6yKc6JgHa9{8}yJ#c~1C| zI;5E`kNS+N5PG402Lm(DgNbm29JqojI#4oC%v7iqlrE`5jW9p_C!!wa*bC1OSl-H& z`h$+Q^i}Vn^4{fBgeU<+LLbSH(j4Kohq&5B+vj(dG7;gK;;>Sz<0UL9GO?0_g&bM> zWGhXFu)>V`%&_R|6=G6vl9ml#Q!2w%QugCTjyb`|ByQ1~3_RtqU&z7xLLJzI#pa6e z9KuW&smZeeoJ$!BrUZg=M@@;}V8P*3L}vI_FUKhxz`w zAbpNe1%1U8WNl_?}Z1(Wu1TC;P7 zCY!WyLRO_t=WP2X*W`XsAVKTFX--u>(y>$)90lMgeG)cy)8Nf=Im z0SPA#rq}#Qh>B7(m_4Ec2pPZCwF;9~E1xzQ6?sb{rzF<* zx-3k>$`7(*#RS6#T%tmm&FU#5VrnDoKJ+5wV1wm9#yfpg$wYA-XZu!x5QZ$63k}7J zZbLy|WcCzN$ZGhQAGB#s$iVgE8~M&Ukb>ur-q|0RG{v!nDqK>F^A}*ttKpDG?=GD* zE{CBHg73a@@I)gK^J{l@{nS58`AL z9_jGp;nQ9PYDD`R?JQd6NZ`5!Wxi1Ed@x^sDY$;q;e|3+b9jRIIsNY*KdIUtB#ij4J+pN3 z+67;RHC!zjhX;h8S(?V!37A-;^Usz^hDo!iE?KI=$?DAhQmwjY zFkDM<)z#AG1rBL`WQ3BN_Mrnf&+b?fmLlej+6d~qE$YC-ikk<7M(g2=yfksJ@l&@v zyE}&b4E=PI$dMS#)c*lJexAR|u1G`Qab>k$A8lfKVsCz1G&0iQAk~~xw0Wez$cD;9 
zq-4l<-kF38$FK1~qCQGM!Xq_{CHSk#mAMgnb?77Xc|?Bhlk5_b!ZrT%b#3nD-i2QV zfit%yJ7vu1(EOkH0599UjT04#%&m^)L%0|ErqEDs`-N<_Yx1Ls$bm|;WaiAHeA}W@ zw4L>WGw~xS(7D4wa79R+-&v6tZ*07=z06lM`<`_r4Dw1x$2MDq0*HQOoOtV^u~O5;#@J6ov_(Nx(5M{= zqx1$`7dGMjoYxwNc8W6;Q(ekI`lH%lANBL?7Ww85J6rMO(P6L<4rnRDW^YL1QP+$& zXUcSLr?Rd6?Cj?5E`TS|oAjG?9CGn%bD5`Kywwu0sd{<)7=n=(0z}0i$!~I#R^Kdg z0CXY9X$SVQTJ8y6Xs53|9=SSoHk7=pY5QzCXKumiLDJBjBL>+vL;s<^T zEEHH5@_ie*VADx|9-n^65&qFDEB^F&lUMmoUApzwo??NMg(o{dM7`~^zhYI` z2k8<-TzIb5&P;U%`XO@6vNzdF#Ll8{l$zN+%C*_smkYP(Tsb2q{EaLYuE^xpjQ zq!s@|m8BH_{%+Sjh>4tvQeQ73^d;AvGxN!f@t19Wnv(q50w-p!w^vF?;AG=?{aPc= zFW?KsR?}JgYyY#OEhZY8xpSx5(?G_fr)lT*MbSiA8MVS<%J3dPp+DCzN2>ocN|Wh! zh5vz`fcb;E$aB=0pZQkS+SzI>sVA`OG8X!PLOPx>f~8u*lJ8`A-VMiNrmLaFR@Cm| z<~8U06#Uv12ibg8=yhAYgU7Fk>0fRW#BY=zE_Vle%ooIT2PF&8H81e9E~Plwor#Mf z(;En0vWMvzn%^H8Ji?>h1Gi-)amZ3ve;q@3SV0+B48QOEpl6o*&DZhaIhEAdbZ_yU zPV3x`5B`x<3b#;N);N;jjBRJP$K3NB>}Gl1Vykb1O|U*WGl~=KEAhcQ1aJ*56Hf-K zOqgI7%)Vpg6MrZV=zI2$GfFn&Ov=_p5;P0fqfC;8aiiMr$GfJ9%!}vCU-F@8RDuI> zuv3O4&dcJrq|E{dHM8Wv&!@fM}TSQIg_DJS7kqjfv$&|sRh zlU?ANVBHZ)JuVyJq#;D{cTmD=c>UVWO<1kWq8`Y?{-)bLgD*n(GMIgCtM+Tmn`877 zDzu~AOp0wD;-7~TZ?GBET^3a6d(~EUu)(Dz9ML^rVFRY*oLPuiHk_`2T zDN;{6B@*Xh%?_t$o?-E*G91L^z84YAO6oiRurz0T-30<{c*qSq3rUensHc&g-%2m0xy@3_-+u7xrD{&pjo%u-*#}tm1Eq z>!{q{=$gbuN^FvxE%%O9(-P$le5pDfUVwaE0k&NIu+t{N7DPH)Z7p{Ov$z#ZhBlTE z7F+-y19RcZPJ2Ux}A6)Sl=Y&lIx7W)z`a23Y>d^`>1Q6NX+9}{XE zkTt&+{W)Di?Rz{XG7oKUl#zXW^nAW}iVL}mXv)3cr42Y~%wz_=r$2I)@7wk>Mgh`d zF(fCWpbVM5Tb7?&jwZT6H7bw^pj~MCTqtp28=^o9K6aC}YyK=aL zg1I+$o&*|^UVBb!GIjNRSi@!ZxD+}>I&>Ra%rcs8($$|X#4| z{)8#{kYwrKGuUul7pALLeP|4pJdCIWS1 zQ}Y^p6zZo+jd50NZRWHb(<+74`vvH{4~w2Fi1;G8KvH&$%?CG}{pV5JZd=TpMk^lP zMvix%GiBIjrQ#}}wk~qQPE`-?D?XVS>DTMUgRCktaAvU4;FjGB{X8R<~RuaRNSR z&0*5P(@@D%P8&A<6u`$%V{&#>Ax`yv-PO-hU02A{lsSv~hKpGg9x4)hVH3*(R}rJr zO-Z5MoPxJ)?t{MWQ??01Hw_No{9>bx+^H`Ng zvnwZ^)gNv64&=RXHphlUOaCIpxs7d``x1{*#bx?)wlAKYOGYRWXn(hye`m-Ly z7n1-)Gilfp*PI-fv7ivOEXwa@D9fG{c+906n8RcUq30@PwcIUC_#?zthKy6Ck7~zh 
zmEdcN2)5pPHYHNfB$!2ZMXMdhp0RVQO`9AdxyN;>ey1Osl`e*XKj5dzfz%g7Lbsxq z)P~;^N*FLb@!V<6il50zIQeyf7okDt0z=y{I&&Y0gh+=3LZKlO&7BdE!gikKz<=3+ zj`aj>&PkTmVE?#ggo*kEv@|kG77QWeGtOT0Ex`~eQ#=#}5=2C^tjVGLHc2YV*O;7JQ9O8DYr-UDMD*HBaxaU-r2AE->(K+U3f>no30405cjC;?HE6_LfjZZlw z6~e2oCx3`cUMIh~PmBAiQPkqz{;iI-$OKJSJpckac6I#HuEL~lqGw}i2I=QoebXDx~4ZBrB-3`1B&G--(O1td!LWD4^& zh~;Qr%Sx*VJGWsiM_@{{*VYg|qO1|9?jC(}xT=v*E$oFl%c$JMH^^1kTXG1zCW>67i z7%E2t0FAzuCp+!*7g|ATp+pm{K5WBL~Z9K0g1h3nIv!nBp=I=!zyG*`MgS z*a~#4vHcAEhDi+PITlys*}D9cHSZWkrOtj#`K{Pu)La82VdC(?uL}%o*~6#z{$-xH z_0x50E$J-*T@)uzX~{Za%Y&*-CrT<;dE$Ey>tLat$S|smoAZu3Ob1JdA8rD7x+eU> z%63-gnb>4-lqQV;55`(4=`j1!qcOi#8hjyR(Xpj=#;bto)8}-%H`6^~q<|mjbZzG^ zK}&8d#*__J+BOu|FPz&2+0QAJp}UdyDmJJ_a0Uex0mjgqHla{^E9%H@Y5r}R9$lGT z*ZY-{pP9)@%yR@uAo82tD5qLWwL(b*;rDp>`PgN_(qL;d3CWjEwWQ2fi!47fSsvQg zo^=7?_<|G4A>WYil6?eh8F<4gEoy*- z>Wu8*H{tI|i2@=edU-Bd6L1P}1!ZaaE2b|U1q1$sUf zfKq&!+J85ehI659h_cJ~(z)%w<0kb!&-kBruZUvap1Mk?qk5<8%BFc2s{K+i2l0zz z%>k3IMr)Wo*$VpmdYGdW3nOz}L(F_vFpEUSu~rsgNB-4s{)cwanq8fp{ig(9ON(}Q zrNKT*kXtRJq0o6pQ^M>8ViWN>QgemBfeew=?Rj`^v(yE5DZ>Ybpzc}L0s z|E{AfK3?<3HirnvlI2J3Der30qaPJV-#oFtSN%)e^&jzIN`JHAT1cNi3Pe%59{g!k zw&hojbMR<8F}WK}gq`T6%Wtui@V`$s{{n5i3BsquivG!KCiSr*o=o4PK7g0}M@2yX zHTnN1C`|V6J!ge}y|<}|Vka%aIjkxm>?1+SH)Sj?;$4A!ja^88yHw?SeNtT|b~&t6 z=&ZMLUCC;aEAdorl6$f3Vvb(e3kZ?rmC0r<`>H_Q4XXG3kZfb?osPESI&)Qtkv=Yk02Is>F z_1#1+*ts-(;C9rE*wJVGQegt#)PLnCcPN04?<6)dW+<$3%1n_>Dk6zMC53m%sU^Yk zzKKI%a7Hl2E{)Fjv;1I_p=LroRnq?^x@;}~FBt8RLf@1~5Rr^-2%IL>TD~tXWYR9$ zSz%s{MP3cbEG!oDJ8B#=@$z3B>>HwL6AB>lCS9Dd=H5m+GR=;tQMW3XD}9yN3BgOF zy8qB5C!Czu1{~W&2Q`fg7N$0y*0|=`OY327TQ4`L%lv2h{`goba_87mB~_(7TZ^m9R$U9dsQ<}ON( z^jW?Z_t&q*)5eMGc>M#Teu2-W2H!PoE!$DkKPF!rKlj8w#XnA2nPi^7uCiguEgVr|#OuGU&P1I}4+k^Km zzjQx;OXLXuv@CAdVP&jd|9xMD{NoqBiN(2}!eAT*FVd6niAt~L1>EOhIoN8h-{wrb z(JKCv#M2f>XNm}!$}FCBdZVlRSzm z_tDWXyh+y8{~@veQO*_iUwDYto;uWx4ZiV&swC_!q$1XqHp-pu^L18-E+d3H(k2#$ zyGN+Vx=*|EDtFSjjhoarh1&0dYpv^X<$hc3x%13-_mD9%T15Ju=W*@%iaHRC5uZM? 
zC2=>__2>Gi>*_aM?)_c5@w1wE7ai~@vwF`?$vEB`Jhr`Pd-RtE>R+2}o%{=$(#n=G z63CPjw`&$vnRDxUf70jk+BJK-6H_Bo5@KeDK^hTCD16{46C*_YG5bJX?kW9gp+w?t zrKEU}CL_If!>a3drpfKDt>ft%ul949N%222@%GP4(bda?Gl3mS&t1^gyJ-QFmYk6F zq|gQl*4~w-_pgz;BM{u?WHkS0II|-E5l0M=g%s4SH4Bjq1u3z7ORIFaJfx%IcAHSK z{oy2#R^I0qA^o*)CA0P=IN_0LrCajSF?5c)U9@Rn?4-WTb?5qAPvCR&vRUWNE=J(> z#_r^^Z=j{CYe#g%%+8a{z}||0z~E^0V?r&ox^LR(ju6iOH!?5 zt4CsS_J*71cP1>PC=*f(jDqUp4nSC7G%Y;H`i_3NPfLXm*G^ZAbPt z?p0vfeVRGeOjm+N_cydMM8Rz&x>0~Hg29oPKL5aFsQ&{xE<=%I8CdP12Le`BSKs!h zNtx0XV1mNrG^Iyqr|Y20y_F#&9ztGMuk*bxL94xmBNcBfFKW0r74;vUn3w{$3v(yZ zxE4yP{+I+@yO8T~7nn7PHne-(b=~$78$a9&JniE=3?D^A$iq>SnY4P>%nY7< z?<>_SH1F}1Jwua*V+c9C_?w5FecSMxF`NdQ8Xc~XBw7>d?)-B*-;<0!vWx{@$=1~P zI5x@WtG;!LlHBof?E)NUXUV+#IIg8-LW>9hXb!BeN+4O6#b-!D8b9^VKhVR`az}1W zN(Obqo&Ct1k+!i8aVGLG$3>Oh-j;K6sagy^>@EEldiGMB*H5Cv0%_1xl#1 z%2Lm2;xNRx2}2T6jhPQ~illB$c5)*x_?!xo6_CQh(f5)=;9nrvi&84mFiMv`D}F!? z2Ce!BxBL2*WOvKkU>UZ1j3!}hmu7+O259aO0dqC1(tg_E9) z=h*g{-17vwoP73s+TrlO1yjFkzY41kd|aB_Rv5F8-Z&Un6Mb5ImnX2c2Ah1^oxI}7 zDL#l_UP|K)naF)UOn6(fJje)%z-2S z95;agalc8E`!gs-Yb6>R?`}u4$=%bza^SfMteM(pe{{C-JXRffb{_72E8Oeq=(YEYzV=6E8?}v)MQOv3_2=vh0hJdhB;x!rVp*tM z=JmyO9|{;^4vF44dcNFwzD_sh@FDbTcBGCn$g~*i@%`$Y@`WS9$5!Lt?bn@)^v20j z9rI&?+cw5n_KvrNT69QB@>A3qEKcH%!~eOlR$YVWZUlNi`h4&B_~**fp7q_=cWI;h zdsFlWYsSReFHL?e0cD5UrmeY62zoMlhLtIHc4o4*Sa+pWBrTB({U2>r@|>OB<|Ma5 z->-w(+LFQ5pV7IvLK>PFtJq`iF-`sv1ScqqhbKa->y0N+ktKF$D{yZh;GX!oZo=4h z%!6;H*Fwb zQ$+m1w(FiX@F+Dso}W4`A7f%p^~?Cm>52O|8vR58prIudfDw#+-;4MOO}YoP+Ky?r zl-vHY4qqTtze0P!0@CVJaLX}@=~nfHt4Q7Bmx}Y5xJqtl>~=IB$TY^(c4g58|M7<5 za6h`;%x*=4UK(1fF(&UM4BGIBKS68-9L}a0uc1%6-Y_-)(~bD&Vd_AyBa>!JO8?FP z3xoAar4ui1kp&}m1!`69qy=~Hy_!H@=L`LrrzkZOAxQ3RSK#?w2X{frdLrJg2QQf; z8&UI5{o!c*O|OF@P+SBp`JyR?O+ni5w_-R&hPGMs?7wF>ak6`2X3AoOnMdOl_0z(~ z=R+&;>%|hPVDjN?Q925nTJv7ze)jReTgN9|n8`z9pcBUA@|J zNF)br#Nv0$s!O{=z4IoE^Nv)-1})#WD*3~{V~oO3XGD_W8e682dN6)AwvY%PI6iLJ z9!cB}dqUakO?-Bas;Lc)u^*pYgvjHNnck_)#JyK6{6<;I9wUjH0LRH88!QDF9?6$2 
zcVHhn!Hpkl{vlX7>I?@E8jhsEH8L_jdnniP-$pSPku^uBGq4|mWJN}1k0SUR-n3F$ za*~6&g0Yk>x8X~tUq13&j%yP{b)pl5GxhXD=%?D>YTdh8&5i zoBC)YOpAE8(K$8th@!?|shFD;2h~pr4Q16AB-VbLqT#on8u#I2{@>J zA^^&ZfXhu0lz2p^1Ez0RSt4mmqXw0iMP}eh#Vo*i;^IM8#nhc|a5J^eB`qgjAnGEQ zvb__Sw7@@@9FhlqE2wXESQjM;$$-=5qo~L(%phc3U(Nm=7iCrTmda~fTwI|Qo@8#T z!Sz$+UK&rq>`B3(pp0$+I4Uy%<(j$k-uM3GhFrGGUgWc7|Jjpa%{Q9!<>r|pQGL0P zgAZ+0wx4){T5_?R$-){;`7$i`4u$W#vM(GGMI?C7JOuHoILpu0LA+b{S)7v zJb~|Ba5-zfq5^afGC)Xm_kPF-vr53U)Aj--B zi9p-4M)sH{0(mC~o|ztWCC??t*8h|22B1ytfk#`gmF$<-jm(W;*#B-gwSv&>5V5Cz z`)8P@ocL(=T3nw#dp%=p<^m&`xwWBrb+p`8_RWTq6$*rb(&Scf7tL+uTOpVwC&VqJV-7Nn>X=(9DqLj+3UKzgRb~rRZbUXPWn*`7#wxhh#qaq zid7_Xe0rxEOd{uin1@>%nHHz8UibA4+@Q3qs=jJ9xg}H|yy@XM&z0UyQ|oACraX+6 zbzZfsaRB#)M+AD;K|z8@$FtYI6h9j+NgX37X$AzkWUvD80nC=BpJNvC!=-|7;Ui~X zDs5#sxg*JgvougQo`kDz#Gd*fkaEIq^lAqMzzW!BeZsMBR(|-AH&_u+>0lm7ai&~> zF9c^fghPA^$bMLFzwd(uitG;b0C-L*#NB;r;*X4*3lA-iQ9b?25< z&~bzya1{<^}+*Jc`-(LF*{q^z!Gi0EtSnRa_(4skU1}|o zjs0+?#@CZmT4@JhrIm$Y;zkYi02=+x7$;ev_t75-|E*T#xgpws8~Lio;IsRFKW{Jp z=iTyVPhtT_=n}Mp{Q$!Rb|~TdbkdB@wt1VV5y#T9oWRY~h+_1a@dbTwpXnR;EMU@T zNyp1=z&^)6N36T8`HkjAaH5=H$-}R3gT@?wr9XRpd!qZ07!g~bXN2IErFo_ezwKj| z_f=pA(^(r`DM51IqKbB|zm-*BAoK=iFH&yh3y`Jp=!X0Cru`is=!wtdM)gF8i@ z>D@h^kmMwE_n<=|=nZlG=L7`eM&|8Uh{#$HpF9n*v?RA?B#Mv+KR<6jdZ!ePI8%o| z0f83=ryY%+I%hmH^^PQf5K4NuM1VrVI%9Imu$2Zr18C_JY+8I$KZmG`=|FexJkf3p zpKVb-?ct5yPl8WTKo)#@*)KG3@bq(}kZ?WUv%{s+Ykv?a-dVhX7)ip6@ay)T4Z;s4 zs=0>-H&3qv6G4gRZwKLBnhPh6<^*-U_~BehXk@n^mKHlEoRAl?i3EmG${yV<17Jd|fjgz6D5Gh$d(;b*lBH3qdO=SFn zf@TW;9he%7n>F5ED%?$B2ygnznf+Tv`9gxND$lEe*NDVlP>9V`x52t+$F!{=f~-zI zb_96ISe#Cy1vucvTZBhTb^EkTUsQ-!;eE`PNEwJ;c62RC z*zG4;TsLRvCsh@`22mD<_l<8-KZv?+hkw3uFk`{}UH2Rslmw<~vPrY9X>WrD-d@B9 zJ{wH>O`bhuf2YULVWV&o#0V+8{9!qVe^?Z;_LwVryWcE+bsl(G9{(Je3-bu zHz{Qya>d7R-Cj-pn5VWyEa3*NeXnwH%O7yPSo^U?;}ZD83Jc;d(B@gR5|y3IFV08N zx###)c;XfMXt_phzlxn#-vn<5!< z>#pw65VH;+o zKR+IL-Z>~KdBcQGAlJW$hHiB136H9*tZnMR-rgb?non}D*~;CocbWy>9VDR8j}XkA z+^T8KchGNyOhuKkMYtE-iPPj4{JhCmdAB?TQ4moxBN7{06}>oJikdBoZH(RX@Hl)l 
z@%Qp}dj^R#-Tz*N{$6twbw9i#7YV%42{_-&{g$>7scSp&BPoes&(JmNP>CN)##qyV z$xPp_Ir77x*rbi_kLI$4Nq-~xw@}x4^Dq+2@1GHg@vK-6d(u}Zx0O#F%)>fRy(7rS z0m3YtYOba~Mh5Uu`2{w#f1B1ngm2;auiM`|mj*(wDs2})Pf=ZuHxgBS=XWS5SNCLo zYbUDu)hIMQ0Q>bXd30;(1QlVJn=nh7Ecl%Zj1>*-o3)Y{bF*gXz5pN7z) z+mM0YT3M2K6WFZ>tVFvs=K8}~S?~%gx>0Z>8&iAhj>$PVG>k5SA*-n|fEohAx*Df} zs7Q#eQC&WoMI6Wc8y=vQrGT%|`Tf$?Z%3bfWJv*-V?_G=PB1tq1Se#RRl+0h9Ip7- ziQDJS10F@&((i6Ox;r#vZ?xKPH*j?m_Z9L5kYT(B-uT!^e5oBf`Xf6s90p#u!vr?5 zzHXO%#(bqykg~2Q-Krk~sp&0_-P&S4A%MT;JtCJgFOi(k&wnq8GwlyXZ5RABFcR6D zxa-bJ!9>eMaqBXWm6aigALxI26G$8frVWA@6hx(WdeqMi*~HAC6JfJcXdzJGXMSA- zx)#^hWGm$GyN&4FAb*-xGO{|-<0@mnspI;lR*B8COy2SP;m_`4yb)D3mV;e|oIrBezckvraOGH?{sU^3xZLL9pl6Mb_m`N|Mk#MR6;Z0M+YZIUdEdsJ zeDn&(lqY44SHw5+oQ7%(YBHf7k?b$@70yD|f{>_ZR6v3u2yYDTczijix*JRy<9(qc zC#zaL-R>(o3~~|d$IZ$hp68SI7o$d(CzO?q)>WYRo`@GGT5Yi2f<{MzmM94@*v7`y zKt!>m{TdV!+P(hu;pjazJ}YE36~OIIV%mQi(BMr;6rmeQNAx)tv0ac7q!7^T7(YB0yKr?rXb<^y%b(Jw)sIYackHHE&H9!7z>`tNhZxD`=f^( zQS$g+jr(huTM_|_hX>eZdt7PRUOAhxTZTcxve7hc8L>})B@-u4p{H2h5B&g< zRL-w+#^f`EpjoJhaTjtkP`eL&;3GidvE1v<-<-0Q2jHShkW_c~jCa9FI@ET#o%`LnKp0H{mBN!ZjE(%tuOKPt zARl&p6eg|+kcKv6vVH}75ru^r`_$rw-F89)j%nhzKHBb)4{eihA=zq#@u z=j@KCz_8VZK!+CA{b%L;TuDc9{{4H;luh1LM!$hFql&(_8mQ=y;1!(SkGQCb%nU&F zSG0!G6@}gsT%J_ZIz( zpFn=SUyPtV_M^X5&v-4J2`Fbu8NH>J*w~?S6>Wo+>K(UR0NY z&pcEHv}QRRV8HxxbBJt}v_Gn0} zq537LT9rNEbN508J^3&j`1GC{%+=BETZw!xUDP_RsKY{Lo`xKAeAvj0*06F}v9$RC zAavQ?b%^2a_TCXdL-EsrwkRZzyS@F9Q1!U_YvCx$kqh4Kkt!xo16ZPfpM?mSP>n`` z7bkn7u5)q1*lLKB4}wYKElLOXfwERZ?lya)*FQDFa}E<{3rp@|hkY^8BR{2C41OUT38o85>T}bW zT3iJ^(r_I!UkLMJwS<^thF0vh|?QvE?kpb__4>MrAXfMP z0sTM%zr5n%nQq#0i+^7M3wO7hIA8wTPEAOPVFBEkrO+)`XXzG7Ue+{u;Ln!7oMYqJ)mEw_K#T+6F+bev%8xW zz>YevufqOuH^AL>VE`*76B6oI!;j5bWgID1*{K~-IQ?dAec^X0l8NJQ@DT}l)Jm~wLCnUM5F zNIhHWb1z|3QlKy)B6efyf-@rqs+gq0G9*JfcNl8onkhF`DuvZy0tgcc*tMidn#W22 zu(J^Wu*0DMm;*7KiFmOE*eDaluUETwfn%fxVbxfa7QMB-L0?h(-hJVP`giG7U=f~q z@czY+raT+046{vp)o4<3sRxzJbLPrPg`Alwzzm+P<$-eoVR3732Y-S)TAPS0m=)fJJh8a0P 
z!MXN89dTueh+f`b0&-F!RH?Q$0+|gG26ND)pLX+!P-1hG934v`uGQ_ZsfKlPqPq0t zHa)l3Xc(=w6vgJC8AQ#>eE#9f*B*QRnMdFJ_D4>vqFKGRyY)AJ>8U3!JijycosJNj zQK?W&2gOYReA!)S?zD)H7LXgMP;oGr0CRTZtW+uqKGR<3bDKkFBhLTuhre%otbhIA z{Nm5Nur+Ot55x=g41&*&$JIXhHY$zPj#@Rn^1*vH{>BHty}s>iAz37mX;FA{|M`dR zzxO^vpAcs7nkk|r%j)5qR-U;wJNC)3Zuy{*whl|uSkc*yzS*#TIvsl5tjm7tN|*5P z@#%LSYp?R?Qx`5>pUk|cS)n*s?P{ZLuWp!ylFrOLatn{z7e%2U#w8(VsKj6fifU<8 zxL4EtesX0}wsub}uW_v-t1$0VQ&m|tXRX_+Gi6uGGFoc>@{fG)*h{y)y%Fjk|M18E z*5{tCkKZRL2UjH<=MF*Iy7gIx$3^noVh1f&I{ByEhQY z%NN*w;6qoUMKG88!FZgQoczexKkfu_FbIp=R3;&J4U2av%!QmmCJrI9LhiKC%{8X$|LXPuY=FfEAtT+E>cUldG(EhRFp4YiV}pdC=Rf=3&Z05 zuf%xyQ%3^F+{oRX7q{kaWMr;ne6)ET7Kd2js@5BbLBs?%;GiMP(dBhZ9CrsXgh&=~ zB8$wcBhTxQIDGJab`G2zHf6ti)boRv3XvHdy6z3_G`D|mf7Gs^Kvovhl z)s1Et3~-ZSn|KmrWc}{EDE5#b*y3^S=q?<_bK(`SSZ0w|B46yu%8>1~-ShII??Q81F}Y zJs)lznj68LU|48yl#c#C?E*Tk<1Gj2WA#H%sj)~Ejv39;80It(JJG0 z&+G8^53p!FaA55T5y6i>uS4UoBRjIAm^0h~eOV73WnQn2X!+`WW+{Z+z)E+9Ckdfc>Ul9Wp=ZgqIb?< zuiBX>4;b_^?`E?y>pT}XDb!TWAhyF4B74KSxg3v=mQSoU=QcMRsULspay=X<+x~R2 z-}jyQc;4;p?>><|Sl#%bX&Q<;x3QI;-RnGJ2@JPXh?OIkhKf=s3NZX&E*>>VP9x*I8g79&#_OvpG?LQZZl*KWoP7Ghoq!2xzh z%+ZmJ3Pha&V&c{ygw7$huz9=?lu!s{P!KP?hh~5u1gkbHqWf2Yz$YJRa%v%IdIJNY z5^*ywRpGdqxUer1G{L%1G*;eFnT@K7EVIDuX6+rKMTKl;kdD|(3CJFVu*xu@9pjaH z?FYX5`_20i#qa$FfUN^1$Xgw%$cnv*J->DJ)6YCM8FylXj~-vjogN!jcW-Z>KX>fg z9y~vcbnL{bWk0o-v7az2jRHeKN`+!s(q_MS)5t&g!$0yfpLz0Me(}l2uS~AwIOsd# zikixTt7SKhE)_R@PgU@`W-Z=#Cb0^FTxRa-&IGxcx&c4|>#9MQC-p!|U$(cmM=`wX z{KkW4POW2iePwlXef`+->c(I=s;WV#KyKiiCtrB($)EVt7k=*P-AiR@SPfUHyYIx{ z-16Gn&%f!uJI|atK5{6V{M08t_cuQL#gpexw6Sei)1z&Yvg-xQORHzv=3DN*yE=Cl4709C8zTc2OTb=0g8&We9x}0mg{si9;CsIF zFTFkY7t{D>Kk{!r_UT{0ns47SSql40x-@OhQd~dgFZGkX=i2*r`f+!4r#dtI;~#oI z7Xzh_zjWy@{O>>Y+}1Y75!l%)39X@0RhUOX6WiT>Zb|29u5ML`$5xiKQ_8a2U!9Gw z-?6^(-t!N=?an*SuEoB%tIv9!Or|o5OHoP^&qzcF6$ilN+Lg)b=}}m3jOFYb-sz8D 
zyL#ifVLONjl{8CnP)ea$ax)ImZw5In*7+hh+i}pSSCOAM&N1WFoao9yD2uN^XVKWF2Cq2@6j(%rAuy-^z~=oRuO7tAf!N)d14F;o=b)JaOo)+~h~TQ0UDJNq4^*W##VG4+-c)oWKAAaMB?|bCCK6CN%kN$5T8B?#7pv%>UaWn`iArsXinViW5 zU)?#l$W;fy3<4r?b|?`oy1d+M1y3NOSG0Eg@*lEAi4Vq?qaozWcF_O-`yI6Ck078R z5?R74l>bp)&xgcuA|`dsq{Wy;yRPM1SN`b~AV&=fwbs9n4OJ3fx^;3X3`wRagKZuti;J zFRljT$lOqjQ`u)S0x>|&!37G_QdI|cMGO+=Kup8}QuK7tLE#|Y`JgBEj1xzOE_=$J zQw%*1+7emj?haq{J6{C8xVzW3v6pq+&z<+j;~S};f8vF$x4hwev>srlKnF%2i5sR`-vOf`|djS z=f3^kx1Br#0dZjpP=GQdcEE8=D+>x*S*|*0H`h5=HzuJ^vr?V>l`DOytB2qG;JY8a zXW8Ymm!AL0&p!Ub_5O*gJ2x!Ka#Ph!>T>F4h`ygJSXI~227S@7s4xkLcrjVdaDa?B z7@6`uVbJktmacuvLudZzH$D2MbH_j#7I2VA0RRRhPax7<)nW9O``+@F``+?@pX~3= zCb4Z+msUsMP7%Zm2IYR9c_jP3G+J$jJc`|_%LFXku(iZm}z2$cd5YX*8TW9c^jZwTXyLcQp+TJ7r%w=KPM^VyJSlf1U z(5_+i_78pc4}a5+4UYGHzw_iTg-U76fq>e5SN;Sa5jzVG+_&IcZPa5S0phP_fQB$H4t!?C`0qsx?IBbYll za_aJ>-5a|z{^d`7`a8eBV?fFkU_oXLxcdXr1lVIF7&8(Xe zE8O9W#OcLNxw<=3q?-xQuheWfv>r=fFpVUSlqBLK1D>T z`pQ>c=Qte9mCRVx7wt(Y2oXr-Nb@=@PU*>u2pR@M&76TqEG|cz*R606;6`SyMvKVd z#lB*VN1E4HfkPYvYW9k}a%FY`f(1M3ybguqi7Yc);6TC5*+gse;&!xo-3o_H7QSgW zXra=>Gqc67*ZqDg9MWJ$n%D}H)xtmti=*{xIv$d2>Tbac%fct?CSLB&SFb9rEww`1 zO`P=@9Gi}R-{$3S|Nb9a-dGO0f5T`Em$*H4u3k09b)8JE&8A(F0# zVfCSrlj%XpO)wzrL{cjAvM|_~nP_p~I##WH#2(q3;1y{E>7omPeVv|z z9vsdnWJb;)Cys$gUOT(D{;hi66rl3Dl8+Gr$H0JBo<%dc3R5sjhr@xh$YMN;3T=z= z41~b4wYS6Y)iy3$Ik!I8XxsZvF1_)dJKu1}>Ba#!kZ0xzEcR?Dp{8Ow?Knt?0W6Ww zX#qJiF&T>HAX-&&+nHm3;>@w%{f-C!*{^*5pM3VkC-(O(gh*+FLt>;52eE0mL3T;R z$|=s>2}Up`A~1--%+^y$C_N)bS(GBTY|3O`pMC&!CzB8&dmuH3TNX=1kXN@iEG$BR znHRl^NKMTQdW-Xv?NxEcvc;t;JfsM0zwie?_|6}E$2&gx#i#$wPk-v#e0R*tO|=xW z2P;PNB)PWAO~~b-x)C|3IK`=ux))ES&!wA-mX)?$8!hLQLtqJ1R3}}YOeUR*y3lYO zLnCO|tH450K*cyrNM*&5|L@8Uk z*@HotnL*@csw#C~&vw4!?DCJk@4cl2YE~aQv6L7EH2_nT3?hUGK+Kt(M4(hs;fk6O zH$5;hc=VFW@-TLNs;OEJIeGZR)qLUCKYa1AU!Cdl$%ntc<;~yw=Jrp1+ZzKi6h%t$ zzE-I7OrX$^WgY`!MUje?nrXF;$T+)(O)1Un#%# z)ZUZV_V>zcIvb}hH!`^E)S0dQ-HW@I=kqvNUS}ERE(>w2n2cZy6ZeCvB}N`WI~@AC zu1>pexNZFnw+-PvCUG_(2AP2!%z(kHCV~e{vQ}!Vhr zYezj{>G!?1ab?$ru_m 
zNtjn4D7#k987np_vHIMz*S`1hr~cFf=l|&YA6*(XkDh6M{71f{cnM%=+?wXq)fj+; z;s|qxw<|tHUptA*yaWkz!u~u-@KV*7XR2&3_#i|n<3Wf;Le9N;D}h(*&3J01fXDsd z*jv6g4C;Fx`mTAkw0-&7`dZvwUc0f~4IBTB$3B1GsS|gswiUpHGi%j}mBpP&wlBpQtbZw&dr_1J#q;q)k?)D$f)#1xFMOd zPeiYm1Q_kiKmDFM@G+6Vsd9TXA(Hn#SzI{MaR^^5=e~6ATB{T#{_36 z?4Y(9+{wZGADF+_VC=8bop|x}A zZiQoBs>q=dRo9tM_fkJo_u#BYn%7}*EU~J0b4_#CL@uP6G&|&LJ06M-MM(-JqawWU zqWeX8$24|&JTq(oma}gdj7_Ec^Kk~ z+cyJs;ZssZ@)lROYk3sCeZZL5!o$>sC10128uW zOkj57(wA;(sWVN)sGydMsyUH(6c;-ZyuONkY6?|Hv679*-QYy8)&4pv@_}>d$+MI! zlBqa|S&fK~Hm_Ua1eifhO#w0@W^?;$^!%;HbI_!d86mv7o_{^a$DKH`hiFQvvphrT zK;f&vfro=vO7X?*05f;34`%7qUiy#T@%HckriV9Lv_rttcw9|p`NqY`XsI2I#4^VF zWj^f^RduUN#|K$4*+t(WThF08vsT5>=7jNnao6RQIu2t>slQfTo=)Ph`9tq}&kwzG z^0_a5>6vTuRINVu;`OI@uRqmIuH|H?yhA8bg=(;zNtrw>+=V1YZZ4)>Of&CiRuWN+ zpX3TIZBA3F{@{$@B&vas_Lb1 zRut*haz9(D?2$7YZ+Ynchfb~CxjH!3*2ZBt91Mmq!q$#mytaL5Ykxe=AHVkEtmkVt z%9ZVItjU&GMn$FPNnH#sZVh`5mP66vou?QVzNegYp0t#*r~*`P=6b$A*-kDevihK% zduzMkmFWNg5pkdp#0jNXQeU1=S2mBeME9RNzqQ+UGrsivxLF_6%TfxaW1Y=9A{q>u zX28s}Kc4U2Kt=8G4d_Tyz--3jYyGh5Wn*Oyjg|bybCaeI>n*K}+DE?Uzxw9iZS$>Z zTP=58fA)lvI{JF2pKnj9l7?s2(2h_MU)xf2h*oQZb`6=r{@8Bp4lf}r_;D@gL2>Yr(PO=?D1>6msdkP zA-o?4yPXm#yTTP_=2V=DvG#j|c0T^#o9;V*_p$Tq8icHGZUe~UV>WZ`A+MecJa3$3e>Q!8G%h$b` zQ50G~T?Nv9NaO81?ArOH+eKp7I40hp1NcgqWm9&k)Dhf6KjCiAi==u` zam~iKzTGLsSvYn2j(QSol5l+o;YOb*wRbMn6%860Uk`PMl4)m6_PDCMe5OA34Uc}n z`#4PCHh0;#ZeV{xkEPklYCA2Td*rq^{*iaRZ~4Zre)`JZcFW^|?##CbljB4zfkRld zwjwjAx=@F}esDeDs!Bx6MC~EZ9aJI=eLkY#!Z(`D7UGc-kVJX+fqcQ|%NPX_?I2)r&j1 zYjHPP_`P3OyuQ{rAsP-k$uEjueMK>$;4rv49b_yDBE0Tr)P8YOZ`-#MQ|g=M(gwEM zGMc1~J#SN6R>Jk6Z7~2oX65*F9`~y2YqH<623enCbK6LePZp&2_mxqj?O^Excfc4_+2PUIq5>(y1+9a@v(>Ues3`$+S;6^@?Flt%IT zAZ`zwQe0d0lYN#HuOC0sybh1EUv8V(={bP*(q`Ol`@zoq#DoV&o!8-Tu3f10nT=&{ z;)%Wa=1w2V;Oge^@)Rja@lDAB2^itL^r$kLJ(Kn-l%`*!W2EIxNoW zjn&=jJjVL0oe1r>s(yVq-ab~B^4g3i^mgJZZOo~v%9rZV&S~_!`w!+VnuYz{^Yc@a z7dKPIa{JYM%)DN<=P1qZdhp_>-}HfVmsb3jpX=}5ow0r67k;sOH>d)^^gqw62rZEzri4j&FlB 
zTwkrPZW?+!w_~UFqzl!hb=q#zuJ~Ym4V%su*KG0KZO&I90BqyiS*flS$9&KK?z**Zmu`?03 z6*^&~>Y4|hY;9cs^clHUVW8<;;yDkmt?9J^53`=yn{VvADo(w%?t2Xtk@YE`+%;0V zu_{-Ni64#iL&L)JhIdwcuL)OsS(;>PwAqyludn&KB~B@N`>vhbp}so68TM&oY=dd5 zD|~(E+X8^r)s9c+)n0uI970zbb;)%Od}V|7MB=2J>a>}@uIB4CoA#>C@wU3%$-)%>(J`yI~g_h+Y~ZP{AF)+o|UHn(`wXC9ZfhUr=iSjLHI zf97fqJziX{FP+Oy@$A*^%>Lfo{OR`C>~xY@JUJfRUS?+J>bvioJ^bP?T6)Az-5Yv} zSD(fU&-P5=p*PFQ2&K%fJdX>{jNHmYj}EG1h=$oEJo(rx)aCB`SFR1O43uxYSiShT z)j2+xD^M($IpX zb9?E;EOzbE<>PcM_Jgb^yMB0OU~Bm@>$ZpxulG1PpU1tmXKwRJqu{bR&Zl>cO1!i& zx_HdvTu(j`Pmvy*OlfCyZ8h%-p~i_^Hg6P9 zaciT!c089^I5*F$+l|zGsqwu*5evt{{Pc^h{uXe>)Hfrzl4 z{P-*lCd~_#T@CHh@{P6r!%tthba_lKtv&uE2K27K`h9P_flvIMpZblTd+coK9u4)h z*}Tx7o(G&L*OqWamqR&mN512>a>sF+JbBy27xzyrum8|@+;_)a0Py&)&p!DZgBk5U z^rpf2yP$LIzR1^~TpregH{CP%!Ed|MN_^}|{NzWj{?cR5T%GS{otKv4W$h-L$L4!G z54P6>ru{X!ynbcpj#b*Fv-``pb!V%=qv6fVyLHNYcfIZQ-|;Q|t~

Xz@mUp5|2p|AE{kPA;3fO8Kp--9y97eCL}u5=QmJXD=slicH_FW9xGnt9!vdcaQK zQ{t=|UK(9}I*ph6vk%g}_mg#UVQ=}F&vhY$jYostiFuvA^x5|Mm1JuEKs z&o4d&_43f8;hs%2Jzjc-zw~5k27XtEv-bw-+n283nI|SBc<7BQ=kA0Lgtty#yEHXx zPd_-kZG*A8Hrk@+p65RGZ@%-kx4!#LAAJ17|I1(h)yFUGVda6z14A8LCM^2}sV-9t zR#vXwbKCBp{*T{x@k@7Kd}8ZVy>|ZtVfAjkqLY`Nrk!Vo8SO)l()s&P3NAlK&p%_$ zpxkv&H5}EslgrQc&pbY9n&u5}8s4@EaC-8w!G)Jnaa0e>`AuBintl4y{cIOQY5vX+ z{i)B4ANwCa_SxTfp|;8B_;MM~FLbBJwp-iQ$R{mJ)Riu+?5DwW=~;}X8(kdHYBxQ< zm%{Y97ti6dJhSO6@9P=s(zLpKtbTD>e17kPtAF(mp1tqVm9xW^Pr^~=k{@inV`Sf%2 zV!yohwny&D9WGqpCmzotcHbk-+4E?4{`m9l<%?5xzW2f5>iy`u?)j(q;i61)qP2YajSI+Kj>oZ?`{;6ko zSD$@xx@$f-r?hfuHk)?an`Q4d*z)|?WjlUD`mi~V^@~$(ESlb3VCSDxO|m$rn~PmeX7AxpD2derl_n znw}g#5qGLk-@ezyot61!ytJ-8(|l|BE;6{N;>K#V(?}BEoTbw{Dq3Az9bGt<1LZ^%I?9`Nr|iT~;E{=M72{YUS3@B3%9&ZCN< zO^7)ba1v(e0|heoWB{mQXDVJ;g$j|wTg^>Ux7f#e-iqi^!C@dM|Alwn`#*nn>uPsn z7PTt1nH_arhr;=xw;cP8z5Y{Iufxl*E5VwcDhmNLKhnHzg%hMNENy`&H`4b%bYfUn zKkC2mHBCNp zd5=1;`(RXB)UU~S#!SOOmGWLl5d$fuY;)Z_|HKcRZGYc~zWe-b?Vtb1XEin#UU>0W zuYp8Xgl0Cp=M#VNul)6YaM$@ef8$gC_Rc%rD1Fya8IIN^)Llw1UU;!-nhP&pT3*RGA7RSKR;vA1LF)ANiG!ZB0%dU;c-m zy&_2qC}h)-J0h#207?L?(b6U}tGifrtq1EX>qBO@Hc==cA@mX=&m%{uSvCM18N%6e z@Q4gE^SY$EC!(0EsvijQVdc!q3@TOX;@sd(X+~w)tUDAAY312#&bh+z6Z8k)aQ-j< z+;er2037M{d{`Xu%t8LU_ucy!f8ps;c;;Dx_JxG_Nb|ZCPMzBhS$OVMoxL-brpO}5L;8aI9InSRl~dlq$4382`OlR6)qTF zc@|Q}w?C-XL*wgezHW)bHfzc}W^rqi#2ku;#DU_xqTj54l4mKej)SV)xjFd8d)EH; zuRmAkfMOx@tQiQVi}AdS^ZH!RUxnQpC}v7=76GLSJfOew-ba4yr#}DHjpyH2K5Vnb zZq66IJYO8Xw$B&QDlcvXugt36%y=F-)eWiDHm!+E`qRJT4S(;~9=kEAowYUYv}dW| z;wU7YnTfYWLmk8{E2%S)`H(^bdjHv_35ORieDU15jnkXAueTdzu=KzF;wL}z+_Pb@ zd16T4{?O9ymp1NS|6eAq3CP*rmbi~hjqd!NlxAKlH!bkv5WI+|JOxt1^` zt)oQ)Yh=Pa?RTkzG|^+Mxsax;9NHj3)irli#g!^J(!{!k+sX+A2mzG0_WIAfaQ)vt z@#5n*#+Rn$`tHPttJeP5yWaA`)$2chX^W+mxhga3EK22da_-5&sV-~in-ryv6m=QYWmrNnOr3i~0S1->mm;pE>RgRT1Og`YMY#+VtpjRS zfh&dVmZ%^idQ~Os6&^#Soi$aRtpl4vU)A2Ym7Or-jy+967Dt0hvcCQFnKLI>|Ig1o zxxjgO{k)9x>d#GFX0>*eiQ6DaEK;14aDi7WV(e<}PJ%*OtWe7p|Sck3g+_KW}YsV6_V&vVj{zk!%|Eb@5$1+J!b0LRAiJ8c! 
z^H~UuT4f3nNPu|)SSZYUnEP-@8L4E2b5+&FvDewotecgxwO>8|;!D-uvuSPf-jkyb zKY8PLMRBwmbpGB4&b{H}2wp&K0c)S@0Hvu=i_FfkmxB%%5eUdrjX_W$6D*-FZUf45 zR1E5Ka?fp5J0vAyz^Ys!GT<2iI6{Gh+bTEhu??#f2Y`2; ziE@nqFftjaL?R$+fflRSPzpdAD;6Sk)j|M=0CJS##1;alWL9KYBNMJ&yS}=*Dnd?J zgyp(>Cb+67Eg}D@$vZ5d4B2J-t_hpC*P%Y^U(dv zAxDpKyTg8l0-Alfp~xPYas~s#0V*mCF9jvXS&zQ9RHdQFF~m@ii+e*;FPefU5IHls z01?Dag;e2=5KtY|G;sppw5Ql(y!6twYtLS|@AxT+`R@B}8v$xsvKfK-pnn4ZHZ}tY z#0YEvw2&5*U;ts!?0~)WZdzA?G+ISmicj|TLkPp=p}O@Y52?-t${wNS9H2uitw&P!Me!L`e2TJm^*Hs0y_i&uA_zj*b=3p=-+J0mfS z6Mp_PPyEtTmrg$Vu30toxt!I8ApO!-%P3a2v*ba@3ce zzw(BG{mA>?{DbegCsZ+Q?fm*L{foc#vClnr`Uj$N+v--C9Lpz(Ci``*-BpWcW|-CE z-R`BQj>Y*sL;p{{;{$Jf(<9Z|`U@AXeEg}czwxOn9QP?_ab&7lmYgO>#r_c!a^%=| zb>~2x+d8*9I&qWL*gisAKm1Mi{={Qjzi>?&?j$IY#g50qO0==u{Ma|%{^vgY1*8y* zM3G$QSv43>_7XTe)Nz2fxrE?eq2e!u(%*t)U#W( zpCbT9#=@bZt`c~Nof(ymRT~5gjaAuFmSWLMGynn2N^C^rXiNd6p!4WzCRvA*Y@?%r zLnUeFF<^z~RX6SW0*Fmx-80IH|Ycc&4`CiWg`S8TVwDz+9(7)a#Hrc2(c~mvSN%vzA+j{uv$B4qdEE*} zlW?db>Y{z|%(Y7sxDRCINb@=@PLk3HS2ar?ysu#Hfl-xCUz>AqxeP7sxMW?*c`7s< ztiAXvf9X5!`_6CsA%(l*j^^aG`HMHq9y&Q3i2U-U`CX&=hu-^U53QkFVwm(>I z9I+oxDhvgadHN?$yolA?Pb;Zgilx*XvRl;Hizu5IMKiz=d>Y-XNXBU5HiSp%#i5C; z)Eq3d9@Hpv$U1=fmQ-7AH@QO`A09G;0~UM5{d05LRL zg0qTPGsreF9#fnNJoXLHQcMsxzlHrHft@u&6~X%@Hg`^`**oky{B4s@?v+fdqLrde zVkMhWCO#_mkJkXlzBXvbsjhsG&=jy4S?42js-t+Bv{uWmetkdF`HF?M3mj!kP078(orYy5oS1g)V$ zZqd7%w1^a0N}X1cKwz;@7qmelN9rjDCsb}IR6?l4TP>YdY*KnQ&A9?Xff2@$zBHMv z&1dhvH@wXt+@_rqdaVE-hDAuU@Z2hE?m-loA4Y#j2bmqINc)cb=2G_GMb2byE!n z2e&HyTol}qiIWuHK)$xG3vNv=<(n}Ze}jyrnsiI;kPC+m`Z5uu2HZMgFUTn}S=(qmaE1ur zh$Ipfl>?6DTFi3J{>c+(Sao#C2d-(Q4IAUDUas zr6i$Ygrd}XBvKKW1~^H87AvS{eW@FXF}ib7xEBtH9FaqI^?(osKr*F*3X-dynNT8Y zY(p5f0E!{4Ern_Eg_k$kG*$sz&kVJKNg}Z@)R22uViq`OY1mD2K?az`B|GNX8vqK3 z2^&Pjp^4$jS_~eE;D%ywR~F2WA#fv4OvKF51ZwlOApps4!`w-|IxXJ-|?|e zeDYi0@;k!G<3{0B&7vsc1vGOob{iR>mzZ&Oh|Q&H=Bvw!><-evX3n5M3=No@5hXVf z5d>sI>Qy}xfrObVrsy7fgD@!83hXG{V^wfwaQw_$k1IogjkpRJ+9=0{OiU1h01O8d zK0>a`zDFiZRE?Qy?07^yyR~0zIcm``&Ts8N-io_Ud ziXeg$7ljsZsKK0iH<*M?W8k&QXV 
zr2rU!x@j7!HmkJ^$q9m zfg-d0m;cRQ`_w1@uX}GhbNuA(pS*DW2^ys7sJedbVc-Ar-}RpFe($@jsSNEbZ^< zxC_=u8?daDz1I0;Yk;;$(N~iJkGP}q)$&ZgTM8$ zFHEo$WXNP4Qx2h-5$b5Mh=_Exqz#s_N9~~QA8VKJL?lw6CddvS3iLasO)Gbux*as~i0fihWD=Gn!l3gBqY z=5AC9P>u#pRwJ)~rs&eVZ-7=@HRT4~k>+)1oT=5Rj7-f#l?4zcjiD2&jyA7b;DA_7 zBu6UIIfuRpQ37+&*^%aTSR7Z1Y(wOsyJT@A@*KhxbhLRL7N?}ly|#!#CdkQ6RD!oD z9c^B>!l}LWq&4+{Az5rr-u0Evh{KWQbyyt3lr_jK##w9&gh`zGBG$b&I@*^L6e(%&XKl7J<L z-tmr0TU*Q1bl;s14+dpBF4f@4WL~w+cz&ZDRV(WwL2Kxl*$k#k8a*%^xuE1ET#104 zi9oS11-IHuJu;QjnINXxIYd-{=v&_Ne}3kPz{^2}a}bM?QA)=~f!S=PCWIzAy&*^=bb=v*bt~ zf`HKzAtvTog}KF~6@!ht6x5ish|G=JSH)?p?~RHF25M$;E16OaErp}a!>kP_b#M?sfc34PO)k&}WtZk(CxL(aR(A?2eQMQesS7Y*)Clq#MXeQoUxvN@@6 zjzkc5CL=Q>O~UQKS}WXTj#VmLau=Fh>55UYz(sSnH!)39vc1HojJOuAqOqVbj8J5$ zTKdBe-u8`;KHwCyQz$?IHJEi<-}>N*fAw=8{m6?KS5B?}(@%WjUw`Hc=cNDUz{t^ zS=qS}wK_dGp(R15yR&_(byhVZ2p}?|Ud_}{DE1So+M31GOJ=7KLV_hDgb4$|!B8hv z2w0d$6cteSOj@FWn3YoYj@p_yxI3}}V1bJj>qXm$V6e{m8Ie>qyX3Ak%%O@!N>|Jj zEw`Z=%w2_(DaR}U41i$7%52pw?1aFGa8yJVO2_OT`s`&#h^?^W%j<{#o$HV^R+1Z0 zHRK{@3@U+g#~B_(>&9vT3<6~ptvq^Mt@A8cdf~?G$NtVg`LloMM^B+IRZ}Xd3J;@z zT@h$B%A?h{on-}JHehB&Heq-ZJz+SRqZGJdw1nO*I=l0{Ll86r3UG$9GdZ$aD)TDT zM4|5u00&V+wv;He5s}d=w#s}n*iKGvO$-VRr6AVQaO-YCxXWt3Xi>v~LJU!a+&!1R zi?y1V!#qn+K~;j}g1QcFC<)Ag7V+C=^D-ZZh)Xh>8{83yA{kLhB}%c{P2h|Gpm@x% z?JbQB4F*;-LzKkx|dmvsxuWDqkiDTz4w0Ky#f?iE^|Sh zC2W^G2yscvm{)s8T_Hqd&lOV~uw-}f5CQ`R%;xIu5)D!e>R!+1NP8Vgh;=0qhEBL- z%|Sxl2B4_BDZ5rHz?1+krBpegt58K$T)8Q`z+eV*C}4{)VB!oYFo?B)mX_Oow&N?u zCK(c;j6EgA018)aD?8`0aA-i9)7&i9O>v|QW(!;i!2s~0gH6;D5fM#lR#!?nYLqO1 zl_jPSNvN`1sbM7Ufy`0G-d&sojRHm8iiKdKB_VMPPSiueS_Hr<15zaSP*H@a9n6XQAg2NrzGtftCnMDor;_4(s6F81bSqh9&ZANc;? 
z^8?>MWgqH%AcBHS=qhCNf9=oySV(479SooP)F=PKfB)aU=}m9?zTf|WN8k8J4iudG zoH{Ub^YrJQ*?ICyy!rOMwaq1)H@Y(IscDwyytBKt{YW$alRxmGZ++k_JW*62c2*{^ zz#(^K7=|&7bD1|@q79~L9#Ac5gUDpe-EY>b@pWd;$I|y;1v7^sNF2#5tG4Zah3$DW z+Kwl-Q(FgBwp^@D)YQSNtW_xU9Hc;JF;2<)TnHKhP%#768TxGTh}d<9#mQkx&czg9 zP`r5ya8nJwqGK@waCdW6&s_HkBp9(XJD_GhQwIsmB57a-P~6Pj*i|UAB&L8;yNV+@ zXEe1O4V(%UQ4vZZ%$cT5G-U*Jt&TRY!{Sh5#*|GAPEkqRKmeQRk>+(P9C9Y0LLqkN z9FZKMc%dWC>(Dr%F9VJPI<5vWn$EgaK3Y^{Nk9)Uv4u z5QT=lwZcV37+;_J{Z=?h&;+8^xPua@b0F(E)iu91?VtWHfB(;a=JUsfsgJZ)4??6YLCf^H zidi@k45AIe_WvbPqByQ@j z)BS!c9Hz#g73G@xAY2%Lt3dr|^STv|h1P?+PK{hm4Z_UcL`1X51rms)A~(03}S|y5`>)gyO1m_Xk(U6ZL2((5brj|OVK6_C(zs~0C)^{}gF4X65)nl)>^?Ya?3q&3{x15YLBf-gFhsJ>% zPCg+iD7hTqP%$qvEg<^Z?)S=J23Mo(TmWMg6e?Wi*;r{gw7C?HK$L7Yvt19GiP`au z_n&;*@k;xyTEFe3tK(mK;_2rmX`7@a-)L(^&!oTmr+zK}<6k_};B=GkJ+bkpzVAD4 zk0XSo%a-5v$UWcnfp=Zly0W#?DK#fgZLYXB^>PR>ht&iU%WmdvVCPXOz@nasc@%<% z+!2%Jl+|V&3I#C@#8nmJQhnS4#@Qz+2d!2HXaPY7G-6zejI}efCh|&bYQpVpJIQ5u zVtukRX@z6c{PM^D?S(5BPMv(0)UmGg%Jz%XWNmErd(Fj%q12{TVWQxnbrC{OR+h4H zEyZPK1st4EvJp`%rWAS#&J<9{lMyLG%2E+UC-Ptju)tnw&&q5$3A_m`pj@;#oP&#H zo75rJb(^wjA?6Z8??!13rC3+3#{`<8srSe8Wf0r~snD1AyU{J}1VNuNX3ha%(J1hm zlCaU??A|7*I%N`Km*{|-`b;fzEsIcZCU;jt2PsZV$h}^-SoH=ucH7Thy7bTg+~57t zM@N77U2i;h-+k6p)|oKR9b4LusHUjiTV2Pjxe^o4b!ZeJ%xxH0TFgr)T+K^zL}RS6 zFsm$r!xd%WVK=SXp*cD$2u?%<0V0K17g({0jvHQU;xHC*G<8i|u4@t+Kr)J=2YZAw zYUMgGNX_Qa+%lM|8qF+qk(vNxWABL;$~dS(&6qq8!3@L@W}>!7VW9wstGhF^ITd4K z5F@gHP&m(vb-j{_X|6*X37Rt~7z-GKmYX1IKG%-8K;Rab+9)U<4sLfC?o<>6B1Yg~ z4l`7}RYP3S(zV;TL*oQ@4-(w10|b}G3GVKVyEN_&!97?A?rseU?ivDw;7+iQz4v=@ z&M#Q&rsk|MYt%C!0%(+QD7lK2@-qh?2ys0ZU<9Qp=nOo5grV;aN>`MnDd$~d(!|&^ zSJggUCn-`T~pNi?ch0Vv!=J$Q6q>q_C|=8#zf%z=UYP z{qA*b8`(aU%;Ei;r_H}=N;Z*;1cnRrmvQkF2a{B<%ujs|BeM$FwA^3* z8cK$YuZ0a@3?{Qg%7YeWWh2j#z{?vp6W{jLS}xE|FnSul|Hg}X`n_JZK@!_q8(|^b;x0PVzjHDb zGT%RFLdW5oy2xck;cCsfVKZ)(Y=|w0r`56;z?!nn(fVo~W9M5{D_tO+D`(YpKLw2& zPu{;Cb^P{^y*dlRrw7x`h{tm=o>wD!bEj3?_Q}7fPk)GhXVs944rYL1)e(@C42l=@ zZ0@Gjp%FH639U<^4M-va&oB3pF^^3IVM@#vcgNH+Wz*D_7Si9%5CIV+!R;Y^ 
zkq&Gs*aSR3CieRU@tO)H@C0ka^Zw0S&MIYWe0vu?^aB0QnoWy-{0b?ZmZFMbTm<4K z#oLzOKvkDT+Lx80>9PZjFbcUZZwBhD{T0iLlZcHlp%j1jP# zc;Ap5H>UMX=vq_Uwj8pN`c+grURKRv!uuaD&CSnD`Uf;**lT&ZlTE;@YLbd!K~~xaGpf- z?Sv{n6_04(La+$&5} zh9m(Sy@F5>jxhIg@w{dK{5+me|yWtI*CU`IXg=FrmRmQ2|#k=IwN-BxJ=nouG8;f*leg!L6s3ju@jo#mu`N?~7ed8ubRe*e`CSseVO0?o_A%m`2n`?TR=T14cBmdb!(g)7Bs=+R{Yh697w__sl z0bA2i6S+p_>U^=|;ep3ZN5Wp;c}?(?FGtFl0@T2naug11>beaKWLhCw)j{=%{?@dd zeyE6U`r-U^E>nWjb0c=v`&w$7Z6l_#Yy{Hn>!Yzo+S3K@!E;c6WPW`rE~;PNh*h87 zg=y2`^K@13aBy))5TZ&U5{~LwCkwd&k_hb>`%Ba^lnfic5|){LsjXmcin{^GtfmX; zct(_03bW{B2)l+fRgRdV);WO)`d7i2wDc2JE_7DojQg?{4kk-vBWovYSdAtAEbg4GzhH07`~ss*Amg1IZ5**iUSc^IZ7rZgg7 zV?k8{B~4@rGE{aTW>n_H*qUJeZK%}Mu4!*dEliW1=0-;YCr?c& zG9UoAxe&s_b2PX(p9^u}O}>`#(|V!4uQRKTuunIHYm`K1ZrB^+K?{|ZcNkZWMH(K0 z`_jtoZSBaHRo0chuQP?()BYbEpXQYVFPsxSS$2dJ35U!aPnoz3MD*Hm>tC(2!neTP zdvjI)YvM2e_MR6og7y=i$`<^n2o5mL`R2vK$Yl}@sY87(ek&BbgLJ0)!1?fvRXH8` z_HL=lDEMlhQ1g`0v5qf}g2Eb551G3zn8XXRrEt*L&t<<{Y7Q@Cz=mblSj zq5LvjynMpxx_Mu+zQsfDRMf(Xc(r?)J;VYXb@4cU7~Jg>5ghZu6>YRZa&O82!b?dB z+DHR{xRTS-`k|E#gWIHIpF*PwPo+`!nK=OA>1c8Ady*r}E_4txj8gcdf60A+l;YsS z{x*Q4%CdcCIk@oOULvE0L+VgimGg+&YO;0qgbLgKJHj%Jj#y5MD^YiG>L-`x5_tn@ zNu237E;;al(Ub9;_{`$|y8&rr}_~9eyTAh z<8b`SuUIq4YWz6x<`;aP`yCZ@(^RnXuscU}KQFiWFx^_v^IBwA%~SAws+{NcGxy5H z44!wId-jwS8{4E)nIo$}gWqufdy9y{bv`$CVI1=8>^1>qY;(Ktzsz440*|XW{ZFqF zpMR--?h+MkCdf>vWsm41C{EA%Or(s@0CLSt!5RU_VeWy5K83-Js4Jvl(hqeaK0Y@zkF{$?6DVEG-=^mlb;5nY?cD)WR5)BA=wZ&A`;*V7c& zIuA0V?$+I_uOEdw-OTUi_h&nVzNwZ{yl=hq>-}9Map9O~*wb?Jkl9j=-^%%M(}C;H zBS=4+2Y&tr(6DpnM8tbNf3%N~uVhuS}LoJK+aE*CH!{mg&d42d+g zKfr#b&)(UoY`2N6q_DaHJ>B5ZBB(*UH{Fcv{iPLO$mmZdzT)uP*nZ%1&RFuDRHn$a z5@Pxg&t1A@;k^F+z<5K&C7Q2H4dIKQCBON?b^vOGY=m(A~;W1n6!P^vVzXT4e=D9+{BHcv{;qYeT1$t+aMCM+Y-fKmx>|) ze};}J7R{?6mKs!dz=-vSC(Wts}AEayvzw@(B|6w^t96b;I`}MMO{#Ut2X- zXXii|n4zdzGOk$G%99{V7iTvT>9i04InqSKtCnIOMl@2(aDqEN_&}j*8lx}~q70I(LUag& zHf~a}w}vXLsF)E9R`X>=$D@It59N%B7V4M{sV0%8Cy;3bsld1+g+*lJ)Cf^LpyYpw`FtgF$&W6or_<{s^6@hKI`Hzvz!C!D2q1&% 
zY_XhcrmS5{#UKZ`FD>FSCkWGVJTRLb}5Bf)UgU;LpSe2O_O^o zRgHV1^V6n;Ocu}TUGJS(kd1XOUhHHM$`1v|Z0>uNFcMZZ_?z&Y?ZFS_FV`n*6_BM_ zrY~oIbukpO{jy6)QDbR-vQ2CHeqNQ>ZO{4;!{ut?HL*!|QZ!hBFX|x2d-#c4r1fuuMSv0f%3uF# zu?bfmX$Y4`3W-!!_H|cxrD>p}IBl4|J0%1{p*uNOpf2=<{7I&R3Z^sP%9pW^zjX6g z*I2^%KHwOQ;foL)F_sBN?4nkyo04NJS6L`pxdKW6aB=V=x}QW-{~98O$R`1kqC1JT zstO5WkSevo@VWiIJS8r=LiitNWj}A=%lGdke8CnmUg;|}gA#=fYcSi6Pb?(AVRXb% z=m;l<4~9on_68`Uv#|b)qv#t5VXmajw*N3szYgxE7E#OFA(+Z;PGV?Vt=ZiZG_M;% z9ASlQI8nRVesAqi@@2b38oi+>>2sB1ILm~l}C(( zNz4fne$AO+BAHU&2Wv(*6SW~O$anh8QST$Zx-3hrp+~*vM3<9q z8PvC#LI0EiN~ZmY`hgf#1P;@x|JXUV^?I}UIj|SzcP2C%WTEBh3Ar(_+BDh6I(jVW zWM#WL&GxwxIqi8aZ1IiqaRQ;Hag+8(GGKb`H>P_t^M|Xi7DY9F^56RqUd~S1Nqi_(-HW zDj^_k@eLv3F` zB;_cZ@zmn4*iVuoG4dj7B_ubSDEyRC+|#tldr}klru-h?jbAMeKAMm@vH=XaAV?*v z>g+_oRWTz79!s{PlE#oZ3)rUNBgwF$V4ij2Z(!Q7pzb5+R!3K;R<&5u=&)E*um>7SNOB>~76v01$*qq9p}x z*&Xg7I6y`_6wwNPOjH_v42!{L6cbM|m{56DKQ5xAozGpJRcMXZlaCH%Pqv1*cu~eA z!ZOScpBAo}z{-;o(agz?LB}!@u~LU~h$w}+fedRwcO_fHwLuzj7VvY^Q`QwtTNWS$ zh9A^LCvqG_u0(Q&M=d2sFQVW=^u9RC2yN)_o%B31KPoZ|rE#rnFzSwRv4h0mHaj8$ zH2dCj!N~BCz!ca-c#=vq$h!nGq5a>&Ohf#_TM&Ptlc-w+>#nSLN^5L@2@w?t&;TsU zp`%=d)h=X;H>k8u`BLWmNDM9V<|)STxXI1Uta*YiwA}7mOm6(@QL*?{2KZUgYUpv* z;edukc-$SyaJvy+NEqoDO$t=g(*jA67eYG(lKc>ck4l}aPjf0o`r(JwG>it5*G1lJ z44Kq`Qr!Mn9 z8nk!1i{0^mRvlvU1?rHUT6aAlm!c0-we@}!^1nAmYUyl6o)`v4+8WTfW4Eq5{>W!HdBV`amY-JLkn(3oebk9 zb@sMc&D!<*iaR0ejz7v%Pkle<+n0bHG+MtiFxwpxAX`YmjqODI5o@gXt9Y|K)98et zD}hEl5Xyn?AhX*S$>_Am>x5e4hLR|Zh$v}yVHm;G*n;Hr08DInly|AwVVD5X!q5F1=Wp~}A)bgkk6ub}0yk0=iB@R; zFf}_P{cV)}Cb#S)cgTi3T+-6s<>uLv2pbc4_K1~DZL++-+?2rJ(I|kj1J~V?zx>~i`4_Fe4YtQTfDbv-5jK6{xf@ZGI= zJvQE0+4MR2ojIK!C|LRy=f{0)xYnM$yXz0hkopwP6;las6Jtesl4BGtZ*O8pAs+cD zv`@4=iJNYh#Os~W@vck-hxqk#f!|H0QcLO_?eH#pxX~k#vg}}U@h1zvp8AhAaKXsT z02joPPyp)f;E*P#8S7%wdUx#`IQmEx^%lJ%)|jdt3lt#sckV%9^C1yM!6OuGo)(Kq!z<; zJB4R7V-oHm_X-hKLkdRG0CM5^pG9BLn87uoKp5iTiLLlc?qDyYQjbMH&S&fwBuRRm z*Aqp9-g3Eqw{_;m9mo=S{sLl1jUzef!)0-$VPZodw6s)&2z{cFF1;ut2ls9<;6@@K 
z7Iox-i9-ee%}DIq;Id+=0z5XT+gD!SgRuRAR4e2sT^)QqS$r2=XQv9d1y&1B})U79)WF6t=hT|da*uV zaf%xsS`|cw>?`MIJ2!RD0;SnaE(guJXS;JjUV8z}w zlYLg4>^Y5a%66^(^!L|cY%8iG*?K%&6E$X5XS@*N&JiZTC(#}?H263ciSBd8Z^~uL*q)*Sc-65u+rn?Bl>nfcGDs{ZLpTD z3@9Kt<$d~yCtGqj!Ie@AQv#KxSNLb>c zmdbJJKaXf;%55YcnkT4{Y`1}f&Jh;oH~f+^QwSWB2maj{`T@wp((NJ%NVOIWHSN9=wUR;+V(IifEdl&L_0tT@6wg;dWXfCzO8I*kPcwTUk*DQtZMjbLa510m5* zO9Kv3(xoldk_uw|om28m)`b@EQ9lbqy}~rMl(~xrxyF|+Jej!v#RMcJi;7oB?Q$lG z4j{oixfFjUZ+I0B&x*@;8N4TFq^13U(BB;j#S;J+?wq>R56{PDd$M!$N{yj*v$959 zjHuD$32;8Xe8n1Bv+>WrXv-LQIi{Gf0GQBHirL|FIho+7P+LK0!-4B?Ja}I2wCI#J zhK0ew+k5+fzq(Q`ik`eKQba;3-URKZtqFq2ZT%CD|nHs`R;2 z#fq_ktq`4Bst;s`3J(!Lk^+Fv0jTSQ9|2>PXemCtivZCc5&(pmWDHJek2}~9-2c;r z+B3l^c!-vnq(BCWj^->;E%}SX+o;=`ySgJLdXP< zU|xs1LRI{H5s6xa+y>H2MC^Mf`R)|~L+DZP5s{YBoz(bQmSAWse+K2Ckdo(%ho zjlj&X_cMG&$!dhBB@7C@eH$dw*!B${5JI)jq}kzI_h09k3Z7JhPObz{>)RQnFw88p#=N`{9XpM+X` z-Z6L>-z444ZEmRHo6~wy(m5sZEvlp;Qt(;D5yi&VCjMgwWcn2xESuQ73lQob6~z~v zClKyOQ*jB!&)Y_n<3`U)Seq!3C@m&*OtkGwVwW?(1+i>q)iNUZSjvC@%~}RVfE;RL zPAufZW0VeTs4pjpROc`pM{uneobqLI_;3b2F1`LW?zs&L*d1t(ZHk`ncCVPu57>)t z`JAdpQtGhL@i|H~Uu9`&Q6olXLEj!-G|-p3i3RNW^w2=}%X;TNEV~v(!4gUFb!F>y z>h)~NDB+I-w&$+nTFb9Hu1^+Ruqe^*TYvNf|NGMr+$hz{nI2NiN+@cy%zKlHy|Hvz zg)}G(_S~l+dHsIj`SV+(XePRbRud_1b~d>!8qS_ZEWsyfU_CXlTS7{xR%osby=?q= zCj&AHG#^PxNk#j!J+`B!0ynKWQ3i8=rCn`&@c4T2IDj5s_Jeoy7XmQ2h&4PdG;E>C znq;-*v+cN$DyX`O9?fd8RH|Fsp4#OoN-33*a2%%C9M>}Py;~PHc`3)4|B4}TaoChu z(A6-LGl;4LM1lF5QcBc>x5D6}57{=g^b$I)A-&v14Uv zjCvg#*X!jvD*Z#`Af`5sr)fv)Xy_5#mBy_j}3xu}wuXQXbzD;%ct|`ZUl6(LxyUqO?5{!?^$K z2x~LTKNazQ_3n8P?EPfDg-tnM-}-eo|1KNHkzp&Mt)dg6&DoQ^;J3cPEv|hgiWxw@ zcTw?6q3y9xq*IK_a9aGjP+r1j6KczLN{bB#9|E|UaaK1}uCcDQA~IKM#ENdw{)nc~ za1kLRejDpw0Om)+jqSq>;2*bqf8KlVUhrZaxSLvU+?k+1?O697(h~S+J0*t+Hg1Jy}HV zBJzn~H8KyIMF!Mhw;CXpwTmkjQM*DvvxT&h!47Ht`?u=J6w3ieWl+o^eYl-o zH+mM3ehKKUs>;2fNA84m8Byf7e?1!L5b!J~{(|;STLU?IXPCIBMf!YMrDJ(`kEX7+ zHd?;GeTfk}1)$~W4v-2>;L@f9+EZOqhaIqW zH$NhNU^wYd1db1eJeKVJ_g{OimR}pvD$f<_3eD`!>EUR+$33WcZo~jud|4QMLqpQE 
zd#4E_;PEa`SVql&p~kO-!1n^N`GkCpOGS=5X5>Dt#u{_UWSz0_E=*U{1e)WDc&8ey zO-TuA~<}P*N5#B3He6IcZRVC3v)jZrHBfzecct%>X#CRYXKZ`sVZ#`a;?&BxWkU58{dsK}z1X-x9 zZ=Sub^>sqxHB;g`_I0@T>h9%MhhMx0pTpy(rELCiXhX*bH6*$!gB@?(Bl~jzsxmZn zT3PWu+_aIqfM#p;AedGLh1osl+Mj)H?-V{{utpdR{W$b_VcxDYg+X~O|Kn$-7vk43 zoTn>^%RqZ-&tOz&%^wpd6M5>;ByEU5eRB>zPon6381hvhw(nE_Q&t#VeZZI}oRPnMmxG9xEMfEHv&!M+@NJ+PweLNK5Ol>3=a()K+mWutUD}=OPTg7a!r7 za`ZW)7A5|oKl}O?gZzhhm&q2hjmm!D9n+7Uk0pA$Z67Ze?a!Q2&&7g-+v;(uH-a!J z^N2nC5kheEm4-&Xq)Q>w>@d6X(uV;W*m%kC`CPH@jlOO^VxB#`;c+ z4%JcrYx&XsF^7X0GGFN0=d_!dR#>a~!!yn3|F4keW#IbSrr>|JWHdz>k{0i|@}-px zk#=edVY2A;b76MUsCMwCH;`!eGpc~#X_;!NAu16XVFqQGpZyw*IjrK`pVRltdLIuY zTyGPfmO0mZn#Hnzti|(~3fnWvyhnhxEw8Q)p<*J+66#?wgf|@PaLeMpBMcuwu3?Qm zhQLkli8={S)f6c>==<6>dMcvK&_urM?-N+A@=ncGhm%CupGYZzm~KP<(^70TF=Hd- zWeYK|e5pm6oQoKCV9j`VQ`kMl^j>vqy+Yd6G4mPc$kv@px49PsqRfwn7^4Fc)i|F#?K zTwL&QO`G-8qBWNmJBX#{4P~Is+CYK1>9NT2_bXwl@8@oWkJE><%b%Bj9~1;37Ib;+ zJW_U3^8kMO^Z2tss0 zh3K{CW5Iu!Jx3LO&QN6zhZJG6qZC0PbHuWc!(tKpKZPyNCy_OsGA9iT5HC6a27WTW zsYERs&s9ilW`qyf*0c#gWm6{)Bg9rJnff% zb!#Zl<|vDRDTrb}r7j-W^DI+NOy%d>H}h^-n13T;9|PET>oX<TayWB(sg&`F z28U_OV#ebt)wU5^S47fcX|gtlYkhuSWp-CWa0ikOCG%c7A)g|R`YLufFO zVLNtZi=Sfl%u`m>izw}kHaLigXkz+hm@skELXDJ*iLP!4;84a*+B%w8We33mss%k` za7F}_%-IRDam#2x@g?wkx? zR~IcB4UbPxaNUT6830!IyRl+1q&Ft=%S$IlCtO{4=Q6`s-!>?~txS{3f8cuR?pR%s6G5DYd56Ni|A1UxkT- z?5{oTMOmhiP{2NH!qM9K?mD~4s-^^o<{`RGSelflF%8{KuIier{ZPx zH4S@Jyw@YD$LmbzV@K@J=z!~@C?g}Mv(0}g_0CEMa|@fH+g4UMasCqj?ZEFaDM6$3 z-ZG)pmSgHU+e2EZh+>7x@!n}<_nR(S_(|17HhA1^*+~2Jrj@bGrg)#!j@+2{^JdlT z9X5! 
z)*#8Or1N|5Eg#M8hE%-#p%coHD+Q(NlfVK(&;9n;&V#?&dl6KRZL7fZruNjWAI%ic zH{uLYzoyVmeTT;Hl6v|#oQ@!EV*z*PbhVrHOO9L7MT^x=g?9S(%76co!JFW5h+YJ} z=)6W4KW@}N^-LQzU7F+kpc3{w{cHR@@mr4a<@~XrGvMzcFJIFNd&KdG@%`*%!NXCF z@tXegAhFTQ<>}@>{6wVOuMyF=v1#8}ZaNnolw?q+-Bhg%3R zt67PXNRy!K9jcLwxp_ZrtDZ@lpQ_{g)@1zRI@}G^?Dw-qK79wjnR)f@6#6As z)W+~P=QaLvT{s<`6VJAIpGgw`KOC)(BQWWCY##v?o$mUUM@`Kyo`2@3+MlX$B*fDU zHCmfUjeLKN5F7Q}J~$gb>@MW$Jm$#gThJAAwETTjvxYIm{l%8klR#RYPBPX+h(w5w zg!FVgcCX#mRc_;%eqI*J{|Rd*Z))h6&8vGr8ggCPnt{AdE9J>`Nn(A#-fWeVmsioB z6``KniA;&}>!!qlfJ+_Yj+^@Djb8VX;guqq+L3A_3S7LYEW0feT}wkzv4{J|0$(%t zZ=0S4{`&(F&Eq%H&u(|~EL8s(*bVgNzoJ_^UCrjP7F1v%EpF@RK(b0|Z_nn&R4hSjH}9nUWU9BArrsxoYMnxB8j@HxHF?*=)a)bB zlBN(B8W*Q~Z8uJnt(aOb8~fjGdHE9wfAwNXQBg1J@tBNxnO|+Aopl;aSkfBH75>P}*3>&4+iz za~)8_EN7R>n%r1NqNta0J=m7VPeriUD`Ac^#Z)69D?bept{!6MGV1yNe`=w zCtwDzP&oawaR@&t@IM<6$3z$My$mZxNf-|N-e07Ij41~r^ay{$S!TyKCJx{7m4A|s zW2ZGY3IEj9Ghw}|Fmb>kam7^Ny|3CouLpqmA5}470{fqyR;r1@*|LTuE<;AeFV{;z zUkA@)d+%dk`;8xQBwBsAMITH2Y>66lnV6EtlHGcG>3$_+;R6YE!q{9J^-a0U7Ptx= zgPwo9u22QImDKPy>uW#-)E2T4-W4v4;TB6;YKigfiTl1#Hm-2tcVp}w3(7dxXPUnq9^&6|yvlDTpQ6;Gf1AnIpbuQ*QUYvAu9 zb|b>Kh=G00qjFJCA-bsNweJTII#*wQ$o+c2)|A`s@pN-oANTGg7s-jb+ZTI5+@gK= zoPmAz6nua~!AO739Zq3Ry3$v|*&FZZ85#`jL#LX90e?})4`hjs!emR>=fPSa4Fw<5 zy%Rf@W(|cCdVp3cSG$waQVdSA_u+YI zlDPbHP$0~62VmV-W@yX2;$C;KTB`3#)~R7^nYOJremI@>H#c8YTK|E~5Ct?#`t8=t zUr=Kwko&!9+`+tdQ30g4z@`X<5NKsZ^nom#_7q@_HpJaiVjTX5@Lj^%w zO|QLMuL?%L|9I%@7|b{Ck8BP9nf%|;MX^JPmfStOxG4=LTlhxqi;F{W_iuq$S+3{9 z7_-;qmrO#^ei#iyOm+1jR|6wtMx%%OzG~s}D@V`Sg<>Y>m&cLY)1{UcXRTgAu{oCx zH}lE0Dr9>|z5nsEb#jv}>vn&8if|)IDqe0>76Wp`RoD1A+ydD(KdkKGo+nfM;dpTP zzV~$(<8`Lq|8~i!r@Q!%?NJmsiU2dttfv$&Yy`X@ouYvr2CcIB(sH3m^>|)=vJWyO zFhp{>VRjv(*P5dzabt*$F*Ox&(oI3(Sq_ZsmHPIfjM7p@2>3~Vh5d9sSA@} zsz`^Ic(rJJWRfck`m+S4!Ai(()W{leI*>vw{J$BE2=o@$)&7jqPQ`D+-cR5hmm3>1Zv$y z5=yT-yzRLls2T@)n4=%`(0-GACYYVQm8Zp(A^~A$0i2F@1Cw+^`x3YxgL}G97B+;z z9wwc>f63cNXOnT{d;g(?_?0DPjYJe3wHYicj9$N!^L4!|7Ay6=dwt%g{1L?ynCN$J 
zFxx+xGUHq?>+eq7Q*-Zh*VPl`jnk}wv$eey&bqYY5u4OmRjoIi1&n~Lqq)H%t`kQ8 zI_CN$?A1$P$8aN&BRrx3C5NaR8#z(96ZZF!C27gR9F-&yAcAmlGkNTIsHLUw$Z9d# zYe-8i`EbPxwX8grwTeaBwQxCwVk@kuQg%BxeaG4uco2e4W@=823@s~@qy`}+3|Fq* z3GsRgo?G8N;ZlwW4o44bfndeVOsXI|bKQs~;(tp)4?4Zx&pEEgIpa%C=`&y@P~#_3 zGFlDIG}G^~SGaQ3xrz|NIn$AiaP(_ROf5+1n zewzfw@2DSQ|L%Fk7Hf^SPZ!@*zlKa?AQ3pyfkQswWzX_7;kN zSPJEOT|K-VG{1<&pZ4N)dj3UE{&>O(+K*a7Yd7Yf8Uip@c5n&tb|y%@u$c*94N(Za zDgO}W0m)CPI>03O$X%5~_*#X}+8*5B5R3wk5mcXlqI~gv*x%GK@_D|it1u4S!RUqD zq&Hh#&OaG`eK~SFSnzj`7p~7C9$ab0>ibxFU9s5rT>ug0|}tg_qF}ABKPcCYM=kiB7Gn(mPA(ym%h!P@yZB zv?_N3sSHpT$HQ?T^zO#l0Ib9R09%Im$|M^eb_kx;k!WEZ3DwEuJ zK*W-DOTFe#9sAs9>z-IR#KKrYc*gmI%^l}`s}S7)ctL9c1_EK1-e#SB5Q(q*hj@>$ zzCZC=6E&*U|<25J)rwdvtx95MFgHGQ%ro%sTO}5e$ zfmf59^?7wC>6}L0H|w<|YqLexU(pJ42%Xn~gEu%fPu-X#+yk zdGxTt1Pf085T29DfQlcDq+@f;I+w|iV?R#_`4lE0*W-BJUcJTnQN8xybiq4pZQ_k) zPsOY3-PRhcTtBT^)!wv66&PMoONT{NP_22NRw+tc_&n$Y{h2J#i!kWe^xl}&k#J>w z`d#oZnk?)~?!@NesmH3;oPIy|DYi)2>c>-oiaD+6_4Dufeg0)u>+FyWT0Cj$;&NN7 z&zbIw#Zx>7XpG^X*0@j@9qQlOncstCJStUZQO;eL|4I_Y*GqL{(!R>9H7jJ#ka#IW zLVSQfnsgvuIX+7#gm?YSV7mcCOXu`G`sY40m2J6LymT0QN#^({Z^BwDLLOlfQODtW+O|c)>D)UnMbjz z7CdOTfOL!%%axC#Aai5cy!fbKznf}xmcfxee6<)~dwV@t%f>u6c0nt|RATdW{;Z}Z znRFd>dnU8Cww)Ib-gOE|=?#^W-N|^cJ|y_{9|zwl2POnZ8^g~sNrqKkZZVFJe?AjW z9*qb6qw+g$vQhMmqO>GhcV?Lc6OwoH^6A@fN;KZ33fcjQ7$RMJpB%!oq4eQBXYQG8 zoVedwS8IGcol>MvsRHhcUhj;bW;TY6>TU&kU#oh4e4cBe)o*0DZ?5mTYz;iP>b<ly!lUaWGcDTInvz7Tu15aUcyzdM%px1Px69!GMwKCG6Q-%71%cdg z&BTGGtr`q10b5Z(czjs4o|b8kLLRvWTXwOj_3YPVP&0IBY$m@tTb3h z%2TFx56D*2qF*oJUHw?_H2=0kKF(9Vb9D3adOuo#@&#r4yjp?A>ODXkj@Dv z#ePWvy7NYH?qexO4OXfZe@aXK(!Pu;R<(dW}Hf$?uQKf3;2~Mb?5o%uB1u z!s~WJG2?3zR8pYg@q~TiXj@>tk0!^2h33q8bi#Sd3sM$!)Un(ogB}IWvjOUEUM<`r z#la>jASH6!f+|;cJVuKtwc-^jXjh z4Ir!hrBjO+x^1)r!6>0RB*k?QJ#Z03HDTnjjtWLB{r(#4cK4MKfVQT)Cci{U&m#_O zauxBn>j(197F(vTtF?W+iDsH+UhbWRGVpqlc$XFKVi2g{0+E!SS&`cUcOCX~WQ9TF2j8{x+dOyU>L7r0DezxZ(BaG+ znW1Otf=Nci`j}Yz(5{N?T-I39hMZej!W;{rGK&WlTOx^Hq7W^yjRAkBvL%{ACIR7B 
z&9AKMF5zkBQwGL|gpQXGrSU)v2TWFqtL~0YV0*`vL2PLde%oAS*g7S|t>;v+g8bza z;2rdP(-tO%U=~Gs?WH@6@JR+D@x6{>GuR&(Zej&uwkqin%7W!zSiQsU2|)b)oDqvc z9)!_l8S_U3W555t-2B5Pp1<0IjlbA_I7%So=SGG+kQ?_9N^?c2SLlan?sR&J&;};N z+ov}Fp0sB3{3g2l03I$n6cV~ek)2bDC>*Gxr71mfc_%;P6hOoA(EEhuK{=VT5C{>) z_r2|8sTnv5;>f{Zb=IWiQC(;w5*D|~-u}R8gF4SyAvb{_Si?QGu`(2mCh6Tbw6Hil ze93o_Ls)3*?)0>Ok}1f|!Hfhor%6=+2A2 zHPq0X#v@iluQt876n7OCtg;H~+Ongn6#QG#@#6k=17-nm&wGIp2#N{};s3_65(H!C zAO0pO4%T$XE$3~$pA;NRpxOJ(A`^X)aa`)wR6@j?EtB8wrrV#{zID{* ze;c6cv}iZz*4!xm+w|eF;B}xWld6@vzWeqlzFW*p_3F=w>2lnkE6%R--pA;~r(H4u zQaBd`@rHr+%6dQD zoV{4Hw|X9O>$hGe$m&bUjXJy>%No&eWi2Aj{@^I2UL!KyZ}!c8r~vAXI7?9eKLGeZ z2fsh;+zhO6w?n^R7m&FdNI1HI*_j#SW);C&U+cG*lP=}evWaagoncZ|I;4R{(P}qU z9kr{qXiHt`%lc$8lrrY`Od;03*BF*5ZmD*Y^ILr$e0geE{^`dqZ}HaFqE8}^J$uQw zDMX}mkNwWeSAOdo+f9g*l$sQxka5jygL}1(h#15G%-O(7OhjZH+ddE3bJH|dvag1- zlj+_t1h?eUBz7uQORW|{s2bgwKM%UEN*{*+GfH2)nt~z0JD#pa0w+`=d`k zc5a@)LmLE+NU?a&pr&b@uu>tMj|PxNzGftTc&zT~5G_gn_x3aBn6`#<`k^KUQo0~-&0{9^am!?O!(=a&Riv;qN&LtR%T zcWpWUS(u2==bI$#3GV;eV+V_Yl(RVJJ@Uh8_ziaXC zHu9Sg=sOziw|}i1AAR+pC*gyZ&4aX|-u(RIDt;U19GUl{CqF1J!rRfUI`UpPxRBk) z%pE@N(g*-K#*iiRdz&}LZXG6l8EYioe;+X2l|lWmwIs*qvL?Il)*h$pP`ry-M8Hf$ z%vj~pG|j0~r_Ai)Ll;h;KD}72RCP9)H?7^gaf2z`y1hM_&d;3MxbV;?ejG(>{Lg>& zv;WcWeDO0^U!JX>Yh+fli)SoWDKvYS-B{$wu=tmM=95py{Gb2o@4a&K`nII284HBa zPF+{)v)>6T-~a%C07*naRC$Fhf;K_1*lik%?#9zaGw;fBw3`H{5Rw}r0#nE=%-bzh z%MI)1PzRElT)ykoxxOWwm z4?X+J8!tb4?h#w`w&;oI>dRN2dFC0f{LJIeef3NK;4l7}KWBM9t>6YQhmg#>ty_yW zCiaTnl7zBcY)RovpR`bV4v`2kpaB zRdp6q*UX6djlAWC^+`9G&jf^uR+tqYq*gQp20QH4Rrz24jbC}>;fFqPzU?{kWD0U* z6(TL#APjidiI)?c;CmL{pn9PKG8YO=48V!1Eks0u02oz4jEG30$9~l_a|m2@Aa)TH z&1A9Z;-akDd9z#%(I{{-b+^bOrk0BeB9Ey>#CNfpY)-aTizrO4O^jvO3nIogS#fGN zZ*0HuL|6anpZ((>d-ycOjM+wEi-N!RcmC^D9)9kperT`YAHKN!{7WzY>Q}z}#j96# zj48|#C8|tmijp?+rT2quqE?z4Au(fq-cX9jC z&5aLVe&mA}&pfg*=^8g0)b-O(zmJIA_4cZK>D8-$@3UX|!t1wW?bN^VV;@wM+Aluz z>{Ea(y;P%s7*+$R=}>BCA;o1La;|OLvXL-^Y1*cz&!2w!p>xhKfNYsvuIvn-ef8R} 
zef8Dfc2g?UM~RGwxfAE7J{;8+_@=x^h&DhOSf9k1^=80Tb*-sU{xp!?Z)>HG% zflAQGjyl+&HMUWK3=q)XCsTINrgvpR{@`#-7v2rrLxR7h#`?SEkAGL_%u(ugJaW`` zn@9fCaUTzUXde4^U}TElJdgAr7{1-srX&5@|NpN4j`a6<>9gbh^nmi|*vI4jW`hUi z?ZDk}Iq!O^p$7}O9q0w^$7CI2D!3mzwpAV1LsPimdrArPd9bzQ)m1W56%C=ryn8kkALDh0K;;5 z?bfZYzJC2HZ(M(EXZ7;6Z+!mNe0$3CCN+>c=uC>cTfW&_h%t(BzsfNtcMlX%C1T5X zwp`U|5Yk!JqYzNz;wp^0Dvh|H#F!|K1DRwN2JMB;D?r$>dTSE_L1Go2Sm6-Ux{lP(x)@ zGj7vfzl8AF^ACUS%b&Zz{P;6Z$!vQ4`t|dVJ^JFcE3du!`t#2}|Jh&vuV4JaXP^DR zr-z7@)+E9P$==3xbKb7{{)Ml9?WxO;Z)~jX?Cu1P%*>AEVmX`5oZL!nQp&?%Od@iS ztPhjPgosi~yQMO7W@L6Jiiv8i1YDYJ)M5+_`%(u&j-i%fN>T(0tPTh)ZuP{R^225 zq(y@ZJ6omTNmQ#+O4DVpG0j!249g^(`zkG)c_fNWGP9vp5$Q_ALW7Ahk?>Y7sq40f zo~|ucYm@EQzwiTV-M{_M{?kvMX$+-k%|)6Z)z!#$qb`eO0<-_zv+3uaeRlYw_Ir($>N&f6snr8h49d99+z9J1_%L+B zkDa*?%(dnr&32cI#m;T6Hk(YlX}44R?d48IDSOvGac=Fe{DuF)LmTnZY%MtUtU|&o zufDN1oh%m%tM#eJ9|b)3eNl}~N+AItfJf&w!!=(-l8$26&<{TI*r%R(Z0IdUX0E7! zgoS!^Pl>G%iFtK=T2|_;QO9?YNI6u*eZsPJ$(M7kDPUP zA#rmJ?ak$)>7q!uhtV5<)qtg86}Uu}u7Qg?O2#DZv*zZ|gN(c6J|?o2QSf0b#{eMl zzF3FB-Hq9sK+(hLDkNA|gK(RoQs@~kY;RB3*MIX@fA#vyul(dEKk)36=LG6%r)Se0 zLkpNe8DKIX`W+(wTZi+g>f?Tw+ddl^MoFJT#&hi55m(A1l+B%kqr3Zu!jW8N+fn1k?HPjl0MgAC~Xr{ z|AWL^?!Ww~G|Jm2 zsoye#FYdKKymiS(Yqtk6s&Ca1+)euL$H;tlcBdzwEvEJJJ&uqBEgR>;n=Dllr`koA$lh(MUV5nOQuyco)Q|tUpZ@XR`rH@e zd=kv&$sSs7*SmQ)Spxxv#d0V!ZXwNN{gHP4k@J`T=*OW1h5hYc{Pq9fzxyRiQ=Xj5 zjd|T&Hd2SZ70txmita8~-DFm@a%vQQCY+XG)l5V0LW?>ZN{ra@JVW=kXXm|#6Z60c zPH=(~oZt@y7Sn#^WuAvkIn(oH{@~C3t*^iMg}uMF*~t324{Y#E+Tp_&>F2-yBWPN) zcKqs>1F6<(bz9XW2nzw#IRQ;ODOIcEiN~JdAO<{q@xo*_U0+)h4(BgiY}@uDKlH=j zc=eSaUwZ1P_q74b(udedOSZ!=e)+}Otea1!SFc{Jr)$$RdwYwUH*a5l^s(u5+BOkD zDXO)`7>gS-*ILJ&T8y!2QYm#BLZ7SWn!1hxyE+%^CXomddAX{qA$QYBl>LqMlv-{; zSS>9!&<{QO$cG=lMCCf^ITWXGpe`JEC@?p9uX)3r-~``Eta9KeB5<8X(L%+SZ95_yQ2D`pD(Aue&uHE=Mzx4NBSm5DP z8;_ozKXq>X*>m&nd+L!13M?y6)qEI+gwUh_#v403uim=;*MH~AjVt-D{MkSK;jbRiIAa~U;ElZ57RhzaEVlvZQ>Z%$`Ou=Qo>UX4RE2LC3F-!xVe(0g6 z&z$~|4}AbUO1!&+axGx46%l}WWKz$Dp$pB=eE5S;Y+m>qFMRDYUwiG1Ma^ja+O5Y^ 
z`Jevm4}aqE^MCu7f9(rj{dx&abN1quRekZs*7d!d(aW&?3qSgU|JakKKk@L!<~-&> z7iexR^rfCdzq~XFmru=vfsa2uHUlU)NPOxwXLV;$a27VOFl;fjT@$65aC2*QYxlGs z@`Fdkny)~iR!ct@nRJOF_7r`|)6g^o0L4+=t2sFw2m~W?GZ2iO9cJY222!(1`|9gr zG=nfv!4Rn-R<9*dXc{p>H=l1CEjr@e%niG9pI0suUU zQ$V=7QIJv!fKtjL=cZ|zn3|^9+uLL2wrw%y!z7Tik;iUQv<9J&1O_4mQZtsORl>ZP zWV0p-9<1}Mju3*GRdpf?A-Fp;WshHe>6ORNUV8u9x$rhvSYB~Q9MUo z^R;Qk_nn$Txazvd;|6RmY~HOKSJ)Uf6lzv-@7kb>`aoHdcZYkvCll}6$2(1y`z;po zV0Hdu#Z2CA$G^qK-(TQ^q6+A4E&J~oJZP{FXZFqG2HtYA-W3zeVXEHyanvO6pv2J+ zYVZT7`C}pp-iaBEj{o&r9*D#A1K(mf$9nhQi3vMq3=f4a>8ROe+yI0SjuvugMS4q% z%RTiTJ%Q;iTY`wxT2o4s$z*0fet%fdZyWSAI>xTyWqPd@)MKl$;zSZHXwrrjQDy?y=v z`A_^4OsDn@b0KFY zc5n!7Mul1ME@DUVAN;NVW@pGM4>2ePMqo0inyOZ~u*loZ)0tIy%UFkyz z=6P7`cyrb*qYhrGTXis`5(jQ1HqtMb%jKRk74zOLrexN}#nvYuIs4~+_NU%=X12lF zMqXQ6``wqXe(jZ4U)^53yu14BH?F+4EZeDDWIZpEcW(dCM?UcW3#TWmogaAW@h8q~ zo;^M51EOcaIS?&%nPX^~r|_D%)EuKYtXgq5h#Nqr0>BYGFcX}rvY8>O0YnC8hq*hE zfG|z#lLkguoBM^+T_Sw{sWboh`~J+Ba+p94S5*N92Xh-rmKai(pbj;FFRZVfTAzRX zu_x6&&5S%0OVNmi)k4}dD2g+|Qiy?2N_8fOw(k2?q$mUd9IB@9J~a>tea*{3B_!gw zs@`}v5LQE-hp?hy6=hYONh%|o8XQ2aHN_Z&wCZY4QxifWZJ9KPMlWPNmfc*KlA|H< z>bLqHkyG0=r~pb%Q=bXsy*@e{2Rma<*jmMcgcL}e2WfBHfCWx5dR&G_um$NkBs~s%4;DO%U-zhV|{dCyOnS5+>}ryqG}V@`<+s6t}i8&;{C zMrPFzjE16KwJaCI8lr>yu#7=tA+bfBL~(%K$QdDt83u)Fz2b}-56AyfI`t|qZv&BKy5WcV2cQ+sJgA6U-ocH~^ zg}1LMba%Z;B{D{^*n=7;{t51oBTds;1e zYf~E{JKQq`i!oYlEF+RKSc@hW4qR(x7J&LlTZ_eH(vjQF?JZS3b9z%qKK%V3-!)vl zarJYb|2;K-{NlsIVrTu-M(K6q)O_aC7hd}6skQm(jZM!6_VrVnWZtHR)Kpbf7prAT z$<%ADW+oz|7alV+Gt6hPX(rJ?r3}R|tV%|I=Ild>Lh($(rPgB3f+%6u5sa%(8prl- zR&F_OmoObHtJhj{MD=R!o+KuYuH{=E`xBht4+|uA#ZloJ1Hnq}Ij=&=*KWQtNnvx= zoI5k0PbX%!y}P%wyZFZS(m~G!`-$g2_*0+wi0|Iqe&e+ZXU}}*>do~=e&pjH*+>K^ zAjP>7J^#eH4?Mb&Ld(G3-g2>6)LIvd#j0&fKRox$lM!Gb0OSf7DpgKQ35qqvhJ5~HFNtFWuVH$o(Zdu%dX*xD9kljyl$uFvKGa>+5U zx*B1J8Uo0mJyc;(3M?Y$*p&Eg%D8_2CZi~)KcBDB$iIQzXPt2v4jcuYOO47dRdcX9~WX$&-Bn8OGl2>=lo;$#LC zMuUl!EFv+&VGz=)&e3AEB_3}kkDr-9>R#O@ZZjvTFrt0dL48#BF?aTN$3$>g4DBZc 
z*f$2S%f4ha-R&l6YV>B@@W`(no_~*6%3E@s`(B?9^ohQY%zEeK>Amtn`|Rm~M8$rw z`{$Sa_(e5kcURjdm^L@p2-w+KEEanJ1f*7S&ciT_o0g_)Q;g>s!_ZvOUHvvLHEwrapV7vrt8h-@try#zqzK$vFq(yDf|z{N&kUjIOaG)Y9ITw zcvk{1-e$4iDS;zCY63h+e1iKVhTcW*_%|5@ylrF8Q3Cte6CP+lIy7_Mky+hwz(Re$#Rna#V_0A>{BwY9ZUN=(gk zGJpNrSM_#TpP$>_(~Tz|o;0Vd=;ZO!-+1-PZY>udd6+@7W||i}s~gug&u!ekaU&1) z#tl7x;i0w-!)iH8Qp=(i&}2O)ez{1oed|^Gp2dlI-~=Z)!3j?AhXQE>D>Ox^-rB15M&S8$ zl^$!p{Bs+x{>(>z^r7dT_gUO*rdRgRcDozYIt6lp100=E>I!AqqFG5Lrxb!g2g}MX zVqnrGI*E06wXr^}EWiHizwywyvrj&C`Bq3;d1B>>GwY9De(bfYSAq_@m)5&h0U|vA z{onV-t8cWa+q=EnruNL~v#WmTxvp)j&+4q!TC_TmnT|4%wbridn4#(+2obWP?|Tl> z-HCa9bK}&;T%B{N!(v&~3ooPad^VYN=~l=SOPR6k^E&0f{DYs^NWc4Q!&QWfhz#?< zEC|7%a18Zir*MK3ynPs9FXzFWgstyWt&f~ud+O5Df9mnIht8aO^upgmN>;f+h{-sLEe7ovB;q(bnWADE!>S*+Y1dRx;2;guSM^@j&>&%VvGgWXH&ySN zj%#WudV`#c_WK5=F>(#=%Ph)H?65@xNg{ngjUv59k{M53QTig0D9|u0<~m(QY!k8? z4{U~Q!%Yn{Dm{w`pJAL1G!*mJB#%i$5Gn+TIZ{N6aZIYx7vfi|x|J-N#y!ZT%{^Ygn(~Y&uk37;6 z`clDgziUPgJ6@~CC@H0pOyXZAF%+&!+VF?i>f_9;)j?9=UD|v3E z+g{X~lQ$B=;0P5WlEt{Mnu409m{d`#im;4{K3VXlMB%pyZ!WNMCurdC*xM-cTWR(K zuYDUPlWzx@-;B_Hw{DhqiU)bG-;Z}MMe_s7fA@OyPTT*E2LB$4`Ht1<0q5WUcs%&| zfteNFM1$yPDvk%2{MHQqTg8sh9X|cORNmj*N^p0Fxse?v#~~2*O(Sm^Gvo)kDGrm$ z+;i0!j&OHp47khYbTsd@yA$2X2)*Agc4iJClv3P1r8K5Gh#|NZ5I`JbtY*x-v$MB0 zn?H2=xnKFp-+%Vh`es_O*PcUdygN7V=4TQpu*d+3Xy@xUds*%@O)Go-?UnT}UwP&1 z=`&~N^X==q*I&Ex==&bIy4WeN-(2n93JqTS`b$qd|B35;iJ@JsR?Fq`+_^I*L_u<1 z+}yg|ojG?Z@$*le{}2D-PycWJh!1sAH(5?K)K*9jyR3CL*LZ zz=~U*x09jo0o2Z24Pbysg*h--RUreoBWmiZ!1|^Qh16K2?nkwKr*^vUQFsQgR!_6?HiB#&7b=8 zPn~`0lRX2p+h7JqDGIVSBnRBw95EFD5*t!Iy}t3|KmOy}ySshy7q_<>bIHSv>pM^F zZeN^Dc60dUU-^4q``YLKrN8tq1P@bbPoKTG$^*n#WV*NLx~^CTi<<84E~e9IUJlIc z%%Mq|eU%4hF8$D?Bq3PIUBFNl{i07@JDtv!%OX*PX>DyrL55r<#aoLN&nN2($hX(z><7Ltu(>lB1{Anze5CLrSUZx<sr_j80YJ1ZGnLASSdyRIWcj3LEC~jh!hlvOyTRXb3X#wv5i;hD>1+Lhf@Lqq$V{F~sdxbB6*8efBBNsuRx8U5tD7ZkGj3Ok*7*R4M8i~gA!-e~xpN)*!d=HA zATVHNaBoDIgsQ0-8x#yUk&YrfV-y9{G)d0YomPRj2`fdZ05`*4WzVx1*BS~cS^|p? 
zsN&SNt-C7)BS%(|XasX-W-mELDVhTZ<4{oDN(zA}6{BKo2yWUTq-3S4$LIm-Av-D1 zhqkZ-m>6}$h%mxw!DAUI1ZOI$B4TE$1QLzS;7-2pTS~+rD8UJJWW9yW0iojHzzQ@9 z01y~qOgHR6g>S1(f<=k~VqIhsleR1Ry7M_RBiacpfOw@+8cee|fI#f-3Z}6(3dJal zX@?mOSQu-AI>8Sj`$sWj1%Z_T+)L;m#l4A_0Rpif{DI8fz)W@zgLdd;Y4?^j9_Mg0 zU-4m5NQYqGNqOA<9fASSk%p4FyW8Dj9fukYZjSpXE+QJ+bB~e(00!M*VGbL4pS<-^ zN6!76pJT$|10UPKax?|8`lu&H26uBaa|PUkkhzweO=LRlHa6Cnd2iLnCOOgdt*u!z z9m}7$Gq;NAd~-VA*jO8d3oo5Jn<~WZOJDrb*I)jfy;pv*oqS-jyuEp5bMw+8IhO^T z0~k4YGIJst#W0L`-24{SVA3+O)6D_96(VitayewI5kZOAs}CFm0KuGHO@veR0)R2; zbrqYbbSnTr0;+%rif>Z56r20ua_?F%87kfbi$jEhQ#~c zyN52}5AvrzbY3R9lV$qw-=nmk-PNcEs%adnW$$m}!{lyD*fF0oNBVm-AZYWW)}-<8sgWWJLE**TIrwy1(#y+VhSumY@JJ-Cf|_;{J9F9S?vW(E0nP{h+{u z?)sRPc{JS;yRk#vh}pqv7z5v&iI@*x7Iy}dj#Bw_Bue0}cG`H3ne0F3_hG#baMLo5n?zz8BSFcaAiGNo z><-2oT2m)w44s)La^Jb^(5Gb4bBNZ{^J2i!{zV{&L$CXmBwsc<(RGt|^t zYpu+@SnZu!Pe1w7KNF_2m#=QEZugthb{@PgbCagzLk}qGj2i~XiqvIKP1`mvfBnk@ zyY-EiUTmh9pMG}Z@};)zE}`3b$oFO`#+cU66t}iXxAwMv^Vff)N#WW0T5-7n<3mXMSt(KaGBdQJN#vaLqIVj)Csv;sWR^=@%RXC(3t}KLcpJeV1Q}t?Q z|$|5Wqvg>z@OP1#l=GLCsR9XByV z9>Od;GCZ3a%B*eXryBs5v$ESP$XbxsUVr_WXP;dRw?6!#4*_`?a%>s^ZQJxEFIJ0T zu{w8Zy$Wl|HM3eBGdQJ`n7PzCdRvV9$q>RY40X9oDXD4ArR%!Md^V~&TGgg&glSl< zN-nj?ylu4~RhVFn|h41=nAIM}kgGkIijd@rY9I>8?{xNBYw^G<+h zFmvmz_Us!?cwt#fnx&ANIMjOGV^~?DkVvgmEx8O<3DN~g6mKL8+Lp>aEou4c_g^^u z^yJcWk32K@*WY*U;wPVb8h{_{v$t2PE=;_-(mvfFAa$?6P=-t-#E~Vj2#}(L$SPs{ zwp1lEb29*@Dner*_C5Uyf%{xSiu3tA=j>)N1~aE13NNN2k!4>*y}AwLOh%E3Nr=dq z5aG-sTB?!|Bl8$3fTB7v6XH@z49TjAPzYh9t^#4!wfpYUNPs7yRV5B+lGK7?AVJL~ zrU(Wx7DF)+RWqU{Nh!9=#DUx)3>Dh0DorYcG?YMXzdJMyfI3vp>>P3#0ucofz^E1x zJid7bn`gF`%O)blRDhdH(lF`w^0|$~ABgQHZ!=!6V_;^++z z2Vy0HC;}Cma7z`n=1J2~N>wfA(shkF`cjp>XnhqS8P}ca#Fcb9joz2dHmVL?h@T#G(4VG}&EAL*G5T`4;Kz?4TYjUU7F~$78_vlUH{Uz6bQvoeSI#BZOuR8uBT|sL5t9&z2~ML; z1)v7j5b6qFy86Z&x>`SX`pLER(+1^)eS!}@CQJFIoR`1jd+TpsM8bnaQNFEhzK0Ee z^GohmI{N0mcaAz`AFc6C5;J_q=lw1F@|~#qU90;$_0f7X-0B$RoZ}K%-_!T|%&x}_ z&OY6L--Sdp8X9rL*7fcV#ZmL#{Wux{aAyPXrnlamBLDz3y;GOpu~E41|9VHU3IH%# 
za}SL=<385mLifFoFz#&a(7Zk7D6X}xR;x5?+7N&Li(ibBS(>fw?reqWq!w$3o2O2l zwv2{I%+2cA^Bbkw&0DvdBpVxRU--SxtgWrRcJ)h7z3;inbn=UT^FMj~sb`;g?jtJc zD_{9a3gPVf?5m&u?Kbcq|A`;o8*ETNb?(CH^XGK6efG@yN_Ss+`Ko%od2RjL)i=5} zeB|;UxxD#)&5wQQwHLRYt}lr-kcH4h3hjI$t8{(cCOhV*xEY`mneOEqkhOPlmcQLN zF%O*J1SdGbTVN4Sk#isxugQss3TWZOS}BN*02j?#Yq3C)C}RK@bjNEmf;4$W+Y%qaYy9WgwzBiCNjSwn5o4`jT8Eb`)k6`&#fv@*1632;LgI z+`anT&;OM#XZyGREA+d!cDHM@Y0J9oVx#fTee&@ui>tqKW7(ydw|kVEEp^v^!o#^~ zGfSz9>=H8OQ`Q(FfefaqwU$~7fVEJ%z?HEq`NplAdwXGa`mE*Bm@M{oc9|c3^wF1J ze)*+uygFZBYdAdn)N^$(4`70sEmyl68|&fpy6^R>$Uet)p(Msv;sn2B@J_9sOP>EP;_GvG!&JD^`mJHWq<% zjEyQpAfUDhi`C#wv%M%A(-=ybw~@`f3qu|nX%;=m^R!Fm!(bV%#k9o?0N+P@(y`(- zhkreCRN(^B(cAZkTJd=4vLjP)ps!hd=|+Bt_~A{tQny6Au#}pbxo}hmC4nDRb4KZ2JgCVeSMu`S}vD4_mjz_ zujTgb+ru!-=kts0=Df47zwq0C_m_YDnGb!u*KRrF^QWFK-nh|Twi`EY2E*eQ&o%+7 z$On$8G$u$V-OqqRp#WEqcV|s2lQZ-6HGmbB_hwC4Gpr9f z_043F-eIi|Fa$oR7vJ=FzEj-4>Tn!M{q4I~$Hj9$jeayGz~PajkIlE#O}f99dx|1L zHN{acdpbP%+tOJ-?!`G8R`)|${2-j>O*20qTC5MpjSt>!`)`V)ChhwgV05p|+sE*S zeJd}R8B8F8;?VtN^o?WYQp#%Cf8+J9zx3h@PrdKC(-+UhxL%8aWOd{EKltx|`TKw1 z6KiebtK2pT4haIcSHJPqg38(N3|^gsEJfBw(%6VE(% z?#!86*RIwaPp@sf^x_LwuKfM?z5fGi8|x39J2#x3x!XgJJifdA+GjrV+1boDX1ds3 zz5dDzUiVVupPydiNA=Y!ufFcwpMC#luJ{(OEj8@K&1jF-w486fFqvN18p04Ze3H}f zZJFP16Hd$nCpf_gPVg28ETt4Bip-<|Fkxr*I%~_ar(V0*%)%r?3$zfa7jx@kgW~2T z1`rMXuw1oswbC#{feOuL>@JdI;YRMx&MYD^sA?%HL*1B8ht*KkL%UXOsFY)H)H{#v z6AQuH2isz=ecFQiFhn_hdhJq%@C;XRbF+r^I^QLwYyZ98y7m0?KeYDnsi!CY$mz** z53N16F`shWICU0qE~~ArofsoC3rHjeP)JFLGmOkpeJ-iSrkaY7H*$v3TsCj-ZRM(u zKK_`QwXv~WFh4`mNZ@L?y>;`mpZ(0k4?i43cx3Z*nzUg)6%pRI1X$lMbG8tp5n_xn z#xZMgt+mgq5Q1=A^sC+7-4L7EY}U5z?(MB5`Et2jEp|6HHUiV_+qWlOGc-GM#Lh(* zaWV}%JF9>FKl|HqW^%dqrIg|(96|`pR@AgA;>o%?!S@apz6!Ci3L+90agQW*HPqTt z%X>M_gxA*ui|Qj!>waYqbzcBlbIK3T_zB~^vp>#+?0 zj#_Im?T2B434joSl?ou&A|e2c&DB*^Vn|KYJp-#Gk0KPqAmxVQ>SDDiNHMlCBm##B zg&2f`G0Z#$GD8$FP*fv}79Cp{i7Cn`HEX4^Lzob#uhp4qwZ05(OvS;>xfBM8h?~VI zt5q(mJYSnNAq@SJQkyGV;@yk|bP|12LLF!ln!ev{xNXs?)u9d{1gHN$d;cA5S(4s| 
zfuG6DW3GF)Ub(L7>aO-~x_kT##$g77nE}8d2!a4XfFJ>KNr)j)l*rxCZfGM!NxLB` zf}*rsF_xkSJw=KJ5QzbZ0f=!J-_y3*clqbvyY8Hmncug6oO|!P&)n*&F_-~ZUsP1Q zci*{r@?_>o^ZR|@?^DzuO1+)kPN%co-Olsepg#B2&knY?(Au}Y=YwB(>bV!b{C^fh zf9CG{&p-Uwxz5sZa~>Efh+rx&-Y0Oq(5qWJW$-iCf(QvZk*HBDW7^0H z@5|^7v15ZO;vr>X1!77)bdjnmn#@){TJIZKni1_PxD<+t`o3a8wv7M;4B~}=RCN+U zPy@|Whc6J;st)xan+0S=I09>{Wi*CStU?1}0dTXCyt=yj^o18sw6eR--ap^Sh7pFL zXt)#r*874~wv!G7Yg!ea?r}@F%~*SQ4d{sv1Kclltttc=F4^ zbOn>tn)~u|0_xZecK_uEL}@0mhI_IzV>=9KP50(rkI=r0C&2-#I@*M&Ox!$cIZYzs zdK+O(R7k9g6bX>XHuF}VHj*set%5jn=iT?8Ie7}wrM30VY_R^~>(8{8@2v(JK(>8( zVsW7>fGfkA{AJ%KVRf-sSahLXDK2WT{H#TYhr`Yy36iPKs(M`_doV?spaO<>Ri z&GM%B_?z0OK-z}@9suoX`0Py~*BdZcI0Ht%amC*BjuQlbRu&CCgzw){<4lU(``-Q_ zkE0#=Z{XcOL#$?B#U{770aiY<>Aq=_;Cp%_J*$6ldaA$KCq1g18&1T(!yysVOu)38 z#Z-Nc;9L(#wB30Aj4;lJJicY!l#@cHqZbauFT5Hoy2_fw-Z7(bdb)0)R2bS}P)i+TPjm!~Qd0 z{_TZk^4d!Ge;>DAXdr%&|=j&)j>UoSrUAHMPEBM+WBaeQuW zuITqJy!z@~YyK^dKmP33zr1nn(pw&T^4wz&U%Yg!v()_fBM)7@cKQ0{OE10fRFdZJ zc+dM@dhLxn&)q#7$jb5=l;mq)eeS+{&Xe%+()=42Um2oLIaxV1H@~ymNiRP!cjfB! 
zYd*+l8y|YZEbsR4Bi z{4N-NB{dTu68f4ec-Rd*sb$G`+slZHn-`4+jiS-+iiOj z<0>*LVx_7go`4;i1h6Xt4OZZ_ATdT&D}ntWvg<>@Vb3w<2C4EfN|7W>{et2U077m# zN$fDjwLz~$p#dgIVk|}PMam?xsdJ)QBv~~abSw(^%BxyimI6Qr7LyzJ(fr;v!J~sH zXX*OINadYmKm%8fT}Fnnx&Vymiy!Jyg9W93mJBVE08acOB`q1|dWo4vHb>XXz~F-0izcKh{c-<38T zlOYr(MDhwogpipj!1uaN;%&QaH*Zas7G-IeBO6fx!wL!(Aql(XPH($=Pa6O7FZ{&m zrFH~^005zYxTOO`PQU?RKg8X#T*%ujv^1AP6krevDccml3NVNw1p|YsvOc!cEW;!s zAh7~QfhZ!1OvDDI1SLR1CbnEl!YYu4GS-wp?MsD-L`Y;}43m@P^hq@W#z93yN?7y< z%|;_lUG$P97Lh{;gcNEZn1}$-(V$GkjNW@dsw1k5wMrDdFmsGCNv*1e5RxQAf_{J4Xk-qV0748PKm^XYR^~!b zQ3R9s1-c5|0bF=v?bu2(pS2PIW{3t%vw;9QRFjxW&8`G6F%;Xs@yq|_xzB&$Kl%Be z+dB8Qr@r!4D0(jRpZ(I8{=koXQi42q&jas$;xPyskphze1O@L4bq-<-sJm4uY_F`Z zF3hzbJaJqE(pvm?B=C-)2%@#l{ed;N{<%V-#Bd$MA---U%T|g~vDQ5%A|*ql8h{*; zh>8dUA)_FuszgB&%N7xm%=)5=At174bvBI<#aEV%gy7f)Um0gj1!>9`wuaY3rWu0| z_h!%r+ok{j2;Ny&6%hjlg`UWQnL3&(&{PuL+K+!44}H^oxiH2%CRetB7%Nb1MZCZ1 z5?;An43<-HY@Qu-o6Y7OoUT??w}^R~HC>KV(F2F?@+Pu9H*H52azhaCo4O0_4UB1l z?=eH56O!P_Jsa%L%Ow<}dpefwKeSe;?m4wR+w_MrM4? 
zey{c$W^}l5jSl?&y@?DSQSa~Mn{y<{4!z6(qLk?nfe}ks zAL#aQ@^3RWGW~WR8D2K)S!P{!;PscO^=Nvom=KpYAX)^0#;v+XW8hvatFqHQHEou?(~rx+`Z%EGH2<=#`?efH~(aNd-GjC@FOed9#Sdp zzU$6mZ|lMfFNlOY?!5ct(n6AFi3S9C;e{7=x3;&oHang9EOp)9;9vdAe|YxX`G+5W z;;#E2zH;SSZ+EbJef83?h|ZimdB@34mFT-}aslaew!vS6=}2KnlNk zDNc2HE|~=T^$AFTU@g4DpJ+iu%!dlM@d8*C_sAS$lq$@waV%J;n*R)?{TemTX{Z8+I59cQF-XA$(` z)y?ZWyOm8!rb}B}U>XmUcy1*<--!1uB!c1SFUV%?P8uf)5MU2W-bemN8LFxJoC2Q*qEBAn{3f_yGe6?o7(o`^M5#d`uF|7 zPn~=09o-nqMtd=RlH$kS2xT*dxw5E~D6yv9I(GhS(weJ?K_gZwQDPN?2ml#F%p_=} zQV-$E`r5E4nvHC}*>0zKi`=jrwzAY0l`=KbY%mz?bbFn}h0SU`f+~XdezDzo=R4j3 zKCWF~?+%8*rrmBMn^1XG1&BnX3f8$S%R&g&+FFp25JRY%&88*84j@XmyIYh#%Q6X- zXw38c*wXUFi;`xIMzguKy*+PR>%G3eCUYlFg4(aYc4ea?Aqz1g2oV#Kh8O`9!59j1 zn;y7rzjrL@`w&DeRI##+G-|cm@4D>l&Xz1Fzwe=YfBK#0&n~q9wC96YWXl{W3k*O) zr~nP%4(FzXO#_vItkOm?$n#7Aws&E-89KR%0D=&KG{j2OCdhS8QXm~=8y&f9Qvxht z1tEq?1d-K1P#{7b>0@9Bby^U^j2a^%)rZR|^;jXY);U9?GhtO#22(_|WE8XtQCXSD zu>rsswct8E9ik`-CJIXot%hqNAVOJc2o4CR|3EJqj1O>%dCW)dZO|AkL43?X(pE78>Yx zuS;pxwzk)scdtf08(359vT*7KtMO0tyg79A#4!h@x75qKcrZ1W3B)gjiz) z`%;xog%?aaKMo?aEDTY*j$*7*W9LrYacTYH#)X%T-Fdc+IH-anUT!!NuyGGFI}k4L z?b_5F3BG*0Sckf>>H1GmM>xLGNJOD_!iE`$7@-1Aqf}7q{qM_HIsFwlni81KF)$5@ z3XB0GL>=dxP&_CEu{WfV5l8ya<2(mmfJDSKTVR;#n_6r4ZYKf&%$!&UqJZGo zsA^PY;w0zYV)51M8?DxorrF$sZ#%bmvXwW6kj?o^>7c*R>R2zmqPM=jKG$e8$VTtE zj@mR70ZK8XP(}2mB4TdP5i5d08A`aWIB&UjNFDJU`$6F&o9^2+AmEVqF5CnNsH!?8 z!Evj8KeH&JSbFxA=GJX0N~+=A6-7Sw7#~wwzife$zXS9VQGPspL^;Ht@*k8@4oAg{p64T-M{_! 
zHeP=Dtq(j3(7?R$;SYUy^~$B)-Ca)7q9{mVu(`f`;`IAI@R2L48!N|;U%Ya4(A_@1 zwD8d%`s6ph@r_S^`qPg*^2o8p`NhSh6D!AugJR(2rI%ispO?qq`tE0*d-lEu9tPLh z9?)$@GHUQP zJ#gD@+ikmTZ)Vh!$Wnr*)SImGgGSgnlhczYPPbi&-B)z`rE@FuPdxlYyLn<`vu{+F zk1dA#n;``2^3JI{H?MAn$c{I?k_U70?70xb#`UYT^ZMiG&e(imE$x$;TRe3f2%svq zwzsZdzrNOWSM18)`_dcFZ--tZ=?-_BPDLsPy!p-XZJSsl>`-EMLr$)({awgCo;>%@ zpL@=K`qO{(RI}AMZ5tT?Bia*UU6w@g%oZRx944Klry>DTU-SbbG8jv$VvHy9M2s*5 zL}R&qOtD+CDKJlzt;;1ATivb1rYh>JGfm#^?p|GA?UrR3d{7XS(wC`ql(;h|?%1h{ zq8L(9GLc42n2Fd~SNQ+{Ns`pAxBx&zX_6F!etkGEsuGl#8Nv7a{oUR5EXyE*F=lz? zI5QVM7?*cf-MtG1a^!j1?BY!FDe|XfGuDIMo1Mc8AkvL5~F}dK#=j-EJ}z}LIi*mk=ZeW00h)1 zk}_+poeiLMrchO*2BS>Sn;Iglk52(1#)!xwLe`XJiK2;hp$q^Zp+dDbF~$)RsKWMe zXq=T0t$=e$BZ(P+6{skZvP&>ck`Ms^h|v282}gh`l)e7$wQJW-ojMglDiJ!3TvZLz zuox0SjPc5qE6%xrHE~c#QEaYWzxVt-t@(xD{_>Yh>du}%x4pIg%7y37oLV_`;$*kC z)9sgN-P-20rG>?nh2@JE->8PW_uYHvi_d)#HeNZMee}xf7k6Vn?PSlt@XUkvKbGhD ziL+vQft`|AqoO1js5@YmOt!6>QPitMTv-= zAw(67B18l=I7_Ii8ii_~eN+I6<3xM{KuW5_B)J9KZWwD-L^VA7?AHeU?l->vwU2-7 z6UWbzR2 zo;FE{aU)g?Nz*}OD1szYQGg>goO2_sq0uX&Aw*S0V}L*btZ6`Tt;mu9oe*Ou&zotkQRh#c_C=XHBNkQM!if`| zBx3+d6cCVk0I=CFH@CYHP2sEMg~eek5p^NWmlAYJ5vc;G_eeS(uSnly>fLXJPPl>i z=hUzJP-(#_`HpXg9Z(UOC8>Ylvb~zUSvyR=btsw1(QYNv#q5ukII=wvQ1tM8i?d&2 zow;8<9Jr&pZ^D}yQE{WREVpWJDkC#f<>$a!PLUzWAA?dCw}mgKaB}rUcWS-FGCE?+%enL)z#&d6HD_;+q=DNZVA~TvI;)^?DI== zt!A_F)YD)0rt$VC9((n|vzK4Fph$`ju5AytdV}4a=dN73{EZ90{=mHtT)VOwW$@@* zp8Wb#&&uwVJI+4*miPVv!rrfc>L2wsue|d;AIIdxXFm7Ki>q&}Ce@=K`jh7$eE8SD zzWS^9_VRez(%!eXqranen;y7rx9zswwl}dsbX7D_DFVXaT6^=WAIMjK{Enq}{JtNG zf~CHzQVOC~G29;XVr~nww6?uD?5`^JFJHO1c+p>-6VUf1dW5MpGn zd^`(b2Q~TRTkiYhJ&&!fUwWpD$6l|lZTE_z5O&6m9GJiR2aDTw+rE9))u#vrkZ27`FR0E|`ej#5-r1v1El03Z=SKrNGk5g>(#5D6ln5=R*WI!vGx zC59M+N{0K+v?Fj?#EakB-D$O2){ew{z`EgZ*leZD%t#7E3Q-jmMPw<-N|CkQUGbq; zlqw-;0MaH|9|M6YB(867lQFGMM+{qlUVqr@@4Wcp3(q|L)CWHBAyAs1Uu>p!V`BpV z*4EZK^K;E+b7O10-EP0JJ~(}B<<3*bAHMgzuPQYB&Ue4-^4e;v-9CL{>FzsDrj}xi zS>D{(>N3)zrKLu*EaLIgXClI81X{fBJwJR$^Ny3pOx)a9z3+V=*{({TY9e|*o8N7> 
zEO38wr<1w67LJ>WUoWo@knc{Dg(u!6L_!dXB5|gQxKj?L96%$fa>c@@VAd zhc8`v>ED0;H(q=7;{6Xi^!B&E^ZLarFF*IfR=0Q8JrBBj?s;{qXg1Rp!y>|m+Vj7~ z*FOI%pNEChi$C~dJu{bPCdpk70(*fG_A$aoXe2i^!rk6K!T^WW_UkrDgL=n8 z*CJdhVisV@NtKw6LSUUEo}+xIAi+BXW;+Q^+sl;roqYYrsUVMrWoCU2H*mB(x*WVg zIs3IuX5vjps-qlmVm&OGa=0ebK-h#W*h9f!A8LF+jA+6%C}ACGz|7uzBC7d-`UR+} ziioVW&bhjf`YuEyq$&u6WNZ|TB31CJss@JwTf15X$f_P;|9_-zpdxb8^!vKr}NXLnPZobiO`r^HBjVnOY6}v$xH;!AH z78fD~IkYhCRd6iBNJs~Q>J*WY4%2mrC^AYp008y+Ql}pXL3%~>Lj7X6vT~de^DK$g zU`x#M*$4mpfAyDkc6N4qVPScxD8bP@VE}>~#cP&ACG|irHs8$O|DI30_S*ALedgC1c{-Oa?Kt}I$A9|txko7}|C?X> z_myombVJomp7G|!?03^{(*w8dwtW{Y@%?VH5S6p2=$z_~hJN5S`+>EI0#yRF$Dx!Z zqm)Jr8c?p|WCaCClr)KMwBtb?`yjTGn7a~08|1|l94mTfkgAamn;}1 zi5$ga;wjIxOIr5KTvV@OkUdHOwV+f=ft3C6mHu*d{jE=Y-yP>3`P$RZY<=aqx4io& zPCoMf3fs;Afwu(Bk~nF@!U|R(sg&g$z?}e~536$y0Q&v@sm0^{e)r_DGZ!vA`_k(d z^8Dg%cRSDX_3N8?o@ZIs?+;I%I>pS}TVHFoTiUxg*jk@!*q^y??p@8^^P4-@;@pnT zy)X!y{XV3PV2ngaNS*gqW2K49ocE<@G>&5!i9HB;1eOp8n6#D~Mpdoodd}yoFf^PG zac#)U-FO8;S*4jTd_vZtRgFFrE;qxnZ>d!;)#3x9GE(J>#JUJ5AtEvX)rspwHApl; z(UGTsOj`H^Z3vag(;y?U;RDBDh1@_Nq3~=2CK@$Yaw1+EMbd9qvK4LKNaOC6YQD)u z0|<05Yz>}u&6BU{oXyH4P{3wM!v-gzDuvW=W=eD%gmyEK52}VtH1Qxj*&-<8`>9M+@!LyItL8+f!4L>ANke)_C5DM_K`pMpUy3wY(`0p zbcg*$vy&B)lK_g32AL}#`q4*`s;Ycd88FSH?KmZ4(ExKYEIgu_Z!HmsM6uSQ0(wD- zKnw=@yZwaNSyl;&Nn)swp{mMZIcPSUO>2vS%d(7Rfr#FFGMp!wF{Uib7%L&Jg6|i@ zes3s3o2wgno~PCUz|vB4b7Sp9E4wE*S6_XlbNcwsFmy~BO@IEx+`9(F`~URE{?K3h zH!pAbMGReGMGusK#&6;+yKT4an=>M+J%(!6*U6zM4OC!=tg38`A%&_KT6WP=48a2N zNC`ucNfCxobGGW$YxD_5^|2fzs#_&!e~>5UOo#7ZIq`EJfBypuNpItd@ePlvgfR${ zNmN0DsuU=Ih{h1$Bn=0V4mnW7M2$N0S}8B|cG4s{lR!$&E0A$c!KhKyp`swGA^<{t z$gI8c0f2z&*9*bd>hl7%Y!HDU5+SI9L3Rc;iYf?tGNbIcMARP?RaNDA?zBot6OA?^ z1>c@)AyX9)NK0QaB{4)Fg)Nt{BmyhOs>bfl+*})>EW)s=3g;YPl;HbC->Y1?_{#Y^ z&&`39{eh%ee|NVx7(V#GWA~na@bs}|3w1&|Y&PeHh|8U00FpZM*2mw*qDwvkKncLa2+tSpcvU?p;C6+#t#;yB6XnHhz|PzV7A;qJMm zdly%n<)SDGC8?rChX5Y5l6Yy;pFY;g9Y7Qdk(pH>2dj zixy*)GK2`hAQ@y?pZjVB<^z2`#(PQOPRux=O1{-d*1!XgO4O8 
z-PqV(dGh;O^9#r@DCOd7udc6eKXlI{Km3s&O|#}sue-27XYM?I_1YU!^`86f&tAE5 z?fA(v4?Xh8CYfx${q_(2@jIT}>Y=J}-iDG);-MM1umYg00wEFr62&M=h=_zJApoOc z@&E(xi6}#!Xm8ZoG^59ty}Y>f-m)IZ+iG$P4XMs4o$Q#tun(}z@cc%l3%N}IH7FPX z>m%4hq8Vl{&-2mqgW^Fz0jE-@PD}1gL>2Zs(n9^AA{a&qNKOc<7xvO$+90I4FVgsKE2OJyk8v2tW) zLJ9#GBPw_w(lqrk5*knqUeyp7=5FLBRf{;Iciq$T>IY>QNQrhw%aTiu zLm6gIfAzEM*G`>!qO*K*5Y$=YAr7J>z%9mBZYyWK3NkG>SrtSef(jTJu>zHYW??)s9+0YvqBt6&q&K9UZY&!$ ziU>gfm>F9zjh-KgBp&pV9^ai5Y8f$|fx=JVzXOHWR_lJyxbQuv>6s~@d9Uz&T$fDW zZu0q_v+}s1MkiC=mu-9M?JxjGvu-%%21gGyU*n)wDyM}tj&>!4=+*m&P>Gg3huP{lc-o`j60wSVDQJJa=kN;Rt zv{MS1b(9&_CD%$r$Yc;Psvsf+zrA)%BtV{Y7MCjKong#!=@-Lxv(+({nWI)@#H5A; z8DnBpPTI|RURYQ#N#5`Go5xN*{@A0hzIq|c(#_4yhD+NvpKC63?pt<*F?uz!v$3XE zuQrXzoW1hKYpwZAFrl=wm@ek0&c1x{jSH7I9yodK{s-hg7U{aDkZ>ygZ)F z4}_8K-_{`F?}q8?o43ZmSDZ#h3#F6op{6KlyBZw0Y_c8v)$N5-JFoxJ|L|Y^(0kwg zBOm_#%fImd0+N6OEIcTc!*1Sa7=%EfCuNo!4sI|Q#OP}!R_}e5W$ku*b8|DZ+-~OY ze(T%IvNX;Dz|PK&F_a_;002-~mOH)fefK`Fy}fPeY&qO<5aMv~_9q|wKx>||*4lRe zYp-Abt*4%OcJ2E0f_63WHZKGEL5O+R_an1IuyI$7F+>nTMVuRwqAc>vdhcywtV;z^ z^{zrLr#*1MlL@2W3sa3=efML`jk)F{u=qYSSByF@_KUff0pNj2Wts zZ7NlyL)B76P%Ur@XxPSn*)xrXkAlt)qL|DMLf>L?xTyiaLv$KdNjMXwltWec(zO+b z0n0x5%%sr=YBs{4$OuCj?*9*e`r+gAKl@+)i(kAT|M>FOc7MH)%_ZILT)B4Q_}Qmz z`x;>@D#v~=33IWPS%6r^ILr(|)!r?tjFL*#r421A9}0$;rc{Jj zGNby`k$6V9O%L3*-#MF{;wC@?03hP7Ril>L3$OVZne1 zfFMysg0cZGsu7TLggY8p0(K6;`@qbMXb^}{bOZz(SX3v6tP!lKFj5{LyEF=rszMM| zB}7JKV-QCB<=0hxf66XNiBc^fdGI;Q6M6&;jqJ2 z`>7ET!bq^4h-zi_`tVIigh+^9B*gggT17nyTFq&q?dE-N{YXlt-D=P0S^;hqLrIfX z-T(xz0#JSAlRpEh7A*k~z+#p!s;WlR6V@h;Rx2tHa^d6F*0wYoDPgOB(x6{nfmLz&=v(IYCD4gp4r?| zlqXge+nGxkYKVmhfIvY375B^W?_usYA?k8t0gapYQJr<=zI%0$aVbmL zSwqG;%EcX@YZPTj(!6iyu5QKlBkwX?Zk62k?2BGfAjH93qt$9y0;oKc!+{z~Z5rYv zg~rISseb6gF1__{F=`!8Syn++tsxQdF;LnT0EnaHnf0WN^vxuxn33`NTRK+lJ z{_e}MT4?;6&s^WR`;J^-*SH+Q%{>11h~1_KZrg49F4!PM#lRR9Dq_yk+y?2Fy`c?B zlCgEF1m34nok5YfWrxU!Y8V8<2&_{gBmszu;2}nkNFWdapb@eo?|Wc-G387FQwRut z53c&(UfWEv29$t_$0QAdgvwwuK-w3k4FL7RNwg8{b9h%gxA3KZ`cMD*U;K-I>(`$@ 
zcmJ6fi^OrStavW(X~-3PQ8iohMO6(4y>7P~LNJDkqS)El84iabgw(?P{Niq_H9xly zf~ewLXMuwql)F?rp$LkI1;dD)Rx8O9rlnz7c<&o|np-PUEj9G<#+Aq4z0&SHYO+p5 z3)c0$-|2OoQRK8Y(Dj|_*_U1{BrKg+cCl<_uTE-R8=vZg%W%{qpC1>%jceV3wbl~1y;ns= zW@aBFL`ZDrLnwO-6T5}%E>NfR7F|k;p*L*hox(}!ve!QNKY#Ap)BpQ_`xj2FzVu)J!k>H3pZMAJ z?x49-wqlTWn|>{NhhyKUG4151=@Tl?PFgiK;wp72mJ z_Qv$YTj+Ra9P~wotEb=g_jWgb`4@ltoe#WnZvFZD-uwM`KJu=#GfxDL&B^9G#Ykv0 z#zI15F_|^Sp#p+bA;JhS8u|3q;;0k|02F~NE5RVd{%{ZwbH_MV4TdpJT6+r?MG<36 z)3grZALZN=sdrRKk|1ILj9!Rn(A^ylhZ5qp>fC(W5Lj#TJa2XG)DV3cSVER2QG8Vj z`>?gSb8>kpwM2;($40$WRS2oQy;Hbtzk_z<=^6k?1+A*E4zq+)&)x0ePU&rnx+l`s z>Gu4xTX4>W%2h}Liv8@1r$q}eRsj5*u0e~YATS{!NS)JFWGw5?^`Gif@mO^o#;Z@w4It{+ ze^v;|04}u{1fU8`!U!TND!AWOaN26;J0=1pRfVWT#6-2Mn3V={!0CMKvd6R2LfcQkg9@()MPx4IY1z+b9e7)6CZS2nFT`}-mD%a?!0%+3`3pq;hTXN zH8FrQ{sMdJ*2;-H@60*R<`NHJn~4KIfMoy> z0f~yL%|?cdh)~IhOgex=9e~3oR%;$v1~j&Yo?;9JOa%}XNW>u_LKKMzNJzKn9Wm(| znnDB*W7%(NhcUVv$8+Cm`7`f(h!}twaO#mU3O7*`nZ^t!U;4IG9su-EwClKrG=bxB z5|X4~`NEpQwRF`)-|RR0;#KQJq&$l-ZFU^?1qe|bm7NC5y%<+_{pgd?IX z%gVC6dUdluEXcSR8^5R^4DK@(Pe2Q046Un%adj!Q`J!U_Nf*- z_F)u_5CgcRkz~d8?nd4^wsG~vKL7Fm?8{&K^+#L% zPd&HsYU{p+n;ZC@W!2%gu@}GdcKgtO+iu%;!#K-a9=s0$6YHvK*o&oY<*rJ=+YpgO zoME8Yml%yE&LxK(fNWlUa5DX)pTxPJ8awuG}{#8Nskp7+Gw{gSv zy=AE%Dy3*rCjzQfFjYjBYV_b^#=i~$C8EwNpZ+oP%1`{{&zC>-m;BaR;bH6LSE=1> z%r7==8UY&^(8dIflrb}o=Ultp9`%KUJ*OKK1e6#N8Hqp;5Ic))Hw0r0kqIFnVxBjN zh;zocE2t%E+U!)KRV7s!^0ag6kq;rTD1Z_ZNEu!1A6v+{DS!!pRiXDic2_NBA@B~T zBvH|zk6;i8;SY7-ZtDsi4Qi>h2mm;$gygKV@8VF3Y3T8q7Igtpt^pEUt&ZnQ^OUwY>CUwQG;>zg}y-kfvB zhG8Fk(eHPC(e3y98*53{5F4y*7Q?Q|L1fU>Mj0BH=Z?QY@a2E^ukZT8|NIBa^*ir> zz}UHA0m|u4=^dhxNX%q%RfqsSj$R^;iYU{;$gzqcgoI6vXNiLMvESDsBIf9UEf515B*COULZYxWbsAe=pKSn< z5D+vfOiQDT&uIqg@PJYHf2`qE(FirhBt!&+hzcUAV#vf8YoGTz7qPXrndP0cXPL;E zWPX0W4qQu;q^gQ7^hUV>$vh>D;1x;?1Bh0&jx>rf)`4s7cKgF0{;-fWmOC=9iV`7s zZ^=|X0;vRoC_orvL~Cv0st`&9R5HM%U=Zc0>u~<89-{#XXICj04PMJUaV^*PEb*?4lk%ro)Hws(F0T;?#CU8ga}Ym z3$^+O5dk8o)JJ7tHc=&dkbp=?Tw?*VLI+?HZU_j7GLAt20EmK!hItgikAR5QTI+|- 
zI#IRE(MyY{6a>Rqm0)a1xLXY+SeMwL2dboz(y$+TMbYWBOoCNWrD%vvs3H-C7^|v^ z8mzUsb3T^F7?r?)sbNK}s-lSAn1n~-RR_WSG);Y|2^SER`|i8%p@$x_W+b*ULF`95 z8M9$@4{C*DjD?CCx^sT62)Qm;oE%l<3d|r=C>6 z7!!RJV{|qJfT1r{HO4)Gl(7~EB65rXQM~s6;JtU&)U0Pa6X&9;GWn{iq9CH0)MUh= zl1jrsVTf25%LZcv6=?!TlDwUDgZ3+bDsio(^?ZPx%I6##mKIhPN`S7w#cr{LjflAV z`WvU_m(mnX=DdJ2IoF~SQN#x#A~v89M5TV`0CX&PQo~zgI(Fa40e~PeMjye2ApL%K z(C>qWMx)Uyf&<9WpaDh@tS2D2b<+df0000A-eOsM!{&EftNKVXU;@=0#H?>z`R|y` zauuJ2u1;PA#CrM%ISj=fnk)`;14o8RAR=O~%H+8^K zu^KnX#60nCpKt@Ct;RksHqHu!8eI@)!13R5bl0TKk1hvp&8C6-eZdxVFgk5Qkl?sM zCcn~{W!#JD1W?eZdxS6|stS)q_GVEyQ3UmXKfczxpj&#bS*m357F(!ZY)3oXsXC9f zEpON_)3>_S>1bAp@Te&tiY95DaT$A5Ht zSY5vK>U-bwo|c7XW4@}Y&wu{YpZn}@S+I{i_~@_w`(HkF$J`_D`Q+R=cWh<8@N*Zw z@rBne_g@+`x4!Yp|N1M>Tt9ZY8R*=?v7(Za(xF}jn7g%h`>xt;df>L*w(o+4X#3>= zfl!Q=o*aVByF)mxWlu_zcYtu{3y4xi6t$*|#6;GT6@>`VBWc1EW5I|D3;?L43KMb4 zqivRt)s)uT?_pM8VjZ>xQW=MpL?&F}?nzhViJNu(!qb0GmgxLj|E6k&(tZ8vE5%?i zc)h!|(d{hE6JXivriN7Y^5z;g()nbe)oL{w4FD+1vc@%Hj6#V>jp=xbQ6tG0 z78a_?CrN^6%A$y>-g{OTLdZ;_KDxw)GEkCw6Ri8-u=Xbc<}TK6LG*m-u$U@WRV4 z96NUG#A5bmf9U&uraLGrS>5fHWjQP=AnpwYy}_`k{Km${=eOYMPIq-smM$0L5Y+n0 zsMmpSbQTC82C)(ffB=R$1@;HxF>8yWMk*$zq#1zw!(NtUQIu@c7MEKd8)xr-<9aH? z-L=hrQyL4O`>*q7zVMSj{bPUlFZ_RO=g$I^3{XNs!@=&>YMG>2>zI~RYK)3{+oXK= z?e`vk*S&ZAU%SKYwT&z_bDb=NVr>{bIUM*)<%yvD@1J>f&~6SYI4SY!R^Pdzuf$^J z%M0H7)|m%Z><^gM(#Gp=99uD&ZF&#JaCB0Yil|)sp9B~w>8-ApE`DIeSN#Dp~dze{`wbQ zymbBK9p?r`A>;s<`~YrkH+$P|+qcJ5Rcf@g_VO;1$`otWX4UQe{(H~;*$=+$-jnUh zA_9c5JnP64oJkc7c#qVJA*vW-99uB46iK6r3~W%nIHBN$jqw#EIo48rQ2+#Fq@7G< zQ5a*{8YUQlzN#PqTEhGBM=JIJ>ZYV-jSj~1r?M4EP{lBYLO7t7;A@X6rBlq)S|kzFbR(RL_vXNAI?Y$ zign$RB0^b(Mrr`r5Ck;D5S&r(6Seb6t_%f05qt%5f-;vGN7k@nL{ask;;WkAA;3l> zs}}_cG59J=(z2?=Sv2Y;DiI-zh*g@v{i-!`G(tnFsv>3uj3HQS*-%mXbQ~}-j@UKV7azeG>1e3ytgr8tvwvGYOP1sY&oXt#ePqCaKXtp^G) z?WCn$o^Pf~y{-Ps8*lWtdr8{5^MU*S!xz6COi0?xs&VD~IS0@i4$FW$K9;Us_O~*J z*Dh^PNb`FZwgw?d?8e5R>7sMau|Y-vMTtsCpi%d{!pFfR`$R1cG)R`%UQvb+oU>Un zH_Wr`t&KrT~bke`cFY1iMv(vwdgz@Jiew0A

O_EC*aA;g@<&(_WnQ0Cf?K2 z3l{8`yuT%z{^E_N%_(a9;OCRVNWaTj26a|$GddKLF@pYQHq5k`&6v+VGCreL9-C0Y zGOothJ!GnRC$tJrVEr*?FuJSETzw8q;G^6Rv%K)oL;Z}QXx-y3&bnETeJ{I8C zia%JNJ#u~KV;#NTO-Bq&*T`BY5*m$0cXxYrb#-B3Nkmy;XJ@D1?VUM&$^a<9;@o`Y z{p#v!e)1$De(F=7`rrpYcx-7YOHvW39qD`(v#jB=1OQ+hPFEIT<@m`@{^*ZhUG0AH zOJA~$#>I;lpZfBbheiLbPrjQJ6^J%UIQg>IA;p$x{78aLQdS$WccrX}LLoZ;8sDmJC5}UaWny7%N3`&Fm z0_sI1;?eU^9kewadOUVx3=@Xnn}~t^U1wW>21?5+CJmrcuJs2S>)e@j{n`UR@#%l} zm+${y{)Zp?xnHQVrL#+SZCtsQ9dGBjy}5l+RaKKWc7h@=&M$(EsYzpu-EOx&tA`L` z^deG3jxl;)iB`TUtfl!*i0FA4?RJ})1Aq}=5CqXsDQT_g3>wj6 z282Qoz;R|9nFp!5#MBPMs_%;$B`6DTt#d963vV4msIoj?IWgC4>jVsyp!M4fT-Kuhy0tv9uz^X@kqbE{Ymk)gm!(qd@`NW18%3-*a zlKbs{we-}lee6&Du|M>u{%@|={5#)x_BUVK_^o&U@$Ae;{>oEd`K$lW|6lspdt24k z#xqYt@%0}+f9kRGPb{z8eeRLWq=~SPDxQ-pv<8Dgvz;_s*=84Z!XS0+w9%T6Jt39| zr;g7*cK-BhS2th2cBMBA7hit~^84O-uKn`m%dU0D`_5$7ufI6=z`J&Le(Y3AvA0oR zK^lA8yT_InhUL)Nmd08roB)BWQH_N!IcbizwIPaz`Z%JhNGKWr#Tv52hPl=b5l{jo zz{ENL5Rnk1mI*4Uk$@<8An>#K#@lw=zWp{? zfNE@)hy+wbhM^lmxId#m^NDwS?ClQ&Nk4e1ETVYtgFz$`R02o<8L0pd2#HHr04j*X z2qf$%Br-(U@lga85{GICMI0E62CX0jiS-dhWptDqyZqzGa~%;8tV0ayeHEDj>$5Bd z01ZNj1jr*NMToVw%tW{r_Hqo=HiigCf-UtQSb-3P2t{cm!UG^9Wg%uZh-B&%^vcX& z3>YZON>oLp63v<|Lfl#H1_+e|&MjGE&@|auB0?ArheRneBVtumH4d*4ED$pj;UGxr zxU8b6Frl?p6f~$M)}VmmoD0Ap3aSFiObVzPW$)l8h$s`+eN{#SPzNoJ!Dg-Td{!B? 
zGZEGK15~6IqpelZCwk3FI0i{%;ti@WYUlBB9BWmdk>4fmA7f+x_fe2aogbjqdJ;ka zL_!)(xe_BF*O>=qfjU#(sY*mVs;nAIA;uU52#JUQV+;_*S%SLaA!twl%i=lM1UL}a z7$5@VqIYICwG=~@0DwVNsiPbOB7i2AeN}nyqjIeeQxA_&MMV(8N<-9`B?$vc8DWwF zcRwCS1lHORqa~AMTzV;sDodSI(_$bn1`2foQyw_yBVuF(0E)m72n7{IkR)idQY&+* zXbgxn0bliyxHiw&hsOQa?}2X9qC*FjKn@myhs3@P&p$V?H-%0P;P^Atgl<-EVw(Th zgDIyRkf-F z1iQAme)alVcUZE^PTg?^j4ev2HoNn!W;0FZQg8^@suEym0y~o}&%2IY*l8y7C+_;y z&wpUaPFbI?%L^wHW&jisH(X}mfNbo+56fvzxnWk_uq5JJ##B(`Up|`z7L%Q z4Lcg=z#t-KiU4(nl2N`E-PhDd_M@tLk;FP?j0&QVC5f3|h`#Ex%@Z~Z5n$X`8@^I$lxt1kjGa zY7wSVi%)Wg9HFL*HzWxT2*Xcqc{rYF@|Ef4qFWpWN0zz&lN%LS6WRy9GFh7GO%?IK zwQ>8cjEAwZ&(tReKX{Xd8bxFxiU{uSh)HpWV@=}zKn+9`Rl(6IsQz`|7iDs*)w+(Wl^(1F{)I9ei=O_%ggS}@wsc;gJTQx7oPpHiej>|842V-x<4258Srf_Fb@0^^M^wc;%2Zn>#&NF4jJ}xbwdE{-M`j`QnAI z{4;;$({|@#`>MP9k&m{HKmO3iev-&Io13(`Rq4}Lu73Wxm!7+L=__%rRu}*PV~jJF zh!AO(8$9hl*y4CsT`v)n&vmSDeCz!2?+&xz17sL62Z_Mz=KgC|04A;_`(jZ=WasYp zxt}@zpFef!fBJ8J@zfn>vC)#=?!Cw6@7dkj*?nVyi=X+qKXF%cDFU$6;m#aF5DCT@ zW6Y%3N|XpeA`?&a`-X$TPO>W^bMx~_l5Fkl647qAUlfJSxR$5P^SmfMSyNR306LZK z4EvSg_RrQuWKi?sj+k&AcTcpwe8s!;FnIE21H#e&6@|edaD2CeVJrPdhJH zNvhp!7zXW(mop<}ICkv3Gp08v*l>5~lSXrQck9~P<=~6X++v;lR#c5KX(M;eS+;}V z%ihj;E+2aPUD^8{HhCKi0l=@lc=f;k=l}ZkVKBDMVt{?w?-}vx_Mrfi1`n9oa=<3F z3&;E`zYWj-yWBND_Ot)@FR%8Fz2}{cx&HHC{p{1f@vlDk&X4@3A9?ssgZui+&;5-E-OzO=ivd*S=8{qpNy`|XE+_#gbhCw^i)n6>h1So3nX7gCpI!9xhL*jx;; zi2cI5*sv|kxf>-`y|&37J)S-|e~OHOciyF@9TDF4_=8OVl;P=fXE^}18h}vFH-m{) z2ySk^8(2eQw_hgKrD^6-iz=uB7!xeH;jo5m>LWS<)|mhk1P~!2%8W)4iCTLzNeBT# z@Xn<*DCR?@P%BXFWBBT%f-Z5j>ctr4A?IAP*{qzaqEsQaI=O0e#x4Lo`0%6si!XlV zncsfL+_0Z|7wFvs@}?TWdqQLc=DjSwB3iF0qwAv>W1TZ0$WBp$D2kfg zu*F1NRNNg5ud}3Naxx2fo*I@AfhdX$D{pO569W@WSyiPh2r+S{EURWCE6Pd{1VoW7 zKpkq}oD+!(F?9|CC`iQGKUNtP1uPJdipW^14}iwd-Z&8pOu{MZ0L(x$zsIH;qXa}$ ztmy*19oXNAdwnOWSj)($?DzIXQC1MqGJvzpj0zA~0|+uR!?vm=BoV7f07?=` zHp^jd(hviaC1e1N2r-txJQ((zb9s`*sP!rkf)KN+7_x-K5eSF{P{5E?+!LRfhD$^M z5uLNDP!=ATow38BO4&~L72qhN10MDGh**aQ`Un_=SjnIucrUrZQZ*5%1Cd2QWMt=?vDtmD4*o|Ps8jf$#Xf(>Q 
z9Klt!EFq%zUNjU%!OTgX)oG2aHHg@-WGr95el>>561L~(hQpGLt6B>i6=X}tSK93| z1Y@m;wAxvcr#Vy3-~s@{>E+`UL%$rpwj-T{uV3uG(cQ^(o9}%5(MR8PCWQ@zVTwD2 zZ(5r-@@SxK+3#}Sz76hsJEkK%b6TV5C9)Z&{%*^nSSt2w0KYnwD z;E-C!IH=;5-8=SEYx~4-4iDE^Vq7x||Mnd#k5U*z9tUn6@i;UJ>k<*J^NiGd*!WGH za14kzA|m&}lyIQr{kNFWj~XUK9!}3&coc`BH|plYU4S?-U?$c80Du5VL_t)OVMIAd zpyIxXA|9Y)QI0N2@Q9{{o1@332aDW#zaJgNGTRP3?*UiH$zR8G>5NFMS{krUFoURs zfQkqbr9OYrm?1dqiNCLF#)q|fGcZv!y49zit(kX=3&-sI_AvE=*TmQj+Ii-Jbra8a zVCdesCI`$U&X<+fn58Ka0TlH;o#pd)oohFnK(sK|{*7P%wbN(M-2cD>3c!FHt83@a zpMU(xw}lY4x3@1|y#gMKLGLSH{lZ%wd+4Eu9+F7wtJg2ScIE7;#XHVDaQfc+{^no( zOUITLO=Y^9YiEux_(6BrUA=Vajq6v|?zrQQ9lv(h-FI7)M1Tl#eY3mXTV0&HduQR^ z-+t!v)#BXSh|tS*SB6=-F$U>cy&;1co}%@Q*!iu$LhE^PJCLgc0Beb`))VP?~s+I zWu*uhL45*+VnZ=^;wKm2IOScvOGvEv^w_zFPHsW^#@5>UrEbgUuU+2Rj&3>jcYo#Y zywThK`JewY^PN+^^aU2Elq5+l&M+Jf>m*dxT9qnqG=K9D5PE~_ue|Wp%db42NZHKNf|eh8 z@GVQrCzUd?DH;%v0A;gO0yHLFUg!XjuVlB^k3;shDQtCt=aOX(m3V2lIuT9f2YEBe zk~{<%Z0|TCNgJ(|m7)(pN+L=V*KRiuVRdb-*XzY@$qBbQO*h{$X&QkBW!c>weE*rw z8}EPe|NVb{p)%H5JM4CwSu#hk9^;-~=M+PbAc)O}%5&wWyz=Sn*-v#c^9N4-7k~ca z?@#1?0Do3d*EhE=?R1xHX&dz9-4DF|%mZqn>UOITefZ$Q!e6+|p0D{UJDXRoT};TG zTs~2hgK9WvyT;sNCQwBHb!by(gT)Gs(9ra&Lf3t_lXn0h<%~=gqW}*{fy;i=HHHQB z2HUn{=EM&C?vAh*n~6)2ZGvU+ppdlK0uo1#LLA93kW^KL&cXip&6;*{FnR&YsthsO z#Q7Mvwzo3l(lo7s9v!K2O9;VP9yz+kSYuVjm?TMj47FDj5s66A@B0|47$m}QSY*wH zmx!Vsr3!IQ2%XhR&a9Tyf!9(zoSSV0sh@7JMAR^?f zFcuI>pBZZ$MU;ws;f>n?S{J_vuAwkTk&RYb)g zG9e*Ij4}Aim^m;E#G<0YpsYYag0UzHqN*|qcgGQQf@3~@55+im=#?7XMnqeC#EwXT z5$b1cBC!O3Iyre+mI_psVPj(>nV&C;qM0|eW=a@~A}CO!-A({q@I(xvAp~UcDp{7q zs4|k7 z!0B_^tq*bErf7750;q~&jKToSMil`>L{tLM(b1Wc<&0ERRW#JdjgKCfu-;~giYO^K zHcY~y0*Pv#HKbtbEEJ$r z6ommIHi*Ci1Qrlt8GVpOp7~i``8B~Dp&o4YRnAzPN=-Rh!bYBkN0p+=AifX;LzV!8 zFU!aV62}S}A=E%85Fw&c&~f0+UIAk_Tbe1NasUk6}~I^d3O-bu8R5 z@tM3ycgrJT);C4IZg9bLYt-#_z{jQjYmBiDH>`79Nr(#3Fp^1YsD1J#h6vqm_oa)k zJ#gQ>M6kNCwz0N~%t7S%%E{I;`R#7rX)Q7bi5U|aGz4U&)OfA8dvRm43|dyQIKS9# zc22fob?x=FH@<%E?t6~SH=MBn0+vHuyVC9UPTu_>qU{B^xH;g=qDxLb_U_AHdnPSK 
zijXA*hq9F{N(4@5q@6X6DyyG8#xUJVd*CXXNoym*1o^dOe26iEAQ~hAMF3}P0#F4X z2#75+X#`SWfNy^Hg93ZF^9?wtn}Y8rgY``~f}6;>`ldIU{$Wz!bS{wTt-H#1^W=DV zD!9Rwh#=@Vy9DdE#EKHEuKCy$@VlJxxf^Ho(1%c4Ekx&NUDd#l~D z+I{kgw|wsNzxlb(|JL%tiSK*whmWtEx_UHwGcR0tsu~U+x$lAHQ>OuaQ4PQH)N}v#!sh3%t-snG_J#q|r>!Q|{xGD;)28+6 z<{MRTc}idg>zV?mje-8IVh6yCCrSX6T1gZTfNwg%_U{!VRZt`Z1IPg|O0DhiU0wCQ z8|@%!6j`9&uoGes(Wo?I_i`|pXlxM@0N{ENMTL==Mg&dNNI)^5SfBV(7{-b);cA!| z8*Lsc@1!!srLtp+aTMFHs>CQLAymv<)BPw~ryWFORV6F~bkeXL*nwmfQ3KXJWR(R7 znh1iA&Llxp3`GUfA!q~D2N105j6;hqU;q@2q8go{7()zInx=&(WFjI7Y7`ksM5BtB z@l>c05ox4kr&MoGrw$eM|0O_@R3Yx5bv6)I+ub|A@3ZGVas3~?>)(F%H~!Y&{?(;Z z4~3YKb*boMcb|dtP?lxC-!IE@SRwR^BvEEQc6{aJ$&*h$!nG_=^p*EsBy9Am3opIA zzPrm=qvzwb?VUm8!I+|#x`ez|yMyj_x3@7E6cNa!+pPuH-R-9BOVX5`Ym#(UgGF{W zZYD`$ZNfmR%4moT03?3sDfKp1#tO7-QF67T#xV5-H~`jiyS3l~qk2164F)Mg^h0Co zWf@o~;ArEmxbI+ND-wVx-X^93iP2})R#n9YQ7QQ7Y#xFlu(4DXg_@jffH))%CTVvi zsFXQJiHT@r$EtM-M-%~Oo5n#f3HxEi&Z9HIqroZ)Y72F+tt)vLbi-EE4Er(amEc<| zx{IsdcwD#t=knI^96K(TvMdqV z7$Qm#u`EmPV+ek2{bE&B&ZV_de3oS~#w1A$S>vp#0z#;~HH3(gHVmRuZ8REHRc&r> z6-D8_C$qk;-VgiT-d0h_sZ(z|amU$XE2nB@n_8V&{15{I0FMwBDNA1mnjm7cWm|1$ z?R-5TLkIx*_VzY2XU>XiscMjjjLmif08+_BB+WF<^17#b{oNQNGqzfdWNCqjj4?=D zRY623##U;2s-Jx5?vLDg+z`hYov{lG3o*t&`_KNvYnLu}gL<1>@0SB*%JP1Y7WG_d zg1T!|oqF&`R=Z~F^)LKC{=y&mi+}l-78e)i=H`fKzB9MHxKzJNRaq7xNFm0UJkMi{ zTe~~b>kZ2?@yXrC&omkhYc0S?HfmTFmaWEU(ZmqF_bzb}b-3-VwF@pcUIDaU^y|lo z!Btf+#)!o(01&3o9}ZkZFmr^+0w6?$2)VNW0LUT|s~Xk^@j?WS-QFsB4gN>0RfILuAo9u6wJ1U?P82+Vq=WahdS&)5u9^LlGNV55TW#y3T9d6y%%4$ z^4wY5ulz#0gQ6i0hJLp!2V2`~G=J*&u{jgWu;0rvc8I@I5@xd$W~Gz^O9P!+ryMV0tXZjOJ~8Y^jx6p;a|C}|_{!%{~Ml(%3+wk!uW zHC2GjC0@KmkTOO_5m15gxmN%pF$kst0U;P71TLfa2x-!W;p?`uQ1rc17sw6<8}C2e z{G*ROdisvXLR5%BM1leW0f3-bKo#_g5iJ-6P*h166oV3mh(RqUCIXa2*+3Wo3=}ky z4PZ^%A&8e<0_gcfhIMWaKrFq~iDWH;8IkA_lmtK!)L09#;1P#s00RITW+12maF8T5 zWfhsx5>-`2#4(0C1}KD3)5f);jdKv%gfco{)mn2&eHjta5+h(p3Qvi`j2gD*qB`HEReJ-C$trSPV$KL)_qY5JGlq>M0z*EE|rx5`}2%w;0?6VIL>ye}y0DuStpb`cIbP<5j)GAaVs;ZJD zm4E@gbyCFujHGzT336PC37vDWwfDzj 
zOlqZY6rFHbsbFG;06|2fGDH3NAO>(|O60?zsK#lO?H~vekNl!RGa2*K992CVOnm?A zPgJ$4syg%##&F3v&}WpNyxuu8^T_*CR6z|f0C?{aF?9*l>95B7kdYXdpb8?0MDLu- ztkD=C_)wJ#t^6I2JWNE3`RO~3pRRj+91|sayrcCzZd@f$1&lEgD4|?@{v{?jwXje+ z?&OXQ&MRdn&)s$MToM2fHdP1xxO;t&a@@Fl?TH7T>?v$hEK+F&=~%osX~#$3`)|MY zwRgSiJqahuX&)mJkxES-g6eddT+u0b3r8TYs`5A?LEQ&9K7aXI;Ti!zgT`9nF+^Ym zsN==P>flNc>TIRD*S!;|`5L)8j7`_tj|Qps#1#po0H_EC_9JDZKHWby_X-@-2IE0O zctd`G>g#ptlKt{2hd??(M9~;k1f@<FWUw#6s*v zAP^sFQiO3T^~29g$hE}1PH_zYNJr5{xHs4i!tr`kt^!1yojQ1L-96Nk2%|L!>iNHJ zgd@kv9&3Jpc{(ya7;;YhXeE zjg==NMW}uI0XfzqHfTLq#@iXFa~%Z+06idicOWQXU%SaDDHYN_bt#;oxqqPMhk6YT zB$2rJ;IsM~*8RneZ$~QizscEWlp6zO^4;l`Rbrel&V15Z&OSB)`~>3VvBt52^r zzfwEB)q_oN1O(MukBC_B022g}V6CmZS5;8srjeBgs0_`t{1hDYD>9>ZA_`1NOA|D|VN`s}sU3)|f#+hk^& zG!`-<1b~thu!>Oh#W~|31c8vw0YEIgIVei@UAG|?jvaz9kW~gjLID|d_9%LQ2mmRL z$3cuSVEh(t`n%7FwHBGJAe2yv1tp2m$M3Q3{7#!kUy%A z5;4+zw8OGYr~?=WJV^69-C*Ef_-DOA`;YzHpISP0S9iBm1ZE^u5VVP@=>hM(s&>2G z8ongMjARU1V~in7%w(8(Wg)xcz3;AlQhX(B2o#1xF@slnyF2R}*LyufX3-{ROq#o- zk=vE+-sUzGr(6E_{q#o?*Z%DF%U`(g;>E$~&7JLy&8^LH=n(^v7(zCg50I8AHQ}Xa z^yOd4@47cU{E3%JY7Bwd8g^-F(j+1A<)ANR*?!}f3d->8gPJVv_IRY)Y@&;HbS|o$ z`>2*#k5vnj{?MdZg{);sCJ*9T(e*7yA+{#QU`!}OzaJD@t{QgZAX<~`mVK?RWo%on z86-uD79C2AK{K+9&Pyb9gV-aZAoD2)UlfKrJN>>*7I)(?E7w+NdtrF(RGS|O+qYJO!Tj99+`{s`=NEqPZ8QK#0Njs!9JK<#L9c&(>$;E~ys=ePRV@L* zY|}KYPkP4C{QSK49)OH7F~$&rbFMzZO?xknAgA#)Ap|07x7!N~3(PE_8UsR1ETS<0 zMg)t>k9oN9=6>uSQAdF;+4$y}4urbx}WV4XuxL~5P=iQbO)zN)Iap_Z4Ivn+FNG&|LQs;WwHL?4hsuW#qDp;P{VcWIQ_YTu24e}-6~p0hFtDkH!ZKuR=9-Pp zZtrrQbhPxZzw*kxcirDgvtw_0?AI=CyXzaOm_dgefK?TVh(Urvj6~KNFTXeU17tA7 zN);nOL`DZ{BuK#TeCogNDML~v5sqR+gY>H&Q$&Jri~Sy5G?75W ztIthnCCo&_06mXEO^*FMAwv>DP|K94VQp`?D)yvwaW}du)#{C>f9&ps|M6%3aB48b z5h_GP(;mk$L;w+_(K(_%)*}UjAWB3=hyz##;)EHzg6b%t97!-0#o%2CUCTjZL`9Su zKB#eO1_UJp6oj$J4FUl~3SNvc#$fHo;1!CZsOP945*2)`RC#hDg0YYG#DC2gql%LW z767Ws1Aw)bRYOoBiU?@1@BIs}ytKWu^S(zPSB-#R#)}3iqG6)FIe*$OOl2SF14ltx zwH)~}+W8=IcVp+l`s`cZV+zWg2}{-QChuMU z=E%{kHapA>)KMizUOh|Hd-h$lc1)S9$aU^%;#(U(({$1Fy=INWddbEqn>^TI80Emb 
z46s+>ItNLTxcajC<*nV40l?OID3ohgkI^0jk?I7SH4H?IN_Bkz_VfwI?>p3cDj08# zMyO4QiU4Gyz)r8*?e~*3Us#xT%mnbnTON1T_~t7ilf=>Lz55IS{zBcShrpRc&x}WC3NeuB(-8j)0tMT0FyVQF&9FN($u%=ixE`G^!-g0eavjZCp*&vKJ`PGh*RkM@|^q|=W^hrF<19By?>{W2qR0B3z`5|MD= z^I7wcs!S%9$xu6R{d8%QN$CJ7LQ~^&x>mEs;q*Fn162J6?Q>KF!;ufDH!f#VpZd{p zlu~Mxnto=xf#P9KXNpbUN3D(L@B`JD>;n%!^^x@)QTPA=f_ij=%(4Sx<_0+fM1yG1 zaqg?}goe|c%fxHE1`77=45z5;1NW`b1{n8#y^g?m5v*;ptA{afH@Q)LuNva><0!HE z%871sSqxsd@IsO#W#Jc>m*(c?44E|HkA3u`f9D_luP4r5{^h^_zkUA?y#L(sb9tlj z@Z)d$NB{P39DDBR4}bs9pS?d>?=QUZ;`1ejpnh}r`W@#_AHVPXJ3spIt z%U53f$6t8k)hp{4cZ;oHx}~DAjWj!#rxgW5L^NV;QR+dYfsfwDey_RRXJk>hW(5Tl z5CoCTXg)CtM+7&Kg3;eqDVD!wlPc6JoaF?fv4Catv53a~$JGuj&Zw|jRRB@&45~wTW;0X^Ozt+A^@m~mOK{CG>IN!F9M)tVAqSu&{s|n4OvSFq-CrCpq<*v zmr+!MMM@Y_#t7;q3=K6zq{iE8;<4T@GX}J-DtrL99h0F_S#&wKO=zO`MgvA=7FNq_ z1uzbGOx_{}iZKa_q9T$yj1WzNQN2{2kuVBIN|{R*^?@B#S|}NASw9>Anh0Q1Tllik zZ1o0%tkD__1|=!SSbIX@-uX%uWrzSg-^ZR>V+YyxXaB$R7k~9f7Ek^sf8$fswpv>(`2n*EgmiJGh(qw&kNAe(8&!{l+)GfT7#( zU6*3}^r@8}|KuM$fA<5O<#X$h|LtG<%u^e^UC1`O)o#Cn@d{@X=bWq8PF*9lP!+L? z(b*ssAiC(FiULxkiRt>H+baxX20kD59+*$&78bPhXU?BE(KJ{KFSX4m-hl3cSPt(}%4TH%-5Fe{FPZp0S))+dzTsw!Br5BMJe@DbxB*9v1Z8F*5 z_WS)LNe*BW_5Xvxpk7~-B&oOW^)-$i7z^q_MZ79TK%2U(F8{D6LXsqt1Mu|rCCf4x zvjri9vMlQpU=458>-6MXldaDL%bpx=z4w#zDvUAi$=5>&5@P+obFM5)%W8}xmQI?U zxc{8UIaOp!3IWt(4CSEDE+I<2no_{9R#jCuSIrYl?q1h(vb{*tG|Mt;ZLO722i(;4 zQPozbT_1q!$7JSOM7xpY^=qYLS>E~qL(~H&s7doqc{l8R?!B+SJ00C$^9lzJ_G4Y( zWOSo(Xn*~49Up*z^?~17i%g(k6X(*T=5LCkD2f7^m^n|A5CQ=Zo1!R0Bu!nf*R%b8 zA($<$0QlkNhW6H$o9$M!v!;4wZ5JD+smaCe4ltr~F)9Qjo&c1PDMqCTK!{}E_vSi? 
z5;YPb#z+v*K#I|@j(k=A<1ti3RfrZnlNzW>V;qWsTck%4jpze0FE&}6Zsk&Pq?N|p zoUd~&9fK+AlTFuMb*f% zDg!WQsmFp7dA&{4Vl5D~{n+-&dQqd%1KYCdsS zKm7RnpO>b;3}ceO%sB5JE@yoG(eud!o;!G7=KXHaGJ5nG-u)uCI1Z<_F);F@ypcEV ztp3#*SK^HKLSmfe2By33FbG(0R}MT&-C4|B`!P(M59;?lP1DJKY!X;d=OsS;@=o?q z6Fh6;>^X7mtpDHhZy=5_YV^c*zTH{q%++x+5}>F`RBs3%%0jz?L`6|OaQwva<-3w`y*2jpm@z(YYCf56(mFb%{uH3D$srhYKks*x2eBUcdDL(u zayon8as&?nQ}dpVTR?9`xMr#x*_)yc4f*LIJvl*4K0orEI?Ps1FGiC}PWX%i7frC&G{DRudX)GQ?ias0 zJlRebzCBN`8T7ztvE9e!)!8Zb!R34HuVFME4yaTCANl^)r5$Ke1t2mo2JR2E|8#rq z&4)hhUAHghW~LqHs$mpuFiqu+0Pcy~&;i0@);NLvK5+GNKXEslCdEg8C6pKmu+EMn zB5}WO;$+RLFG!M3XKv1WzrDTv!VAys?rbeDFP}JZ;-yz!dh7c>fXHVSJ1@O(;T`Yr zJ3E^$Z~SL}@IU{@zxY3G7B4>c>3>lY-u3YN7t8Hep1*u@<-wo%sdowh8EBnezPQ=@ z)Ng;|r7JuC`ub|+BXW}D*?h|xP!fR6sNKQ5cDXR*CS5;KP$N(4`xD%p8@Fo>?p(==g2@oYsv-y+x{K->L z%Q7p0YN$1a4D=v4Fis)*?uw%T7D0dHJ@u>ng=l{zO+_$`R z0?-Z1_>Bu+D~e)kYin(7ZF~D#x8Lvedc)zc8km#@bMJb8FI~XvTRYtOYHwQyTj#c~ z^deqbT|IYt*YiEwFCA7*j_W*g6S;ro!xF97b^%>k`NlmSr%;CQVA4 zmG?dd%b@`RaKtn zbpS_g)4Px~G2?dd)6Pnh9+{ld>&C2o{6V9{y?tJd#fVCXh&_a8uyEe$O1oK9A>J+-< z#cP|7+*Ez{rSz2*dya1c=O43_w_;bu|HCj0qu{)Knq#dOc%ItJ(B%500;=GF4Tz zwI(4V)}I(->Y}a~3x>EiT@B`VIm_jG_V}R6{ONP}PjmA|UA4O-U5T zj#mc`%?zcl{Z5s|XiO z+09%pv!WbkihfM|g%8y8fK1Nbacu9=$9}1B04O{vU+PUhI8D&-@j_XzZk8C1gd_Lg z1ICd7v+l-I&!r{B(qb=c@eOwTlsBqJHU+3?fN(@v;k@slIk z-rf$4!!e^}!qwZ}ls?d5qs@iv&2fS9NiO|~u*fM&9Rne5-{>xYAZ zF{bvVsCTfVEe%>EQwM1gDk4-xRhGj%Z(Bk_>&p;hL?I+Im*;bNrx=DLOH2v?GQ57Z zdi~M|kF9hbJUhR1A^;4%ym(u;KTbw~_9cJ0a_1W{!-8X*fNq&EQa5js2 z)6Kfj${)m(eZVt7da19fWlv9|VO6c=+k|4g!-?RsBg6 zzrWZ~E%Z=!^wBA4Caf>h?Y4Jnn{>f6DEwyQ@UYRLd3gH10EfGNYGwcbv-ju0mL}PK z7lt0mBe!QzXH#NXjBjlV*f1Q5NY4 z+d*2A<$xUyDI{wlWs#7GVQ@%s0bqa_%y4Fa8O+)Wv0|-5nrn9cgnjx%Gbal_4KK!-% z*Pi}+l~O$Nh0B$w#cRFC1(D3H(UpSxxy~%~Sv)M=%jq*l>0S@J^}<9om`u((Zx~!w z^PmTS;N>jy7+5jr8L70NiLzvs7f$#d1;T#|oW@eZhVXY6m}@bnkSsbLrQY*dmIX*5 zxaJ4-K=_;rzc}}S>r8&GK!P?VjJ69v>sgFaG!c!^<~c zdiH(K4bKlg`mrCod$^ZnW;#9k`=9)U@BZKq{?s4+^N`6yKlO9}#JSI#4HrqFoB9;9P)=jlQa 
z(XOHM>MiCzfDVA6VaI_%WkBWCTsIl_E}YzwEbP5z_XZpMm$N8B=UV^}TPfdg>$Q@C ziuQNgJa|jKhPEaILLdbmJhENq6(fA&FPVPtbDHbZ5IiBOAg0#P3V8%zFZIE(ZN13= zl<2nPfxWe#Vo7X??iRS&Xl2ZC}28ap%zs_YU@HT7CGP?z@_wb9zCYaL&%lD<`8a=0CzlzQ8SXJovOxZRHUIsm)Bt1)_bX%W{x12 zTI(D|jJG@(HX4yZ6R9zHMj;Vk0wPd(8QFo!^%q8$AHKAGY>=XTZ{)vp}9 z=GvFk-t+m8fNyHu>&d^zIEbivapp=A*O>3rIaCL2&phvaw_3&#>E12wSQ{nAAHF}{<_dykUv9h*4kKY+E93=CyNHL zO+XkqTBkNmW34riiAJM{nc20ZKohaIMNza0tgSNV$qOrKKx>s}Nmw2tD$BB)2V;WY z1I%3(diEe8tWyXs6u!yxJg6CP9yKVa*}_PJ<{*x@X`0SYd@gB(Ku;YGn3ySr)YFkA zieOV%k+9Y<=b7T<#|Tt74@neOB+B;8?1h0jXcmhsx87N2d%=jjZTAlkjLBQ)>$)bb zO`Z>j!#vLq9N%>=Kjz1_x651@+l!Aqthl*-c>LSfZ``@w+`dws!bqZSroD(bWyz^P1xoH#%J}5go z$g{eC(AYKoG>W$Re3xWA;V40GBQsMa_u#=E(AH`LjCpLrGmyk&)0 zb4Sg(U_#KTMg#oRm!;PirBq$l0sf1@L+U!1?k-Gq_wN!EaGxgD{kmA~{cC&krK*)` z^#>l;y2m4GbhZ~4;FWFy7O!=Im0)0R2y5Z;rRUcTzhg**VJSj70LU6dlEla77we?7 z+N7e@>Xr~|TTZH-!;Yj^KEF12!uo9($Bw*WL-q@vR?peX>9)5gU7A1U=iUZAxHIJz z@;vV_{i=%L4SULfwEhUP1gRw0B|zeG1OSm!iro%A1e3}#vm(;DQGm|18R1~O(X^^+XTjRKGRqnTt;43OMx+$s;}-~BXMV8)sb8^w1W(p$$-t$$gnc}!$oHk#D~Gdkydr{Xt#$eG zJt86qN>;f+tXpMfiP!D|{X9eR$+N7{gRb4m%W++EBEpL}Rr1VfaR2;uNOtRKYK-NZ z<-X(XYqnP02q(3jGGo0_`=G|_Df94TFumGX`i?>A40H4c1x7%TGqD>8oNUpa);4~o zUR(25)_JF`QBL&P-Iy9>^>~AKv^miA*vlaKvcQOPzj^7k#E7dvNrsiJ)rmM?^r1c@jzMuaA1KH3CcY8 zKqL>rA)A?n!Fvyq|4w)OJ6c@p1q^9qZ?xHzdLjTK2HsE%(|>u3q5+|R9J6NxV6TM< z5RldcmypQEEPicgKuSmhDn?{M5%Gc=Huq=yIZfU@w(mVxKe{)PdIX>UC>`{rzM1VvTc!hkO&RZ5n5+o4;_8K|Un>jwb zclV!t`ZquRqqiP?&Vra~R^=uG(&ZT6yR-e?XC4M&@a}5|@~gl7TQA(aJ_Ig=p1xKe z9K8CCZ+!BPJiPVXbI)y!uV%&!ho!epBb8;jec@f3=Qo4MLUSt#51^O;;Rn9!2LONr zpb&lxF03Aa=8};IaO?(z3<>}o0D>T2Ie*EWzmkiuo2fvHBID5*=iy#qPHiJ(tThb= z1wiMK5F~-3$U3mH%!L6NR7h~xctli09vehdA#WW3bp4VV3KGa$P*`QTsl7K=Mvjhr zLs|g=mRX(|ZA@d`{&cc=&>Vrc%?pM^%57oP;$bi-ykcfy=h!;$83BwUW%jf?Kf8B% zV;7p)=0=g{o3?Iz%`=z~QHXSm9cZEvVbUDD`o-UD%JGD8m-ykQp2@FXdu0Fejo0qB zH2(O{{2Tz(z_yusg|nGGmqW`=>#RMfJ(gY<4Z`l`_W%1QfAE!$yyu^P=^Im4moA?B z#&7=Wul@bMowvsqAHDMU!;Vpx$!at0lX)|nG;GKK87H-~0>D_12cWyJ>U*n4lSwYD+FIOh%z508$H 
zHa9oRvH*ZA%Yq+wK&XR0JiHQa!~oNz7iSm#UI-Ns5fEDMzzQM~C_fM9mzo0MJctma zY~b9w{n@$w6~3IHILLYX;6^N+Q~CLr+{OfwKfQ4{CRfLiOEiq?4%K_U<#O`yoN z^-zi_s||&eCi)iw~qanCP?-xS2R_(M2GASSd9(D;9=s_Ckx+ zF#stt2+Yh**fB8BaC3{QDyZ&8Tie&KUw{5{pMUzH}bOWLqES{3quYN6IvfWxE5KSYnd>wo;MU;1O$uRnI} zBDVQJ7tVSs2-oZ$Vk`@8#&1NByR@kS=-nSM?PsCaw0(y_FXO-*o4`03X z@Kv_nTi(hyKl+g$fJ|GEmZk6*=wX?eK$?R4=}Cus>VcS1JRMwOU*8Mvv9QMk1W^I{ zd2IS%j4LN*91t=(!zvQGywjGSnV*Bz$oonl`@S`&XjD&ov0{vC=w1w(p7NjY;AKH= zfDH4qzpl1*slU^( zL+LP?|K2HHiG4rltezz}UfOOiXTVDJQg6t~zX12+?^o_FTp2HGsh^AA^fhYfeRtmu z$C4(=xbNJGbXnk+XK$W*cOgoT2(QaIA@k`CQ32y}ugm?D2F*MDX7B2Ee&r8~-Vc1D zWsflk6W>Hc5PxF(zc6)ca&M#&dr~^ksTr{UJkPUicS_G-CxFOvvo=`tVyUVs&vR{z z^FD~=UU)K{49XE;aMKNb^FG{SN@oEMLQsI96oeJ&@v-4rM8qH_6KUt%#ful~rkTv9 z7cX6T=%J^6;cx$!fAQb_k6%1Ic=FjN#^X}c)v+49c=Lr9EA#W8{LBFi&}=0Wzxn0gdZBvv?&Tl);74KmT#KT-Q%48BIc~*KrVx=lu~T6Aw{Cpn zXj)sLjm@pC?M-jn{kyLn?cMw2=e}@s*A_+b@FS1^{-6J~cfa?80^E4W%s?sS8#kR) zCeNnzbXr%nZ|uSB4}JIprufJ}DG_Yhde{8ykNv2$j3yW71Qn2CW~GT46teTJXkuYv zAfl>mTk&J+aCxn@IAC>6Mk11BS(ar=nec0%?=?GSR`ZatnKmpun@onqc-qv? 
zSAqb*gvid@rlK6Hx>k*y_}VmEhqg6B9szOSz0Zu<*w`p_!BZ!ejTA?@8no=k!-m96 zowDqW@<+`SDKAV>6b0qhIm_Yzk@Vxk$7k)~*IxR{?D!5g_VT4iw$49Xv)L?jU>0Ts z=_L^Wm}kH=x*0<0Y5zT zJXrnrq)?b!-^Xu){Mn1{*PZnFaMEKQI~Ic!!6L>HTY>r29wgYtA0wgBx_cJ@84a=4 z17xf$DV+gJh6s+OX>6{dy+D*#{hZa1#H%q5Fgj#}q{QKs;cytta)Tj5>KqWAkyA6e zzlP?A`cYUvQ)Joj7`%t zP2+^kyZ``;@=R6TGPc{>+mAo>)Npg#Id^!#H*UZ9!FNA%emwHlzEm}z_`(lq-YLa`=&k&#dJm!kgp6AU8CkN^w(?q#l>1%g zHAtBjMF*38Gz1zvF=Ye*P9AI?L~Lc;V0y!R`B(n@4}ak4N6%M~i4sSkfHG$Ejfi*z zOE?oYa!<<^>0-uJW=djeXOLxXs-Iutn=RI zgM5E~@7AqbZQJhd?keU&sev|*Q8AQRP69cDHRB z1S7z3wED^A)mb^BlN&HG#02Q0Mlydm?higSCVTIBM0DPswmQr6CUdLg#2y)CExfoE zj$E^^Ja{4kW|=QXDC_E3`#n8aHrEvWPpeHifw+P^&u1Rrg*o??=UpNq00^vO_$jyq zMYbWZOgQ&Y1W2)OL`3Us{56_N#2$9ZArog-@jlP8pvYv$N-0F;oSoH`AZVk7(KADr zTau`5ThBT#9Vu(Q0u(C%z!aNvK zoh3(DZd_QbRr_Cp>r23~2!K(-M(CNZ_E7lc?^nXX5W{`+LOGqf^?N^xNLZhA$rpkuZJ-qMEMovNvot(QKIX#66tz0!;lgf4Z`}tImtP%>AKcrgl zw6-AZAgqS@)I7M8@CkMrIQC`i8`gUv2%U)%#(MOEx5UX<+?td*r^L!jG-ur|U4{O4 zrAK_?PYQ75Qub^MPWoeXPe_d9Ze0zl=j~Mg$ll_Lhyq+5U^HJN5{*PiDgf3%DZeZt z3+i=R3he0yx;lNluNMsJ)j}kqKp9TfYnh&`3kH=Cr4`9OPiCWYs_36_xL=7Al`M#m1f)IkQv zpaxmt#X(VSHEqLE6*}XVl@yvrgoFwlu{eheB^C$+M;4auBM@oV&h%T$3%_tygUY-| zaE>h?2%coc&|z4q-XqKOcs%~zM|Xawd;T3Q#2}P4u6d?(?>S!_k4fcORZd%cHi^dcNaFBFi#Km1YkhJJj)wp#Au~64=Alu!U*v0-QAXf zL8HkK5q$I5BadtJAOC$nO|zNH^1{I0G;^)IKAQ}^tJ$GOM48mbJQFQuW3+QYm-}?1 zzIFScuFb~KgSgiFwj#9S?Q2BSrfCL)LPQSs_O+ob%NlfGy&^&gX}(mM7fRq|Hw%tV z$exI7+jjOEq5_1600F)aY^GHdOuo=TTMNxXF$Bv&c}v0S2(!5}l) z2pcX3)M}8wKAi!~Zr^+@Xn;ql_InOX0Y!pzj-JPO=V zMcwXiwuRZuMPC7}wkK7_ww8I~9*Y)7)Wyv} z4?xn?z^qV%5P}fVOzx2-?`*Dy)4JwH3uOS9HBI0i0=O1Ydz?q>j!k#suC)R8kNGyc zTnNq)QOL;=dKJ7nAYpFecwrutDGnrNVhAB|!wQC!&O!XnvdkExwYJt)RmIG~Sz$Vz zM(>0)$aQEvayh|(w+p-|LRtl`C{8|mKtyAVc;}o8+e1V=lFl<`QG~Z&f8BY%xw)Au zDyqXnIc|^lSSiEBgEsfp{jRm$OkT##;D(Ixd%X$1@}~LKXheO)g>4-;GCm% zV$!;j0Z)9pfxB?^$A9tF=fClP|8IWpd!Bpx1Ml*!Yn^KZjHd2XG82)Pajijkl6&W= znQ8P2sBx0fV3NDW!vGusTAk_asM<&B)NDA93mkLEFo7`omSxJWc2Yo=XC^~* z)-(7TopWttGl-5sCZTmwJ0`FabRt9~b5dq&37owG1Z3x2TQ}C);nv8yW^-%2x3@Pv 
zKD>PSG65d#-&4A9&b3W5C`)Ut1+Lr-5k>qw)|`qqxy&q}BQ=Z4OKYty6j&=LTF~2; zoyXSskOD!S#K$0_5Wr2zY2Z$rx|3xWQA!1$s@>h)fHub9BrCSb*-Qmbe;(E#P$II< zx__N|0N0DttG{2`Ma=}xoi!s@5Rz0iO#_RpSJS%Qi(NlS*h!ig zDV+4Cnx)Of+JZqt$Xy%&n$YvW)xbRZGt9Xcqo|rr0v}66CowU+1RTG09Kxvj){00Y zYFL)D+B&YZCMAsCdGL&70eI9*j$0`_5^2;qIa5JH1R>sZAW*^1BQ-;bJSQ{zUX1TK z8Cf^~3Xl;5=Xx3e-&$uaYO|~!z(w=_bSL!0-ji3Ng>QI0hJ;!?)CtPzQ618q$MAj<>3+kkR^-^FsG!2NKtThf|N0+p?<~%~* zdq(E|!GZV2d3NrOHmU7vL3&d&s=R|u5rOC3+DUN$?ADK1!RxU z1jnpS_lBL?>~m*N@6{R0^1eztnJT(MQFE`#^!ub>9hgVTy1$~Rg?(-};TUi&A%^bgJEqaPcT z#b{bpxd*v28jijE&=Z@~dSy!6dZPx0&bMu=%WTq2b1az|NeGbHBO;MXQ+XqXbShn} zzqL_k-eSRM9A0g`xNmO1ipb(v*deHn>jI5Hz#Q%r1V$7<9*qXq#>3y~o_}YjiCl(7 zV~c@;i<_gJ!lEWMkOyTqAGCN(lyxKtyKqA0Qr1NI2McC|R>}P^yVkBn2stX63PF+b0=D+b0D<$!jUm?D zAd3NdjP^IT&OQ3%bIKGfs>n*`T7_tZCq4xG^W3sh84-Eg?12p=rh=wzTO!DeshY{G z1+6tQH?wKL_#7fLR~)!f>n2qSWeUGO7#31b_YZ1jH#u}wRlVM}!_j7dXfw=O?-;mh zCP1>Wb8b4F7K1X_8awTLp36K+NuC?=T-8&PDXobJw$)gd;G6@|RE|6XDCel!Y&Ig` z2mnw50BEh-wvNWd#^mK-gs8mt09)Xi{Y969g<=%T{F(!{_BLIlXL?mRL)1-YWnk3KwnINX$@G4FEXj4-Sq3hztNh>zs35+SUeLLQ!T-Q+w+=Ze~7L z;efOK()`dKp8(D~85Wkt(dM6Csrt)4FB`HAm?5w5Yp=(bz78=WlI57T4 zpS|#-&t90!@U^3ZufO){?ODZWLE!+)bAu9;JiR+}gE3WZ1kg&;h|jX3Vy8usbP?DY zJ(IAKjECNP@QjEmfE_FV&=||iAf!>FAO_9?RjpfeY(&VQV`e}tN{J?Rtf>*NwI;{T zdB`+tBKziA@wW&$Dz|CC>rF#O_>s*q4A|5s)a8d2*gw50<$le+}4{)SmT+?mG7aboAO+IVeEgXp=7W?Gr>8slt!^aA#bjNjXr?1Ax~kIkW?0CIatT z5k#~iAOxxid#XE|9-I@UNNX*ednVWTc^bo$D^;ZJ0DYn$x&$kGv6xYj=Qw_7699yz zan5_MeMe`ix+WrPJ+pZ4g8V0*TjztNaL{6kZvmk7oif;2?<@d-RNel!uyhdoiqGqy zodDymkno|)3ez-ANazyCopY|@urB%eet7s$!|ZXeK^9y-%n0%7JZwEzgOhqKqzFKg=e zYo0&xDo9IE8>|4E=%k*7C3`@%&APFOh$CWV(BiFQ=huY^>i8w@2aA}Gx4)x8iEwuO zrh}`Lic&_eKk2=b8ZJaLj_4vRB2irwQmvfKECb9#V_V-n@7R-cTwh@Mlv168lJb2g1t=bBR6VDzIK`HVejzzk}D&Seb3kU(!^^_153Ok2@fP$#d zw@uq-0N~LBcy_Qzd7`Um>pTLqzD;L7eyG|-#fe`F+;>BnTRlftf)L!gO>%4Xy0LP1 zMG_IgtVHp-;wU+9)AfygUU$CYdF&mH$n?S@blZws+0S)W>I57p90G{9;w^;Kpk!bb zZ!c@~0}(LoshxVMbmO9|3ntTh9p+NQ>8Z=_Th?hul6gR2Vdob^ocrZ<4?W~Hxld#s 
zY^MtfED|V{OBOJVSG24)Sb&w6>R)e_-0MbcYaJfRY95g$gje%U#Gfc)Wle>>FFmE+ z42jjbKe?{Kg#0w2y4~7*3Owd5YJwDCn8>0%Lyxi3B!VOMyNtW#B-FOAz@=&>)DC7Y<&oO z5EBP>h<`Nf3zrdK@EAh|U2K0$eL?1By^(j!Cs%3f+1~j#ZXVns%TI7w%scP0F=aXG)M;Yp1%@${Zfe$|9c} z9N&5U*5=OUvu}Ufts5^Dhc+8kcSH?QyUEL+`RuQM(Ly7k|D@#gPz&%dLE(4sY{&Es2F zKl%LK*-=$XlcPE#Iez2qCRD4VjDgxFmLQN&Ls1W=ut2AC4c0kbsH$y5QzrYqQn+!) zw_dBlS{4z?j0ds4{gdx`+t>FFKKI(aV_&d0^{k={YigApkZ-_MKnnDbk$f}t_3?L^ zUwik~`R{$`(YIZDa(v|>JIEEekyiQmqGwV0&|0?EYpsn}08nTZ@vmqd!_3}Gp67wE z$D}33m>66IO%`nK!mnD>9InlROgo#+tZPtcfA4TQJ61~V?(U4o=bEOTPNxDLOcM4=Qilxl&@U5cI}a;lp2r5moHy_3?Qp!Hp?k2Vl>}o=B8=NVF7b- zD8z&zqQPL0Wm&M(lrA$>XFC?g7!B2Bp4Xu$ir~5t>^dXs;Jtt1i6=s+nea*%3on7f zg8_S(KtC_RN-G?J)Wa@aols|tbf04My%>Br4}Txqy{|z*kGw>AbubvD9^CQamG&S# zc7K@*KSlD1M;j7joMnL|t#Db)4htQh$0Qt6ufm>4*^@(}piV>~PeaVg5YYc%+Atgr ziD)_vnmiqT3b5P;xe5OxIs1b?Hj zow^-jbQo-1*C7L;wLTIPV@&N>Ypt~kZo*1q2nU?11JmJej;4=2bmhCBedNX0_CEc! z7e4j;i?6kfKHPk2`|FolX2c z5bxc+cWt0&{`N+uE)Ap{Uw--4%P97#OFJKb$1^|j+>_fm!1Smxn~+e0s61Phyc7UH z!{TY3f6v1O{`G{$B6tyT;5;itW3V*JAb2-lyX1LHoSoak7r!owK0@gM2sXM}t1+wQuuG6zE_foB^G2B~`BbhXF2`}8dH z@CL@ULc4Q6>)KOnfpPAB$MaN#uC;@~;DP$T&PsQknelpc1+9JwdF50*Xt1eMW;Jk4 z!F)K}@9;qTIZ^PWH^qq-Gf@w~B2Us|PL{c*_@Z>iAC%(JB9eMhq(?jrDw1Mt$Rdl$ zjYH>^#y|&R6vXgAKx3BS?6gs4@QGVwcsZE22g76$5z4}AB0hAZGc$)X!M?FYpTsU- zemP1s81b>&ZQDlHmvABd%*mEe{nhE3;1esNGpaq}Vy)C4^v+8!lF9NypRt^Bu8X1w zKHO<@Yw6w^GvKH`p+*0*^!at_fpxOaS;|_vy4IV-2PP-J?wl*iQaxbKEnbtRWgVS7 zovyBw_|86FL=?ALeYE9!(0IDuo2Y>l?o}I@nu1BKDW{nY*%Oq*6k}^ z@ZO03hS-^~d}Yw;UAoc>C#W}4^*}tNjc!P*ayZDm*IEa+DkAc(<7UGSiSl*hY!q^t zSr7=dQVPUJ*3t6`am$rPL>!hm0gCV0L zex`(!v-bOJ>a_3McWG0#b!P|ECsAC*-i6oTs7x_TkqYl%&d2{b&SS4YTbD6 z@b=x?H-Y#{kzYc&qRmJnz`gPK?$&T(@^g?sX^L~3L)t9ccklkijf+=cvly$?qw(R<1UhP{A_riM(K-u$^jd2~ z*xx^Bnr35TDUXX84zn!NngCdU0bCcMObj4`AZlV} zXBnW_*w}9B2{R(9vK*G>P(<2>_wMZ#gIp;kfX+MT7)gWDfT)zKr+cnxT~mA8Zj7$} z#2@)f*B*W3&hXp*wIvv8P);NTW*HsSfI&d68POu0sNaVzbuPBP}XPVtN!r72;&zO{D`E#An8^zk#Zm2Nj9#ZBb1>bvRC3kyY*dja` zx8JfDkc!8t^c70P;Bf8^98(cM!lx;$Sd{PLvRW`3fE%A6sjkZ=Don2 
z7sbXn2Y~6Ua?a&>ZnBdUqd|%zScndpGOqyO^w>5|vwQs9&g0*=^HV?Yfg|fCZC%|x ze#g#teeK@tmDeZN?;N~(Fr88{^<+yXluRq5K_huZ!Oqrzj=UowX1ZVm=bR%{0tBcL zoMXq0GTLO>R3m{f*8r61TtqqNJbP_9)4DWDM3B8`@$7`y!JC`|fSu!3Deb*&{cNyN zwzjf7LtOv>0(N<~<{JY|>4A_viBsA+SI-<|#dz1&4#gviDu5IUWJ2h;u?AR-&P}F8 zq;(cVvdmO){N|490Sj7W_8Xf!v)X!A9zlcvz<^=zn5Yu*!k7^eI^Rk|hpxUL)t`Iz z{Ga>rkAX&24s+*#EXW5yWh~^_YwfLTB_p6N5H`$dOJ}ndc4f41igY8S7Ni9fKtwF@ zN|M4J8Hfo`loxScrfijPt`GnSP!=REF?k1n5FyStummPhYb5gGc^(`E39V-l?yxyj zt@9zf+8{4dpQ>0=1%D(#L;*DD6K9cinSD=5`TJnOZD<~wyH^ycz_bL$Kt!@EtEwty z0e!Gtv<%xJKmz6_gJ1F1gJL45%)`2}Jo&JM^rbh>x>kDl)P0BWhT)#{MOw<+dEfd^ znV%j!7lyy?`StazgU=qUpIy-8RcyuTcOTGuU>Sn!^(wMG%REoo`tp`I9Y#za?$yu( zI+veG`?+YAe$wakT(9gxthqBn0>s7C$FZ%HBZ}WJM)Ue^jR+ADG)DZiOM4p#EY72C zBojvfUYG$fB)S(7!FkxIQ)g^lSzf{3%)~EGU$5R_k6b9M;t7|r@^!1fl5})ZMzXcu z0J?p$ALb1%v^-DYyG-TQo{Povo9@PCrg46OuTnxF^cw0NqLmZ1?yQ{xE~g~-4NTy= zSp4x4@4Q}?%JO}&vdG+ZK<0mTmh5Yu_p3l25y7+NBuFP{O^4P!j)*9<5J3n*S=r5}p8j|K`u|l~)fk(%85rx3_1^ye-T~9$&pq>wfAsukj4xPao~=jmiUC^? z<~9326NGlKIUYOTD%JS5wqTm9%0w(%5RVkRKM_eYTYO`74R2a$vLT5&VgRKW#Ct2y z0vLb{0JMOBAj|_wdO;vCd7=3&79{D-E{1{t4*&tJ*2)N?c5m)HI9YHu2}!&d0o8(Q zQlwMbKUjZ8q8zKTIv(Z2179jT9Vqu20GM_*n~81R-NU>-zFL|sUtb;#J`9`BUGe|R z#~y$D+SNxdJ)}$lW&l7fxr_}k0f8vzffW<7_Sy=taP2D=PfW%u=N0=(yaz--7#4Zn z`56JQw;WBUgQa{R&E8w*+~v!ccXoE_X12e-fBW`p&bL=CU&Fb*OyFyS5&dL3K}2J+ z(P$&YDuRVA50j<4Rh{=zptC%zpG&Yj=AI@r1veI1&niK$nnlV&s? 
zz|0h_fM{v+NXN(F7P7yo*iFpFC z5W9xwhKiJBc?bp?k$t69Q50d?)6`@=vuj)oAQRLNQHzt_f8oWZv2CM>6j7!q69Mm` zb_Z3}92^~4>vwj~ZEl}~EOSZ~I0OJdF{X>EtWCCkt_TrGm{~|s4v1peTGtenD~*z2 z0h>c2rB#trAy%$l+YX{f-8$!OmK8*Vz}|UaF-n`lgoTJ1+fwvAjx^ed^u(vg`$il8 zsA)*ygTY`BVBDZNh#npR$0wrTrjX{|31wqrBWhW~I(9*061g;DNg?cP{G9S(5xzM^ z0gzt*spJEzxV^&0I(0LNcN|4 zY(3_j3e*4S<5<>hPO;cV5}s|Kg4MrF#dzef!}0fp=;+ z%8Q{=Ik3yD5CIEl0s&?Q@x)9djdvaiGp({h`y8#8nIB{sIFDi^BTwA=77#QN6`A+m za`2yEV+<1@D(&A~c1jdxMUEMXVC%d9IPY~{7`Hk*Yt~Mk8PvLP&S`Ji%dxd&WQr`} z3x%21;9c#!hyv2t0Gie|U;zLSAOjXbZ1V@a$KUKixwqh^)t=70kT4>O2a={W9+g(w z2s8T(kem8&@7_;enttEAKJ!%KL_-N9CD4G>oR5rx78t;?Ol*^DREQkBvx6p& z;)TLIXK%XAIX}oXbsqyp4Ct`-wl98DJ%^@4giRP#KhjSYl(>GHm4^Fv$=s- zpz|lO4hTI6Ae>}EL)2Y*5;0i@)*54ib~(inqLMOFwjL5Ok4f55*EHujGq05z*1!Z0 zDm<@y-mzCkyWl6Gh(Ul*XJ(!A`x^-A5=j3kV$mDM$WCwf>}|=SI=Oe>^%_cVB{&1j zae6=3p-#&vLkie^D>Js-v9rwK`W7E7_`|x#&Y~VTOF7H@ubjh_`H>{}2`n-v>-0b? z0Mj*?@*LKuDZh8-!(6#1y$Ho)X9k`r`q8KDjXRP)s3J?gX))dD!7Kg>K|BSt`) z=Qtt2kkl6d5Fx@(a|W!dC5Tb!EO`V(T$i$yAM9B72a@u5Jsi2GR*^+~6ML&Uf5tBb z$w~R-83V280+vX>Yq%PJZZ#`E9&=5K%DbMJWeozv;@uq^NGy{?R5z{{5&LNZ%w zmS=<4UV9C`dhF3l%h;`qj#E# zc>Lz)n&#=w0YcoN2V0j6Cv1!^)P6gIT#o7-Jv?^jA3v(I#~!+TWpi|O=gPG!mmh!f zX&Mdz>FBtnTvU$1>#erpop*qx$y#Or4-704QmCTX$|>FY93aaIDKrR>_gO1?dy^U3 zw#|$}L~E^cZR=fqTxqR`!{KN&ia4ybc5m;l_x|wkaARYmEX!bO?VP)L^XAsp)@(Mr ze*HQTJ@Ld7!EX0lwheP48^Y!UjIU{I(=>J6hDb6o7?F2QDU;_q{F!B$F^ZYJLJ>hB zMXF_cG&@Gbk_LIH12>mZ4g%Hz0Srq(1Rw>>V^wSv+oaX@&XvoT9`j^^(?D=#2*H1p zQbn$mMr&=;Q~<2C4ghgkmao40s)+3F?&etu0C}E|$75@K3Qq?{$`~`7O{UXnNCg&h z6bC$!*@fZ9doiGsT4zzU5Go0erPkuGcz=Wnvn(siGPR*drI!t^D)_FT`Z z!`EMbZEydsI{5tl;lx?BwRP#rl_ziK+X%(x)-De&gpfW#b%7We3eqV zR2rc4PC2$h(zb1;vK$Rhn|xSUKMQGKd2W5m+{pk%QRI0J03pX<(53`%HmFikOY`U! 
z2lKly#({|*6C0Y3GU~e087|MFQ#WtvPv<1 zNn8q+J983zY_8nG#O5MZW2{sU#2HJSYSY@{;sQW$9SF+fpauvSUz)vOFc^dfthKY* zOeDliLv1k23+Eg=e{^(oe0;pSyCWjw@#gXI@$vC-mSyAdIOrGx83`^3K>;AxX%T{V z3jl~`P%8Kn%&MA*Ku!WQ1vPWnGApH{mRcfGT9bga0v4#Zdx0{1o&}Ty!(-- zo_z#<{5b#sasKA(cR%;a^QALp{rC6L&Ixq6Z)>)ntB3mcY zf+ES#2}xTyA=0@Aw#`(NADB!FPF$;r3>nNc3o;Ra)?}Q2b7M8KAb6>#b)g0T-o-)-@hH5&;!ZJ1>YvI(4QHDqWO+@5bbxT>t08 z|L*TRG8+BN4?gw&AO542*F(;H!4{wfs|8D~a~U`THF)E@<2KimqW8?qF$grKBmz-s zlz>1*az_XN#xWuyU~ub*?5mRog~XxVHreYh`eNehNEXaF+sIj^;|lfs3^3$^jdGn8PZDrU+K%*TAT zGJR6*LIBKlsmRx?<4Uy+zzXCm9di0W_#AY=-M9!vKnR6U>%gn8*f}X>NZ_-D{`6#} zxX$4&J7s?&o+;==z->r??oXE%4gc4++t*`t-@t-jRPy;g2XA>};~L-Gw0kSnE$O1i zK!3Ps_wztG32?o)!OBw~&kL-ev}d}flDEzjv?uA_+MqyZA1{PtfotkHp-Tu+OSf)5 zPlvNp5*J0bm7P3YK3(z3x?HYwTJo-Cl6%TN!Fe!PNUYG-sln8uoEweymg`?h;o}9j zZa&lXO8dZk61*XOAXPKRc9)r=_)IkSOoxex;!Z}ike^acl+&0*?nS~_6(NO)vcX<0 zI64F-U!=?WaO3bOGiF$p2;iBGGG%5I!nCe3QVN89>pgoy=bUTX$e;5P9=gI?!EpSUw!o#KKqsHHovi7G4e5CF&I5QDDd2H(5gL{ zC}AT`c?O^ss6xXk*XwdeHMlV>r*(bgvUdNq?1isngG>6{Lv1!7t(l3D1W-f}3=6*1 zauXNaBC0&Z5wadUc%_shFaTX}A?LtkfJo3WAI^)Ww^;7HH@PSflbnxQuaXAw=VZ4tM?~P*t>gf<~Q-yBVYX+@A~kc z`7?j|XMhS211fipnL#O10i6xAu%@q7*wdqgLv=Hv2O>;lCFKL z+CxHaktQhhEBA;1ft_u<_bh^nvaA@EgAf!xV&B2S`ts$6oOAp8_l}Q`1CEeo#lij^ zt+j~1aA9|6XY26r@c8&}GC3lmEE@!fd=7$lls({UL6_S!R-}yv-I7E|7x9zH({R$Kwkg3K16oWdI21YOwzu4#&3Ua2YG5iD)#;H#RodgVCL%!2GJ9x~}H{ zdIu%P;P^^muu>Q40^`hWII#;}bmCu+iHZAR^4&DLRc&4Ez$F2}mx?MXcFu76Pr} z+;Tj;67chUCWW;o9M?6m}0AID+6k z2{tM!V4h757A2YRXADNvW7cE4y^;o(NcrQ`N}{K?fMpqeNeqq;geXj~q*v%MH*p|Q zJ(I%ynD`Z?e0=IpkT%^D@E~EEVz|ll-x#RMIcJR7+}zBvEX4VNxl2JfZn24|C<;r4 zbM;++;Kh>&X=Je8G~f$clNX{=xt0w})&dLo%mEM>b?NHG z-*@%mk9_wBoNb(MzkF|c>)yeaUw-wAuik$3*zQwyVBz@q=;CnLw9NpiFa|{0s`bPw z&p>60gjB1XD;N!pt@e{;I=+aW!o76)-b*Cv8!8u{JX#ZgO6Q>fpI|Sm4)a+UU0ymk7#gu{fZdXRWbBlq(?i>`Q4|2ZV}pYq51c)218`4#wAx zjG_vD<@z_8pa0K4@bRBC`PLrKSTeCTBjr7`07^quwTzG}6c%715EWe71wepBVDVsv ze=|1^_#g|oTNlSdPJ)+&WhxB;0D})!uE-eN6_5;taIq|Yo{lx}14=1E0t7)uM1s!o 
zSDa@NAs_OZh=_N($c=~~up$t4ia-%4BH!ATfO21p8Shi%7%48&2MYc-tI9fY`YcfR z!t=Zu{3X(DsYhGo@@Z^)YjSJ}3o#j5cv?XF8rxq8w7q&qcB z^)Ch?JgMhfwzW+eiS))_He9&x{NSQ-dirh}ts?F&NED5*#EQTo4O!-GVh_8vL^v5s zc#@Z>PztTacH^JSk`5nYYGUtG;?~uZbh~?AM8MA}D0Iqn?rbH43IVZaIvGM>uMQr# zP7RZ$7+oN+(q|4=Zkry|8%LVX!|KOGLVTncVJ9GGDPOw0Sj)iuO6F5U08dlD^>#bs z3V87p3JIsF_avQx69Tw4CjP>?KEX*7ZA5PpdM&O4Uc2@69v87@SVcq9=i%E?x=bF4 zbrHTJoILA-9C}Uw0Wrj*fkVpiE#PM22QdkQKrAbvJ2^4L$LPV;Wbycg?yQDhnw0f?GMuFgz3C_nXEFZr$A?F&~0V7$Beul>G{ z7eK%Hwa-<1lS`K_O^+wwxoMiOeEq8zFJF52;fGDRBj7dR-8(k{xT>bxyXWi5YOP4A zjomG!$8b~xSu73`kCdrtt09KxHMOYcF;3>*4ewy0c~OxIaG1c0rA&v*Tib zb$g6R&M{gw1kuE(vC$|>h%ERTTw`lxXZ7rOGHYte)o?h>lm_Q#YSchQ103W-4Fm0! zF)+-%T-!weVYm1CowjXFkt>~*!x2H3;UqwN0ZIda$z(Ph4vop9I&@=WBhSmbckfIl z6X#q=92BDN6h&ce$Ib@GEL`NyIc7?2I&dBtrfu60e?P>GDT-n=8kJ>v_2EAi;JKnG zE5H7k;=&=)Sg|LVh9Wd;DnLEXB>wv+lmp^ggVK$_;Xg`4#z>gF%M0HWjTycZ5UTbe5#aUcH{9l%Q7MzV&8|if_6dXVQu2| zmgi;E0fdPP@H8f6fxVvJkugqAoW1Zs5408WS(ZhV-@6XEwhHR1LPR7%6hWpORe>PP z%u10``J4}fqR5;tKmZjfl&ZRS`^NJxz3`>iZ{N@`z53|8wl6)ief|kkY%=>no{Kb| zorr*tfLQCS^+Z~0&Fo^thOiS-d^QF8!$bwofDkD@!0Rz@L=3?c1Io*iGkA>l6Or^Z zc42CyAJr`>a~YSo*S9eFJ!U;xb)^0@!%rNNPUpjE1kSfoD-C4L_cMNfi-x!Pj| z6P$>w@qyk{q~d5IkDc(TJ$Ag;P&(3H&zEf|0##Y=XB5j*NgI;bK4~1MpzsTuss}8l zu|cBAN!Ue!;1E&BAsjk98Vtima?T9~#bb{>W{hF)1*j~`P;*R^;r4;svdSO#a#z_rLk#3$vz1t#hM@bXzwd-ol&fmH42AjJMz{wH4P+Wumng*I28o z+)E({wV-q2oa5=ld!Ol{&8y7Gb8kQQumAY}=4G?hRB)aKl+BQ>77ZS}Si*dh3?a7Q ziBOP;2tiN)L(u9(q>6$UBBFQU&$yo#J4=%ZfBDHQLCs>_OpXq;=Mgi_jj_FFGK81a-o&-Z_@SzfAV z&ASgOovzbTq*njwocGV~e3qg)La+YQQ+h5>LJBISJ2r(!Az_@-j7sTGomatJLlA%R zbM{hXA&(R-a%enHDa^6})1SakEP+s;?8bnR&x2QV>??Q(paOeD)ooJ}gpU0aaR5l7 zxC7YLTwk$f-V(2cg^s1j-NFke=X%{*_kg@*D1YUmr|X;Y+nvwuT$t^>xKc47i;l1> zRRybe&Jx)SK|fY@bxl3%%wJFRjX(rpHJKHz_R?5)@Nf=#rqbmp{3k3)lo1o?ZSrNA z1(M_Wpk7U@(ro<=Qwrnh!$=VAV1uXD2E`dgs4iT~uc$!l=Ub=31C2LMo~5q~=M zeqWy9Gu}rh^C<+-iZUABfAH!bjoXlY4jG$)5Hr-KeMYP9e(B zy7dkB=JM5#;eMIxU%TlMQ75@Z7EcYWP|hKu_(QZwnoFp-uB)Rpa_saRaNJPg;(aKS6+EMAI|n}J@V+4o3CD< 
zO^-hN>Cb%lBl+g$CR=xYH=8M?RQuJ>fAK%~#Haq@emT5!c|#v7860ypC^Jv4wpwOr zSPx~>(>+1*tOb+^Y60{>!i`?NUJ9CH$abr{fAuH+z{j6^ipdh-y}$I6pFBR!#~X~= ziXfr~6c%BEZ_~N{b`^)Hr1Q*1)frw{hbBYl)B|DXF*3#!k&wxooD%RYRw~zO$&sAa z-q{MtDOA?hnY_8)1Fj=;=-GSEmc6s=h4V}yB80G9A%Z+0-~+DpSP`TN(ZCMmah7A-&*0K|XDQPW)&bsOBczWc-WlEb&H>NzdeeT?a z%i{UKR<&>1Sp$tFGow65E~*MDX^qaB7WNM6hXxkdBd~Sta8Pb+ZBJ%ZHJt^EyciEB zG7ws4#^}w>?eI<8)`y3OM6@v;hdt@$cqJn`YokduA~sDc^HjqzG7NMPelsF;&Lg68 zZTP;brT`G2d1K1(r7;Qsj*q9_xy{W@;&Z^zTBkJwiM5quueF=`Ap)+Ps-R22MX(;`h<|t{##;zW1M{F* z!QmjfEX0sOp+ZS==R(X5H&2Ni9*CNZD9EK!dZ;sMcj9_QFOxX5?z}Hz{i(Ry`=+O; z=+TW{tp5~5=smfPL0o2)WL4=oyr(s!Z^iF9Nf>~uoH`|O`d;oyoY-YWPHzGOWHuU& z<^e`jSc0fb3aboVR`Wo_bXdFSToOZsS!=DeS(Y_T1CTLuHJg=XSvNLrIh<>%!sZOa z3>zS*6`gaMx(C{}4V^?0&m5R*L64vmAfK?RtM4zRJs~P4@`9ikrS;x<$@LJec8C^L z4WK|6M2L)6fXGHTqzh)mmWk0B@Jed&w0 z_Fk_2OGmR;?!6A<@p%%>w#hbKQBrf`z3PP@y~2O+U;8)9!RC$q`m1}BfB5CE|Kl%z z<9J4>%ME;U`4Fejd&lIf#>sGCGD2eJHka&V+XwZ4u=oD^c1HikN8fWkWALrVVuqvu z%mxZ50@Q*euvrecbg$gL@wFRY{nD3@YTJ&+n+?0B@q@w;DQ3y=%~g9mXt9=crhus| zM#qzO*0gM=t#?W_gDf-RcNIQ<`P{QtAKM)iiktUe-PjrJDv#$cjloZyYppUKY%v|M z0t;T{ioJDh&6#POEjh?rV>L8NfZLRkmLoFQ1vDc|=X^5VrN`0gsRh%$$aXTB3ekmWY^4U zG!t35I;X6{@^UX9*xtP(iE&k`KI}D}PR?=|s&$O?^2S?UELm?QeWJcvQV*=0fu7;> zx=BdBKd~}SudsQZiKQqdB&%y)ik_5?be*zfrG{XcvZZYQeF6GYewFoB?Xij8YRZyM z?m;PGzdwnFfaJu>XSu7Vtk)UG+qJie7cuLy2|PKTJ9AudV`&; zCOw$B%=#I7Bde1Vz2zOq%z}~sTu5fV+@H9IW$tCNY!7itrs7^b%bi@_|NWU<+)=$w z;u@zM@eC|5_cbR#KvYV#we3VvX3ukg0(bCEH)E!8%(CzN+*pKQ3+n7w7r#K5XD z)da8I_zOSz(T_j(1hjy;5p>sY{uXV1&j7%qR~|?yFJjmOvX=+*s(G_Z4h6FqhXjJa z2o3-!3tkUFJrFoK5)FvL03N}&oreem0Etj%w#5$1dCd!&cJ+pi5Aq^SI z0nv&pL@|DQ0Kh;$zpQB{g69mOcQtE=Y@{ksHDY|N2V>cH3|ao9zxk2T!AG8XM~l0k zdv*H7*KU1&|K2TAwE0+^8hABALz!vY))=6+|&Lz@B)V9tDZWx}q z;y?eyFaNXu)BoZ}e(;C>&A;}y&TnpxvcfqhMtOF{P>Xn-0|4;Ew`hSd+ZgU{j)c4T z=?KisP8<*njVXw5YeL#|K>z@N07*naRG@dY2wH1JywP-Uuy=4U0pz3Gw-4NYu-P9D zhX`b?-PqbT#%ydp5<>qE2BR46O)2HQYg<1D$SBYA5E3_7;6{`7u;rW&wwwrnBEiT# zFl&^iU=_#Kbj>+NMZ!oJ!VozCr<4jAx4f5V2Nzi0G&)W6A?S%5!RS7OMS^I}gyg&k 
zQI=)o#n;o4ARCObtOd!$7LGx{d+z`U&?x5>C=}FMfd&9i3KW4-!#NzRd#ppyhHQ*4 zUAlAy@x!q*0n_o`3vy#^z%Q8V5GR3DhkkkwfZ*rBb9y;=Vd=&Vdms(3jEKQ_JnWO8 zHwyS~cp&1xu|k_EaHxu zpLdOko(bmZPeYxdhiw}y&Qw*mQO5&|vPYyLT`(>r@g0VUA>?{6+50x`02p9nfgpk2|5YpH{TX@W^kbom>5lP5^ z7H*Bogj9=AizksGgKoXcR7?@!B>)qOnGsqBGF~}Ns0f{NM8H}?*qLrVJ1V6~|H94c z7yiTlT8kSgzBno#+1b8!Zfkoud}vc`k2ki5V;1%D-NVnl^6IBwx$(;W0qgBdYwH~W z6k4;h!Q5vZUlG1vMwgVhz*!Pe@cab|-da9m3jDYz-B_M(kc-If-`{qgu}i zT38zpwj#pwL&UaA{UREf9Riw6&jYnN0&%Vlhbz#T5R(v5h%GS;b1j)75@%}w&oezaoOjGLJ`rClWHmzRgz=!cpZl$Ot@Oku@pK);ZlvVm+ERju?W9A|Q|gKn`Ip z5H*|uwm3lqdRD+ER^NSbs3RV0f+;=dRPdUV^6TK0b+z-%xOuIy5@!i%qW zG<=O16oPdiBE!N}+w*3KqgdH!3RlVeuyj}CIyh<3(=HC2L*foafY>3mK+5Q~xh2*v z&>Gkp7WUin=UzQ(jo2)_8uj>AtngMIy&m$yAVI?sqpS!33&WB+Vcm&XZZw6Y*9Ex_ zOuHmj*s0)?a?bP59bbe=yijJJDFXoD8AJ#m1Xu$R)blLw!6$&ijso!rEI~yC!Lo`} z!X$V5E5g_)ph9yH^O_*D1SEiCkf8`ZcKZZ-bF&ULAX?*{Gk-VYq`?%o$M>Er0jl0w zNXKdlLf~p3c#kPS7Dya)!>K@K_MSBoh@c2{EIne_Q=FPPSsi@{(MmBH|J+mPJC8>ZtnLa@EdP^T`wdnuo|27S|2(L_38cJSHqw zz3k6)I*7ka>y}RnBp{|Nmh{DC;iZ?-U`6E%BRc6)*!F4Pf-I0mS(@)C1OQmH!anKt z;(%fsyo!!_n5Q^inub(`u=sWWV( zqBf;Xn>iD{orU{_q zI!{iqoKzbB=+cBB_B3r>m)7Bm#w^y?eN;3e)smq&h%1#FyrxH^wE!h?4uu<{Ih@hsh|Js7r#9IfECSO1$uqMu!O{r^W8y=x32Tgl`{iEkXF)Cs%7zYHBl6zc)_2?|boQ`8Y6=W4 zB03jxw<d)W~HMPqFxA|a4>u*I84(OIw##<|gY zFQ|HtY3T7)y`OMyYro7yhLSp2!y;tadww~%N~E*PastlNmM+`=#MtW{Ad<)cLF*IN zB80*RROhyz8NBNeBCYdG7!X?#uQZB3@s@za8#?UxJQ{uGPv1)PbU2{3CE zdPAt0-a_|M$imW<8C!`TdYdGC4k>?EPEPnHZ1wo=azL9?#Q4}fN$hb}3X`$LC>Exh zQ0BUdUN@HOiTw!xbf?lwgA%NNa+afaJ|VzbH|J(55s>3AeMVsw#!FJ=)kBjWccD z*WCHQJD+@J3;*hmeb0xVdiz5|P!6 zv8S}YJn-d5WqujI8Ra`q;n$IfbeU2n;f$Mh<-NRe^}7mTL2`sOSL7MzX9Y)b>U;Dp z&-yYh(RDz&y^{W_9%TERKIBvmO8<@OilusFo>%JRd*n{5*gFNxOmebnDJerb?GP)uIUUl6qBFP`Q0e9#-VC;X(!&$PXC-Yo*t&j-?SX%FxF_=HH7YIpz2uKSM z3$p`H?FwCHSTJE^1`+g5fVs<;003$3f%K|FL}3R25@NI>20Lm-%CL#eECLU2v0Qs^ zv7!(fQW@qbQgN#V4S*(T>EUc(tAFtCKRNl*2mX(rKOS$7+*G3j%*W@?z4P3KcXkuY zAZ^uF#nC-f3W&s7W>%R2=E?E#o}12Qvsu;Lx^-(X7!1eb{r&xojg7LryfqpOw|2n( 
z!QJzJ6U>e!?HVAeQn)`F)kxl(Kuy+B9O56FJHd_02eM?7>!2G zx>s-BeCdT3FI>2A_3G8;=y*1pmF18{CX-3s9F50grBvIVguV%Jx?>=Y(5vug3hRf0 zbA`c5DJ6}SpW>To%+Tf9de>Dd%=hk(p7nQpCXIF8yLe#Lb+{aeAUqS2pc-SWZHSE0 zLd28l@x5Djlu}o(UPI-MCzGPgNJ-r^$oa}v($unPiHRQ|u~3dG9}x-e3vsQ>fd(We zAOKg+Z$9(HQ*HIJwyB^}XAJ!D#Q`9<$xp*wWea?Igha_w?%ri#B6w@$w@(5K$bzL0q3YxaGj?4Qe-HJUbk=n4GD>0agn;#q)LC8$f))@~jEXpeoKQ%}2zieUg9EXKL4!3Y zwXEjm*R3PFlb}P<0uumQK30GP2#i`lg9?&srp$C^RXGsC`gocVAA085r@xB<4vwa`_wEAX*4CERdODqMZJggMVgJ<^f8WE8ZW{nA zV2?o?&nKQek|+z)ROji39`!z<4@>ZC=gBcw0K=SIQ<5)d9o=4@z24#KeIMyzAOexF z_d>48TveHdJ?zPfaJC}f0#yBGly#k1MhPFZys?pfyC!EH-1COV(qr3X@z;}f-_Y2X z@x%ur4(VIVV*Ke(_W6^DsYV|u1O{P9lz++RXw=pNP|gy>X}L?^%ENDLaeMb{%6Ol# z^>tfoC1-dFi@1D`%NjjXkM%lzT(C%WmpilEZknnpeK!*A`|2v*FnW347a8yKX6Uv$ zaZp&X9I$HD7H753s`chG?wNJLjwmsTPpFIJ*Ct)FCh@jO21RyG<&a4>d!p$#=s;9hrO8l91 z^{<{iS^&^bK_r%ISjYdYtZ;b*v6}SXb#z#ubG|$wN1^l3pLyvVF9ZPygWu^qH&`O? zEN_FApSv4v@x(2&0S~Yeiyc_mx`@R2IU~ms#5YOIzPfI&*P@-JSFm&wt^6~dj#Fad zrR@fvm~OpTK%{D`z-&9`thE7=&|0%~Wm#t0paLQA!VH3l-odPGvMd+IJj<^>bnWiz zujBUiBbP3I?zevF3t#%e4}I*X&Rsb7bANX9^MB*7QoeER{LWXt`CHHE50|*n(&Wka zyzTXaoB!fBK1s+Q{NRUPbt=nuYN!eZD9-=jAN~)gKmP}gUim-&`qO^}+R?PAF1H&w z)h%b6R!!8X$%YDzW?*SVyecvhJ)5B!P;FK1cD;Xf=S%;^&y0@#$RGUQZ$16tfA7Ej z)Ng(B&b5ahnrZpR&tESG+k=C<)E+@OY@~#8=4j;>=2pymfj$EW*j ztKzT|5rDumgz)#>^&`ug&$o~$^j0i0RzQpZ)>EU)41Lx%M1rsV#^|g6?R$&qdw%H8 zs_e3EfT>iin^H&yLP`O9+q6>vHW@XWR}nGKa|Bj%ayHx*)2KWL0BBmsG5~bjma2cwN~F~XwU+S%EeOeS~l?iEFG@#4i0O9$pr zIC^Mxgdt+zLRklYhmx{->8XH4tKPKcIHFL0Oi8~CA1}YP+;n^(MKd8e-aU3R;uJ+u zgfQi=zWQp@RwA6|`8U7${A~6Dh?+F-d*bo8=lOu0MpjCOKou@}4}|(-Jo+Bt4#84V zP(FaF5i#k@4>LfG4B`P%YgC}kj#lv8dxnaNYRbpQ#{d98&iV0Zr-y_0pjBHG~i8{#kcHrW2d0qtX&H8TN`K`i!0cmR14@xV+51temu7 zbkVfLex_x4T(HOMy-@hyBb#{wVq2| zXQIpd>_8mSx&v^mqvBu?PaD^A;;#mPVG(QEzF|d0UqE z(rH|WW3H@uc}Kyzvf!DUmU%LJ1Xyr(KC!dwZJy;uMPyyd%^o-uhY+0K!)#|+7V;`v zYcn;e>P)KvaQ)~={rQi5=+FGn2ZUTM3PgagZQbGVWHPnWS$j08K0m3aO*3_jM)}hj;AuKECSh|LZ zB~(TO5n({o=n$+(h@?O92g($vob&8Qz((!eo4Sd8kD2`Vd*5;G{KlC0Z5MaXT{;Jv 
zz{9#WGbyOdoLM>xG%eUC_wDiR0)$G9o z;w~#sJt7LM*;lf*p3^s8|5sW6(|TR_cfRzN=8Z}6r`4Z~4B<2kH=UpR_Ic&ID=+Ak zCEe;PtBS?RLh8j>Gj=hiL-34F(MzZ!XpuGsi@^oB!CWLG$t*Nb1K znC2-9eU?2Vr$)I#L;y*RotNp-e4n4m%%Zmh%gYhvL73l1;imJjmRnmX_^2hmVjb7F zk~qX!rPO0D4YYDU!rt4QvMNiehybqG0Ac(qyZWY{>v*5`I=%N%e2(u%Gq8XYx?l%a@iSIVR=!O3C?LWbKL{D4zGX1o$dGY^bV{f zq=JA1S_q&^d%F!?^ky)b=+0G<9lt)E5g-*{NNJ@!3NQ;xDJoZaSR60lwJy(XDW%Fx zea}OGB%|`qjoa0vx_I$zKl|_ejTdk2*Za4eDqp*ItCA=&t16m(D-NugvAhA@!LUUw8kPrGE@XwlZbO{y;#(WGUd4v0TywBh>C!eGNhEqOja2@2{VBT=Z(75 z^d~z?&-XzLv)jGtFarp=2bchqxbTQW19NH;Vh#l>wzn$R_f{!Hn4^GgJmjT9?QJbs z(-?p8FFf|#5Bxv>>t6n*-R-l91hE} zjA2ZSF-j?*1^ezi&ySCfZ{4~@M4OwNbj=P%njc2OzD8`ZETF|T04uA$pplV zH-=j4q8LmjljFmKko@+EC!Pq3f&KmcAiXQ4f)7E|TLe(42h*?Qy3*^X&(I{HQC5Oq z%ZqgizupU9o_=13C$1D2(qJ->7>PHH%`$!G&YfGgZaw2#pmNX_F`pV;-Ju*pJjHx z$9k^gM0+ou!f`HtiAikYl^ZoZ8tEQb{uS16iYW@V%xy1sJ7yI3-a9W5WcNHhI&Q^7 zndf<4=D7xB>#c42u`fdpQ}qnalMRS0M8fyW6Sz{GgH?=OdSKVB+Vb~lz^>(*dn=Xh$auHiJ&cKyQ7x`$Oo4F zh$!Sn2&)qUKp9dX(DGI*XcQp8v~DQR6spX%>~qv045)bP+}6%HAOPpx_h11Gpa4YJ zIWh_WT+=j-MR2~EjyE=;*!Vl2{_4;F%BR0NJ=krxwb5CgGlVpodd8kfAs;JLLmC{Tky(_P``OgBD-M2IMedcuh;te#k z?v+zy-Ls4_R`RV#%Wrmh1ASY@gTm7HV2*Xj{_j*-20_j^6+PM>&)_LxDp$|aTPxGv zn_g1BbotjF%X-RDN;lFvz&bwppoB1F z&KsQhet*OBiyXG=+{_}hhzp=Hvh)t^>DOF3hpErOf=(Gvs!C@y3fDb`gz)6v@gR7T zQK*-rB^>i8^@nf5e_@Gv<)K?gX6HMn?>@3po_e73`^FB%8C^!cMM|;~MS-QeCjH!t zzkQ$jLp~T15YoZTtFwUT&Nmma^`5P}w6m*}A^~6l_TpS?Tkk!i$%?$K8l7j}W81bS z&pg|&KmV#iH#xj>_tjVSU%P(!;VX?~!|khE=P$hY&Ch-P)4z7_`U}Umzvt%78xKA9 z*kjiotJS6L-3@Gx@8lbW8eD(*g?Bu(o9W@zCm#D>|IM$Q{}2AqFaNE7tNuNIk>B~_ z&Cm|%9Ax{m7LW!6rgF~eT$A3A{Nj}#XWp>EY>ZRhS=;)-c|EuwJC6+rcy`2t^BWhbyl4-On%jH(SXo#W%<0GSmPd^E-Fat0tC?(Z98 zc6N5O)>d1$zl8SYfrjp>G;L9*>`X`suB$ zEdlR6Pp7jiH=f(3b;jgGSl12D-w1@8=Owfr;n$R=BZfTE3z$fFD+Z-W3nFHj5(Z{L zMTBIXbs@DSn4&BYRojAR9}@5|qsesIZXDWjl%Dh^US5$1A<1H@=N?}h$CBoRNU!csHBsVVq~!1OCbK*L%mbA zR&P+f5?|~!mm=dK@E4ILBFoaYjZxCJZAN(S?p|4zSze&Ybe5;hrDrouW&n%#Rv

+yMrt;3wbr%#Xa|@o!vz`S1O<{p1%u`_gnD$0MlyKu{ZU>_(D1 z%R)><45*Ywt(cL0MU-i+LpDS~WCT(KZgqemleZ$lDkkU!x+$Gep#j5xgy@AGvj;J1 z#XSPBkZCY2uJZyxo*Sukz8|VZaU#I1S(IZVKn%*xCZ4pbj-T0pAAD-_CqDAthp#^D zAlvh`J)UKS$;g$n87bw;?9r_pH=gF!S!)$()XK4M*o*L3(X6i3jB!KqV$jYkWTqr^ zbkfO4RdAj$ySD=&xyZtwsm6Ncd@hIgiRU-InC|>|Z}yr_z0i6@FZ>&roREUJZQGRs zQkqbt$8wyG(4FPN>goP21g*^z7p4SNrAN-17CIMLaR;E49A->7(=WlGi z&i?S}SMzcu>w-qEgySESZb;(Q+G7r*ZBW{u6gykV(4Jn>KtT7IrL&J=P3geI`N-mU zS7Lu~HeB^Vk8qD?O9j1zb)??S)y>ta^iHAS<&l0nFN?mFu`K;5Auqj%0CqMdIFF)p zGS%O<4n07TRk{IKocWbgx9TmD0Ca|nNhE;FX#|g0mQ&jH5P;b9NptJ&J6yFn5e3(I z0VZCUXPKk5R^%&k4}>wC9Hle(udKad7TP^|KG=1{chi5Dk1Zl9>cl|jJ+g?fsNj`8 zx8X>70!!4gx6tXj^#1HUzmCwQuk_^lv#iGQOs?$VD$3(et0)d?4lJ*Avbvv&YvqvF zw0UB_No$;+dJn@~7bU>Lh={B@?T#1rzycx~h!n90?}N0iway8ZF_M{9QNu19WbC{_ zH5v`PW2MW{wZ~1~xOP{+?|c5iC;q!n{qw(jaQ%yJZ+acu!W`kGHeO8o^>2RVpHTkH zXmjI<=bpRW+RgE!PkhhEFO}46Z~XG-KL7Z5`0(35U zlVG1jS|OqcTJ|DxG#GR<*(Ha^ptLTs@f?*}Kn4!MWF$tJfyy$Y5EL^A5VH^x5CJlw zC3aezVy>~z0fv>zGhzmf5CmF~h3J>7@+At0h_(|308$LZK!60G8GyOFITS+F@gpMg zsoVN@eVMflP|+Y~nz>cA-pZBFUi#IWFTVKsyLZ3vFFyC=(;q=a-pFpbp~eFzO)xNY zV13){H$o=U#+0HnlRH$-wYIKjbv>OfBB`o{kyZt^wOn^moHz|3dW;>ASf+leN#7qCf0RLM8@R7aUog= z`Z<}1q|;g(@xC6jo5Bid=>`9Zq-r@eXz#*UDK;;Exh!bG5_FpuSz~C~V-tIjwAQ*P zww2P>0w5Ooh!j`Nj8FqO=bgzwX=$2@aEx=auN1aUYlFpmF~!8=mjUkSK{F!KR0Rur zt`X|Fe~A*wywr+H@0|#fViw2jjVVMtF4|(Js{qNp8)5NuTH*|+(Vx>7LoSNtp{AvdCWW?Qi`zP-Il_OfY`O}NP zhh_K9#mP^=VL+q|B07PpswR`k&gNFzwv&4Ai=Y3@U@$1h+gm$VA9?iYJkyJLj5~!# zUDqKEKyYUXD16BO78(jU(t|P}%d$Mr<21$udZ|_*P1@WGlhwponW%1SAoG5tx0AA_j#UO@!9f z4g`%B@OdupdFIiV?cuHc^6f?W*uz(cZ0g26fA8K`@7)>G#;mHGh-W{o8VdqQv!>qM z88TZY7Em6vAhAH>UA9IjYn@f72?f8hO(pC4V?*~yeafTj|S%?^Pb6iO-A+9oFw zk)vbI%#o7J7^R?XD{LUkveJ+jwxUTDNUapgS&f7Rpoxzw-Q^ zQXW`C@Gn(KCwv(&N*_yki8C&TNIzKcDVM@&m8`jvP9GF58lx;Py@-Gh#CHDT`~xRV zue<@@a5>|aTRCHAFFn?M9V&E6uh#Wpx!SXIGSaSnF{@boK?GAMifu$cDNr(O{ z?INLtNxp9@G=~`xF%3u_wI2PgrWO10gx!BvJ(*? 
zI*mm14!b1gCa~1dSxVfM>+mJj1u9GqlmH^~sde_|A8|zxs?)o=j6bAPvAnn|$G%)i za`8#0?w>!|&2Y^BPTfByL>;gW02mQM003qNAtVx30O%btBLIk2iokoy)xlBy`tALy zeyv5defjAdFMrMJy92kkVRpA3d2%wF)@%xFG~9XbJHJ<5di>LW_dm`?#fwK@+d^~e zwXYmZTl?HcAAa@&$Ze}9zyB}(zkc*j{JVeY|M2G@I@)vlUps~%mck#*Q&^-R|)e9TruYQq_@12u7 zuc;laE^fU4gMX;pytMcF&8lfGJoHFblp3L(O#zLX?Fk~HV&=xSRaH%^s%={{++b#B zxo(@Lv9r2vTW7tyGdZrR>gebonsEo$=h3iaq@%{ZK5Oo@eAlZ=4WSt3n^!!eX9Uuj z(po9Yn%JY9L~%%0wzU-rvRD9)8FQF{6u@fr0Dy?J(^mz^qxO_JG)%^$5}-mC6y!7z zB+)?O>R0eTmD7q0HVC%CRbYJOi$h%&|9|HGEZCMTI}F3tT6^bk&K=&pZ>)LjuAb*^ zpc_q01PFn%0E;3mJ0vpfABJc-Z0o}z+o2CR9P$rsg%!3#A6A4zjt~Q+tgs-MFbIGI z0b&N&b2qxHhw7^88s2>G88UP4wfwO&_gT4j=DGJ(Hwb1_+&b@^%$<8!^Z3T^cmMGJ z@G~F&)c@u``>!8+_W67FzDtWERSkysE0Yu9dP)ZQFY9my5;n$;Y36?&-GMo}Zt;{@U9`asTM( zs4PnWA7EM-ZNWpq+2wt}n%>5-K?5QF(A+3DXC3|)MWMGV-qtkD9=m`ZD)va2lN2YvnItTGu9*9m0wLM~o>KgZ+El!f!ts;y zNq?F#l&sh55UCv&9u@eFEAe32IUw(#^Q~;`!UA)N2jV;eu#gAwhMbWNc9rz-z<@;L znSlWm8L9Gi+AvU;ogRzw*Jq=CMS9B9I&gDFLPPU5myt(!B>~tHy2H z=I*`wfA^n!d9!NHx7&{Ks5op~=Y-k2lBRpxpeay*#gTUaU=R!;A~w(3~eA$ZtzH^8QxOm!tzC;(e!I9S#{0nJ4^SS`5(fGD2hc!LY z0p-V$+jI4ubK5llJ;oX2T3+(rxJ6L~NTxC7P!sHfneCAMmVig(UyrmK-~W|EZ2zS) zTTg1c`?u@+zb*rN$xdDdH86{O3SKcBk2d)rWK1F6e=kP+m5 zH>UgqmxRDLY>r2hCz}Yp`g@VSz{><1!r9mDU+gwI9%XFAPL=(^_j8q!V6mHfO7c&bdv~qN(oQ|L$h> zz&Pa9?Kj_k<75BEzx%>_e`NcG-~FXu{e|k*hd=niA1ln};p?xxef$3BzWue$m;RtU zc{eTWxBl5beeb6}dF`neuMk{cE`R%rzwylJb$I&yWxKj@`s9E7fBgskwJ-nCfAI4y z%LC?xVR!q_HR6xo{ei=yYo%}Bxc~4mx$^iA{rL}n`X{^l=U0voJxTSjGY_1(^5jb& z{x?q#7t%FdWlHf5D+YM#^r`^{U;$#FqB<}LaP>;u{KEj@p{=iL01-?u$P<9>?la23 zMnnK^!5RPq4#b1+zWd7SpZolmzWK^`fBVkM4_kM)krmjjt=D8bA?I8{232j0Xm}ym zl&0KlR{h9>!!lgf009Aq%me};1cATv@n`<--}LA@#tyJG37T9iuzTp$~iP>_;@5m=;(?fxP$b;3X!001G1j9CBxI>Z>L zNU^;#px~&5Y2?i2o_6bX7Pw<3>v_`&;-d*XYQAtxN9vTRYgY@ z4OX;SZ$%`8Ej#BJfrtb`h8l)R;$R8#OhSSN*nk&loogG{dB=C|zH$FcWphUN-?~}y zr>@j5-aPrq|I`2NXMgtR!`yD&d;)J zF;!_rJP-8c>d7epTnAvj9^=6sdm=eJJdEb5u$gV!uGj0^w{Ih2*L7uChL~Ja_VCoU z+c3`{cSfVz!9j>#M?iyUod=@j@$s^*Pr9ya+jhO(06?fRK}2l}v4}?~TcoQ1!`Nh; 
zP*43gm;$wM5ayHgD%fN28kXY|_L=BQiSUtA+eyVTju}hVz*rWXls3ZsL!;(4OoXKk zrLh5^?KS{_1c*qc0Dw?YT~sSqz5KZFNo!-KKlFPWOrqh}qB!jL7Dd2euwoODFNS<` z5-w2+!d|zG-DP{r;7}<(7p)@r>22)sy6d7$5Q@XVV2hXW6~37)I+2`|KC---b3UKw zdI>x-w^E@hvY`iwa%VCJ9I_UAJ}45)Sm|No-^hk@kW1=d#9yO06~~XPCGz`TAVv)J z{o{tjZ3%!B*BcJBf1M#<5q8`*tzdO@r9P;+uFaVc9DMmpzy0v-SHAXnx_AGr@=Ede zbMIY0q(%Lht*Xwu#=)Wn*EW?o#3ep__%P%ldGA9aQJ^`o@=y340Co}RDkB636Mys; zgbbkwD#_L*0FxX|@K502qR{7}$nq}dAVN-X^+j@;h^OIZT?s9DwLZSdN-POT=QG!8 z##sFz0N|mAq=rBMAV3y@P)RqOdyq|e*Q+4-nL}Yu9WXMQ1LyBW0I&wVYkhpApFDsi zX|{HF6tSlo1_6p7ObP;6FjfFul$1o6y>sF8iOGub3_^m2DgbFaM6{*xo|u{a);YHx z=nrnOsw%_+8HwoHm6PjX+n;&*bMOD)8+Y$MXt(#yn^)evkB2v3EWh^h-3PCDEN!_c z7YkbxdV`Fn1#m1!Uc`GNg6zSC>TjO6$S4f4CUOrXjJ{WUBG?4zaztIz3cH0Z3j`9V ztX(V?O>=g9v|Ly^FmUT=`Q-7@GuKX@e(dIZZq-+hj!J;nUir?IqvOMqQ$Xqjx~94N zV7+eJZnd)3)^%M~m9>_g54;r_Ey{vDH=T$u01^tJM?qvl3>lzasPo*F0PTS{;H8s= zfLnkFGlxb;y7HbOaL!p{o2Ch?D~h6Nn#hk5Hl%~UG%STN!<}*PoWk#h;vf_X5`oz% zhY1Od10tdzck-{2{wg& zXxp}`Dq~F8RU-1v_pB;V8ywXJ$*Dmujl>1&F7CVj&#q7FHb?|$kPPmWll(2Q@>txA zo_(O5oKHTh_}Jw5bAE}rB>G9g=AB)d+eRj$`30(LE(>rjdoRKg0roA9W!LLCHyK1c zN+itA510iQkSz2RMs|88Q63gFh=WFs7%R48NXf!bJ1E4Hia`k}eQ}v_99s|)VlUO~ z#HS=iGFgS*w>(=&?~)&vi5LNJZY!CAfqf7-6^;{GtrnB?U&!REIr(9F=2@U+{vi$< zUZm+mq?xfTQ@mLxVro$>%4y#F-uA7IQdA0Hl%=B_l@N!1@-&(!MqKE_{&(JQ5kUYS z413x345O&L*#5(gGwPk3t8qkqwxW6jaf05(r(_ogJ-SPTMGztAqT!7KAc6yN>x(=D zNs!~Yr=Tvoz3OL3VV?a-8Aap`hU{v9XoV5z8-q*SH|CKv6}M-=BSb{!MifcGGso@M z>ovnZkK&DbHwk)X5J3qgEx;fN_P&2F94%E|Mo6kLDBPrVsQ%8eTpDjk^z=wBl9P=0 zc;O%%!fvyGg-LekIf-1U_5^-7kv<^`5m+1m2!dcgI3j|87dA*kq`HJZVT&jbRRM@a ze#5U*@!ujcWS@+_6p>vk?TLu?wKyy@6zlJA!+^zpcnb}LLI!W2 zX%a`Hp%*pq^L02_ganLcrK_77H}+O%B!v06^?>iedzqa}EY& z)&N>tc<{TW_h!JLf{ivRCO7nk?48WM6I^@y={ReF%+A&{LO>uNyTk){?}Y^sEs`h; zVLZ_RPGpBkGme2B9u1@n!*LFfnn(x{=A$eMLoM!5)<#DPYWk0eZQBNeKwv|0gdSFh z)2as32l&Fy>lU~IRv?O+!aOPx~`^$_YRCT*0$mu5~B#A5eBlYYXc!-^nlQH z+XIlc>%bZ*u%KdLZB^92_q)G){fQeNf8R&W?tbr&zWiJN;2-_x*Q@FpH)pT`u|SVfjc<7dT{#ko8SJOU;59U`M}RS{>h&|t=p^U 
zrEAZL#gn^UAi@D4oE)(S>%^CM?zXh1Wo0_w0THykWf9o`03-TdOzQReb}ELrhvY!E zT?b>?DT(*p9Wgf^5sV=!(N<5t@Y2&SyaWLM0k`1&X0utXR-4VHZQG`4x~}u>Sy`5K zT~}2B0M55f)3oi@Kx2xO#{dUzpcuO9#$?- zn`oMhYD0>adlm9rc}P~5uPdHN_(m7`(I0dxSN(Y8UWo@{t|jl>4ky4C-<#B6(SBn+ zNj;Qqfe>yBePxrK!Lh|Z`J0G4@gfy9kl@ASiAN3pGCE|OJVw*fuu|8vy>v^18n0I* zQ;z)v>Jk}Fe_y!G1cH>>59*4dZ8^u5;(zPVUjzk2P78@Fzi)dGPG6>aN< zd|lUNSq9Lp>pGm&Yp=alRn_6)VO`g(T0?6KJIt5pjqB11GYlGaC_A3pFCrX9Pqsd( z~TJGQ7yp@7UkWz?UOFj{h$T&)N=o|=czfOkuNaEvPXK?9+aFU<~XEf zi0T_5$_*A{j&N|{i1OO%(Qe(ka@{X2z^9+Rd4AUX{NMYx?{3>Kzp?)Pul>pIee2bi z?>~5R?K&t5w8$lydnM6Pf!CJB^(*C#!|J$N)CE4ZtQS>%aBy(ASQbSA0|&0jEEbEZ zs$#BQUDwOyvTYu)>#Q*cbyb&DRhYW0>biyvLurRzYv4VjctUc8VfJs`z5V$we(9qh z{ip+~7Y9XAEbGOhs*es1pR`bGI;WW zP=Vvrobb;mw@k7aO`{SsLGr=aSQ{Oq<~-?>3!j6_AvnB2QO*RFasr!dl;%e`Sn;Bt z6%;**_^U~J+zri7cRIfsC)j6$k`{JG90OP)TC;RCkO!|ybeI5qc zztzdk?1A%la7+mc)^c^C1Vx)eQosNPv(nBJ;9L~)yRKU-7Qyhy%uTxq7OEi12IF2t zUKY9U*F{x2X$cuc{I)$@9-#MLP@1l?hK%vfb7T>HZN z-cy?6Zf%Z^pZoD2f9^A%{`t}@zyTES&wl@x-v83aeCNOVmCt|t!ykJ6)$e@!o8Ni- z^qKcwf9xB-@ms(6U;XDF`H`Ra`YRu~f4=(1|Mma*?$>@}gUjumx34_@{7?RS|GTw0 zGOf66`6uW1{>%ULuipOR%a6VLSDyOj=Z!r%!p*y${@An6e&F%Pt|4^3Z8zKPFZ{wU z{Dq(TsV5(Q0!47BE}*wvGw9@ia(hlfz!MmV1t%t(wCS2C;lrXJL|QBl7Rv(&u_*n| z1HAWY(|``rfb%yW`vM}g4!jpKz26D6dmy|la1mq7x!X#JE)0-l$QWa6A+`on0Ui=d zjriQjGta&IhY;X5f8js;%I5x8yXroji0AEg{h(`BeE6_wnzAfSVZC#m_Z`vs zq67d$185Kp5F!u&`1PQl5E7A4l#O_UzY|!5D7_$3=yI4&8a-bRU;3H z2*EIlLl6+M#uy7|Jpi#~IzFwAH}~#Jf%W3hH4iNm>=Ch+!Zae@U{No4!)NFB&+onI zR}V|SuE~|+5&RF^+s9@7?DZQT{mh3x@{x}`_QLbNC>>zMHh4gsV;~VSjj(Gs!9XU1 zL4SA5i$&<}7||?9=K)1EbJe9bf^Hcd0M=Svytgl(@Zaf zIb|ak_`>&zdLMtvtz&wBwA8Kxobv}K&E?5_5*jqQT}e%Rz2s;&i2~@Jq(7Ju3iBz2 zJ9XBG#)7%=6GsJHbS07FGBznD{8a}9CzGE$uQ^zXSE6qO^w$6!U!4!0%wg{Ba~qz| zInaUduAUGPvam3RC~ssz1BIo>ZauTG{5!vO@0IVJf9ex|{XOsd%$sk0_h3~HxqGl!8d?y!VrV%KOkm8sUay0rJ0uXqSl2MyIkT6>FU^(X_esH!NplY< zw-YEYBV>k~_*biE`Uc%FIX|_0+A&I0r*p-~<@~*K>Rhh0PXgE+Im*{4%0i(>n-gmH zh{7Y%F>}wH7BRUAkVT_Mmt>+C%%5U z2d5aKF9ZZ6#Gp9sWeD#;J-4%}MBULOMhh^ov`h+pTM` 
z*C@FMS;CbR@rzfK1GRny$@_^=uKrTOA1`_`DxpMA5x4>jM$p(f^&Kun) zF-)3FVue5vd%wI48i&^WDWJdkC(s*Bo_niBp=U3GGrkEkUks0q_sCMwa>34nL&pkYU{lECX z{3j33?)viJTVMU$@Bc6U=J}@ktAFEf`}cn4nfHF=^vTC>Tz&S`+78aX_Kojcm-D~> zzxqqBzP5SnDf8)1{7-)F-}#RK@BPLv{_3~B`ts{0opT4IzfZ)-NhL_1Q)2-K=)p~Wl*>1M2bM9_)#aLUGWx1%y zn%24Pwh28M!j5!Z=e-Apkc)k%xr4?yq?U2j@~U%<2QRWYd@9-w;?6RFm}Bp~583Iq zKw}tCM6i_3krc&4GJo>TuOXCYch3+jR~cF!HH?mBX?*eU&icDwZ|}VE0et%-&%f}A z4}IjlFMRN^tJiNF9$mX~Vvmm7E5{~?D#O7^2g1DNKIsV!u#=WIhzP)Wagu<*P17J^ z)EG4(&hZKRt{u{jx(Hyg%v_&;FYD96IU}ua)Ja8~5 zPm)Dl|2@&bC-28BPEE2X9zE4qlLrQ~APiCAljA4m!bnwhP5jG?UpXU8qNL-Pdq&U} z3LOYGx86PL(w33PQS!r0Xj~#*Yp~e^jo~(3z(IeXJ2}D)#n@XLk9f~M?>BHH7Y$cj zX0(FM6&K1iv}BF>WFV9ICe>6KXA%qS(p4JvND>pf5a5O{Iu!wgXv zyDf+yg9xKkZa3Rr>tIAQezzxnP*FsLyRHZAxLwEsuo4gvOH`Ic0RrNi^=7l#RAe{n zb>q6L*KXatd-wX4Qxd3c!8D?fBGRY#Qo2{+)jG?HR5yDOVd1{KKs*kBuIu8Qi=wEj z`h0aB_B@Cv5i( zAZNf+9T2KzB+0D&!R;6C(NhiS7ctv1{2KllZpi(Vlh@=P0)WARkHFx?3jd^umd{9Xt4Y5#s`sIQBCb15)l>$2xK7SYBO^v%%y$y;h&_+ zX2LiEG(jcJN$2J{Sxn|a`B{&@=NyyobMlhQLdoPWytKP9Inc?!b5qq3CkY4CPiZ*J zr;1ai!<@3g6ro72NH<`x&4qjy?bezcIX|VjvzvS*@#6131k?GYI~JgVI{lY2|7-Z{ zWG9J@*5$QxVmK@TKeetsN1SKXL=8wkt5uB2b%lC3k(LktVMLb%$aCyVgw^l!x}+vD zc_mZ5#5z5TCQF98Pqv7S(vi}-k=w4fJ@>C)h>DkJRhbAEx zIdMoyW%zRHhDoxVu!&?2$*U)S)e8x)7UfP$iVW{io=`^aJ%qtSkj#8?;ki&6r#i_& z<`(z^x%N|rWCA);Q^U3v6yufbGXR9V=NP*j_H7wow_W7bddie0ZExXfG`ok~&!6!fU@yDNi zPCO&>cI&n)23h?0U-%pA+i#yN59#o@zIpxj-S6_j*w z@TupYFKn@Cz3rrcQE`;HbBTMKMyF+3Z369c(%l??6d>qpg%Jkf4}nme-yk9&57qgA z#S4qIMcCAO{Dh2RBXu5ReDi0s##-AP9s21ZZT?7Ys8ni!dUB-I>`v`@UT4FaiKL z?+PsYOb7;P)?K^tZ6n|fS`l&HNBeKs&@J+I+pae2P1AOcTjw{M&8BJIJX=BQTBHwc zzwy8P(DJ2cUphO#yME|}aND>C54X;_g^~KP`mrDW`14O+f9_*{jR=fE0AOGM1`r2Q zv>QQj036QL8w;iu!wc7SUFUp3g$RomQx=GpAT6$<1?8x3$0++~jna7~5zcsAgA!Ho z6a7^DCYo2`An{!~pFl)n>zF9VOvdTQE6y{K9b`R>nu@yox@)$b)%Q;&Gws1PwKv)W=6dYlqO6C02wiiXv2F zOKOFIqTAtfLQgJK@8`hdk)C>_@%N8B_bZL1oVoks_?nSL#uU+3la5Ks5Z^*ob3oL# zTOjluqwA{9Ln9z7sA?eYSQvym2~~q@yA%;Y@z60kCJs$*i*{77TSS;0Gj~N_@h|4v 
z25e#wLX0Jx0f`ZqnZz1{EUxRCvapS8*Ru9ScUYc!0eBz9@)e>BBt$~+P|`3r$Hcd!h9SfVSETwPz(Ab!3^f2uLVlz_ zIS~#bR=7FbHd%l^DI1dFQ6`looG8~T5(#iHyFl{Z{-=4qHhEZcC{I(2a*sUuug@iW zb^cst2pJ=1-#BD zrtJ}N341^{mp?Dh+Abp(&84mBlfvCL#2Mg~m%cTaJV*(+{Bj5{`yj*57E zGk$`c`{V@k0yzpsWFdT+*P4i*aVqQRUqc$3P`tQ%`=Kp23ru3Jh|C<<1gVj+&j9BI z)OSED-tmQ;K-4fpjt;w>7Y%x%8b0c~-{HCEyM!bBy2Si*!@at6Se_O?AOVIklMO}x zrzPHDawvUAB^sX*54}1V0T3Ah5JbelZZu7J6R9aqCN>_3psOMIU;Q;HPUt1bj35{H zT$>z&9Uxc$g7Dn7ZC%&al z@#GWk#`U@^pL*KHpM8q9Z^LGBa`V=k{MJwZ^iK=b z9y$ZXX4S3TTfXhOhrHs~3I`9qcjq7f>aYCBXMgnCQH4caumF%J3uEzB>rhP$i#?8JMILe|>4sI0Yy3Ws0}SOZ?XZV*8Y;W2OELh?x7Kx`!0^^^&UkPLG>cvhYi;OR$73=Sub{w()Cw64)5e&P{F~EO zoxH5C&1RESZH`5aqT4`*h}TJDQ~0v$Iz225nudNbY9OIvQjQytne}t^N&ke1x@Ut% zr=-hV!g!L5d#$J^W|uj)LE_%f7KF+Bqq#&QGt5Ur&+Wq=bt?`cGeWAqlHw{!fo7R< z9>AFE=*`qNns5+_RU#QmpI4ku+?jksv>^ueUb$Q7LSijr-y4oIW)gVFw>Q^kl9g(M zi#8ujZip^8rqOMWJ4C~VhIYMh9v5oQ=|F^Y%^(24do~6|csC?78rZrG`i{4_wB_|D zUi#6;Usx=wwteuOuYKk1+i!mTn}2xy+T-huJH2{vzJ9Rn;MUDWK>6q+jA4w4jDntIsQPA8mHmf8%XQb(+Mj6^}q03P;;$M#-DZ9D0^@O&jT_F{Uib!^6WQ zq*PSTm?YXAr{aBbsDl~&n$xk9MI|PuWOF9bXHI0f%r&CkLlkpw>w+OpP_;hBaZpSI zl}Np{-AZHG9uI*Rc5P>EdA>etf!DGrq$rGO+LE{2fwQGuE-L6cXuHaq``rc+3o-93pN1zyGa1Vsu#f!y4lM7H36~=U4hTh{Em*aUQ)~+2s3iol6lCtvPeTmiK!u$gr z3onY1WqVRv^s3{?Iiq3KMOhOo&G+?A=Il?SiS~oyP_{9+F($1Kdl+AkyC`crp#x21 zTnlj&JRSjH&dNF$(>K^7Mq~jHW^1xr&64E%unzygFTF;cL<^ct)AR=I+B|0AGRZ6S z_&OUnAb=nwo6|ov=aNB&u~^uHE+}JVcuJsezByGvE_6-?mS}H9fxfNr;DO6+HyUet z}5YuSA^NBmeIiQxhB* zpeQMH8K<2uS)XPiz{i_&foJ~)CW=zN(s0x6BVHU<6>jXmzo^w}zMt|)J{!N2t9#2} zKk{yYdU_U>9>mkeomix6&l)kUveeI?{Z1y)kmk8l&-M=5u%R%foc$cea;BC=Cy#aR zpnp&__VVO^a{(#AH%nNLDndjKE$LBg81G?xD2#@y(qCX4Zc_y(l0Bc{>3iL>tnWYk?2rA0gQJrUyNzpKeCqi>`r|+N#%r%1|Hx5UEQ!kF zgJS@oqvmJ-;$Ox>JUD?RcqqKb%|gt!DLeFwy0XjVv*ZD;LPtGx>f_5Oc}~>#srsB_2h`B0XSpF+Is1{XbC?G58QR29Pv1~)5vs0RF{S^s}DQ(=!N9MI7$xfaJhc=t(=(1UdBI#^oo3T`ckkP)%$I&dx7 z8U&e#NG=mM8^f+HEm_yFIJ8ikBecT6 zY+BE*bL-BdLnM1pFM3I&1^_U!g8-d+AEU@)*iJM+ 
z>tM+!Y6R^!WTS@&zP4?7DD;n6x0b}P74^pOZ2U4RtD%cPRIp&dX>P;pcM{vyzSan@b;^CZGf6mE~)bNe>Bx+v#HEvoW(Bx*{@FA(8pY#mW ztJ`xWdj8}+i5#}4m&4@t<_bx2tS}~68piTUwUAN6RO`+Rj>$e~VIo=D+!p1Ff+>iK zbH>3wm8e6>#3J9FT+x1sDltM$-XG(6lUD;gf=r*!O{>01oxubWaOnPusyTCEN*Krm zSz~~N5!oLWC}?eU(6z96@Svy`296ii@uxriQdOC6{K@A}Pp?>Wba;Hiy!Xk3TK?z&kV9JB?F2}Yzx&6JGrUmsnuGZZ)tKa!y*MR? z1fj)B4hka;w4lKg#MFhzJ z0U88=PFer}0w5@O5hh@WLZ)Gu_GZ}#c_1JbheDQ?rb27PH{=)?En>HB7zB;9=(y>V zd(aT{{fC@5iDpW~krXtk2jY|)GCW&X3lTx0^=h>|IJkfB-gjSl<>cg~?L0GYx7%0^ zY`tCwFM@{Kn0ORx-zQ+K`2{GV1Q~|*{84s4ky7-Ml%q(@BXY$klbpOKz#reJZE(SD zuc@VulZcN;uhksHMxLm<7lgX5F||mSbkk>%h*Fc-p^L_7mw$u-9SoifCF%kiG1=Ys zGiKQYi{r4tj1^xb%78Vg9bJh2|BA0kHj&?ubB$ynlk#UGfF)p)NKO5VAeC79{e%%k z#mr3B-yBTtL<|!`i)2y%J6!NCr~u*qx!|~+LLsB@Be>{fbH30|>~hC+v1l}3sv zcfe;SkL(?cFp1i^3=pLS9pI;8Pyap_@}Dnc^&JfA%@m(#hGXU|7HLEMN`P1jT@B%He@^{hli0AMARFU5Cnu}+KhmR;1#|oQXswkO$P5_ zXkEaJLsosjW$ZHyFk|;oSJj%-g>>>!3GDvwC)F+)+xH^NND3an6jVncIan0KuP;U1 z7Ta6TzwqX_Uw?gb_wW3@|F|^8um9SwefO1bpBx`tzxnvW9=PWGv1_+J_|nIyGH4jY zx6KNDQLqQG9dic;3HTC>N8sFh^8Dg;0Z{LJ62@Da#sSL;xH~_<$Y<^Dh_(K^sB+WMIdxVRl1zNdy{b zm5sWJ1`A_M!5{!0!3eNpA|gOSv&J*D2))mrLZloTvw*b&*cgLQ8gSw^06>NH><9@7 z8N{&@FNjEn3a$pvIQJ3#007{q1Ax9@rou*RjZ(ve2Wfj#84*tc2xtIF7zG4;Q3TZp zdSNwWR)`Q*VF2t!Wfo=tXvh^c%EkeJVMnEb$~G-6v5}fYijKMIy3!U)gX|ntN6vY1 zttGLQFgxd5*R5V(JuDb&!~-g-qNK8ts$xrpw=TqjfkPl*Xu7s6OPM!+!W|HJ3k7$62d?+7T=37t4HzvA`-iKrw`MM@`9=$*C=+}NRF;Sj# zNSMo7i2IO?AF^BW=-o?BFFB?tK5DgJYckPV6g!`TDq*ckp#}6KEuqrmtoUQNQzO#Mh^>4lTmigcZf2{3%u|xr541`3bx~_vu z5(cxljAO&2D=`6>$!*tHbVDYE5nznGTGZB#0hIzuP&>2I?;n0 ziD8u6^oHke%)xt(Wn2=rleZ*$o2w#ouhUZdG{ks;EQkp_7;9VKLSY$w zY3i_+nAjI!&H`!<7`IE~wYI{C??VM1y`Q&lzvaDOtyZ7^{O3RW+0Ry0^}X+Xud1q} zYeZEmF#$c_%y2pY`BI)#$uOy8E-bCX*Kd zVzNZatYf{n=U25gD*-%{qN_(87t>pz!1+AiM@=ubF}K2 zrrEpVE#q-_hVJC1(-|I+2^`7EgGzC%b>`G|M zq)d1r8J8Ke^_+yM-J*KQlMC4+6<^r3A`!nQ;WHOv=XT~DZ*@NSCBNUZ)i954v)eV# zk@N5MiA+AFX~^U~lR5OsgcI6@eJW|X(sX|=+U?{)nQ;{5pJQEIR82IK+@QIa=6vDu z*JbE7s4x^!4MdYc;>=%n=+jeaWj{6rO0|1w0 
z34jc8Ebjc_onQX-f9e1~{Ue`!>c*|Jv(>#fzPo|$o8SEAOE0~2cD7>X4}9PQAoA3$ z8-ma_9SUEqt5?4JE$bh=`TG4g-gx8HZ@v7nKlc-7t1WEWR@z%HKL5T?e)j$EGs5R( zap;`<^iTivW+iv;KIl4+=+M~5o_T81_?oEOiXcL1JL+0bh*Usnw`+q98@r{t^{%j` zH|L#0@6U+JV>yx5ZOY}sYyrTzP1vi_`vRQ-Y6rL9q`@bVs9^}-NrYj|@ikS{@;_v9 zPSmG(TrgSJ+S#3HNh~6aBBMC1f@=~Xs0@&ysyFNp0Ah3HJdD_yXs(5z=Q4l?L9xnV zC`um76qPn%YjzdC!=05;-UdJ26Ix$}formvo)g6HX2{xhFY&J>i&eqb+5L zl%MDilX6&&9XTw4A`7AXcZdZDhUsw7VU36wCPqY6RmD(QP{Zt*RS5Jm!ByYbzRr@?!5U&zxn%yw{56wNQB47$8}wRi1PyI0l*6~vtx0gk*Y{ZsllT6Vyd<4JH=BHf}YR*;M)%D%55HlJ0m*7}hY6)SJx+Oh~qKl(%vkwFju0fIac0<=OOR6rB}$G#{E;GL)t zVb6PC6a@j=qKJ(gLJAH5EEWshMJ-fHJzuYiqF8S?MNw2$$>jb_)60>fgXlC zLs_0s=dh|O5m~KP%v_cwQ6B~&LxHkT#W6gtDMQ55V0Mon9IbV+fwRVxAhc-8kd$3Y zu9*Fx&o`n;&a87GDlP=a9*Y1o%q*Tm=_=e+qoF94YhOMnUKz!$xE9y$vLt&v8p|rl z0!LB6+0t|IDREcqMkbql)Bh8EGSu(`il7K`51u&OHDe~0Fu`LO%cnndKrdjB01Y$C zP+Z9L9&Qq21WHJnla@ghcy(FnSt^Z4yP@Awo z5(Z%s3hw;A!>LH552-rlcW)l)ci=%8Db;kH>ukL|RoI`Bo;(cf}&#K>hP!p^vlf;x{`E#;CatbgIE1(szSIk?0ZPc9MoZ>mDXW`d;5?5V2!ED3CqEU?d#GeA zo$HH90x+{hVinw`p(wN(K^alt0p`PM0>A`-XNxqnb3lZ?lqE@kPz=u*V<2!1LhraJ ztasjdSC%CKdgqIx=w#OyA-)U|nSBbhvilokO8|&vt~>9ls)~Z8bIuy;y(j2ag4^umrk5e35-pt05f07yrOj{AOo41!<)(Xj`@YEgGxCm;ZVhKR6hy)`8fAz)e5 zVg~^gI{;zt7gbRd7PupDz)%{Ch$KMXEel&BbzN7M1<>N~%JQjOPhUGZT7U1`&E416 zt$+N9XY1wFSHJm3Ti-mq`{v?6-gvNh^=mKRdinzkTN~@I93K=#@zfLVK0G;k`|j&s z_{y)XzW42u$DTWW@};-VZ{K_MJI_D%^!0Qt`i3)GTsbrkp>%2g0WFJLS5ymYNGIV1G(LX*Nf|O7q znPT~gxCR;T(JFd=&F#qCl}ke5T}k=oI8GAk1qoBn%;h?MJQ_!W=B8~WaWoxODk8z9 zCL(oROLoCjEq+wdM*A0@EE!cv%H$w&Hmr*V=rLn8n`BmjDF&4|kvnsC2wD!CH$6iZ zPL%_Xeo$XYetl#rNpYTtV_~27Y+&4?N#P@(SbqQH0VQjXu#~qT)|;85&tzSN1m5z$ zxrQ(oWs|=#Tp@%o0ou8nVXylkM=qGh?)>ItpZA|8Ttqiu2IO-63YC)yl__@rDkg`8 z%z&cLT^o5L0OvcegFWX~o#Xpt);|trBW+Zb!5SXvWD+}n`P~+gqA04WI6OS;x~6U0 zw%e{&t5;ur^})S+Cs(gNJX;CjlTSVg>_71S554y4E8l+g>(4y%%#|yr_wT>;*kex! z`1SUzs;a|dtg6D`5&)d{NL@h&01!$LX<7#?h5@`V3nTch4VhAbphYux>jo#e>~=u! 
z4-cJ}V2TjolmkZp8jnnOaR-1vIiuq<9#CuoKgn2g_zxl=4z=B6aM{KpW(gR~6-|<{ zdI^JmqSY8(fF-IZ5jA3UCjC;qEjgONjCi2gadN-TbxYt@bErvCBpEQ-3Y_vo#}{dS z!K%trkRP;;7bLBwX+Y&9CA&RRgf3T#VUi+E-KX58>T4c)GiY#~lg-pmLlENd&3bjd z-EP;Lb9%r;bY!dg@X9mSZaE+kum+A6H8ZpVtz!{c5(1$20xSTILO%u~2V$)igcx(Q z-EMX75My>(*j~gM6Rho#K!r*>;yI@N(6~^3SZNes!WC@&0rf)GLiijyHq~`~c6Qdb z?ZLr8=zkt%w&*U6kBdks%*aCobhtU7aBM~rKq3doJv?Iar<`|BBGlYH`sgR+iX5nt zqC?T80Rv&=p%|COh(L?t^7lY;f(RF)z&pMWF*g(rcrlTU3iIN`pT;b6F>?R8%*n$r zCT#P943m)VDihI-^lR&x34lFv@wHm4Kq z;a1b2G7cP!pXVf~{HICIo0E&l2Xm~nruU1-umd6r?te{Y_cLV}>+ho@Q@k~vtnMYL zmoTSXU;Z2u0((h8G?5=J11EblL@n(n8>NpdTw$x*wiSe;D7>(DTv$tJfS56O#)3#@ z@vU)`g!b=}=lT5>6U+5J)3jlIRd7S8#sN<+aS$;)?2Dgy4)>uUB4bk3v z#1IvP5{i{Y_e@#p-lBT1aGd(ARg!2ejPvoT8b@4j$YKw9y+gH02w^maSO7%O7-I^H zV8XDLOxw2NnL;>-X8|Ch!V-Xhc;=mui3pyhC<;pk01O#sCO~6MA+B?sXYm4Hmurv~ zVJq@*_2BBk=?6diX~SU1vG9Wj+b@3k>!0}Khu`zAPksL7uYC8lum9MO|1A2`m%sJO z;p5NUyzzKh$iMvK-`i}q?|Rnvi@sdE?>#RboE&&t-aczi ztpI=oUt`D-GAIZJQq^@DJ%|0zKkOGp=I}kY>+Tms=k7@&0wd#iXUsH5z+9Wq-`fj^ zgBkierw%21x}WXAMchSSllM%TK_zS5naH(TpWmb4X}SYrK#8hjY=sxGRHqk(Z(`zq z!kP9chsmmwx#ef9-2i&bTtr@u8}vANchERae5qbz(7YnAj7Fig>!D$ zxiR(k`cM+lDo2gEr2EUbPxAHV;<651pR{@H8Fo%6(NmfnM+07z1L8arLrk&{31PfY zj@kKk;Mldhxw?@81J7wpc7`Ul#T4+qaD|rd~+9 zAX}jU5Gbhdu3c{)bX`|~t(OanX1m!ooiEF>wAKq-TL6G`u5J1j$7NX-MNySiA4|m} zR;t|tki}9!5SFIj>hyvsf+YYACnAjfJw@rHIoPov9Nef&y&N_@R__cEH=6a zdTDi#ZkwhF@z)v*NEMsf56KCQn&f4Z8>pr2{1tj1^xX7qxp=Lk<~5p6PD6&qOThsX z_9W(iJEVfn2i8kim(F5~`^J^pRqfu~b%210f<-MJb)@ zI#(`i(>7c01Dbg64Psr_XIme4zyM;1+GS?nEtkvfb{kR$oO1_9hnvmDlIcC=+V0 zWDtx3A!8uEF62=KL=LmkTTSD(I{K_9p#oYPd}D^N=2@PXN}#-?V`itW(--n2g&68cW@Q%r%{&k z#k&{(&e{5JK6dwuBfj^gQa> zHU}-cX$+~g6q8l}5rJ@`{p2L=N(8Fpvx+7m&yEp8Tz^p%Abo%0z-GgL;u!@95XVI$ zqr|J>ftic4(B%!1q9Y+1CW^kR)yfn$Cc)Iz(%N2HW^pFu8Fbt>j=2RxWmR%3LG_Do zl$nMK;h$Ai#R%QDZNuQsIYK65w{F|G&Uekj_3G_=ckbM|bNtw?^$k}FT6W)i_2rjd zde_Ox)gSuwht8q6ySUz+JZbLT{hiWc}Rf1?KbOM%f6AD0C*5V2stPMP`b3& zW-17c1#gX{8vb)-YK|X;Z<1oRnsGc5>!+-i;2f6CVO&Cyk`eaXOE2XfNZyycRc~lM 
z*UPKNkHl{#&t{G{=r0rWo?A}*JQtN2%}fCfcI0m`$Lp{xQRWy{Wjd1`fO%+3{;647 z@@o#+lLKn=y*BG7@p|!U+jery{0PDoe3f%TXC&zQb8;x*S?xB^el#|E^@=IW@q+?i0l|8h7rU1m?Wni%eN>$>nWR21 zjVam+AgW=esJj1?TdIvmQF_=VhYowvwyiN{Q6CsvKK}UAopZ*Rr=NbBncMB=;lqb- zy#3wPYW4K?sXzYXKN9BC(<>)O$ER1XkS*VS`wn|KI5=7?4vvpbs;XYE*M_QVwm3Yj z4~~vX&%qklwk-;a2#6F#5o={^+m^ugM0mWGSR^DW2{0xRb96Y_)ucj5tT~puX>*RQ z$IO8${IhM_u*MMH9!iR9q69N+F1fU!928>;F(dl8aj?cF!P;@Vl2zo>PK1GEl9LFS zfMpB`k3;Ewp425K_aPS&w0f`a4{q-`?GRdyh{1jx1AKz9xG0JcX`Z|~Sq@y{N)UO6 zm@@{+J!hS7ri{ZQCdd&X0(cPy3}rMbQ~51}qvNYrtg&s=wJk5}dbN326a|8?2qG{D zdvCm}O|AX(h{)b|T}QU3oX$Bf9MTA?s&dXTh=^DqA_`R*1HTUN7CbTW>SLXT)EOFj zcpM%cMo*gz2I~m1;U4dO(=?%n5PM!!wTP6qpc(^=tyZfD@DVCvI*}HH!f5d)>T9hS z89FN@0(#E2CeJPxV3<{N3&>Wa3Kz2*q|ldb6;^4=_=fgAqc}l8N_@<^}*MBIl>}NJqSNTLw{5s*Vom7yLz1MuAI=CYA4Jg|f#3|)v)yRB%ULOt$> zA|%n^Ame^ZldrqbjPMSwgb^fHl3j7({kk~M=qUh*?V#1kg|BiMQj47cBVjKNjx#;KGc0vUVEhOWt^yhy5k9aN#gV=8q z=%+gG%DOzXC|N(g&U1%-&)YtmlK_RCLlfk1L=1u+U|-5;Qml%-JdNoK zp9lFZ@(7N7Q~O?eyI`O=CaaMAWQU_2%!A+{ejp(L+8D9d%b{oyLMPZ^jy!-5hW93+ zX&QPHZC%D~@NlTuiG*Yg3L}vqc=iOsAS4nZV5)LqjB&yq#Ivufs%g8>WVkF#V~Zfl zA`qh4VIB~GcrQd@KXr)!j;|+On^+_~@DIN&MBgB)m1w@t3E@pjRiesnj;NGuVnc(#0q{ZA7iQqYp?!iM2wGTCG?!~6+M{mGU7DdJ z*s6~p{&UjZfGW-_KvGu0HquZ1eKbP{e~gGUO|#u@`wU!bBN5X@ zpOQmQxZWK6B)|ZB>Gqspqjx6xX>tyI$byJtc8(o;4Y@ zUvTM98U!@J3`77Hn86z`t{q+~!ctgkDr+necbjHg6veWxS^5?co6V-K>l|xBB|{Ne zEEX|}A~^7pvi=bQquWM?O84QUB3uPQC9stkjTMpxh$u7&sj4avsPG}UD+1@@p-*pc z%f*&FJg(OdgUvJ&)k}&B`AOl7@nVwX=BJ#nRE=4QdO10hi0Ls}JnU`k?WDJwh8_$# z1YU-#{wuiROeV81=ai1*8xdiI@8`-X^B&eYIVGKWmP5#hwD$B(n;}ww%!89eL=ad6 z#{!;Ke;~x-1$6*WzdDXDGv@L+kjTx5LwA-`*qA$@aLpEFok9?q_k_VE%+?p4;GUln zwmZ4T@8_bK7Q2_9;h)CMmqE05b|sRtrgF(`mz|ivkrdxT5p+kw)6qO9Ayx61+y8SHy1=sWw# zs4w^Edb=IN5qAzB`hf0XN;Fg@W#(YtoE>fEU3+fMf5IsPq8J1Pq&t#z>1>AmhaTyC zZVl)&xq;ZZ5Tg@fq=r^`g|&o(`c@X?*~ES}yPA``kW&cMy&qXy|3yrH3Vk!41geP$ zLbjOzGeh8s>c~7uXk*Z`Q4kcM&=AY9wH5@N?}&)~C>K96?ih*^!hiq(fB;EEK~x$W zD~p6=>gmu#d8K7<32WM+$_MVHP zFt+RiYDJiPcSR7D!;7?ht1}+}z*y|M?#`V%0Py(ZkB8h`B9gLt`njh|1aPe}H(z}I 
zxh3Q8{)^xGKmI2_ez3fHytuKbk2mY?mw)lM9)JAB!-H!F+xqq8>p%9{pSG>R2mXuS z{yp!jAO6rsiOj$M_x{7*{+)mEy*FP!00Ke%zBxVpAO7tBxGIlc{qCFBpL||yy*kG? z-ng@@E!{k{mb@3FmIRQ%2pT{_3_Wrw%VLarUk;p*QQG7$O!01gLlm|XyHKvLjgVr@ z#V*emKrxz0i%q=dvN`9%9l|y19!L--`5bXuPcBX|H(u-+A0CnECWFlFd(7LKd&Qiz zod?cCreMfSF&r2yO4n`OD{A= zSp}D*gLC@Eg=zq1uK45#ChadH^;=zY&M+B! zA0;bp7H^iZk3l1phVUpu?BA$-l2KiZf`yEIUD_y|k3YHaNz@jVG$10OVM!EFj4>jH zS%RGBeFsdXEdtr>I$u?$YuiP=c>aYan|AZ?;lo$I_3h`Me+~fd-+xdP#f@7}-Fp1_ zIEMy&XzFf_ML;hhvfFzvJah&Qw#Xo6bbK!ppCD>h59x|z;QX2LuJVoPL z{Q3xRoHjIRgPuIH{HI9dWo*;G%m|RlQ4t1mIVS;wIdDy&F@BjRmcf*9VG=|q5#$1- zn2FX$1MQ>F?$0om@yK+%~5{!3x znK+qZ;;}co01yBMD@b-{#M4Ay+sXehi|msh@CWB1c%-3zPwdrNXrcC2n!SCRbDLx;8TITmqM|*-HNuvqFi8aX0@9#kXYsIQ3;b zJS{u~eIzBPJFS28iv991%rIFku1Mws-t#P6gn)Z)i{y0jMS3mzCu91^10*rV)Kw`0 z?PlwVy!RqV6xs?87Jr#=>}u3t<<~LBB=-itI3k9Q$KJCOMnn!_@-!jL1j*1^8!~{H z83++cm^}!3uvQ>s9ER%utq)$L9kA*DLP<^xZI45B|2|1Z3v%I#@^IASf(8`C>CI+? zh(R|D(KgmvLL2nr*1N9j4i68Fp@(N($6VL-a=AnV;l8zMQQB3z^$6iVK|n%5Vqr0M zrxJ!)kpVJQQ50Mib+z4Y?>&5Aj5$0!EEg5Hw%c}WEik&yS4I87&-~N}KmAkf>fbMy z$7gr$zkU0SS6=Ta7ryZI z_uX052akW~lOO!R``&why?*1Z`(1IB7sX;(t?#d{J$`byJYZx`Y%0ReAc7GfKnr37 z2#A1vrUdbPKm$yLFYJG6PBlxiE%i~tm?>y%&CzO(Xy@OHy7oMqgO*0m1UD}zE`&1# zGSNnoNA%<_RDCab&5g5%Q%*{dlV<=~RA&4@=?WaP#`n zwQEm0=K(g=qP~0g&g*Zy?p*7=UmnyqZrnILc+6T$gk87Q4Fd?s+G0`Fpi*=Z5x8$H z8=^d-*)%USiBo32{aBxX1Ss^(vjOsb+dpO^$7t_zjIGY;gZ`Xz?Oa`nnQhTa6r{;u z6HmYfc6*Dj=Z@hm9oJpc;FgBw_~VpCy@ zVuNBFm%>Z6IBOq1G>Zil#lgV=32o2s9UdOSwiFRdMg-YbRaq6rvKKfx38df~ z-~8syn>UH*%{SkC_St6-4-ZRggW?o2$6+Y?9})$^SqCqIb1qa(4PLz%u+Pk!&8DiV zpbkYS4V8$BqKGxex~?mVqAW{h4&R5ggW&85_ru_{7Lf?~qPoxNv)20`;7Ygb&hJV1 z^`v|vV-JB$+1%#$I3A&7&Gx{9e z2IKn;AZ8>5_gU^nQe}L0PQBbOpUBvD0|0^8Xi*`2aO@cbv+~)obD-|QDmSrfjZrRW>Z{3RSBE zfk|^1hGkF1{ZVvS2w?axSdA|s?O*1aL*e~W*5$7~vhM!dE;-xpXC~jzZI>TJ^Uz6- zoXdLv{aI^UegDnnroAJoIDH$B^@1p3;TUeUx;?i)7czy(sZJ`wm*0F%d=j~9QuNw0 ztI7NGq4Yhu5H*?2FCxZRZ8wPMpJYz!ZON14MfKn@#(3Sv+z^XEs3JqyhpP`x>0GXk zlr~-X;4ac^lvO31OVXB{zg$vNgA8jXUpFbVgW{X%;qqnQ-f#X)a&25b5gFT?{>!pt 
z1R@In*vClq(CFNb1EMktX;EM47ubX;!utm;6A^o`Y`r7IP!kDysY4JTwj)47L<6C?CHrzH$`rJ)XdkGmDvHDr z24XGEpdkinIn)KIs-kVXu%BgFdg1N5Td&tA*Kb-&FtolvYfzx;ya0Iy7DTd0{U(qp z1Sr6?2#W|p&-%}|4FD__)j?JHkXccbTgMf1UDvD4))ZAE@YenFZQH%_>Z^6#93NeO z^08;1x_RTt_k7^Tf9B`A*1hri+yCTufA!mU&b>eXnZNewvvu+2gM%CI|KYRs`d!a` zoTAFb*#m?pjV zAzC)y#)NGoC?m>&qz_cjAI4lq##atG8ar|f=Pf49&J<=CUlkYklV!peT$f3hARih=GJf zyaNDB1c1S|A!f+H7|3`y*0L;7HCUFRthsi}U50r@b9T~f0LNy&N$#$BXd*qt`(p%j z$cl*bk9;T6E>cz0WiWF`FYILWS^N-8qA`w1XF4aWb56$tMP~I5Bu?A}(VTN3`a8tq z=^kqlSN`H0jhpM$>Qo@m>5OcBfJSg0Wre)7YmR}{Sh`n+_O&wj$P2Z|<{_1yiD z942(8>%0!G4;FUkT!3>N>Kw{o^)Blwd~b~js+vI}qOZRCwW27n;^XCFb^hR$uYKeG z-FvqlyYZ2a|F|{fYPEXxjn}VUy;>B-?b~;b4zC;>F5{7T?+IXaetz%Xy(`yF5%J{Y z1dTz&#bR-Me0+3t)V6KtL>$8Q3u9vi|GKV&bv_(NXjGu#Jiw%Lu4x(~(m6EY`}KMq zy#>K55KcV!2HLh=tybZN;2{VI6>f-KJr;`v3}FqixKK!=q@nhX1`HUyS^Fu5rUrX( z>YZJZIUb^WQj@s+DNREn;)4ni(3w?Q0M_!}B-7VmKN%~4KM1R+TF3<4UFz^Gxg=D}-8a9{k6X4uWqb4Q!%ab*kH@hUW zntP*`#=p;P+8J6Zj06_@B5FiR6i!gJ6L5~0cWZqSs>bj0Y zH)EoLD2^`7O%powLc?5;31yKG2#H{5_R!B$peo8BsE4x)ia#?4_j%M=qVcdS%k6d> zgwhy56M|i;s;aBHX`9Vv0|0~Y-?M45#yPh--xy=A-@3VNoG~Q;dSP}VFBZU40 zWDOwt;KVZI!hRwGVu(QqB9|?T!q9r#bX~VRJOBXCTo+cPUU_l0>I9s@#nq!F(ZP+A zO^40eZP1?GYwzEFgPW56ED@bu28k|?#X)d-m5RJ`GD%XpZo3q=Iyt( zhcA8%7Dw;-zr&cSDbUETs@m)PWt{m ztIk<FyPqT6{itx zABjAaKxX`#+pQ9KOCC&$7bTi}UJYY1sX2ke95?4qJm1Dx^iW4@<|mu)Lo!-!CeUIE zWC)_+M{9<2RZS+%ojJLPQ=Owf2`NlQ)6tjVpP|>THp1fAUzkrgpS+$@JXY%K^_O~G z!N`TmG6jPxD2BjKsw+upLR8`5301_wii$%8{2;7^l!Ewpq=Z47)mx`YlXHy9fXmF@ zZ;7BlAV6jiux4lj3*4b8ofiP>IuQY53<^o-5YhXNhq?yF0tv9R3_iqubly20qQU_X zxpPCYa`CZnIbv@eV-RiC3V0pE)uS<;vYK+?8%<9%#g1FCCk!DMNA{~mn(2t@V!`rpimrz<9 ztlQdTAN~g4Ni5tci4khpy2qcz=ioMoUA)5mD9-g2 z@SSr8Da;%xc(F*3IwzfDYYSVIU9;Yw}lx|JY`;Vdj_K|M=T)zy0~oeeSvEo;$sIwQXB#%et;@ zSzNt-?cu|R&N*xC)sqt^A?b?F}4-oOqH{X2Xi6@$- z3(!kM$H&LP8ydQp7-PKm+wC@jYu9yUIn+xX4$)eBaB$FWyQ-+Y?_ArJ2S!*hq(=E} zyIogRCCEiplE`YaDXJ3MzP))k-@5MGr3CXUB8IAH!Tk`X6v@6O9eL5nh>tnQf#S{@ z0&(2ZJONs*C&R80HTmd9SL@aZ5NeG6umOBuvknd(u#x*n(5~+41^&) 
z9uY%F+(4)S`^K;=eSSI;dN<)&gq@Xsr1>Bs0LA1+A_}E|1q4khF(M)W3S*qV-oswr z*s-=DHW0ml2V?>ceI~;!0CzhdcMo@|68%&;(!jwS&+4q1nN?Ux*ppx<=}RVvI_^Nh zz{1Y857ruC@XTdl43aVE!FTN0_aTc$&>D+~-Z|#osy&3JnElL+i2*T2RLYbP0+0u3 z#3S#Hl?YH!kfOtNAg%_2EoRrvl|b1_1w0ZN4EB}K0~tVs`$G?K+sEM|?RxO^fXe-* z0`APC!Q|01M8eUoD?SLSW3z~W0JF$oCWx5<;SPqOrZOQKB5MEu2n#BBoT8C9&NbYM z2+9Pafy%B(1h!oysX;1yC2^prHKmb6Y1jtwrLICFJjDNzuxwR#u?~im<>k%<) z4VA4k9%Y2OnExt+Ls!#teOr{nqqjSth79o*L8gKs5+wnK#H~9*%(1Yiu|o@Taj3vO zaeQeQP7EnDQ*1Np?^>?`*wq0`I2}Nc$GnCLLnuRETM=PlKrj{&3CJ*lhtSQm?@Sve zujTNV{ObBJcCgl1Yly&mVDC+7y!XrwJR8-9mw_P6!kC^QBN(y}db2~%5&Bdr5JS_k z!yWG`E+P>O@q8f%3pMEXK`LLg+UNq~RAFVZ_70hMX!^&MfO zC&+`Xzen2vz>tIF9MfnZ-~|)_Fi`y-H(KB7+VQk27%m(d=Y-1s$RRlci3%{x((e)? zBQOZ?&VACm)x%zikU?>b0O*;Bz_WLb3fqArAPj}<4Ot>)Fo0VC0x;GhiFn735v4Gu z6EbATp!dFWuEDk_ilQvaMy%=kEu$eC(7$ON02o`60bmIMF-1`{twTXT3OyXDs2S0V zY&T6=F9Y~(TgS|%s00m4Phr>tp|w?A0yxH1(=O_gx1Ha#Wm#6o3nAFH-GMQcf%Vq8 zZDZ;Z5S)l9Oew;~S|sm0Q!l@Doij{qhzj(~;Jme#EI9!oTy-o$MS0LMI)K*W+uP>! zuxdO6pPeOBGFtK_;PsklSs?(`<cwZCTUt6=9Nj1R6vwo6}GKKA$4 zo5STnU68O3jhfqTD<~Fg?|lf8u%?uu;hC2vMhHa&$-`MRTSR5>fhmin=9oDL(ND#D zG<} zapvZnu(7%O_3Lt0I{7)-`u&qh%s~0h%-KDYB`47b$req5p-{~6t)`rP+y zR|0|GJ*5k*=CvDpv92|(q z@$rc<=H%q6M+#;A%d!aedLlYFIC$5)-ertwn#MWjeeZx@E|*nRh5G&>FXZs>@c8%` z0E|HZI6ORDEEbE!qOR+kH*c0@8PbW`wvArNP@^n5t|A!>eIR1X+!*E@3@(SqrE?t% z+p_2!1JKsD$m|<$Oj(zUuIpUe8e?jpQplI3s47gs`Vf6rsA5DktD2;TIgZRRSy22j z!7^P43zRJ~+K+N~W{z<1$m@O>7>fS~lA;J(5` zcr2Qe!kc%eBOx-82~ZFyTB)+|sc>oYU%v?faI-{GR6sYOhk>9EwXiDJ-Et{u&oMaSgkf5foxeIwwp#6ymuf} zl(sO1Ap+MmO;=S_*EN>N5{k&fv-@BB%2&VjotJ<3)1Rf{t zc5_~s(sgWXY0M!ax^7ifRqzlv*B~O2wB1@q_vc(glWrMXIQ2amlJ(y?Wp>X`Nwtza zY!_!d_xrEJ&N+v_$#tpXLNoY$DYwCewZ)U~qn-3Jk`0Wx(f;}GA4hBTiO_uE&gd&K zF68iZnN1Vr(ofX3x%o(Drq>floH3F)D)j!x{((E%G*iGaS+5=~X@;Db_!AVa161mO|{WNv(|08dIn34PC3Iw#gl!d0^H{ac!=4g1$MsSN1dlgx>g zk9^_u<}|}-X-cwuCh;OcWc`h`!5$*s`_Q1cZChq{<;tlp(w&14%-~%jDMo1~S!S#b zAIpftP}xjRI|0R9J(%QiqOx0(%}KoXlSgxz)87L~lTbY;lqB>MRgBN%t9ATuz z!k%r}zs5<W{BF?H6XUR^%m+|uSYzyIDdhzN*e_Ss&s7@41n5Q!V4 
z`JyPi^T9w+K_>vVC{HVUd{8`IowUw-k@o!FvZ@!0#mUJDA~a3cbsZQwIKFaP79PPd zU%h#&*=#*xD}Vs&^#)0n^#T!Bt26KYVzFqNrYH(iSa4lPpsuO{44K}v#la#Ur@dOO zLa={`-!F<{wOSn?AFo#%Yb}bnrnA-(8Q*q`vMQFy{fcKLcWKZPlM1eNa@<{3dwJ@U;-=dZb#40RR4Q*qVf6~vCm z31dg3*+y`R{1@ z!w-xgAQn&%P@wPjsmx)?uFhqJlIedIWAhj|$?Uudxz_oPppKjJy04u>3k<`5DLmA+5o zKI9A#4hTn5(7-PsNb)*x8{V~7gq z+71Cg*diStEdap@Y`3l;tQI9<)pp{22fhnwSH{}5ZL7jM=gzlV6mT6s`P4J-djI-?BMjN^=5lmcE>eYTmW<*_{0a#z5Mfk z@he~Z((iKHEN&b;_2hez(Et{vE(#;kB6%d~xX+nx+qNuAKxc~rG4v{BdHv+16Sd(TZ^|CiNzQ}kwr|h2 zYS+CE*WGiRIeVL%HqO{6_pB>KNrU2c`vFdip?M5=u4c^1lzDVE>^R>9CInkRjC9jK zjnxdyVKG0%e23>&otvR^0MIYZLxZDU5%oh*(Rnb%iV_y67dV$~6RIN2tvBa$bLTP{ z<&nVWoXV}QxteH_*=qNXlSr#D79jE^?Nzmy4xYJZ^9|W^r_K?HKh245`InmY zYlm*W(xJuxNT1gXL^i@2Gy9<%cE4%1+HBTM)1Wns;JtS!MNvr6aOVUzR_YRMX$Vlj zG2gttJw9~KJv@I9C`?sV#+bH4=cRES5IWC7RFvhm>$cnN@!?Tf7H!vvcSTVgA1(B-u+!mr5BN++x+(LcTa0J2W zh=_~DBA!~v%Ql5QTGW8ldcSpkX^j``7#?(;l&0G>_wL+T6!w`X9$!>7`>wzucov(c z*=#l;ry*1jioma3(Xp9F0z8@p#E9cbSu~-vktjtAV}fWnxJ46Amrp=jd=mLYuSLf} z<4`v#WMkmihMqvas8$3!z#iB^PiZ~t$q&U6ka?)pqg&ttKrBilv2|_yZNRbNdl7-Y zW#8~j_^gV$B$sCg07w*;ONick?;KLm?+GY@XYBQvB%TR3OtLOgDiK6VfSow!!%knAW7JaVYWWZ*%Ss5At<*Z9>dXqxL!Wdj48-Vx|tzLpQPe=MEAmH8rty0 z=G$iIXj0e;01TkVaCSW>HGsUU`Qu{(0TqOZJft>A=xPWU?e8#_({whiMvzWL=7yWY z8mpqzbUxmnN6q(LwT6>Ss@7<%48%WzI;p~UJY0z)Bg$BoKwvHk>RA!a!7V4iIl#k& zT)juAxG>BGGHGg38Oh1lpZRqe>3Waa5=F%1`#d9LZUo2(Y(5iBd-a1l`%Q~piC&ga zo@A&5gK~b6NT=FC$V0;?QHiJd8t25?$$i+f?Xw9Px>rX8OGM$GTu3bCgaAFo4+)h}x$(*t>Qwm7$tk0-uq# zTlF= zK=JGFiw`#;_Eq}=V+AmPl4WGD$m}+=0AWEU^bTg`5YCSvg#BY$=#n8G-vA1L_d)_F zh^Cj20}T5C>y0sCW!?*dK_fWkM0%@5go>i;I`2Igg9u;@8B-Rt+BT->GibZ6Go}cY zIiWJ_m^)XoC8CCm2RZl7x4hYGM5L~3Fk~z)z>=f&`XK;>hJVZ)S^-&W0Ras$fDjr6 zWbd5ofX^P@H^!`1E9vCwjq8iWqH7vLG-SnjAu2(rw1$YiYh72_1&VLmwyx{#cKh)D zz0=dv)6-LKeYscwvg6K>ecy*a`o0f+^z~O>`A`41|JM6|=&%0xb027VRUXvH_5C|5 z@89~hKl-g7`pi!NmLGce$A0XGKK0!;GRKV|W1JB=HObra>OIl-FvS<=N)1ddA7-MG6YdW4;@u@TP%ASqK7JWr z+C8HuFei=#y*-pc(6dcsl=#Nn{iBtWeEhj=gZ+n+f3M>v9%*|qGf~k{ayn>Jp^ky> 
zJPuRfJqG~E=RL`rlOU(7Wayid@-LH{^GI|gKd;Hl!43v;A(j*;mw zVLbE3MAhrg`slIN?&_cw2LxEh;6R~plO?4_r_q43&4y9L5wXHMntwvM+QQzp|(MQ z0>Owgh=3?WHdG>A)C8(T~}}4zW4CqLoUn1qve(5wZr97x;E?;yVg^iWXh))KDTWftCzr#X|B^d!cVBWjKoCv3x#t~0$YMa+8iLNlnSc) z74cjm0OW(~=y_^3kGa6yPiBaYPYH;m>y78ds-3&0;gd8cQOGaU;x0qaMT7?W z0C6qj_nMvcYj{OAH&Bu)3^jj*hqQMPL~!F`JCUAuI*QY?K%Y!tsKDNd1Jak=K@x~$ z0TC@yXw48AAB4h3eQOm!G==E_KD6{4;D=BdUUplAfQEXzp7bdc7|35(N^kGO5R(Z2 z7Hk4h(RZlb_KB>bk08p8IKDvf8qG%{gv7RxQT&Ti${cpjL3IMs7XnzK*2S0RxyXCu z=Q+VZOSAd(A{fiO;qTAz!Ymd9=EdMi)gyNx0gQxdn4zDS7q(&MpmE&G3I^Ifsncnc z*TR29?Rc6xHX?owq%VP3RP6xwY;Q7o{ddlO!dc->P5G1Ei@2{>y&u@-wA5tSIEXI3 zL|MPaiN`ZVH7^eZs?=Ux*UXYT=7&j+(aUY(05@g07w>)SdbaK7l5uTOKYoj=TM+5%VlK& zz=+*;ZoS#~!dfy%wxXg~`S#rTB^9;Dwl5AD53vssc;mtSLa-*=8nYz<0N%9SY6B&f zW#x;qgAT>BYmIPWD-Z_9h=Pd1(1{45F{WuZZ@lqF81vxZ;Kr@TLQ6vLWV_jrMPq{} zklMDXmdD;N-+1NAfAq`$&lg|(n0Jhp-u;1(Z_V*tCq-FbJ3Ih?^x*uQyW`{2oA)2A zZk=+==hE@v;UNH_*ptHp20#zaiM80a4zUzzMT{`Ul8!RAZC+BHOek9p&yU0`l5vs& zFC>rpT{&}~Q+xN!Fuvcu6`T`v;O2bQ4^R91J4Ch$OIr5k>=3_ zV=*EZKJwkmqfK*ute@p^t=RbBq&rSj27UM&N-l(utNfTSlW8tI(94{!-k-U1A=NB* za{LPg+;Hu2S4>LV$^OR)%$db=_g*e8OhQpUS^`($)ls}o2u_Y@mpq6)BY^ULCAI4G zHTmlOugl5z+z#Xl!~T8CrxBh?YEmog=*5iTn07rIW0Y|OEq%Dj1nRnGb_qOt&&Cuy zwD$~`u1%4^NnB=L3@yTKqAw)@P!Y|e`7CZi6lXQ2B+5*_G@VWxAIUK#!Hx?7Z$DE@ z&S?KB?|*e7ujdoE@D(_vkWW6i|JQgflQPg`7B{B1) ztSAq}-jA_8BZ%-gMx7v(?(e}|Z3XJ+Mg367#t6eA_63v)$qmu00vy{v;^3YE#V*5# zq9}SM$RZ?w{o%UK)3|Mh4reZk0*4lnVfVaufY3L91mqq&*oB(@LO4t#%-?&zv($u5 zKtw>#JNYxdPJ`KQ(?X5!aax!LY@O{l$;UNz?Bq8Y>1zRB!rP+}IplL=W1ndeqc4Q0+iAeCak~P2+VMBVtlhS^EbKl^5I21M4;bv6NA`loDmz$60Dk2cA z>C!(rOkj*+fRKcQ@2veP-f8(?pZKupUce&>_JQXj!rn8G&DS0bsgMN~@vhfSfTUUP zg?=R8YP}oNSfJt>b6sZgnC1r2aGtvXAV5KS6D(NCqu-Dh=M}8V;|a(mB&L=9Sp8M>O3+d05S`+4=oR!Da)d1 zT4r7>YFn7LYl8{vu&O(OZP(u0HX>3OyDaU2J-WZyN@2UwSOcXe>xB@u1sFEo`%pT` zR%J*RCZZdMR|wHkh+zVQK!@N?g~3~EubiF=hyf%3?tCcmZ;T<7u4`7SRnxhmS{xi4 zSc^|T{d8DSP$FL!c4-OAs{^l3C z+unTqgxRzjsn?#bB;2PbmhWK6#E}_0(0ZX zar6S`!W_dj>mEqC>_uAl+zr|sc^Oe-QZd^z$$ZTHZ<%vr%&p=w>(aN)1z_eBd`_^- 
z$Im0~HN%s^{FhVGd_M8PIqPbO9E{o&Jj$ho_{l5*oU&DEHG7W3UC8GmW7B4LeNQwe zC9~aiv>&X`A`<#5LI#a{xa{0Lc@k^*po=)^x9rr;Ov8aGBiiI`dq~L~{3m(Sb4PX| zkGw)a1q{`;%|%T%~6=#M52V1T@Vn= z;B_)uvK*L-?uif;ODKl;*?{|Xz0^7N$?FnLOhi_z6%j;5I?f-*)~;L!(_tx;_4(9l zc7iF<{4q&dla|pq1a2ghA$|_FBpDZ>)>l5C{6ICdB=gyC4gf`J=D>0f6wTd)11kAbRHjOJf~-TU7-S02KsV7}zj0DJH2vU2R18I383 zeAlt_z)}{4Ya1uLSS*-5vMiR>X0`Q%#ux-30|(3cVEJTk7-VmdOehM(LVyN|`m&q5 zn)~7OLli#_HH8o{l<}#mYI%52`8smXh)0Djy!Q+O0$uAxq%6y#D2|T~LK%IH4&M8a z#K6pL+lJQ>QC-((XJ-K_VynjJ7YTFQZnq&zAUqoiBSf4E@$5Z*i6AhzP5@YBVW23i z1uJ#cdoH~T4>nCBrdRA^*WchZ$z$=tKO?`6U+RA*1%i^>G@aBBYMhEiCv-ALyfdV3 z@VFbH2J%>YZ8F;?zAho@ar~G-87~>~HjFW$`(jb_y{TE19*(D+y8%!Ibmvqw|0%&l zeS^+L$O%+JXW|u;P7-!MNo_RQzmWJ$3O1T+fPgM~BqBfI0*Lxm#${4^rx8*#xN%=UQzG$m7R2qJJep z7CU~ge*2`HfHMaD3pslv9XYL-T;SX|lb0SX=Y4uzb13jf?pWkf;Kg zkT%Ks=MFBHv?n(?xf91_Dsh*UvNQF@6Q86r;nH+o<4*MH>UZhU^svSl=INrfb9Muq zahb%TJ(K4(`Oh4@ajNC`+^$}7^I^<=<^Wqu-H}m(BDV2eVP`^IMHC()TbV4TgovDV zL!6vv+!GG2uf1D3$@Q`$)Axi2Lm3-RfFJf=8Z!6j6v^9@B_pE9bP@8&VrUxb^i%`> zipbFXc{pG~3qcGA5!v-2hKU+NVTU6r07U_0*r~v*qtAl{EC~^U5P$_kA{eq-#WN8J z2?#o25dk8!2Atoar=j4D0{}C~pF#khW3tOm_(`YaLmM7MVP-%U5zlb-+nx;8huaM};XP^Jxx4!3!Wm{eO*pL3)AO7BN{^lS4!C(93q|i{lPE&#z#K&ndg7xvw+KQz4`EPaqzzPfAsu(E#iLVmw)lK*I&7Ea`5y2 z*55unJbde|x1W0QDFASeJv#tOdXXkrpCIoZXwSvb{hW8tuQKJ%pSvMZF)kc@u3FFj z*7)*jhkQyv4JVLN}w*6{*(Jhr4Qx|XjE{6qk-)n%;^$I5M zzi>km^F;oAA`V`dc9TpXR=0?VqX8yYFDLb@WDfa@qJ0F$_Un9xd6LZ~6{;L=6FFW> z^BTo8)XiO58Nk79rdJWc+n8umaBbVJR;$o_@9I?w_9-2GnHZI{rcst<2;&&b;!J%l z0gNLAJ69~?kw}m_`$sQ1PhyR5t1WsFDnfCI7$l1-A9hkY*Vz>ssdE5}DWzImPUt{# zYNG5@bKsZYi;S;$!z3Qff$=2gjr*27J_+OrFz+FEkP!#WJYJxxNbNy{{B`-#^m=28 zkBI1AFEB-C^b%wUf!`yUq4IA9n!teG`_3}}n8I3XEW-J!4N?Cs%hvf07|Ecelf|O# z{A#yWcKJi3bgCZ zqAVY7Rv;D;*?ZSFKtN**NGMj$?7TrTNaDF^+M+6n$T_#&Zbf9VSOiFJTW5?g!)moL z#xP4=FGVE85XJgG!A>nADA08sSpxvsl!mMHj zL$q{@a!7!ZrV>p-e;*DsKzkx;%GkSuh-fG_x7U+2<66}@VNcezo8Fj}_z`OvoDuM~o!Q z%+7C zOpdPt)ko!Jkan)(_?A%9FL-&Q94_-U5AxlPf9Jx>CQ0I+I}^N!= 
zPLkkr*rr|GHi>K^{Gi{c>=?Zq8;U>zfVI|I(mOg*g<$0MiTC90#!0a`*@}e7B-{}8 z;>FzHIqt&RdGiEjvx3UO2Jg*wB#0!KW=Ey$=AuM0^j^ z!&E98{stI>wr$axcy^HmFA2m5f+3=UNMu;7XKO54avqwt@xsOyWoeOn>5hZwxx@V) z>O!@pfDR$$QA>I_WEg8&#K@NV;uf8R$GI#EL~Pnk+jamTbs?UOKrP^ew(Om-C-DW8 zEtO6j_-%z^5gi=1`>0 zg{?hsm5iL@b|MBP*q#SU(@YsIQ0Tp9;V_R=`2o#I*fQi#1UWKCpLl5pQrv+($R6LT zbwZejbFImsvX&-AX5x#qOsvH${aNpT<_dABAOTowMZ9xuZ1ZKU^?qkt!Jz;&s+M{Q zWX_TMIEUPwt06}pWo!h(Lt*Gp7$k{vj~kWPn?zZ(b1?AUh3`Y~uZSER99V0ktt1Aq z1_>#ApO;$lzSEE(eNj{^gJ&z)-?i2q28n4;nnTC=Co*WR9t|P2(HCy(~Tvb)mbckq*Azd9tMSh}ggDA!X!yE*bYHiWBt+h4u z{#WR_4h8C}YMKU&4<%?s`pl1D@^TF<7K<)4!~!l#Q(DtB4H_GZoWxigG6n@=Ie23X zLl1a9L@kNHP+z<2x~6UFvWh^3Dk$3l8iObjQftL~acl~+an@P`wlEbu=={U&_SLuE zxOaB`M0IfG%9XM(7O5)jQBf>PDlq*M2wdAx~?Nxi)bD(E{I9S zn3I!}F!<5Y5e^n=QniU9x$X&z0S1qNWQ_f(N zyabTrS~zl4Uy6*0~_G?5PO z799M+IxH?y8*OjZG5!hZ4s(wEA>u!A2r3WfqCI@5oEh<(uy4T?q;>C6x)=*Pj{>DF z^bvqcEYl1qz+oF;Wt(Yl5j-jCyFa_&84{(JN z(AE}l+AKqL5zpS@&iq6~1XjQ^Ku~csm_>cT6o#4Gu0!@^SppgdEF#FkqtRP=SOh_v zzjuNe5z)Zf;;!{uI#eJ;M60|a=motK!?K#cfiP;OtoZ_lUJC#$tP4{FgAwU|#Vm-R9X9}dt!xpY@oDeZUqscE zPyQxyW};Z3vP8#kw1N{~7tmHePFWhI7K+e}YLSDILX&Ak8Kz&%*gi-wp#>mL^by^` zf3D{2B7ld@L4e5gr!K+4`4nt*2wqsQ&xw(w&KQD|k>U{eH8Ud_jl#XG>%DWW7T%08 zR9I#PA};^{1}$Ldo$Hy|wOyo{S|AAtb6POgf|lOo$SgvBOqde_5Rxs7cb))|NLWI~ zK-hQ8uH?KTpX!$%L?~3sT|lt$amCr@j?bR#Ta98pA_)M_WFcq>*Ke7-6HRPE4BEC^tb5txw35CAeW`=)h*MG0d~p;r|FLX#K( z5oMeq0ucx`Y)Cn-I`4F#o~Gd%-*;&4`lQiF(tFv)Z4rj*XCNFr*G$HkSSgRyDeEzl zy$>6Sh)G2y&FqHHY|+3Fp|7ukh8{&x0q?0&|K$#T5VSAEA^7<@^E=f&TkH$>bmCjSHAVwQ%^s5 zc<+f9o_E+8Uf;j>-8aAR?a%+qe{uch&5ykQhyTjI@$cM!aQhGc+5elspFZ={;gw|# zeu)Q9zwo{%UVP#1*_$6b`We~0{oZ#y`Py5r|2O~ew_pF}%OCmhC$B&C(tAJt(?`o? 
zVbOb6mDP6JVo{4J&H#+}D6Yhc5sU@5UM-NkDNrDEo3lusJ+Ro)Hf~EuK)pZ7aJu=$ii1+bl#qG1O*0jZh&O}^Tj4ponnN$sHJ2TS|&>lYtAyB z%u~N>62P=gGe5_CoIS}Ic6JsFF`@jdOH@Y|UFQHgt zs&L%QP6XGRO#rs4?4!S%u0=zx14IM95M~1wjq%`#s6K8*n$kj57l`=a!P$8ms`t8J zsf>)Hx1V+{D90=U!VyaQ2x}x34sk1pSl6|#HKC)z7f1}kBAw?B*a;(I0cZ-dW!`$X zX}kb*>+Mq~*PlMV*0oy@&z)ahxl)&9w{5n5RTM>4mXWLC5c=wz3(3f+4NrYg%C|wpl^`DJD8v_BF0KY z305XH-#E$WPT-M_O*;Bsv7fVEa(G?Y^AFifpqO_pT zG5Dyov>E;xqxh3^3^x5(N+sY(sPB{@X=Dew%|P^*gyn?#vk~}%RlRR>?OdR}VbelX zsVEy_*!d_2hHVMUiM}P&X3${*5fWm=x3){h!$Q@^I1xePKV!;T0FMC4kxyyRjmH{y zG8TM@;1|iJPIu6CA(BvB7)z@0SmV1u`3w>%NW=Cd$|UqW;q1E`WW?X~rHM&I+t5WL zUOQ+66MI?5Wk$$JJb6(Z(rAT)8(SkC#Dqe}G&BzfB~f_p;xhGlkr^Rrgd##40|Fvk z5s_=#K)8Z~Dr{G%Xq52cer`hPPWHD=habh}qB0~c#eeEo#B=YwOU@;HV4v2jOaztW z?cTL8zBKsFU5QWud*y!(oEnm|of#cda%VEEf~(X$phr$NS;h_+J*;GseExavetmQP zz07Dj5n$ryaI%{0=Q3L)V)>VPXQC9H^sxxH1(g8+NWe$e4)*j@<9H+pAv;I2;~24& zEIID8jKiUlZO?z&FAS$UkF$EK#=FQ1LYVWObJTDG0Mn;6f6dJnGEje{F(*MfSuYIX zbfD;i_d8ljv~!Q)*zckv2}II7Dp2JgB34cw%}#zBSB#J$!#)(aL_`DwfP{#ILVfW! 
z&6RpOY|}Gk;jWdAu`xk6$%p-3a&Pw;J}Vqn@)JUZ5BnD%rJb83?({y z{{PhdSFj~XmKcbg<08ZrPrsKsvocF{RaaM4t7$de(<3kn06_o)kN}p##<&KW5#$eT z$v+y6KQyC}{2`a*lDpjH4|hodSQr2T;9v%eQQhe2X;EEWy1QiReEP}=7l$8y!{50c zabITl5^Kh=s%JOvjqq?ke*9>ml}1EUbwmarA`0|A?T`m)cInxRVhl%nvkig=}0OVtPN*#v%YmP*ZKbb$!@>5vA(mu?mbxv>gedC7Gg~e zGgIXP-u(Wz?%e)pJel6ObnDv9r`N7*|H7BQ@ychv{D^-eEM_W z{Hwd0gSX#&ZKt&69e>57OJ`y;P zL|TfEKYABH4A<6APOIM9W?yQU)u2YR$~w{4W#5X4pBpKqa}@xP0760q7K8^R?K)1J=Sfo zN-Rs{Y);6C>4Aw^w$0sPwJ6qpMFQSrkNA4q=JI5{36Whj&sFC2WNyiqd>mvZwzl-J z3ZBqHJXmJ-3HRkSwP6KU#ucXg_P+BSObAfIIU+-fLhU(cZPgY7bYzoIWBrL6#6jd& z6kXwX%T={hnq=}&=l!eBU9k8>$3V1bSZhO%O(CGFsz;9=ot~Z|;`a7#;HDwUzSrwv z6XY1lRE$D}t{w?(1!)x)wUxl+pBxBn63TN0;(&2qIO2KMRNzdYTn?Y-k3?e=u|m~} zdqA*Wqtq(iB3S4>$C-rALn}Q+v}#pzF*<=EHr{w1MHnok?Op}6+PBAN)!s@@3L?zo zT6jn%|7W5^CGo<14(&VJ2a~H4?E}a|65^GduSDzc-iAW$-h1+kVl_krsG|mJtqpN` z0#&F;9_KHb+9S?`^8g@fB{r>lVFrU|(8{5O-y0fsev$wsyTb8+K%FxHq;9$-4-hn`rB8b~k26kCgg>ki+j7<#zAde zU13?45`~s%j4}0G2@v2NWEd;s3xv?e*jT;`(TE%)(7C<@j9;`-J+bP>&ykNrTQC5` z5csz06{Gv3T_aJ|xO1_y!T_zVJdh9{PV>2Z$WK|+sJZMB-2m+lL(v}@rHe?N;>_cR zP@4!EHw-j-eqGlApQFzym|~4F!{Kl~pNEBn;PfPbAbM89TS7QLFn+{M*lns3qeO|B zEJMJ4Lhi!fnBEr*xWS4PP60B1AvSjKn063pW;i|^k+_E@cod4F2uvZGjHB~7PM+5_ z;!aI&@`!^cWgCfkbHj%S)o9Ch&dulZIAOjubzR5p2pbn%Bi7n}zt2$)o3!AtoB*g` zUWb-!nP4?`Wy?iwn2_su9uw9qmP-^yEal{y^LIx})jy(4-d0i;#k-tM_~XKiD9^0( z5eIl&GxzZ$qHH3u1uTe_Z(`~Np7}GJbJkb@kZR$s;Gjkf)~M8XNJPH29P?x0Eb?c$ zn>LdyJdkE=w2>O5bazpgrx?R|lk z3#u;0P_H00HWCWdw~Z4qG^8}f#6x{D)RgZ^dTaZ zhuL#C0N2Fh%;PC*?`g6}oy%`a5$$W$rr?TjG0x)KDoOH2=)&ax-(De~l!&*0!H61d z%_~9v_6(ELgk5FIg%E*Vhpl(aT+1mKvg&y}ZI15AvHX&m8 z$QxG0SrIxS5v?Yj^iA2+nX7N&T;DrNli*xV8IxKlI=*UYUxMN-k zk>@IKLm@=aYn_upA1U&)EW^;O3w_vq0*D9}4+W5yeETBIlcN&9Xyh?Rbe19m03erq zMez!OBa6k#v$HG+B%s1YB|>U+7A3m{q$1CqBOy=|m{2Q|p7n>D)N%^;9M@@Njy3WQ1GWB46mI zZhq$4bDx=6_`#ol_ow$ix%a_s_xkJq%KYDlTNk#U{`!}H{eSq$kN^3P-}y%qJ03j$ zD{DD^_w6_Lj~|tTzIvbDe*61h|Ha?hSu5_`ez3c>^VH=_LSU9}UcNF|A0CaX6Fbwj zv)S5UJR1G@^*2BFg)jX0jW<5?m0!Af^ 
zWNAbc!a`$AA~4SZF>`19iT4J8u3)$1)p?%B0N6NSG>0Wf2+JZ=vgA(km26*}XDmp? zw;)Mnh)k``Xc7*{X|G>y`~Y42E3qjjV{$xMB`%!N|Aqx1B6@O&)_UhmfT7ZgJSwGt z97mziXfz&=5i#$t1#62ZAR$#=sJx&iYey?Nwe{(EJe$pIT|4Ja_HUPEDYG0A2ZKRg z^t|^<=e=^{#TQ=Q-P!5&`T)Wjt8{4{2_$RnbUGzw`$I$z86yLDqKt*R;$(iv+* zVHT;BkQn;uDWQ}KDGuKI&=J=;M+lC*u+{?3tZ^u$QXWNL==bm4yLa#Y=U)A+E_H2e zJ*gC6k;#6Zk@Ln>8nIX(N)&ZH7ftP&s8Iyu3}U)(%CkU|{m?0UP^)(ZCx+32%(-(g z>{eA3oSnfplUUU`wsQk6NAHLOJS+v5&6*r(+X5bcynR#?b5dMd+x;Kmkeq@rTBrFH z7CliG1!v;AVyU8paz-qQIa}Cd49sS;&?GsaO1R$`6HJ-WupXT>an;zE0Fv$FBq*M4 zahATeAh4aa%OhZe;XX!g2G2*9Wkpf6r%_c^_*I0;T2&RW4x4at?wknIh`Cv@l}noh zVdIYCxdQEFCdmYmzbAA%hLA%OdB;5mu?SL}YJh+IHDXurgmL7F{%m0Via7!2sLQ>r z34`D@ni3==R&g#_*D84|LD#cT(_kXR#->c6qp`RC%v}*-sI`__wK)_3IByV9NH{xi z)U+o-LIfehzz)y@dLcw`HAMq`@T|vu2~J|>=3k2Lj6inwjL5{N5OL8{(Yb(K!L<|J z72yUU1d0GDSLQ{Hbx60e-UKiST*w2&{D?vb6bUnp6Ki5gJ%u9Ru@rc+M-eMl%8kqY zqlqZ|MVY7!p1dc3m_!=q89vmmX2UFD>5BIfM^UdSiAtcjW@$F)w|CX+#s4O)8SA~iMy2d$?cum^a9$9$; zXE>K-bOMt*Amib!U873^08o(Y5iwYXc|sVcPRYnC4^hj_AJ0W&4_Q(Si71e_1o46> z)rsc_0vhke;tN0kRO*Z|1rE4y?s|)a1mv5-MgkC!n%3DAY}sLQ{>+8Z{79j`Cplb9 zOr6E2IwJDM0f1m9)LQGkkd3zhn~1w)IYdNw_O`Yz7XB)wf;WH~TYr#Zc-SbSIHMuU zs(P%|)_JHA(Yv$aw60;yY%W8cC9VTayaO(>r8tO8eLxYgpe~Yq@WD0 z-h4_B)rj80M?|NGv%KHC_@yuI9z37-^mbnU{y+MUA09orveAF#t6!SDd~5vhZrR`Z zPycWKyJw!h{`Ft@hCKanvwwZ<;;r!~_`#q2r?)R%Dc5%2{qFbv^AF$u+$*2|=CA%` z?c{oY^U=PZPN&zeUPA$|zVyY}$PRm3fAg>ZoyydI{wM$P-JiVv{0px>_1sH?;d-fh zxhzQlN`)W*z!?Wt3XypS#?6#c=rD?fApj|=dt6peB8s@cnVj^;esd1drWGE7bI$Lo zJJpjs#*gv&FM}?E(ka%og^zHxll!pCy!+UTH%v?7l{zP!m?qm)`w6c^4F-Pf9|vcC zbpr7GN8(Rh#B4{?hl^*XC-9GKhCh`Ee+g{c6KfN`WCDq>(#h~Tc~#usP6+UH{$$Rb z&`TInTN+N}<^%!}sm{r+NThB4`1y>Vf9B@$wh;g1@Hx}dmbWAJWDdrTRo4G_B`rYP zXS}_Y@mH>jw?pufwIo}axEnhqW-j=yIxIqnJkLV~;rK|5Ttmd+aHy0D1-SD(x7Nk_ z64(^H&hs3XgEg2KBxtpG*S@%EBayT?5+XC(B`ZzvjYET&ktl&`=Rxblt9VQzIgPY} zn&Ko0(d!g~!#yGJlcPbnVy7=?kx)J#E_LzmlT&1N0b;!t;~7uFQBy;t5F*xZMM5w0TuWI1 zc;}pu+8Af8FLHHqa&mlp98v^?5L;VY+uPfy3v5CsLb>^1>s()72Y}Pj_};xclgVUz 
zd;7|jD|w#dQ@4cHKvdUOWf}nHvs!8;gt)Lbbf!LfbTk|eg;qok1h=e$~Ecg_4u; zC{vjun-^uKNqFbH%R|gaZ3t})(~gQnjL?r2uEQ!4DK^^ z4&_I9WH&!YJc=z*eIs#$WictCU~Ni!rWY-9{CN6s*{uu9j^Xy%@lD5Lp0eFZ3gT#S zf8$sBIx_3$WO9&i94CIjIu0a1b4`AqNW27Us?<- z4W}u-f|vV`YYDw5!`qE9bzLi^f_ortK`id?S%Do+6~oB!CtlQonC+hL0_QlvOdqfK z6DOPTn=nt#1OvoEgN<-=UDuJCMgMfn*a$n)>-9p>r-Wuk2nVMTKPj**>9xK79EX!7 z@V`QE(*f~~OHOY1mAez5#dc!`3?GfdIdZqk)a_%)(LR|{A}0&hZzDbu7tS*bqQ|Jg zv9sFf)HtY!sK|RfA0?_|##Vv)1*Q=9I=Jho@#}=+X{~i3M}cLigh%2TiPPo>ur1$; zR_=~q=}??eas+M<7|mE5N(tHFLq$0uA*9@s!8d)C+%CmZ7IpmFb|-fC#W8 zoF~!+mrIujtWS>&KPmjn=U}3ezTsi zx#4T)09l&GvO6DI^1u^b=kM~rjcc(S=C%QLcEY*l9VH{3Pv`(J!rNX&f@X;v0t zLM;x`gsatXqn5P?nrw`Yn^%soIOvHYgSbQ@A^=sMK^MBNdav=0h#Zgu0wVCv zyG3vwQt$#30FXcgltO@ykZ}Map6L^B_abD9t8q`w!!@77w7h)>~fROhDh(M%52P41; z{D#igsYHv;)U%Bu z+bA*tzV`DBgb;Xobm&J_U7NWQyH~H5N}ryb+^Y=g^$}*%$#nkKyEwal{N7v7e)chj0t?L)|ZvM@``Zw;}ed|B`5B|5#%)j=lzw=c8=GLVf z-}%G0dUuX)e){E|UV)qa@BY~zZ(n}u#TQ<2*6;3aPe%DQmMO5tMm0 zP-~kY{+IvL|JKDDH-7i;{*Qm~+FRb+r(XUGFMaxj_1>URvR~xjEP#Lv3}_`)u%mmo z2pB~~DRmS_qpU7iFD_cn4_ih0+JY!Q&QE^*e01_LHuse2K5<4^!ZzAV>eLK@nb9L* zjX>lhx9R4p3ABlFa&c0{MZ+joc8=c(3Q7`!S~j~jMQBfQ6-YU_l2_w$A5Q)VXFq(U zt7@ai5Ur>%L=6GDq@qq5*M@EUhFiqAP|_)m;|hN}0O+O_+bWu7Y7DT{nW&ep0!3sS$(%VJuG-2(9pXA_JSj_vHCkC%sDG&{opg@a!ZG z)T&L-2ZXL8z$5zO9D?~i3bmfqERIpM$wtTbCWOedLL8CaauXGw7_T=C%aokDtfF@sK#OuP zB6=VK;gE>P5e^Co0M1tcbm#7!)6v*kSYO-B3w`I#9Vz973l}c-E)9o6DdfDGgR@d8 z0+K>VDeJlhpfXdn!})yP>-AoE?)lo(!9H8pHCcnULPY1mTMr~^GSc~s3azq2>VCiH z>e=zZ;mlY^kn1eZ^Rg_t{RM)s3uv5m&S4XRjd>nypoz^lvYr?Kj*XAdY9WNkRW5`e zj{*c5Pu|bbua$jCK+X##r*$>lTAy3vXR}~#bJ;7{V=h<0t zn^+Q^SZD&LC(x3kk|U5HUZm#%VPbeSH}mm*kMD~2L>%K;hVjV9DAYuAAm$cLPGy4F zT>nY%lT-MJ^YQ+6>5p)AM)LuyrxY6zV^g?4LMFB%2&fn;$Ss1Pn?f`Zm{nDYrs)L{ z5eO96)M4??gLSU1O}|$JAjgU&ozy#i*;)Khu;NGwB!kL=_{h-G|2o=%kZPlaqCGxdS8kOgH@DOm7xDB*8 zVAx+SQi>IcS=_VIW^;UGF0ONRfDahGCJEu~+(&+n5`CoeeroLLk@#~xDB;fGW-S#k zW&sH^A^?KWR1*;q(7Q#ua&H}ZF8~1r(!!Z4njC^4?Qj)0p(;WyS?wdrjmKkPj`=eY 
z0v27MJvd-y{e>G0h^XliDg@G5Zeq;-$+9do&BkS~#yK1JK=jVD!F+>(fpMA8{JAw&t%60`I2?}K;% zOb(CVlSfz#bry;W0D#?cc&8H4o*utHp$6w`SU}yfw-9M?QkzNfBcJfKGx-ob3POa? zY89H2Dc*Ym=gEWj=+BO=Kt)7=9uXy;<+~(OWrDzMx%V6}cEXTzA3p2~32+H9&i`j3 zuXetA)owodg|y=P6(joAx|ZEqHX;(B%?gi0EDF#shW?6W9XdM~DbO z5bs%_+489rkqCF#siOR^a4}Wh{~Q(_!WZ+2-ufuNEp#OTBv49;LIk=7fJBrDB??tb zuZP2v(^HcAcYfz@_A{+&K?IlyDE)W-^&foe-kr@$7iGWq*0=v)W3YWv`4_+Vm8YM7 z@mv4&pa1r64Eo!fcU_;V$?m$o0i-FzS6?pR<-^Z>IaB$Ipa1O1!G5l;ZC?J=?k#Dh zQd;lqy!6BG{^=J!_qqGW#~`!W(Q!ZD_~pO!yN}-f-h;dMHupB}J$!U&{0XPL4jFjSo)t zKe_+LB@_k7-Z#Ga8#ivfaQp7Vet$3@A6sjCMX|oVUSwJb5l*NzHl!MdUSC0{2nht8 zw3iDT{77>2;0Nb?irQAb&Y@=K{QCHi{DeO}_WsVpyy~_#d2N3VSJcoFajgxwa3%2| z@$)KE9VgA)pYvG8#^Hm@s%-nH^G7;IUZoM8bAK`oTsBqn;yiqWbFvxQ*h!656k3w# zD)Z|(az-MuRIDwq-Jc%k9qM zoe};VwL~k2(X-Mwez}SfhiY_?a5Ifx4w>ofuMnpZW)&?^9KCsxI~NBd`f-F@$YHDl z7$?yo3n8H%h!~2i#9+n*4>^%+?@0W~w<_8tbe@*|!L+PS^Yg{O?4+3q4Eg=sWr8io z7_D}!@;jqJCmAz=n-W^7ELf9B2EO{byWPqe~ z;G7+w9Gcnm?YG}XojvpXiyK?pxy}NLPN&n@KsAI80RTCV1i8)vJIIudN0s9PG-?Jm zmCYCZHof2K4;M7Sk`UOG5>aHSu8c3$)Ya(FdgI5YluDO9DP?V(HC`#1XXSi8A5F$e zDF-AGK#FKe^R#Fl3jncsLqONy!-(6O1Q#b~nV*YzI^y?XsBlB@PZ(oDAT~GWE!4;? 
zBNj;wjjbvM>?h}i<9R%=p0y@U7+>2F*rv}*>TxT)j*~1tU-vqr*S4#`N$=JR>f!6L+@ zlsuXY8Y@+F8*mm7@8N||BMJqtMx1|imV|$XJcG!)1C)X%fQSN$CK5IGw8gEY<<#5I z`q;CYLfoDk;l_DcbWp|J_pHOSU@g0oxNmH70({fqnUm{?6|skNWCSr3hA)RtGTzBw z7hcge+P434{D);h9a>o_2}?>OWLnNjOc)0j`{P?U0ZC?s%*r4A=n(;8FBXne@y0{}oE038N&4aBY6?VX@a=VGBH1oAK2INbSQCp_ElUA0GTdd2Bm z`z`0h-~3G1%17`?pXwA>R>67IQ)g!(h$8;1P)qZD8a5QqY4U1pnc7z^mQ!`2RD258 zqPQF)KybGTMDz&GIxauNtDO$Dz@M(=^UsNYbYV`x)I#N1r`zTyTp|y+L=7x8we^;- zEXx7Fd-6V1y^~S~c-8>8=;r|dAVTv1#3Wo21c@+yfD?KWcoxK_gb)Z42+(`)g+~$s zP-F_K+Jv;uEYlgNdQrF%AUK$n1OVq24AipPr;F{A0wqY-ZAh^#R1E)=vRg|H)&C$A zN@YYTDaM=N>O!m)3uPzBQUu@w>lBEHp>Zxq0YXq{XAR(S@n7IC-aGKlkq;Q{0nD_n z0eqI#9;obH+gwwQ1mRSJK)7@FqwoINpUkI|mtKD5+O224|D)G`@$0|#@WJ8HC!f6e zlXq?(Jj4wC=|B6Yj}G?tF2C^0zxC^dgkw+6xIM3C0GoQl3aS3 z-Q5fKA3S>d$KSp4;ZGjyAAJ7vpP!qaF%=@NgQoy3RNlEz+&DIin86&y zl~vS+pM~=&QYWE!@<%$a^I8)p{9qN$YGXMqd)<@A+kd(I5#_?W`V_JZYT8jgqUeUA^u;O^x6WPkaC*c6?QGl@2l1Y(^MZK8g3Vn`d3 zN#t+mJoz`PZdkR^ZSy*GxpVkAB~}YQ2?K9~Zq)&VE{ewA&M6l*RgWXsVVNdC>xgl4 zSSLUdJc;UAn}9^SK(K>glMtY_mU2<$fEQli5^X%BZK1r4!~BS?+V{3vLQSeZH_dbs zu>{oohG@Bqx*~rCH^j8hFD?i0`lc&sf<+0yxh9!#DP+d|xZ}}a7p!S9+BXT15NV418%NOGCJjX8XOIpc`e z&1MIO2f?E@pU>;M7C=(UJ9q9(r_;-qFYoN^^!ue!s+!kIDQo;_bRvYvln%BYo%NiV z11O<1IdbHkmw}&p!9m+WPKrYtNGui!5o8>kPBAV%$|-Nh!e- z0w^H?&kk}DcpL8LooTtz3M;Ra08idKA<>hqYD3^q^@@JxjO`0uRaLGs?|to^aKQo) zFdR_Hyw{gmj!2x8pe#<9ls2I8fc8L z(cekT;vB?s@z1i z3)YK19`-gFna_HqzH1Y0LIrtqK-6h$E=j!sWQbQ?Ap7;4#Zq8DEg_9C3n z@KoSLRaJ%0$Q-osL*!p^y%D>@CdLv>h!|Q=M`u`+thl)|*kOZi85%8wT@Er;7&gnY z;7H(zO3TF);$aM7Q{nztP%ENS1Tv?G)9IA^Y~mcSWi*LEi*CQDkMWky;Vnth2}i%M z9f_ftGpxkc9AU-rJnm*YOMo-tHbZQin>m3>CPPgH+c2xRzp*m}?-0P7op7F;tR)Ck zJO?HAVG8iaM$QlW$TLm=U^E&<9F9l`08;Ac1^^~0MK?p@aEk4SMN?u3nszLKr^H{w z(uiIBs7Auo_PZqPwH=Vb@8{-~b ztifsN(x$`Ggp*wcCL%62iID&z1Oy-tK9mIT z-X`z|^yFw!f5{U$?>u+{Az(H)*raUMwejAAK&>^>g4J5Ph&L5YWiYlvEuuQm6D|c6(Xy=|p7Edsa4RJMNGWS)EqP1cQIkO;P_QmX zJaPyTQG*m(2;s;IFQ;p}451xDV>j%dtgS{)l z4sNd9T)TclUcCFt*UasAzW>HsZ;pL={l%}owD-)#3wo{0Dpzl3+98gQ9~K0lm2+F# 
zjdV|Ea$O$I{OHd9$%6xDKmE+-e(QJs-qvt${NSzc{OjMl`|+cvUU>HC_JjGv{;j|I zw`Mho;oj!%c0Jy&>)97>K2tcH&*w*n`+#uq!d|bxKAlxXzVSPM`|m!w{n5KW{PTbL z_y4a&zklP#b3;)5OFO5x-y1!AbTpm}H+E6<#*ptl^UC%!pOcfR5YX@Sf$ZJ8cTZ1_ zoOAQ(>~mlI@<$(kgoszJT*-@qfIN`Ov?sE@k@JNXh|YWKY^F3vaxO5OPtYDq?%GH4 z$!VR6l5r#i$%wuwhYFsYb5dvkm{s$BzdxT(!x^yFPE?*_mHRn*^0Krt2j|Juxh|}A zh@h0T&PXY%YF3sdcrwOFB?ugmN0C|Mo6e4ah}JtHgte4qS?z2YBsL+uH$n&{7XqAA zMd&CU!c2moQ&pAJL2)8aXX&d_N@vn8kqIq}#|D{#`q*vYqb=kiCOk#Ky&Wsf)&5nH zcBZ? zTpf)X5V9Zz3L%^UYn>1f?K{yL!jUs#U1DY6cpj}_tJ(&}p{%t*93YZhR0{YkSa}5g zqC}nOl#@6LC3~0bBS#Wy>?6p`e;oeQM1#ghB0Y(d_f1`Lh!q1_4go)&Vad@tgUkAS z>!2+Kzy|YQ+>02b3CjX>oKJ9ja65H4BPbVbmln;l5D`%+Nv*>YdgGx$f0o1AOpL(W zrP|8;%<}xcgnaQ8ar{q^stsN)<#V)Yi^7Rg9coq*F41{JM3m0eAs#k_C!p{^&N;7? za?aP*L=&e_3Y^dLys9c8l(iO68e^rDb97oNs48%_m*phL0cE)+O8_81hPNY1Ph?Du z6&O-yQe;Vby}N84)n<}D*e;jw|91T-E=fL zIX*l&9rV}sHaF_&Q4YsPryu@{@BWi-{_BFeOsQdsNJgSSqikci~H zBP1egj3uu!J(`Ty216l)t4*0@#u!OJ0P$#tPRIZu6&XW1#ezsS6|rJnERJJ5v>%p* z_z-m-jhp;*bFm~Md@x}L1t#7cO4S34whnMpN_ilZOKv$Sl*+U*24Z?LK^Rsk6^ow` zyV|sov#_`${8UESEE-9v<&)!7oo7$-t&j97N^8Wr&PJ0oG|s|6uR@(DVy;1NYU6BK zmU*ViBCG5A{=GYE8|#7T#Hoe~MMY6~An+()L4;3FPmM8qdwWGu#6m`)<1sF`XXj)j zifR!uVTR7Rq9|fA9IzxXST+F*mV*^>h6@Sd`;eOz-!7V5ijZv(?~kd|3Bl(wWFl(D ztgqmJ=hV`(DsZ8O3^(_4kuR94dDlLUz^lYmSiS_SO%!jAFo{4;7=dWj&L&o2zb*H6 zY8lj+1(Smi*)A`=7(ei19(RAyF}|^zN9M~NLOf~mvQU%;_>JY-#*5t*nLhoVMJiJ_)nDQJlR2tXm|*n6)f zdh1KA;{FnYLj)!-3gVrUAjAAZNpJ8Z0EJlU9M0oi$Tsrx*)ReTTnKlNi(*5Iml8=p z>=I%hivS7QK$nRCmbE$VDR1K;*SV7KvFS3!4_zE{#jF-M&;6;$44|EhYOlJzki}wQ zd7f}aJQw&x4Pq_xzCc)bJXZ3qC@{qPmrgEI?j$r?01*g>03FLBhY;GkLMVx}peWd} z1<6dfJktcnhZ*`vY%uh{4*wPyJFANYw9kfwP|4r3+A@p(fq>W%T;gls94rzSg5VB_ zhytCdg%BFkl3YT`z)-M3>;)ebD;$2FBFPF%3twu1K6qe=C29yH$l`h$Kq*T^-phr& zO1|MZ-aC|lLI{Cg;@lau3p0yUKuw`25CXiHh(bW+BoRqK1;Hck1%V`K$U_7gk3kFv z2LZ9M!dqL{wG}c*SuuEGAzZn$aA6)Uxs>@GN|OncAO#@6nRXnaj=*_rq8+%LFQv3q zP1cbhLv64@E#-q;Lth{RMoS)@7;dm^6r&&ECrU=cQyc^fBl8O^51!Z049^_(W6J- z{qFzg^&kKFt1rL0y?r`6x(&MQZ|>yz5T^63t(~>CEhPjX_?qg8$vu!17p~mgyL|JP 
z{?gwFn{a$|bbNex?}PV0`RK#9-uu(<{Nw-NxBk-K_{vxQPVIURKK!8PXl9Pz_|6}% zU%#@^+pBxME4#a=4?h0mcmC*|Km0G|4w#sP~? z<+2^M?Ci<%fkl5Xm@JO4k_wT$^Fm=JbeOybtW8a!cc}NKT2zvisFY%QueIL0#*9S( zXA6mNNH!E2gD)`Kax+16 z+u*X_C5ozIxyS|=5aR@rJ>q-E1qF`AT%Zs3sZeP;6uVH(#RL6#OmUn{q!|3|$$a?d zb^v=Lb}vOm`Bt(KpEf{jWIz)3E{>?QG|eQ9_mZXH-^kWmPD<_ zgBIYN=XuBgZKFp-xmbxM#^9UEI`ZCm@4b*xDW!`%7w7>7y}s635TH6c5y)&$-g{~? z4+Jd65B9rAm)f#^q?!DmopgpHXXl{{@(DFQ2|h@>IAM5;l#5u)V3mg0w?jqs3_N+1 z8Ud(ldf}fj+Ygz2jXT&l#E^Ms^%QKG>Oa!X$ni= z--o>m=2uSTB6x#569df_nexlMwS)gA%p|k|jys5rmnA&XmUhD7#l|ZiSuJl7(UkjJ zaNf>MjPAWiI^s}mrUPBA9XTg$)A?2RC)Z>3g2WBb$(#7daZ#NonxA6dq$$U5xa7&T zdd^IP{2yHie5%y+2aYoj{w-Q=;~f4O<;idxG%TCv6A(-COQsrwW@5Mu7xP4`!eKmp z+-6I=l~EBcajTRMo%s*(Xs<|!8Pya4WvQ&9KR<1i=>5YqcP52eCymwMx(hW(`j!uqym+RZCJ_@k0m)S#>R) z=n#tG&!E9QMr}Ph+>;b#3a?E%004jhNkl|t&HohVUW@0YE|Ofiw0>&Nv9Ev_gWAA&P~F$Su(Ufd~ZTybA4m07W2oo(RA>S2y}g zi> z8|J>WB{8Iut7hTFE$l)7(2@miiiH;t5IuMa5)iz1wIPtGvSMR%d%axG3w7brUXiO_ zuSXyev+>l(-qze&@1#}&$N@NOs?+16`)0D%AKJOyTHnya?N`6@n}hY;lhaARr-`ih z)>@P2xwE#Ok9M{OMK3>{oF1JV2_>?k@8uK=DUM;SxAw`SgIr7JTv24Eo>x>!Bl}Lz zO-&#?P=B~KuY4_wk(GDn^{e0fm5a|j`@8?{|7>@2>!5Oo2b$xI1>%aW# z|HuE!|K`q{uYKlgzu5bgzj5*EO9%IFfAgDv?e6>Uynp*+E#$3R&)&Fv{i|R8m0oWH z1U&uR7v=_j`r-S3@O!`a8^8W*8*2kYcCeNo9Z&DvdGPGh*ZRd;p$8tEl*M3Z0F2Zb zBJ?yFKg)IL0i-OPHDt}kx&k*Q$bd2cpFm*0FC39GrUpc*rE{hRr!u9zuIpNAL?9v; zYHgA8vgtXBO)w}A^MhPadrUrF*gfBC@Uq@=8iXNr={3r7|IeZJc;YNb72WO1Zk!IvS=` z$mE^XUMgf2&m#|`CRjM?D!{5nwCUhu$!o$gSCZWobS*JsK}%dqu2n6n&S8z`U&(}H zG8ZiK$oYI8Q!Keg7o%Nb5F+=VCvTyq1AE(6o+L#^nR1obwCdufc0Px8RzxzfS^ za9bpo942iUqAd-_xUCq76u=d0?^PiYVqGg2d zJD*8>wymy*pK3#ZQV0-|qK0oRC{N@BB6_o+K~e~7EaniZC{SY}hejNsW#fB1L41-C zwux9De+p`FP;SGVgNrLEwh%2%bzO%*QO@Z-tDnGu1599zVC+0@)Uxt$Ei^%=&WxY9 z-l#@Gmkipesyi4ZR-` zWpK~uLe=(YG#Sn3^=w{eo5QOYFRZO?kWz}kJC{kBzxHdrf$j}%z4EzNeQiXRO(v6hJ@Z2JbYYDp3awKEc`=PI za0z<@sFQxz@ozZG!Au#T5uAUC87|5Iyx1T=!tGPZoe(?{ne*!)6vcuVQc9uZqOlo~ zC2#7=IR^qWrGps@0YiDz5TIb~B6>PZ#+}8gp9K@dD@?wfJlmA_weIzLAull2 
zvJbXSXw0(hxchMNi2FF1wn!F#4OkbB39ugQu~2u;&z{bpxoDNu)1L1L3oIfz` z!~HSsLr2Sw%w!x~&W(U&rp8^Q03G?tVeJv$l2Z^IUGbzul!zYFMA(P~mX-s7c{Tu_ zKQ4Eh2!a1{(Yt=>wm1~G-V(EDaPf@Ch495Dd!5KX3HwgW#9YXXl7>%$XCqI6l%dda z2n8^q)Hl6e+H%n@)1D8gbKuzt^Yd?O4@=9s=aW#I$BumdmC)5y zgf1Z()Uqa@1HHwAKfleqWBey4$Zd{)&ft95P7H6e22MOzoxz`bvHw+G2_0_#)F#wR zbEd_eiT{kc%qazc8UY4+Noqcu4s0 zo?K{z28(7A*sz|MV7jpG%LUIZ@vIs|sB`22Xpx-ffxtV9h_NBG_de7sTNveOF@B8U zNHDE4J}-ThEO>%&1Upmts=EmbxHw|_F4&8iC*Hop9|zx0)_{-w2Ee|$RX z^@m=FNoC7Hu4ulyH2?rpJ8ONZhrSk>=uZXA$=&(*6Rlv_KNLvbS~Gfd|DEq&+k$`d zCx5EG_7|^QyZNavzDV%0hw}dIAH4VBPfrd;Ggoe0xpn96hu{C>f3SD?+I+gUGZecU zd9}NF>4lfysnxOf>qWLfpb0Xfp~h=hcE0lYPv>SPXQFb4fAB}&@u0JOuz%<6uYL8K zvZQx@`c^sYosLhgT)XAP&;pL@(~s|d+#9T4M0)z^r=2%S7gbduW{4;=FR2KCvzCNJ zslgiXL_(p0g<@z-1t7|@tm~RrwTV$UG1@*Y%ngm?R1`&QWEYHgLP@C=fH#)%tna-S z0))`>c_pPhHMNqcPzXSc=#3-iL8@F2Ol`eKAwusvFQqi5u4ycUAWuRl=e@JFQdy?7 zan=|UQY4T-WfD+Hz_Ay~Avi=-A?rziuw(_fMfyJx0th8h3g;YH9|8md@2?wa62uC2 zwc^4R{_R!7>l~sMdCU{O{d`G|AI@{Gw%4~)R*t`hVBx3~ty)MMKAdt<6HVM{zvx`% zlfZ$0P6EW*qb2j_hKWRBh-wu!nb(4Ml3C_nZSfSF7^@hc&U*t+CX>KXLK1zPOpG2% zUa@NYPRO6L@3O159kE(8d=TH%zxLk8_KcB@adySO2j)!y0Ot$sO~-X5;xa7fPM)bHUATQ_RDvb#1qhr?i;0QV zk`S;~>p zpBx}58V-k`1Q69|L?Ik_4<7TZf929nuE*nPQP(%FKljo5KR$i*2rGN*($%fu z?%v*0mGgBqa$d?@jZRNbM-%5^d+!1@)@>;lMOqpPlTzkbKixOQB_aR=bq$qRqInvZ zLrQtc6l}`(i>8JQv0`P(MsRPgV4YNvOc<&Dn_XoXR zI1yTF0T}ayfO+X$6YObhomAzHG&Z3yakl(Ka078C>u=MgNH&4VHi?-Mm}B^C99jf5 zA)r(`?AN2CQ)q(4qVD0Gv#d=KG0S_e*Nc>#`#g97aUSuPB#OnvCJwPBim}|FxRY|W zxoVp=ud>t#K<`}WEfMCRwRYqso*8gsp%@|xX#@dvg%DYm1$S%+r{LHqSnXQAN8ySs z&XOf_IPlRTRgb&CDR5gh!{ty0Y_b6&JC0?1k_(o5x5TxyNocgpawB-G6cqXRITjqg z@Z=!3vvv7ac2fQT(imRH=aZa^M0`3&c8zS2yX&~rl!VJ9jEOgxjr$%c5(j%U&HLrF zr${`J^*KoHiAOuQsW{n87Q#3jZ^z}Z zhE5XKhCe?t?fX}q%1-&`oVl%K@4bRQ?=>9`SUrOSp4P5oCW7oT27gta3k6f z=zOsA6ho`na+{kZGQ+>^t4;2HzHxAc$Y9x(B;aVopnVQi4==DFpaOLXm<;y}qK?M<@ zfV>0%|LEYpakD%3?oCGfTU$HAAC|@NliMFS=RWiDtL56IFU@}O_~_nW{VTt{xv~3y z`uG3m$KwZI``WL+^r>HYINLrw`AAm#Uwrvlwegh`BQ<0%e)h9ZU%qLe{^Z_W^W(Sg z+&i9>n6eaWp-u 
z!S1QKmMRlMq9~NkvRt4NM3qxnmI(m_PO40xvex9e_I|;As44i#4_zvA5DUr(hC=Us z+3Nv-^9}$IG6WJ50YOPX1Syo1&i4W%cLjkMV)05rE}C5 zs8BaW+x^^oOA>`ROM$VrMi**<`=Zp=S}(j(pfHqI#w-`1N_gGGT1LH5hmgG{Yc(dZ z!U}etD1ls@T_x@P9F+H*f5Hk0tj&9#{1s1l4_|2vsZJu%RtXSXT;S?lyM-*UXrrGS zXg~M&A?dgc?Ia5!vZ$oGYkXuC9D@2C?Nf2f%Cc;?h=i6KpcBX13RRpW4`}UN7oRgn zq3HOb<*3p`&FfSc_}^gnZ|26B5E>LDMW|4Ua?#lgJo?^xiAcuOUb>>p5n(o)DJo>s z)jgW*wbnv}X@{WovuMX?8|B({)J0>Zi(obi3lA25YK&09`xQYK^{!aoHVJ|YpF^>Q zs9hw91??^Cv}?fXtQT&GGpiM z8qe(u!T!~LDYCdlE^W6V#!Xkr5#YLRC;Z#vwD%>xS2p}2q5L807M95%w0s-TNNjw< z{O#?~+Nk!k6`lptg-1dthxeWcl|t`@_toLye%QFsZYLO6<#25dAOR~^?eFh=PusxF(X&_aQ0X0-2t zJUHj;dQtpD4tLjgb7|(A5Be{?^ts+(2&fK6lgVs6smDqV);HG$z+_yVoKE*HT&$|f zds|h(&_l+|na%y|#PLXNY>M6_xW~KZhR>1v3)c0TSSN77>i=>qig+83Nuc3@P!J0p z4Wrfdq9Ft}&alM|guZDY5l&DpG9g4Ju;1&?tHolgstTrIt#wFLT2Ky5cj=~s6 zp6KU`=AlGlNf4E*2g%_|G=vC$PDxgsvc$5CO~u}#D8eeDdjJ5kER*1pSopXj*qEN7 zali+!Pn=JZrNq4jF{`Pwmk%|03<+_-maR}URrR^YGyxQ~0^cyS|3#pt^Kj@J;ha-K zMn`Fg3}_PRY)sM$()DyYoy}%3j5^Qr5OGh-VPbq5(B#9lcUiL5@xtS*3VfO&sB{`9^b;DonSaN-Mo`96u;a_FWG&LA)I>G z&DB08=R6MQ>H=}eL4Jt%ob+~TOL0jVAI{%t*})L5=yc@!D3;qQBnLS;<6+F$rX?xX z&s&bi-3jA!KNcsjzAG^zB2_?u;Qi%&ERuckEbETvvPAOB(=ryF>$-NWz1Vm>X|=_i zbBbgwZAMQm3l1#@WN?&@hoX%$ZDi>Dk_*7?dpw+nlkI=D^HiD@$z(?o+2*k`KY#u! 
zA6fBzC&T~%eD{L(>}r2|_i|1$+R{nn=X?$7q12Ju!L$0D-XhKxgoYSJW%H{O?<7 zg&aPCCwd1_80MRmV7O-xi$Fuf&=;X8DTMKC)Z_&Sm1h#bId826fl{iTF6!DFOQtqO zt}_L;wl&%UB%#ns^i+N!yWkNBzfZkG!$EJp^QNu=kB&Y$Iy#!q=bM|G zATV?-vd-MN_%Z-od;TRtc>3yRPe$YY2m1#Hhiey~dHJiqYAW~RAN^qW%JZ*Y{POe9 zetM(de|CHQ3(sCXy8q$5PwuVlULI_0e(O7b^pF0*e{%PYKi_%o`A_}gZwy}g+*{N9 z=+3>bT-|wPs~BZy=E9vG*pCk%Rx^)KZfqKYx88VjedEI4{@uU(z1Mzl|J2{RcjwaO zT>-uE!Fvx54n@|x`OIfte&s7KKJ!^$XLs-3m51+Ke(HI5csw8PpWHtB@S~q#v2p8# z&sXITb7hSiogQQ|yL$Cn*)K;Y^}If*>-pMxZ#p`yO+}7Qjz^a+UE0~%^G#LHSRysz z(ozn4Jdnm10FOv_?tHSpzrVJzo@Lp|c!Y?V9`yTt^ps_)-|L0O1LM=NwbM`T@0Y!` zn>TN0&Jk( zgg}&$RH%d&-dGf*vjUt4M1v;Cidm+J$e7RuStzMZP{h5HQii5~)OZ~^-Qh^jCGUhF zCLdO@lTID7lkK(HXTowi$LCi|yoiR*34#0iavbb(k0=H%irlE8?2Tb#n> zu*dd1xkS#N>XcNh?=H$DREJw4sFzKXD<2;B-Wz8*1h`m?)Db8IOgG?9g%H<7O*&`q z+B`U+<_T(@v%jm7>!T)?mBvSjcdm=h)Z{ zV=o|MOiU3l#)KRzt#$0{7{!8Ug^8se&SJN_^2gg$g2N5E%+~xvr*PIjs?8x5`Uq`8L-+y3?`O?c@aL(Vpcjv)_du6{@ z6d94&9c&JYL0%R@O5>e#l%Wy`Q?e+_ zbwnu@O=rj8R4ncfO|n|R6K)FU*agg3c1GtLGz+EPqZKmN`sCySn!s3YjE&0G=$^<{VKoTPXz~5J?nDNhxKfbaWEC=0Zeln$v|MVAk0N-#h>c zA;|fNWBkY@Sz>YKopc6U7Q^JJwr`jp9PSbblZkJSYbo>6Ifp>xEI1dmwM+`x3Pz58 z+(aWbRRmfvzs4*>XgpGJwvoVc2A2@MHaU&dl3PflnioSBVq*gi+a%Bo81d(r56o5@ z*&YZq6}MnnJcyJk zzCCeJg`0z8tlb2gV+E%?p{iRX@}8xTCgMtbmP3XsdbF(NMeiN=rSYSY;BX?ztU8sQ zB1aT>U|EE4%l3nino?7mKHdc4expjYc_IW4HuTn-}IV|Ix+s-Dl;uc!r z_Gr&0>N{r;7AUm<5{L-PHOi?`{M(XS8_wrn=fDS+<JE_2nK&zXOFp3u^llM&kAcYcFiQ3CDda6+L}6D;3lU;6)}tIgor z-*k#bTv3X#Wr^AzA8AKTLF zc>@~m8D&Bd%guog{o50c!0Gc64G{}tdE?Vqd>#8BCh(MU$vtpJe-=?L6vFX|2+$Ld zmMyy<0RULP&|72g*#&;G-khBTna(1jI;c}*&DIeBA*Rnd-{d<665I5RUU+9MxsXg5 zk|xO4Q=_$(gtY>=K8Ijr(fK< z@$&WS*REW?Bmo5g0(bxc;5}#optKUuD+i;=^urHsfA@z!7=83FrgO8kc~NJ>FMaVF zd9kilK02w)Hk=$Cz4y_3n;UBv_qJz~le@RydHsj4z52z^o5_JE`S9@W*x9=$?#(~= zwsU4zndiRnrSjU1qvPot@BL_YFglUBz5XTKcxwHH&x|&<=BMK;L;u`Gyt$h@2?|VE zXe{<%Zx0A0DrZ4T*t>S)llwmzYz$w1`Q;z};M*5Bwr+0keEVB}_`Pra<4c#XZf)QA z+UNha%=+W$NND@~i%-A(CwD)7@7*82_xfyl=ituU!qLvfXD3puJ^ks2_eb7aoo7a8 z@7$T{{#sV{dY1CCn9b*?#72MZ;L)LVHF9g3*o(= 
zOePQR-{0QZ+1%XR*xH;-Cb#e1&9ZEFcNa*^=BNAn59ia#&dwH)yM6oPH-7xOlc(kO zjSDwlIVsDlTbq4XPmUg)9v=MBpZ@-EIQ;zQzPPcm{q1jk$9u88we!+TFAWF9cru$z zW=F@zz2TrN^AA7y>A}JN?%JBQc70>ZleqEpGg9XSfB=V6B1daOaHzn8AgC%^l-YPP zSs$*;OnU%FUT1lOsgUKl&a)^xE@;zoltEi?T%}_qXKAHaw%pMUm{bibu1+4-Hd~zk zV7ul%H8w=(VonK@;}p3Q0*OA;w&o|~EV9YujY(BySQeDezxZfexaU8$#ke-;_&2;j z1Tz$Pkj=ZChivPv53L~jnj=uuDVcX#4d zU0cMUl7W;NpeK^vdo8u8Er3%>5djfNA&RVY9;^^mRSD;Et%L7eOKD*dsN0w@{B$}U zjYgyKNuK9rSt^+kky4>nm5Kq3Jk2H8>B1l}94w?GVB-%65 zZSG=a^N1T7Gp;R@lU{M<#=RCHa@y%e!4k-!rFTp}n(4VfEWffbC7Q4${?SD8P%^b-r9}MM%zRHKNHaVP@@#yFxBG=&AH4VrFPBB8!YrM$v$?Bl^43goSgz|{ zvAZLXY;`&UTAxmFR?W4@2ScqD0b~GXHYH(uLW5FS6u!9#j;w$+bvfoEtqw4c<+H;T zOtjGr#$oQ8;$Vq($Ag>V_u)jud?wFIS_44vMuY4+tgyX{FodQKj4hXab5v7pieB43aWttZrq^1a4^jC4Djpp41lfwsI1Bb6G zMyn+xF<@-$9~)_Pl+zMC$+C9fv5En%RYY7lMyRnrzcVon% zhy{D`np)5}Ct%s-7DrB2$B9N8_;FEu{J4PUo{?p6wlvzjS_!tFi}HXn9(PbIOYZdY}%+OqBM2COEfu{CqfY zVe%YMvyEC)NQ&zg6_DbLQtP`3b@DUuIoD6Rdoz+X^g)IVKD%xeZ$Ze90Wz zngu`6o$x$=!kqOy_O>>Zepaw|uI{mu=Rm_{zZVxE`BCB^>I}>1{AZkrqea0mL1Xn--m@>CPUo`7p8L!zm!{Slbrrm|;~MB879YNQ zw;0oe`u9isFa7e@E2Lr0xWdjFMAf927m z2jBbd55Dll&konb?b{z79KHFI5B_*O9bLVC^V;?2Mk6B*%FV6aS!D)mo0+7lGAAb| zld8IStVolp`G3JCxpgzn;coIA!YdBFHV`{s7|Pn!`YifAXvdqQg1Ie4cg zlgw%r*=a;vkFWeIv4@QKEcCl6a-Wk0@tmGG0W}qdmMJTn^OO7^4 z$(peIB%n3i5Un=ALaqQn>x{_xrj=rC=JWa77<2#bo3q($ZEgL^l`BfA(P(0=&9bai z*~!Vt$?1_&D$Dd}bYhIz*x0bJrM2GN+zic57B%a^7e&$U_t)3gwaT4yp`4)i;Jp>B zeh4=9!MKk+PAz~vZi=-wnCe2mfvCrF(|M=_9W`NKDs%!^9zhv_9JS3vtK{b>(WPSy zUEDja*WoTVq{D4#xRXAF{uaq$0cNvUm^h>N1&Nl=muM=CQ&U#~ zSto5i2f-?CQ&V{<7GGL6Gb~NRdp{Y^oO38B&-HXV_T=(hKX`D@&1aXdUeEF}@0DBY zTYze2?AG4yr_FN?XgaGVRVB5$^Wfp}!w)UNPYzFz$jmA&bboDKX4-3&_sUzEHa9lZ z+L{HhRgH)wiL3)RSzB8}$R!rGcB(7|0TD`^D*_@xAPAM!eUg0u$ouu=^u$*)6nzWw zcxHzFc&(?ivZu6=QdV^(kN}{_r6{#0*_a#%Qt<9s=~ z_Y$=vDe4kY7syFgRw#t4D<$2c5b0Sc87*fHMR|rBr)b$_D5VT;6<98^lmt`r54^Y# ze@hIUPgp#$90Cq5Zm{O$-PT5G6`Vzo1{?9rI(_&}qx+YI3013cT-L$Gs z=Fll$AuFnr><$-}XgPG6m@kBz`(ZhVk1vzsEm!+Fzu-pOHu-JS|Mov4Fu9Br@fnt7 
z-9&Co5P%QZmQ)ct0$StD+n|6hsg+N)eXWwm+W6Ti>L+r3TiTB;yAv)J-9_AF08Dg@ z_5#n+L&N7N7=#f1FnDy+Mm86>K(N|_)RfPPt;NIlA!{(qEsVy49%3h_Pz}-;6MOk^ z>csbvJ4aPx;)zE2SQf0EMZxP+wK;Fw`X8Q(?$sE_5J3^)%yBZo6MkS5TMS3zwy5U( zlDjv~ll;oIMs4hBAHW2!+K=-amIY2szF)P{Ro|aK=b!bSRc3kt-3Qh!6vF1*HKQhB zSOIyWZ1&{yS*5VAx;P<))~V;?bkhIFzOi!xPxwf};yZ6{!-4Z_{^esq(I%DW-|%xl zqkSd*(d6LMiomaUz5l4Vb_! z2`>P4czK{Cr>pHN(fiK0gAl=*4-5SXVbP8hG1TY5MgO%hJ1KSGna1$gFX5VU|CT%u z81DdpfDj<`<`pQU5T1w-NMPlR1StW*lLRD086q1Zg-OniZ=#(D!kR@W7>gpjDA_Vx z(NmM$@||IDA|aRXIg&x^9nYYR+egcSTmo1`lyaf8Iykd)auz0<-eCN_XRI*zXTqKm z1m-6JDsq{WLP~Jqm{=en2?jFkCy6^QBI$cluc_#E0 z%$=)7J-PAJ?o(1pyYcilf9+b)FJArXUoXn_tPpR%@%`b(Gq1k#0#KzuAQlK&nZsmq z`p(;LKKI-+_wT;{@BMp!cPID1{#X9HuYBekuiSd!!3S?3)Jmz*sQSs>4{uz%t0emb5^X2ta@QqcP!z4xQnzW@9)Pd{_(*@H(%SFT(ePbNXCgeG*ApYb$I zIEee(+uOZfK@)rN!j84G_dot=QDhHpkMcbK30wg;yL9~}C*X}A{-{uEr^lnw?BLm7 z{BQ2we0t~F)#2twiSok6jca?aNN|@vrK?#rn@`C3>o;D>i^A3Q#~*(5_B((4;~)Qc z|MtWD{-eCmJDVFQHm+U1@bNp}yG#Dc zja!$tc5*rBmz%rmSjx#{KDn~JH5ye9KYpW^7eD;r4}bXa-GBGL_xG&Kjz_0T&}4k* zs(C#(llvdcPLHfPF;MN@c&_MQ29Kro{b3J$IUHYMnG)7}ABtE2388A^w7?i99OUNd zd?thd(n274FGMH^1Kxv_(n<#&rB_N)H9yYFfumwJry`$vsf;ltX3iVqO_672G6A)Y zU(X}uTT$xP){RVhiq1|-CyY=^Ku1b{doL@BlM-q*(D8Cc_FmJ#-h5!LlfN;(Mp%S8kNmNLByt^kZEm+tgn@V zs-Ag0lmL=|oKphO+1ePDDQj~e2i^f%ArK{ikd6pUjzDED>v|4ET4l4!Nu_)#oCoPa zA`*HOd zQa8-ZE0skdEasSp1qQoeBxTXh5#|$?zqYmp07-3Yu8K!fR5TZc`9~!)32cn*loP{N zQnNlTDn>eUdt9(@61yT<_7HL~2nd7_QZJS)@hsdj%*r`e*R`QD!(t3WmYGJVt+n&n zOd|G*VtjNkolbj2ri%V}JT8i&==H5}-kHTY%FEFCmY6BcJEO9kn1=wC%bT1{yRq7Y zY=cA-=3s$knR_W2eEA++QFGv&`}wP*Ie;hnoN2|u_^7m$vdD65f-F2O%AQbjlosnX z0)!Akg)G;F4oXk~X@TAm0@cpcvl$8!LRd-T@fbaw9Gz}$ZDmRg`vXKY=)Cu{*~}PI zJ3k(eh13@=T<9wwY8=&do#$mRPY?RrHkq@Dkqoeoly#DCXBLIKt(xsdC{lCp)x4G5+1K-1|oL4HJ3;>=-rW&k4c0jeRC!ISqcROv=UnVp5ZE;)BW z5RfPDJQ!mGtJPYA@F)dg(=w__6$%)hL=#h!Gm)rmoOiUt>pE3d=YaqaA=XC%Kx2K} z^kr2RoR&Lfi6c|H&BTZ|2P!c;}q;iSgzKWT~zg%gIoQ{JxdgJ1YlELFL zxq#7W1i_XArrk=XozvDfry?`!^KES#*H&4z+E=I9i+Go^W@(*|oD*FO&|$RVi{;;U 
znnj|x#bXipu0e~hMLwI%mRfDJIE1+;CJ04|2{_)&%>fGuq>1{crY}W;hmp?2WqR-D z^SM|qS{0~qbl=9wN55}#YR(Q|UDru%v9Ke-Tg>TFvK{Sx;nagiU2vBbhcnOL;sYkf zkedq=!xrDrHWCKnf3`FtPm5e+ei%DxFJGNZFoCT*f!o>Y2*Q8S6@h?@A`nCXL=pGS z-q<=p%K@;wY|UX ztKBl4%fRPc31v&n-0jIC52lEaA>j@vLyP*e#3T(bcv(z=?9jb;KB2Q_&N7A5EG62Q?TBKb3u z3y^cxab*V@amYC*g$@?Ku(eW3Yu(&hrIaTJtgxXFLIM!_`P={i+C_P45S7%ruC3PE zIqzI8WpKd|B7zj&Rf2>7Qcg+CgcP%?Qc4`3j+Ih*p4YV%LU><~Mx)x(#$XM>*7F&X z%d_n1=U*%a!`Ht3tv~vQ|5299um1XPf9Xq~Hy~!G!mC$}!| z{=#R!@~7YaH+K#n{Kdci@4WERm!+O51qMmD@$_g^&u06Fhj;JaJ00J3&Tnt;0*NC# z|HtqAUR{kwqoehF@3|LVlq%oexj5|iObt7`FZ}!e`+r`U>Bic+052t8+}?b6|86y# zAKv}s-EaSke|hrY=-}?*(S1O9{nm5Y_{NXlv~QvwA0JLf2RE->!5#JHpWX3Tlv>K( zu;1_BdvG6^j6p)INRrK~FR(MO+P_3od&`OmQQJA0q@wCU*q zlIyL1deEESKe%tVuhrX|C%W`SCX5@>RFA4G)7sANKDhn%;XVnM7n+E!TzzKm!i~XT z5W;nXA?bw|KS#J$mgS&d9v$uj`O)a)baXr&jSueql}gR4&uZocqk zC0!=OY&I{-p>eL7jHjdV$R1AS-cnsVrGyZf z^L}1AfteK2Dd)%|0SO^cWtjp1Q+Y`)mpLf!$mhcy0an5|Qv(155~b={DKut<2LMoO zr6I^bgZH^7i^>o<;cMxX>j@!qtps=hsB?+vt@jh_AP*&>Ay{~c2mypwRD&Y}09tE> z(t8gcMxxI0!dY}yYTX9_0?aZn1|UPsB#>7@4s!qi)DzSQkscuUL?wbG2O0=vRT+?3 zuh2+P*A;<45JHd?0>BHoa0wS;U~RA+%iud9BJW@#NDJX55Lrt`q1Rd{nE{2^a}q+J zkcddm2#;Q9M36#y2X*a$G9{%oLgzCFV7*Z~(^=*ToF!`k)L=ZFgY|hP(7D>o5ve!q zRdt3Ey>rGnAGkVdr3Cm&N-3qVjtC_xsX!~8NjFDCBqd0B6a>l>5!5Ag?*gmDm{7ODs@5j2{IW z{(ZZ+RubdJRlyii+O{GEq8E&eToY@Hci{@O0YtRcP9_s$Os2p&_u=~=93JgoxpaAF zXWJPwud5;Id2KwP)P+e zEV}3jp(WO}FY~RnLAkN+%2Ij@h+r+0Xh~2}YV08*x{VzvrP7ib|6DLoN+|(+zuyOP z4<0-qqOGkhA>?E-1)$+@T?l~)+uPflo103B%a<=ZZ={d_4nRPVCQ$-FFeszUmf(Z9 zNU6<~6##&u$b!%*gb-R*^J%Z_Ng)w&Hk(bS)4&Ke*Ee``;Ak}qMJlK%cOJ2QI2=aY z;0TbEbl{r{%Ss(Jc^Hw%a8q5hQbnC1INbwLio-_4j*3d$-0*myOA*hb(#Yd*ISR$R zt9ZRs^(Fu?l&|55I}49aT?cw4nj#_Lb-syyjrbqDBepiY zC^@wV7i}n}&4aQnHE&d>*%c z{y3fgJl~wuzJb%LgqJdtO7zCV*GTRV<%em zuH?%|HaoXw)gHx@PV|^aZCnxedOk8IU=JUXo;v(auE57vaI+w{H1YMeEi9=mfLTIe z`Bbjj?kEHBS4YtZm*Y%h07L+0Bo~O-@Au)@c*wojU%chq5-`O|I-H!K$PicE-}!x#MZ{HxbJp8QJvuM3qR!`I z`OB)oJI6^-omNM4bG+houT#$Ne6UT)&sol6q15IVQ6A_Vb`?DV#tJ#>Z}ZvnA-pP- 
zz8yrhO5j-~Z-k2%KIM0=+ONl6Z4sz*jUn7E!4_CeF526@7#Z-KBf!n*e1tY3jTzp_ z>qJ+z31q$N5*;(h+ws+j_by6AzNxFDghUpS67>g~GQOqE4N$h$tF!MC8=iI$u9^>*qS6_T(>(Zq=w?F*$@Bfp5R?psg_Q&rX9Nzh< z_09nu9`1i~`+f4`K`E@6ou1aS(|gBQXOrTu&9e2q`Sf(Oe^6%f+Len+DczUP zefpWFuK$JUXr@)qS?N9HT3c7|??0T(5AwXU)(*EWDqUcfOCh%V+ho_3QsgZn7QNo~ z-i44+Qqy145)kbE{{4G*AE3$xTNjirhMCVYWzF0>qf|DptTAFRSOc`ZUhmSSD|wy+ z2ms+d2(e&-XNMu~Q;_N|UAz`rVy~}n0^r8R#?!Z+J2^SN`{?6|+<)ucZ$G^M+Vz{? z*t_&~EPI1pG5`4co3$Lc@$B92jBJH#`rh&VyYK$kPLKC4UcPYg+F*TeZLpp9HiRw( zX0y{rTU&itL`U`kP^n_)!ZnKm0LErIvh&)^r537R7FvG(`Y%7&f3Uy*@T1!wUBJPw z{@QO$#B8v;=%3E-Ut2`ID{8B$Ho zDVZrJoH1BR;n9ObL=Pl|6dY z3g!kwL`uqILyj=G4Xdg;o{#`gD0IJv0suVt8o({t#e6=W z&MXmugrZkcAwAFu8cX%SsnBahNdbt?kq3!dl`xkQ5kLyr>w!d1cv846I=gmY)n8*t*HzF3`z%(8>zA!0h|Irk`gW0c~!~0@MLCoj)+1k zDTM)3TY~vCMzzQAc|@Gfs^E}|M(~AzOHNAZ{UX9^GMS75dFb>TrJ2As=JR=&j9@i+VwHa`o`wOVISiH~9K%{7n~7$HL|={H#3DH{ zRxW<%l$K&B=>OE(>Z?f>tn4si5C5Y&E+ z(@BT%T;Bk#w!fdVU!6a-qg=ynk)N*G$^@8F$`IKQ)BZZ4mweNy=ytxVvsYQ0VSqd@FfRpmVNH2Hk!yA35RI#Bv8aks%V6#G??YZW{tTT0edetVS7!4jfe!QpM9`Y2u!H@W2yH!*ONbzY-n<=l81l3X`GK&Z7O&EfZWo@ zWfC}-?XLQg8Z&6;oLBM3RSR52SD$cSIH`F;2zQ?Jh*X@OoC9aR9jktV$MKp@fOJl5 z6`oVePt13=O>I`ammerx4zo+>OebH!CLSfeF@lj7q2=#w3!l^^I&)1SlFbmV2MuA5 zaIoOmyeS5X-j;R;Sl{pXG{+84=(!XTTXh z_mwrl*eIm*-j`Aihr{4n$g(U1p5}QDmJxGuH^cwxMr!}qtZx+P ztQO==?M>b56{zItcrrR1IY&-ZI_u?mE_0MnUK!kQcmn{O2O+=%7*m0FxfG7Pspe8j zqDAo5d^UH^$=p`P=ThwKT-@2YNQ9%=d^8yk%RQ6``M?8sf}#W|00;!bfiu8Y0Bot;b9 z_SQGGgA>Pu4dET4bg&d9DkBekL_|A+_@^lt!7=)3Pjm?Z%VC z@pyc2bl`n$T_tn+&Ue0bYcv-ho>L>eox-8+qa6n3m2Y# zfpot%AS4Pyn*!wNabLMY%=)<;4AzP)@5v0n`{`^lI?0q?)M1wbypSr>(iM3olyuGj zK&G^{b~>3zvU6i5lgWHO_0CmQHJeSXwH8e#)p&G5t{w~vB9AC`c6OZHtg5;978)v$ zXXV9=C(waLFFk@(;IYse5P*Q@&XUtQL+9$LF|H|V-rNrcAHWHQOcqF{Qj*IV0IUt3#S*zxs+JkMj1{b*O(SRXEkvNI$=NhyN+ z#V?f1y0%48tgWrBudf4uwLXxH*=!bgXO?AImQ5y;wp1N{jhZBv8l&MX1cgNo6kAFr z%($IIA6X99WTL(rw0gue_cKJ+$1vW=oOskvTS(&BwPDOy#v~COxdA%SNE3N3UZLwF zH6@-xbaOjdK90uoFrtuQt?l>w0Z;(}0^BInszMBThK0@+tuH}X>y(xN;DDx8MF@zf 
zQRuu7mb~{0F)M4OGOcx)X`qF}Uo@qeYKb~40y@p-^Z9%p#DY+Y9a%~NheM}HB2fd{ zvUMW%#ahOEoG3jcLD1g&Ua!Y7ryU^KmZ*SDxcwrE1>y++z}FUtB!EIhAP)r2k+qw;~daspIN*ZIG z3pVape>o#^)_UiRQknPQoFQ6xhJD;*n5*#0roE=_k*&JwwDyZ*8r$lW25))Bcs;6IB{mwl(+_HDOnoZ&q0`67t?2 ze-%|+Wrk?0$DO#HDEx6uUhau&&YXhL$li`V>VTMGDmI(at-SAr7J zV$wM#?=a6pK)89AONl%~5tg|_8@9AuSFmj#;TV!^aT^=?&3rzc_>=Tv;lH-00gt0U z0BMz&#QhLAf@d4>2`Amc6MXA@U3<&$afv?s=5xLzBBWK%t}<_RkG5($*wteI0TEUd z!&y1bqUj4#(_zz7u!SY#w<*tc8b5^vauez zcn!av(EZ%r#FPTZH&XbILigZBs=7NfyGtn%AQW3eY&s8Plk98H+y;ETTI;id{%1k z0wMxm6h+|9s;(tq?Ys+b&$9V^4gj$tg)!#n=t!!PyhHD@TxqQ<;|xJo6xKN!`Q72* z;>#}U%mERHQP6isP=>Y+U7HRD3nm*#&fU8um3g?7=rQqVE^#;>u>zH zKfFky|0l11_qm(TUf#W&sXWu#2{3{{Yv;h!CL0We5QY#4E9Vc#`*;5Q_kQ~BJGVdl z(3)A1<%7XsV{3Cb93ECgsEd54bsuH%nU_Cz_3CwJV{)Fdix)3lS6RPb3^&$> z-b1h7d*uteTVMVL5g21UxKQ5L7(+Oo%@(o1QcCYFBIdbv&Y;yoBlsLV2wUWt%+>|~ zP(yI5`Ae5?SZkfJJKMQ24wVVL{)7;S%6aFUQ#lGLBo>#8o8Eit#}}_&mr@E;xyY|v zz4iScz4pl`Zw#b+;hE2GY+lL+>yIA2`_50^8l4_&t^eYWUi-7-(ZiFIlh3^Lxv`mo z$E}UcLT4X+_~Gu}UY_TZ`D}Ex*xlV73GA3Pk6!!fyWcrHIWkmU zzWzdy<*$8reC$;J>T|`~PQS?N@yOQZ(m;#jJJQy)al9T@#yu}Oz(q@t<8Y#8{Z!6J-z<)H}4(4b8!0R{PlNd zC)Lc$^5RpvzfJi%lBg_{&XU!N$k@}v>7)7aqnRf1(ChV#akF_Xvb-1$);BhA?5xYB z#JaAWb1KWE(9RK*!>o|S#ttGXB|^%k5HR(i1xNt$&U-I~CIQxg!pb>^0+c|3Vl2|z?bw9b=telo9s>Oq!EL}#rS761TH>Y{XRLkX#rLjKtJw5H0WuOt&d>ZKNYtm&rwRK(V zyclk*)ioGnw)_35gt3DY12ghdPh)3grbH$#PiI<)LSP170Z3CjQ`Ph7sXy%%Lj@&F zNjc%LdT?;>==folt>ro+aEFf`jz=e%k`e*X+u3Y3op^7HGE-WUkXezJy}|y&k1t=j zmgoJ+cxGKaog5uLygRQZsMglk_I7tJZf#$ZBI^w{hQpz?Zf$!ncGHC=5qYU*Yc!b! 
z(^YKF8SjoJW~G#rI(pfa%A(rFtyCd&JyF2oIKd|$%}KG2M`C9|*80->voPdD?T>;K zu`pzA#NqLZHj zX>^^6Zm=gN5?ClkAKf0F72}SgNlf^t>zW)O;=HN=K;;>s@ZKvWXH^~E9*X}u=M;LO zz*-CBA`G)ED~h65<^+p(m1UWz{Tvy%zDlb@7}{F&IZ3iye&xz=BG+>BF~2zqD4iz7 zrJ@7~)KmjkGIQPoqVv?WxdZP#adU8!YofIVwTQ#jS`|eRCS|R2C@oq`-p%RW!(--Bm?S!7BHoK=>_BOwIJQc6h#WLyxW0VNUUnf4w6n+p?n&R5k;%0+f( zQ_7iy0Ht(Y$6_@EAc+>y@gzlEF9f?F{4M&v)~1vi0Kfwh0U-iHZDS5;=;S-$qnH{+Y8vam2EPT(1)bdQhRQl3v5qSEVnE=2V>)}op!FyeWcaJK)5+Okt;Y| zjrVZ=NXwmgqlCk;g-3V7GA-rDlUT?_Tjh-3^Vbn`GqhnYSW!3*ndGTBW)xo&m~(6w z>}pe(B%kA)!EMlRW-&tasJ**r7U4$CF=N zU^suYh#OBh?D_LwwKb2W=dhY^{Y-E>AD&MLRerj$6_WMXx4<%Ue=Li{R-8Mmpo&}p zd6HG1A1v~OSEquLP@DD)qZ->L6xf7E(28KD^QXFsQ6h`KX*0DbtDpb6WV1RGe3RvK zD%vix#06|VG2aw=N_6cApJb5mQ_wFs&E%_YQ~01D@JWWDxZKM{+^A4+0XSADYQsOF zGwppzBojXEs02}y=F7J-%s;wqV)T8q0C2E(Zggazafa<*6M>K`;PDk&>rg)+#u8yG z?Jw5Ijf^+~lY=*k8FgLrBM=ogJ6=>%kcho3AK)UEtFo1~Hbibws3s!Bcswq9Lmw1m zz_KhIkaeV#bO5EW*31B*CM|VV3^s@B!(o4CVL3wpgjq!gkM@l*_s9E>KKY=KE-SLj zx1Rm#*MEC^uqh3gU@8}a5azWf=LC|L1P&z@#tZTuyr0i%rHhxYT`iyb-EaIR2moiT zk_rHvCn=FCgGvDq2vOBD>*|O5_pPl3>gjaa>-8k6ix)2zMFAAr`*}pHttAxRLFJ56 z8iY8Q)n#7lBBzBN1Dqr9RLv?B%Au-eG@noHeCo~EyQ(Mi;cys(=V;j~m}cZf+3w`@ zwBPRs%ZSdhkVTIuTpx@XuI&!jcPjvJ^t?fs*D&x?&>2YOE#w4(oUBGd(W%u!2-JuU zD_fBzL@ZQp+#CR`H^$X5@=gfhl|`YfG)gEVYLuDK0swL^og*P+RhzQNtWY3?&ZY1Q z0D)XRs|gKCAwdcuPDbNi>1WeQi`+XspKB1a)5#rsItE$nuH)5MGX7humuU{TnoE|=y zoE#%cQ4U~sbo&wR?d|n8d-l}kgUrqS$@KK(f&BoJyPi}7QNR>_IGwV)g0kioE9?OefQW z2R|*#Qfc98Gn-ASsuD<9mQ}qgQW`rS*}4*_YYR?5Z>_hf2Hrbk%xpH%TCWY(%e+98 zQq6^_P9EN$&*#P%XKY>9^ZESf=(Ow&ws$Y|1_J;%-QS;%NAu~#ncIj0K#)fOC54hw zOQAiwV9lIPr$khi<#al|bLS3L?>{;?tZTe{_32BOZuSP7gTYo-^e;XCs+1Cy%(6_N znog(l`CMq#1L>Xd!jYYgPU=uAV>CWiS(c;ND@Cre%KHy*-#fg0G`IG9!*vM=*7$j? 
z5eEI9*1E21X%F|-<&~Z7f$G`X)YVK62A6l%?7^|ts+v_~PL&&P^oQup=;Y+^bidad zI9K1keOG6_>rXwqvvXC+{FzVx_W0!R!QJMi$D=MYLe9 z;#y)-%7B_OP#sqB^qNgEn}KqNEQ)Dp3cNX4(B#LMBHl&8L%P>^&E|wXICIA{ut$qq{nF# z`?!su4bM12$dQy%3MKj6cn10VvC!v*>U zyoKO`ttyV~T)YdR07Og0xZ?NW;@y2?c2Pnl5~&oLl0J)Y6{W;-b+V@2`Xcru>_tfa zB|6KKjrbO8{Rd(gDiO71{fG}`S#o8AQ-DZ+;;ewT;c9v#2ggG+v_(B6Opkw#_>9ZR z`_aW1z6|BX0s;pJ1Ob|ppm=10FCjh~m8IA#g46ckS>zo@0%G$+#19MCF(Ua2pF<%# zuB)^aCr%KOQ02}u+cxfC%Wt&GtaFaSmfRLS9Gy2j)(o^tuTD%VJTD}fK<8+yCeR+| zeEV0_b>lL{a%rsO06wAF1y-1EI_GfCm#};Q+nRP#^EXcK$>$m278&a*HiYbIPn*kH zt5%e3I;{5eV9O%L_mNMaEq?H)+UA&KCUMwK2TU~C#9E^~eKTZja+w4gdd0&XA|v2# zfaDYhd5?FXNf>vsBY|PH=S<62oNxm6Gycxn&qUd*Ezj`TcA{v4Z;4#g`ACAO9D&@v8j&HXi65_a7E8o`SbZflq;nyi*DA8J>hbeOJLiTcG~b^yY@3@r`TW~b z%BuUhITfEEX`_>voP$@Z_-3-4J+-}GxlRW-dKD8=4r=3lRE8_8!VLMsO zW2eFIPv}PY@N?r;q&W#Z6B=?3RbRD_tCrAt!($PwEzcy3DN=RlVn*l7KYX!mEuODE zx&b1g;~D&IY)SZ9963qq?F2=Pbm2e6O8Us6aH8zZ#pyP;g=L2{zq1_{11%E)pGG2w z#IQe(hsi6FP2u;qPXXt55n^%qQK^q_=iZbU@)aKBEGfP^>L%1^j~OfoJ`1~ z=2oDn7OkzVMb#-HgLAIP7Ci}^a{>{O0K7m^WEp8qAx;s%TZ>Ybd2Z?|S6b#pP`Z)TH8JCt37RJqU!1Q2?yghVgB144(=SmDVNR1Qh$Tx#Q7HFt$~MoJVC>Z%e#DWxihT7mPm zl9{ols$5wPwl~$$(b2*FhojNtL`w2lO~ysh%k#14>6GG66{056e>Tv(-DU67Y%zOcM;S{Lz0QH!jHE6bJh1_gPtRkhF(^E_9x zi4bOPtj8RvL}#R+HBlQS>-j`$sZdoO97!XGS}ScTS35wI0ssXlN93z|;+;@Rc_IK5 zD9J%>oO88QSp~*>Ulf8oPAB!ENtCaZQqGdKcH$Nda_hPVrGtjA@(g7}O1rVKX-vJb zS%8On?gr~SsNe!zsX-usfLa1sKm{3D5&}cTnAy-B9^ZNX{{LbAEKlD5z`tXx?9z8ti4fk$6_uPE?2jBj~ z-#gkrzH;MJwA)wn+EvxOuGe;UFI>JU*S277?a>GC7i(*C%y6(JwuaUY@0vM`4)%ZV zAMI^#ZS)4S(dqHQey-Kd&W=)IxckhbhY#k}xGanPhxd+8M{8?aPu;k6@%jtoytOs< zX=7Itc~e!+8L;Ny;Jz6hIBTm4N#v^6o6JUL`hh1a1QHQt*=%;?Dw@q^l!FJ7Quq7) zjrFbG_7;G!*49oP9UWOu)!ay_5k!%f{oX(q*Oby(mStIih}QVp8t=W5Ma;S?ilQhq zA{aySdPYQbHLL5nceR!%1$=ujZ*1>`B4sC|@xzDr2ZKR> zP(FC@U_PDp`UN7*s_CQs`-g}7!(o4ScXxYuA=d?htZPFc1{<4suU{KeTzKhde0*p0 ze&s7NwlA^`-QOKEKXae5x$_MNyC!v$+M#^u~pqy-U5! 
z$f9^O8Gdy4&fUAWHQ2rF*N+}Os7EKedzUwNFKumI+`D*neSHf^7-MECSeYdvDg*$i zZS9=ZbY^ys6dxBou??|&WVD=FpNLJwjQ;1yOvKFP z?U=+^tG&%}`GD9&CC75=RaJ$+|B$f}?+=Df9>N@Ae*3*5oH*}&HJvFf$T>h#S|E5J zCr}2@pH>UD9Zd5=2!SNzq9{r%)E@1{iTDz(#FK=}-W}>PE<=~}-;PpDGB%Pej%JHC z7V-CA+pqUorz2fv0$tO2%7qsg<4ErDdoIz-Z;PL`yE&0 zy+@EjAbFhD(?&2{pld8o&!yq8F7PyiIxIW_F5F<8jdAFnm96gtqhBr-lmMJ>0gqRT zv*Il)18TK((V{5gD*`I=-H*}tRaG4yABT!SS(cS$8KUfCHbcBK&L>E>;k`Tt2U)aE zBA0W~ISSLnI^D&V27J{qNunLg6LSLMZTzJ0vlGc|s2z!jp)O~YbJfoK(m zidS&!0<$~wcsIVG@tj8Gitlx_K|@P*d_r>xzd1S7ZDy1xZ+rxPC*MJC6yTqeNBC&e zGSjpP*Q)vR_a-ppbLbr8oEIh0*6neUy^Bg4k+ zx!Z#Sq0?N2HdzQQDPk?HV$?Yr_@{=*16ySpNtAExqVR7=97_UF&(9 zQ$#}yIJIr-DTXN>1b2w&9fT0WcC+r+n<0)vY#gpSH*N1=+G2Z2Tk3t=I*7v*r|qh7 zfH2-(cW?z1*EsnBHk-GT1V8ybsGU(yAC0w z3^SS6y9*Df&B<0}iu3H&#>4uer-*lVci3VBPaA5HfWjR#gZJ$;b^~GGwJ>hb2j`H& z_|&&>Jz&3{;^e(2z-byVmEm;}T3{oV4Wt;Ory<6;jpH;-F%S@W?8haX?47-L{B86sHLQeA~2$aU5?vwZ7X8XwkiR|9k({``-ufQ@|fBirH$AA9$PkqE@ORvV;pZ)S@ zUwrb*^{NZkFF*VL{tj-om*4#mI)8g}6PgSE;=8L4zW?k8|7v}8L4Woi!!&+14&VCz z<@Z-jkAz|T?9-2@X?XtpY1=(-JDjHJ+4Zx{5I+6%)4T0go15F~>+3KL&cQTnUcGvC zbMtz*{`t-8FK%AHY~b>vkN(Yn`S1RRkACOJ(E6Jpgb*$-*QvBUfI|Qv0KCtACxAde z1igc&5MR9a9dtfrCp!R&5eWd?9DxMF*tE35)_Xr}r>5y)r}|HKn{AQ-t?y$Dxbjy| zo=p^|X=u9k!nuaL_dbS^h`fVw+|m@fuJi3hI!2Vp|N8nG0RH6PT>>E_ejOvCgAlfN zo9pZAuWr9cx9kqX=JoB(%iEy^*RJ~OpZ`C@-B;V2Pd@qOKWuLAzWd>CKmG3S58*p^ z>lb&!7F%@9^AO`r=vJ5SJb&j$zrS7~ri%PP;I=UiQ;Zm1e>JXG?GNAm)(3s_;}70> z_3D*(%^&{ok5*UDh8V&3{rV#M_Ve3e8aCUz&5O$?n|OPDef|2?S1BKP)wLHF7s2~< zXDsJ_k|@cYKC=sq**(B+@3R<7j&fGk-CVa?iZ>~`IBQrYkx3g^+HW)$>yRu+`l_tW zECb_8rgF3FqLN)J_dWY1m0=)%m{q@EUvZ2AU_RpK=BtXV$r1O9EYa-El(qHcUzvs+ z=VE**K{y?ZNKxD5*-7p{n4NU&4NNwC*LA}%yn6L&wOT=>X`1@JAHqaLecL&=D=6fU z+Qvm_QlvF)tDWc#qa+@wLaFSwy3$rcDVRJF+V_E4g*&Y~pHbiesG>CTsERUxVNM=N zjMq`zWhN$i_H%x^Lw0|}7}wy}>nm8N36gUTeT&^{Ac&EMyA1&Jec$*d65ZVmFB^Y- zeZARi5dG7qPgko|2!tWVI3VJ>=_i0MUvI$q%XWnb&2|_!TjyNAi*rnAQq$&cweF@c zZ0BPNDcZ*of@zwjF(wKb$IoFnVuaIARy9?frb!62;+8$KvN8en2sE%Aep2G z<=9e0&a^7y1Szb=4`KGmB(*Os8Q)cv0(EjPGXsIS7!k~VJsuT`4EJ0Ym 
z%!eLQ7wnps)R;(xi#A9yW-3DA^iSU=1#6Z^_s4@$Rz9ovmJO;%0Fx_IFBgL2tehw+ zK*~r)TJ*$q<>@PiIG2@^ObNv?2zjc2Nj>2?;g%~M@y3xBh~fhr58%I&1M@pPjI8}B zH@p2S$FJU}{o28blb5mrvpLRg|1KPf(yWVt_OlM+FF)vN(Pu^ z%OxA$qKp4kSEptjI#Bjj$l=L1WrLD9NnACr1UCrb$iL!kl{Hgg->s~)3pbzTw-b~g z@n9!t!x+5v?D#>5Axj2|fMs`bS!G{5{p|Q9hC!$zN>M>kUVubiyrfAf7bNmYWG5)b zk{U^2drOmyBK5E2gpyR4`$=X3b4^IQp&+MRsW%mS$DD^nDxY#)WKC*IFm^B{h^8u| z8l>G&Xd-QuUb2{1>r28>TWZSk6U7G<#g;&QE7w9k;Wlkj@Sz4vsMr(7S$-&|Q|d9x zw-3IrjDiZD6`}Hn(&?ydJ8HR0s<@IYa0uiEDRmkP#b>he(T=S`_NAX#`JSa`wbn9K zTz+e*TD4yiuS&~O;bAbWo)*LgAXjWn@i+AotLIph@)Y8J;ood8rQX%BRa#!w+*T8p z1CSZ(io@IGlk2N?ip%0ZS~iGe3Io@UdM(*@jIzF4{S$&}UQ21``LDVwYn%b|i% zIpjK8V&ilrd6wErE=Eq%@G0ayMRc~(0t@c+H;gfb{&z{(DMc*A%{cOTVm=rR0A1H{ zqLM5nByq)CgE5er;z~+v74iuCzTcUg4kPT6758#a-M2qJk0kFQ}jH~DX0DTLH4$%3Q5a9xxhqh~o2!Xn0=L%$9%B8~dv*>pY zLTtc8-}EwO*{wXCH6ytC6m6U;az`>Gj3>*~P^>4*TuqHozB8F1qJePyLI3 zGmx9GfAXylKl;J%Jb(7y+WcTX)|?QH%*Z!IM>7&yB>yNm;!-ALO&7^0U>m| zJ7?}KbLSi)AVMG?!vqmLV5AVnX__|9xs>qEteJ>H7!h}_mM{hHT+_5j2!RNuDem}q zB=F4&8i)YXxMl7P0Eo$ZejgCVL<)`TpS1mxC-2gCnEA5n3K2JVH$?R0$rI;2#rX2& z%g;XhEXMflb;OHqyWQU1y#jzXwy=TCF#H4b{c5#Zt(vay`@UOUbk2nkV(+#TLU-Y> zT-QB$_sRR-`^Ni!`lo+-|AP-M`tJ4X>+N>Cy12T$yu4Vg388J8_g_5y(0_yg5#a9b zZnN1uyI2#^6l0i10C0eaNFn;JC6R-J;xt*>_}lF^l@wRLDy1AKD;kcp3e;;>2~&p| z8KikeqI8LMR;t0v3zJcJ;#9vhUu24N)Y=-HvV9IqaP^@i?QE@E*@zN^NyhW#Kf6m` z_pNl83$Ub#aMWGe$xMwL;Nh@PYL?v};ILcufau9#I^sjlxdvU+v^}7wFiDjqHR7AD zbT~chK9l%S`Z_;X3N(eiQszYI<~TryDU!_00lWi%X&6%!aZm3=@X4Fx?wJEcA|mjx zyA(@gY7^0BJ2p+@eDB*9eV=M`fp=~qbm$x)0(zjtuK>0UY{w9tA7c3Q<;yQ$zY2&= z+ulsihwUAW!)4#T|4#eP6YmJ7*mXz&aG1u%MTo-+d)NCgPEB&BZnq8Mcb{G$BGE)d z9+3Jz#t7orgd=ME79u%7=NpIw2uZ3sbiQ#3T}|%+03mj*2k?le6mv)zMWNUUy^N9@@73yHCc)Q(l5u`Li*=8z##H>krrnplX4#c?%S>i1+ z+{Fwqgpi^UXtIO3<B&KIJM4ayj6(Tzq-mF}Ee`v|+)9SJ zd_g5xrBGk0MheE(l>@%tT(gPDx4FpGR9Sn9q$@zi$7lT%VB{U6h_kAdWX$>kBpc+B zr{AW#HzWbe$W8i*i~z|)Exbla8Zx>#=%rG#W!9UVuaXN2nToL}`* z3Dx36;#0Xol=~p7c<^-MetkvRzBTOSs>%H#=^Xa`!d6jX4TrN36k_GKOmb3r!2LW# 
z>GY5D4Mr%gnBqp%T`Alew=5YK?;2iZ`I_W%JT>X)Wd94(gUS-+*7#1BEZJMkA5=#r zb3+rwkt571y<9*^4awz-D5`PF&y|B*6+jc`%T5sXwNTp#iWm>i*LFdLQ!k~l*%3mS zgQ;c}Lnxgdkb6N2nh^5Ndg+3*NRZ7kVJk^$*KRpBV1eLJvs3sl{aih-P=(@1Czd`-3>k7_9rpP~w%=@n%sHjJ=ClAsR8a8Y!fa;BWwPB_ z%&V56a8oGeqQz4B*oDX{OR!H-@*Q=0OoAN`7}ZYBH%nBiefe2R7O7EK#-;EeDVAM} ziVP%IPj52EPG9jMT8Rg?9TXm<rrZrIMIzQ1*I<~;h5A^Rsi@kFwI$j z677m4(hdNC4a?;xdcGna~QJ?}5H=B)fj+3!S z94oYm7m(vKIGbz|1NRk}#hLX;0RRvnOuM_#QcfEHpx>FYM3_FM=|wi+5h6t*LO^tg zga{GCMBo7c5JmzB6TsvFAOd2#12v|S0D$Pg1Hg8OfE_?8@9h8xrZIUeJd7RyLPG%n z21FQ!UGxG7J$674CjuacKm$4O(=d8;=`P+#0B+{DX)FL90LW2{fYfzehyffDK!~Af z`w$_<0RS){1Vrq-+o3eXv1x&bHZ(#5029QiL72wi5MvmV`#oiFGu__Xt7_XXIm8D> z+S$dHhw&}1kvxUB6A6=2y0=oYWJqTGv`Z#xI}ZQ^VG04l+>ygo(*);&xU`d|iKeN) z{RGjY_pb2~XxL8i?lx|>)5m}J$tRznqd)%RKmLdR`zM>t=F2ZX_ug;(x1N6cyWjfo zd*A=|2isMT(|9w4&+oQ<joR=T|^A%nydA@?YFPqd-2`( zSKGV0>#sf!AzVFq)<1pMbuWDXPTw~Uo3`x%pl#dD=616gV~m0D>dCX!`trNq`@w(s z@BaI~@3-S{dwcuI=bycP{rZb9U*2uD-}&D6-+lh>x?d6DxOsVZcbAO++s%ex$LwN& zi`DAp=KA*L`s(WH$A9>50cqS07f+x2rhESUIrMvI>`j2kn zZ@t>a>E-JuF@E~xFR!ovh3FF>@8Y=e6Rw)|4}bU1zW46Ct5r7+cXv0hKmGgvbp6#= z)370IR;&JQ{ers- z?PH7-={m+30}=s52S^Yign-d)2At!K(rsE2!?Jjs5^uPFI8>S_O5j<(#$P4J`t9v) zGJ!5GExVgF6O+3ZMop;UwVk+_@MW*~WjUjo^R5jSFZeg*V z&8n!V^)|axJy)PsL5g!}cFGr&K+5ti`HsvYk=bMl2?_eXSA37-nrh5e%Sl9=_UvY} z;k;=SXY-{xo0cPS)I`rbQwBa^CmkgUCz5oP6lW-zG3=he{bXue{D=14I!fbLSuKhT z5D|8ae3lm>I_Dw~?T+A4O7D+AK!AwgFljwIRVn$(*(r{I4gqk_Y)J2?Vl_`XBHG?; z2&h4z216u>q3iqm&83mRLiLc_Eu%C)S z$vY}wz8!(G|epCgg^nq;&Wz?Za zz~OJ*T)$NfTj^aqyY$<1`h>ZGe2p8C5mtHPLAEcMiCs+#HmwY1Nu(&G(hltxKjD)W zDOI`BjuS`35F$vq_#LT` zu-r_+JUIrN8;El{2OwFBnpxk}(hw|sy9Vbz(=*P;GtOBEyM#T{1OrZb@)v9qtJg|Qk zh3viLCL)h5fkAC0X-UoyhJE zrBT@YV)=e4{pjFCD&wrL*HJ&uwnN6Lt<*m-{6TUdyY#Rh^)In9*<p{j2ClQ5x-= z5ArLeFY#yg&#b z`q}!@Ik$5K;63*rXqV#0S7CVXudc58=YPCz8wZ3C0ey@xlJg!Kn9@1N7^C;@>gww1 z>H;9f5M0XKjA56{7NKdHyKCPxaJ#+vmrwtB*j|79@lQ8*uQ!|9=l#Wcy}sKHmzS5{ z|K3Lc@cQ-ZzVE@K_s!MS3h~=bw+aEKKu?~oF5Y>ECZYx*PIosq=YgdVsGs*DtSMz5L|W7eBjx^|S5Wr~kYE?RTHQAfmfr 
zc=x>zfB2(6eDU6QyVAm)$B{UVZ1o4?q0y!|%li(=>hh`K$Hi#V)7Px8AuJ z0>ucP#%UUcVY}UO?q`gVTn7N&xu$VV(=={JftTwGW`D`;l9pSQK|g3&sXVN^yE_{X zm#wKvbKIt%i+amng=v}sx>Q$*JOu)vIi-~RE<+!|p+`*TyH=}J2qBKsFbwHv4SR?- zn@x;yy}s;LJw+s-wrxLn_dO|}?v1w;1Wnw0o&nA7?2_@2GG}ql8C02Eau=&?INtJ7 z&61m)-8T#QNnKr5Oa_HJ1NJqNta53^c9yLtNSuw25K16ZOP4=bcRb8yE$(Mla<-;Q z{Y#&>+pU}ziL?kMH4`X2bNf=2u~?F){1W@+A^`Fsm_rz9m7_TtuM{BJqa8c|xaPh# zE~RvUv{DfPfB<)a!}(|=t0@j$L?D1jF-D3_K4&m;EJ2bJz!b(9<2DX%jIL{(bAW(| zFOkScLUQ0P$UQ~Zc<&ruPB6xHqzKN9YabCJIwESvZSSD-9vU}A4@BN!>%0#H0pb`_ zM?mBpIK*+h4~L|T4j@7z*Xpdp^skhsR_TWVf0N%**^#6cG;7#%vueAk!6>!6qSk}a zZozO}IWU~vnoOCpvOn`T-eTNBN+;)1IZ5+Q0RuKIVvLE6Cz&F1xL8qI{8DHB_~ro)Tl}WlTY~S1-Z~NGADbKKbLXedfr;;6F zxoBxTr2&-EfJ#t0^HKVFc05?PV}rMVOs`3lnWaG6@4{Iuhl+@lJ(jrT63B{9%X}cG zrQl-FR`IX4kraMSHvmlme zv>T+(C)H_G{@nLHXD^W`Bp}5Khbtg=+RQG2B}J*#ySWN9dn{#ZT>P2#&&F{$!FEcy zGa{vMj$L1==h*jc)=~ETY%h`?j*^>DkVjPZBbA_Xs!44nobB9$^kY|6Art1)jV1l6 zG}>Z#pagaFkx5ZesN}Cw_MCL;rRPkJl7TwsQg&N<(o%Cz;^c{_aeFaUB3d>6zUMH- zX*ZZJ$vc{fh7geuqjS(TLrUBCExf+H_TEo%3^cXadPMS#Z@h0hT20ipmlWC|`E()y z#SlY`6HPJ325_AHaY-Wv0CeuPA8&7CmPP)tZj78?47 z=F-D=cU#}}1hMJ;cfRwTRrlR#9A16(^7X5)KKuA*FRx$S-QA7j7y~_d{^Gs&-s{@c zG~C_|qjSy0)w6DWg}zP1KvUdqx7&e22;1$r-As3P1_6C&pS0_Xt7pwK4;Ww^ zLp$AdUDx(ZFp+>LM0DNV>o33f_Kcd_5wI99r z?GQqPP2cqZFl@&dsP)Y_-oy|f29I#D>aQ-=yNq~$fo{%R0|eMHukARztwM=wuk-mE>j=^4Euq{lriqzE;bNjBmjco$N_-|1VC&M zu}zu6A&kka>bh=M`v?I&IP!?tdL}fXoFRskH+_${dD(J@0cmzKaY%qA2j%I%#bE#d zKQBo{K~%Dtq{`()!C5p}pZpU>se%%lRzJ7@W|^ARoLKy4-?X3k6dTGbTlbYGIfis5 zQF>BXglEm#uIo_v5c$j?nA7*^LpnTYvvK8FNGlTTAxgPMa$u#|@#zzO==Mvgz`pWi z_m`V%#M`7KoRc?YiuCSZSw`0DvIwj!D9H*vdt@PrjIjvMrW?>`M})qB^CtvHG60j&$x-&R2#R?(XiA@a60wwWm01So*dk z22x;~^?UZS%XIO_%2tN_wh%UQ*L-1z6b6&Qn;G1R$AhvXPv* zkk(Ric6j`)N+aMUkl|a24M~&oeP@cCq^~%eo;#BK0mLn0eA>)zBK1TDtXWr?eHNS#`p?$klRSE;WyTuC^ZEQZBJ7al$*An_?G#7gv5n#jTZr8+91BC9u+h5`F@lBHP12kMQKzOsBt4b0MQ{ki?20uDFY3EIZN zNDUsatO5^J;+Jx&B-bof?O^IlR=Uf_79o_U^R1|r>!Q|3;aO^`%m?`1GS#o;^R%DS 
zWAk`Q>qv7dl3g7TQpx)Ltwh856qj5+OImUaEZNn%D9;18APW}BmMiMI#emeipuktv zG}Yj({UX@~?mN~kz0rOOg59koAUmm&esZuXhC+)il9|BzE$jPbN!V3lqh6Fxof>C8 zUkne_(NqGQO+;Q}rS5a%>byLEm9qAvmdUHaGx6Sg=To5xB!GQlnloYd$nkz3hQ>MP zokIjlTp&cEDR__MTi3pPb$!*YAOIjvjlBg_+uznL8cJKNxRm0q!CeXzX(4EF_dsz7 z?i4B3A^`%$3k28TP@uS$w!z(@xVydl&%NLI-o5YK_ud#8BN^FqZuzaTlPzn_MIVxr zM`@M$Lo6mPit2e}r-V8QI}w@~(^LzYzq|(_vf>lsEgC?OAj=)kPelQO5<7uTNM)hR z^YvmuJ+bi^%}>Ddi;iq>BUNAUs!joT-#-oc*(XvNxYdh68+3F}d%qYD_$Oj*#<~A| zhX`QOaln)9(d`#-ZjZ?)FVc(2`z)7WCA4s~wa|{iyrU(=_V7uFgWKtZvSVxIVP`7= z8oFpm`!TJ{r`FwF3vxokCoczlml^`!z0BBB&+a%~ziy=Wi2_K4#1lqx(Rq}$d*emX zy^Rs{eYYm)v0r+M3!sx(YAoUQnle%3UyY|s%a7xWt7S}yx)Iw=`jQ|ojEYmDUjRkR z=K1htw0}U9ndk5gPF`V3yH78C#ZR}U=u*Q&D~#n^e_;9?!drMRE5oPuV--V|u96bH zq~Up=TY4eapnIrfH5LOvbozAl;~yA7O%EC83kn5Y>JBYSeXN}&ZZu~3i4dI=is0(i z-!aylCi-t7xU_Z+?DZkdT2H*>K@HuaDx4^dZ0{bhy5`zoW7^y7t~@G*0mC$CtYIUI zfM%I;XZvI?9NuK6M7bU2QKblf(_T(qRsQZH15SoWga`H8B0bj5ChEcMLZpjzhv4I! zyz5hPyD7~m0>+*u=hj<#v6iLTlDKd9qs@4d1k3Vqg`_QjrzlliORA733Na+cXMZL0 z&>jEO6BuD|E~%)x#%0Em@M)xZ(>L{9x4Tuo`++Gh16V}6TE;vxQ2|s_W3WhW8|ylI z5C2YP(VOvRg23NIsj-lkya(e?B}H*6rZX{k#^79#67pidoovMr1M__ZI@5`y4?#B< zQmuPL<61czdLo`Wk{56#3=J!`DLjA+E4n<{zF%a9emPC)<%hDWGqFj1au+3dF|~p4 zL@?GbtRI+r5?|uKsZ|TjW56BJ^F=Kw<2cN}!9L%pH-bkO_s6L6kOoUrZ^Yn2BSFSTYS}T#ugi0>JgSxgPNt}{xoG@|%wZ3R^ z8QW#rKhE0J`ZOeDu}1;KZhsyMDB{%{9EJ|X39z|LhqWaSxU67Su%PRRzqgaSD+}{7 z4m8lYg~eF!U&V}wW8)))>hGPxV#aK2Q=PPg%05-tS6d%jofG3A?dVzub|yf?p90%4 zZtqkPpkeKld9kJg_!te&r9>FEgF%G60WEf<*B3;YV@Jvg0hV(|Qy$cy0Vk!{`VXA9 zil{J&ot~6+U|*GUczu#P;;;2o6TYNu`<%?VJ_w!5Q~L%~L+30%6uYOgahoQAtD#w2 z+fs%Ms@3{o>}t|-p0b(bw6%O|Kg}Fu1i7S54kLYQADn@OKYs0>b*Q`{+BWF?ggM9h zr?WEchZwvPAdLYBr!pU+u+;IpCm|9}0s#E>gH*VW;seG+n6=3V7_rL(2G@S3kT3V@QVj#6!5f&v)P80Y}9Pi4M~NM9+9{JJP zrNhwsUg9uN5w&J)I9|NA`K%Qd?)(Ll?}sQJ$(ovs<7&|qkKOEVpGbn6b~BULhq}K8 z&JWvt-tgoxx-P#4DC9-S*Jbgx$H+C!X?`YPdGbX4uS5VYAd2S4d-<*OhW+%yPF}U{ zYFzIzUY07h5X#0dmvc`qfu$#=AD$l78fl2HCROzlh28ldCFt4BvF%uzeTT}eiL&6* z38g_%0{jaxOX{1rKUo!YRRmi57|{TQ4wDP_G(FcGl$_Tsg}-MR 
z5plAdl_2|9=&j1r27}Ex+s_QjslvM=Z`JzBeRAi&LEt!r(cUIgo(l)hV||0!>wnlI z^_%!(kDtqWThO5cWY(k1)lxfx7e%-L00jVMtGz*Zh}(B>jCZM)E+Lp@UR8x&YYD$U zdh@CMr3(K`bYa{@FIb67|AJ6*wRC2Ig1X~MK7l91U0TOvX%)Is>W1Rl`WzG*B4wx~ z1@dukLlP^7Celdfp){adLnjze|E+DS*J_-q7}2YUl*6LfSnF;268og6IrvV_(Ejj# zf$J02m!}~~Mv?Ej4)tG$Ux!T?xwbvycm z+;XFL`)|$n_C|Q5^EP~S^x*|9S-no%EdN?=T4@iq99@U4=~6kbih-~t8}MuLMx0uSdb2B5=n;uoy(f8-WZ}F45L2HvVY(qM$PVEKd%+3g zk2S_7RN5jEv+v;1I|bGo!3jrny6s=UVmZ8+A}w?z2#Wq{A0kcCNTWtfIMG88_j!NK z?&tFc>@B@`XW|gpwss6N%mbm{jfz8Qd7r4Y<-E2)-_(Y!R+537<)^pcDP%jFh?N zqXc62rX;m|2U9Gvi5e?LqHyO zd|;q5up7nC0Ljgma?K5K#Ok_swOb<$c)gyStW+XL+dWcWEonwP3pvwbcrvX{V$b}X z5%4?@@U44MWcrE)FNyW(*{I0q3JJhO-hQ?a{X-!k)$=Dzsr(MIn8Z)%WSLOMe{LEB zGhcJ(c*i(Z=yUTuK85gd5y7cJ;U&u|bAD;=H|d4n2%@WZTilw|dj5EoO?w1-i{TQ7 zIJA<>>R-Mb6Ldo5VVpZFd+4gVamK|fv^%RfU5sDj{jz3X-?H!mkFW19TVB2$)u;W7 ziwof=i;Lfb?l=0SoR?5eAVqq5)Nxi(Q#{+2o-@L@&!c1(iLZGhD%@+@cCcnG(&(CE z3*SK8GBf?31NhWJ{)owZe%h$@*~I-BAaHKwCX{P>LV-u@gO+!nL`WeqxPvhKTmv${A@+t=#Uq zNpI`4lubvE393Go1w46z+Aor^f+|+xw7tkX5=Iu*>DEq{Avd7B;;ii6iSB7(t5Y574h`m)-vA&g=wQ0a3c4VOmUQ|co~K(nA9LZ0Woni^ z?3uaPDpW;I2(AvYBRplEf~vbIaP+&Bu|X0)?D=M02^VW$HhR; zYzWZ!x|hm7XWe5@BZ5p@f(7KBe zP&2VIJIodzD|*bFtJAHEs319=+>Z)!C($j7rjrGfb*Y%d)5oS87DO=1xn`0Kb zlPAgYupDTShZAQg|*$>;`ePj6r~UoV>~4 zfECPSplW^MCtMBwL(>bpBfnNXP4CwXfPfUCC*`_6h3Yt^=_JdRe z<9qsB9qD%##3jEprK49lFr`d-K3IRsb?x%85@sOezGiYLi1sYrr?cgH6>&FQWl$B9 z=fq4~>kpEu@!7N}+gP)y#Q{Y`slmz3y&R#m90LbUetxQUlQX}722AzJB5f&Mkeig? 
z+O-l+juZReL&LC~;tNvFS{yD~kUh{sNhl?&l{q7kWEh~hnI=MX+TqG!Q zb{*M7%R5@>^2Ae*(`qCesj-upp1S@E@*i2zN<;}C zlZALe;y^E-bgPfIvo#N=v}aAMnwv3LZ?l%^m9(CbmWA)d}iuyk*`B-uDyGUYWZ|%(z08g zKl$ijF7d@_janWcV_&%6V3)ox+VI68oh6(vZf#eT!7_~d?ezSj*pa-){SCz|0U;Wd z;9_D}p_%9IMAFvUa8=N5nDhtTU|D(kmMBF+$se)FL*EzZi0BvOV@8u)vXdwq*1{|8 zS_;vyllq;;Ixz=}eru^H*REx|%@e#=k(^!1Y5OqGUG|}9gSb})5RQv94wERH2v?Nh z67LlSsv3F<-&Wk^Li+Z-5@I<8cE@JhxwC)7vnr5FMJ<|}^q7-$lNBMTzgwG?~!IDPz(KSPi?>U)ciwCM^?;Rbi*GKgJ zbnVqD*6m|qUG|Y#ea1{ITxeNaRE4%i@rR%*dVeZv{mdwSf;Y#)@P#B1lQ+*@X1Q9a z2BQmenSoGd@9owAy?GcWjpl2%mlhJv-!Dhs6p*9$0`LaWR9Y!zTp46zq86L`T0h;j zxqxjm(b3-5wi+7PD=I!66*9Pequ_SM=^%V$sq`h%y;w(ALd3zlv95eC!TOkQ(9lUY ze@F2?MbB%P7SRFS7CX<23G8&Nu4!R$xz(US^EG|fP*Pav;XP>Ncu=9zkTO&WW&fUP zu%Oq!HeN#QHODSRwS|0J=^Bw$enQXMHF2$fXvtlWrcb2g)kfr6`TFO3s!ekcp&nIs z_#tvt6&VJ%JrIxCIVyD<7-UDKAtPB|;V!`5xp2U@`AJi^#?x@mibdQk^)avQk5) z3`twp3de(LPNFUe_gYCwa8h=;3Ck+GYQK8NfHsiBGsn*pEz^6p*?O7x792|@231_8 zjjdJv`oz3byDlK=GaQ4^F!ttd#>6QiMO7AMwHTXq3{P?S?3yI^!OALl@Lc}t^PS>^ z4JPD5eBZsr_DqX`(-nv-l0wGLj7`3T6<)7PeHQqm+)Es$ z6R%9H^lM_pa3Ya9+nSM%BpOPonD|OMRU3CWfctzy;wpXi4Gv`;5y59at1$^jo|N*5 zy#&x%SaY%Tx)<39r5Tr{mxA@}%od0{HKEY7dTEHEQZw788!u5%F43PG}%r_m@)#&<>b11emHN$uXP` zHT7_%1n4{te~30w+|>W?NPJ)%62mzm-xx?VvUr4P|fzu3n?o=$Y68E{_noF9bCS? 
z;~*R{`8@)9z}qGqLM^Osb*6s-EtnyEeS;r%5=tG-DTr7oaGnCz>fW!-B-T$OgG6ke zeE+q|il(vX6QjymQ&Hfedz_-44Ec3f?f%ZEs!u)+kC09tJL<2^Sda@j9w8smM>+hj zND;`7*8R=;$O!3zIrblPa>rn>8Y{xq{g0n30&_6P=uxTRS-ngH^<2B*lQNR%&yf}zKhkLdPj{a7WEnoKL%n1bLZ8J<4U>1OKTm|96O&!Wop6d&CfQIe*wOv2frf@*Y)OY|?_ z%Ovd$-h*&J&y~wR%HMk9vy~1kI?)+4GKZAEDAkWAyVkwnORgjsX=y`ZijhgZkQ0~q zu&qnqV9GDcjanm)v5dR7wuAgQ;w!K(jC!EQpYo$R=a|>*4KFmz?TTk2hIgdN_95*1 z)^1A`JTFZu^Y;v?7q{@CE^L0wrpTVLmQ=*T#Du8kulECqYw~V*SWj>3F@bPGHn+b0 zRQoHNS`E$w1dDIooeKuQV40*NwcL{Och7+ z5Ub#nnE850;<@gbvbME?lWyD(MC)IY)PWOUPPPqu!+{-?XQqwj!SJs;7K(oH;D%df z<$b6&kfk#rkPJj6daLnTPdx4B8@1ED$FnfmR(M_c+(Nsr>y3e~BcWzzc0INEBQM{c zR#tj0b+dJtB$1J%!2NJG!(0DoS#m@H_}DyE>ELBqG4GOM-dFr@E!UR96NOyyNjeY^ zPcjKaA4sSz(O@X&#q@z84iaW{aHm0iUHvNxF{h!VBf4370qP(%W)IA->o{=ZSN4U6 z7rcxS@2T0WtffrWpf@!hFDlgPw<7G!)BvP2otr4k{IhoMlF4-jh{q1+na()iK z;q>H_=*+@>1-#q>-q&xxNwPU}VyYyJ*m&WlmK*b)JU2nNnwsY~+RIepZjSlB%jk?>{ z^eg|U&)_7lEMCxU)!84Q=s)4~S13IjU;?Y96L1*uO62eP>HM&iQ~h$W5C=^t@UL5p zGZdPQr>*vTdP~{|UctOBd{G6`tlK#}**8m_)y#qpiNCr`aO85J$v^t2dxkxWA;Ab- zGRRXiIa*?llu;5LFIdtFGTLbjcRH8CS0`glq;sgS(b+FQ@8!uI%-8PV`Xdv4$S$AK z)8_Yc2uCag$vwIgBbh2M#y^5(d7FF|-h#BOV3Cr38j8mZbES2pf%V2aep{zGSv)oP z=&_`9W7JhJ%R_fjLM0r`woT+kt(?vkFCg)Nyfx_RzGwKj!}&^12d0pv^)cZj07L0Q zbwn(9H&m{MUzt8eoug7`#xKQp#;n9}D|n&K?D9L z7uxxEqz{8FF@dj2FBC7XI|H+X)+-7UeknIN*}cWMt$)-#yK|hvc*jtGX+eI4U56CK zxA*=W12kF-c4`TuJ|dDb!ye`mbFy-SH#GarRC-2p?A+@f8L-le_WV*dpUPue;*86` z5#{t#v}39Z7CtDp<9?w>70wo;9G3f0v#Dj=1ZK{Gsd7;@$Fm*HusiCdr)h_H9NOC{ zpGIs^uNm)BDy)Pk(;HRLE9)Ut!7rdFxoqP%u^7B}GoY`U zEddILf#eoEJw+2+%v7oN?>|CaifNK+Gz>=f%1We>c6x~*&b}PmVNAq(aIDBI{$6TB4obp_ zzgqZH1oms7d=qs%LNVUn>>Q%eHh`ea{DoUU!4B2k4^Vz?9cp#i8lxsagt&!|3w<4D znxIGv)J~ly|(1vlUAn zEqXqlAGR<2L06 zy81|#OXu`0Q-M=CbbxlY%wWT3eW12a0AEPCk#q@US2RuHvWY*FnCw?>9FawxEe>M# zly3ik)+9@g6eJ>bdG3pO4p839ewl^jaiP7hlSvW|R0(A#& z+sz1IB;c%pnW@(=T4$#~ahwlTy!rF3UM71aO-Jv0ozQOmdq&Hcu0S@{gKc=XBR$S! 
zV2qhnd@Rt43_O@6YPFAjjw1btL)ZNPfn(BQC8vuE#mTDFY<&AJI%hy+W0=`DOIRWY zZjLK|LOfH?!O}ShK_gs6m{0>%g8^cJIZPdc>Ekw6H`ARspAZC7~RLY>SKF=wQUH)1H>`?Oei^&qv zL#OMD>qE%(z)XA0gVUcEre-q{>2V&vr`ta~hWbrUJw^Q3ACKkME$&Kx<>jL-s`>;h zAVb=}yfrCFx6)91ld=9B#!;@5(AGWK?h;&__KB)Ua2m9!tF#Qe3OQM;Zd5>0j$#oL z{o>%(cbv| zNucQ%H7i7t+5ck(G@!}S;m9_-is6subgOoLNBbekE5y^8qGakMx67C6C5+7O3 zG=g=kwlXLCrd<3HjT2nDg34t3B?oI6e(J3jUtoJ>NG6T_XkWZ|s~w)isxMsuj!Zr> zpxA=2*%=!!65=(RaGXS4bpJr4(eK4}fNquFW-sgC7M}!VY2e2(IecRCP0m_=uOYr~ z;`caxK(qah18ax~58lBrw}&31!ZD~r`}pP#=mAPjK8>L~E{2z^brfi){;gdMx^UNB z;`|bR;eKu&><%u{LJGW;E#dg2x)pPprDQNh8C!L$`3HYbqfh`CveN3E2R}&g)YMN^2Rc33qqxzxp zf|}p&tdkcX@!}VR-348qtsu9^3nutl`KT zZn4Y{IeJo46|vZrAZ(HS)@ClL+7GFECqq?^f70Y98ZOI=3-8Na;Y{rBK4-=5JljHU z!Rb5OX$n=`t|#8qmCdBYJWp0bXKN%O$J0W}JH3>|QSygZe^6>4cgUXYWIL0ME8^n2 z?JWMF%ZG@2q0Gx~++)^DUOV%fZXPY>Iq0@V^CJfi>JEn6dHc5mmO%NQ%^%C0LY434k3aEc{(R^I#uSx$-@$0NzQsI|CmCbS==9yHhY$Jp)%`dTY7$g93fbK95w z)~H?AknuWv!J<^6nY(-^9CdfOHHLmNUoiXmt~r^M6N6F2Fxo67X7R{{RNLBE*Dz&% zzT~eYEfa13doct)w=vA+RAQ0_y}P>tf0X~U95al+jJ;V5bQ8wWFc|DK>Dv%do=%_I z-h%;B$lF`-sCTao)j76kRnh&Wt*pU8VfD^(KX#8><1eV!rh)*tPsfc&&x(8I3Hl`M zn#kEI2t0c~FJOi+s+~zrR#sM_y(+{#P+J5tr|h47Pxi!NX~~gd@@{y-He7PDCyp?PO{QtC)bXp3T5s3#gy+YJ8L^Vw_v5!vFCt8xUd;}Qx| zifmAwwjEIULaK@ghqX|SJ_J}uVC2wJIQsjf;Z@$qe-W`2h}?b(W_sAZ}xS4j+5EX1EA)Jb<~pw4um zw%TT|VhQas74?E6hE4KXu!9pk%J$OzBn;#g{1iv5vReqkh#pL0Q>k&TBVUwP(p$qF>JfChV|G zGG!xf2{)PlB6h*`A2o#&G-Ek!WCp?Jb7uVJyU^NX;5Q-@9wEF4aJ~MGA6+ccn!Gw9 zcSLbs`jdg(cMpnX)c+iGHqkW-$Z2Dw;MjC;rlT;$IcK5 ztXb1`*Un}sdzu_0JH4vCZ3ez5qqfFna#&0lSgdc4FMnlYfQz5I?p!v{EOQF}4p-TS z+P7Li8lE!NTREhs>Z-nepEYEd1kV``ZeHdOPRT4VI3>F5Ha~G0{E`u<#^Rbnot>HQ z8EczrRNuW>Nx}NhiJqUP<|rPGpH8jD!FE_e{pr3BImr2)0^FdnB+F{bv!v8QVT^5s zK9Dn(f#Xs`Hx$SY(?K9NmY^A$i-GW?+|Ou*ue?%E)j5W&O@a)X?Ql}z7s@A!3vl|f zR5r)rwx-n!hnEoP>Pp8_qMM+KQ|ya92RK(lxQjI_qTn*Z-I%``7@@7ZuixsaM4q2#Gwo)` z{PW={&cP^X4P#+-xJUzI!my-psZ$N#P-Q;oLddg7dGd~({>?VN7eHV1wMt%jW^ywU zrYFY{p~u-vuKahX9VRM`ks&6jv5KG4N|}0=(fcjj@4e9NDf3l{Mk+sIPaT#Sq~-T) 
zecH;d9bT#IcVZ<|6FNjDAuegoY78MZ7m8h>yVZK6Dd#IHfNcN1e`8KrA~lNgH5E0L zWGgGdV|(H^w57?xfHurjsf}@+khqtp4eTAwtIVY`Vnh@KkJnZLlVg5Vn*p}S`S(+E zxW8sqtGJ03M-<1!>3rI;**V!Naj29sK8e#|f#zzj)pI{bcsbg687Q#MNu_k7GiR3d zDXTG@Blg?s%s_fS6+OR&`kJ;Ay-uAJ*(GCTs1(flknL13WHGpR1*gXK{d#k%PG*vLl^8g4+3EN+18xdAFSXNbuB_~0 z#c|%2E`8|$%?yr)V(YwOviSBO!$QSxGLiDKS%u>#bo}J4bG||5JM76+jBB3v#|B+*;vx>RdT%mzq;#$wxVX+~nx5LDY z8H4jC#gP@WniCw|KoVEFtK2}{8n>q!m?KcpMjXeE zrkfo&mg3I?Y7NugkY;gFn;zF279-Bf`=~w0a8l|dHUp*7TVZHV*UuV#nwSfCHXt6g zZ->>~v9n*?mZNJH(ZBw^z_szFXzwyNy)#GIJ1=>1d~|BGceSIZ4IY;Y>#NMyEXKP? zGat-SvcH2I%s0IT7dtwg8~@a+w~DN&6#wN+EavTip#u~en7Q?oXw+sZIIA9wto4DB zH*C@zL@ML^*=WCWDQmbQl5xX~_fH)*)PsQja?R8#|4u37P@nC# zeJeH(cMYrlix2i0G-qknp1FHRJJ3CISUB=KZO8s@0vD-5?e6cz*BY9!_${7fc%QLJ zrD3!bDU_%~zg4f2WrNx79iZ=IQ_4<+{Zleb^0c21LvO8EOw)mKuQT+q6(;yc=9maW zu!BL3B(QksKw)_sYmR;+f3QZj#Pm|fj6#)+0z9d9V0H&iYgy?-(Tb0^DlJrbB1rnU z!oXFU=%~80XYCXZ^YdM+v3u|`O63N}UXFRhb z?4ot;ARi!_&^UReBXGa$qI_C^%PcV#wy#W49a&}Pb$WckQ&MmeIdfF7J{NO09wb)G zox1l+^JieW-w`K;^xSj0gKZuB3dZy0D`mY5-Fb$Xq+C~GJ@WSSwVL`9a0&5@!r)K6 zuId*q%0|j2^?7^Gt|bE#EH&97wJ?KKYZEYv67j7c0Y97$V(EsoUT~ zut8UXbaE4TVhZ6f9DoNkh?YtXZzH7{_bU6lsOiGK{oWv;&_+wsoc}Ee&D9|~B4u|s zkIpDL1)^{b+BGtxV0Ip`E>9{hYwj5|*U1f&W?xy!jf!Nvry+IZysXUh_llb&D#^E8 z*?!!^OkF;jUX^CL$M1*RPs!cQrE)vh|n zW!FGNk>&y?|MU^xl6Lr2$#S8=^eA+DhGrs}3~Gmk6y9WnDSeS>$R$s%U3(cb7$#e3 z`Mmh;IGHASnRRM@(?FdEnNd6baD(6Ev=PGeUB$Mn_c!aPD62zJ2q>D)9b@PWXmb?Awveuk@vv*>()u_rcsc zAF+oc zJDe6at5*+wm3$L*J26)=o1A6dD_Rb0R7ekC7^58988*_d)nfrNZSP57!~%fx4LCpJ znMWhm=|KpozG=?|=x`+2XjuUP&&s2&P~#H!%UsXwglTHC0<1_&7x;Z^&z>~sX6jo( z=<2s_GPXE10knd)92LesxqJO%NF(NO33N&V2LoS-XI(jEt0WI^OGtgJ=T*<}Wm8Wu zbJ&%+!^L>-Ne*p!n3->O433j<#yY#&RGpNJK;n%~w8GS-XDHPF^7w#tf`kF02>kky z`Hsr*vG|y3BR~u@{tx|iqwYd|#x9!>I+?XMhp2ECSm}@2^&BYiOTiF};-le`Zf|D| z!C-&8S?V%I5rKB&;Mmo!+UdBC6J4O&yk9F8l2=+gL zp>xv48~+{pjv!0cf$q3@I=`Fkn@O%?#`DH$cy6{(ZYb+=LOp3dZke*4D85%ERo-jMuzTfjrd? 
z{{!XKzX+^P0SPswhGWqREQw1gi|xfnLeu@rGb`ekq}ca8Xh@^c1rcSAf$a%=>nqy?Ns=+A<>-S|K;sxiu&D<|bQHimka@ul?(vQ?-(glc^ZJW)yv-4AR zR3LNxZ2Sgg|Bf=wT*P73d@ z&LHK4WZGRI_wg4fO%Oqwrzpo6=ZdxkG?@&rN`()@InE(yr+F|)ePCWt50cUi%Ca~0 zyiXXJNk>+3sDEg8!|v(4vAX*FPPO{As2)_b;iuJk3&wAC-8NuE_e(XH z(4*DHK*?RVGE3{z9pG@BeL9OT2VFF}b3vlwx9b1{%oUW^Tk%ReeLZ4*Z`qe7L+CqG zMu-;cvQLf2UV!?Bvy=|M+F*rm=~186d}z@#yZ5Q6EoJ1Ke5y3zY@&}i7rX!dk< z#h3#Q6_vQlTKVu+zdb#dXrzh%6?q(4w~DA-E>XkD0!fSMkX>|g7qlLQgSIm!T~e!J z$6u{K@0aD<*Yc%l|HMA_jp8##g*jgQYpK^8b*>!E8vTH;{8;@LMxkRjW3bp|zcgK# zN9Ay<)todb)h^HL1{w;${K5j7jJQnsv?IEEmk-KAa18YgGkszurvL5-j^-pE0iI}L z?mO6K?YfFM{)a7nY>AZXz;Xkf_lMIE*vXp3OjLZN$WJdY*g|Z^C0#?3^a20vnpO#1 zQ)w*Bl|$sR#+AsO?`uC32e|v&n7i_gj`QE7k+M2L{10XiOj%!jepqsS`xWM#AF-Bt z4TL-1M@G<#MgHx)0)i8*@hhYO_^cjdykm@RgrOsGDgn!NHpr5B@&45n^9rOjF$+Z8 zLSJy#JT!?cmh;?STLLR5`kGdoh(->QlBhjoB&w%nxjAEirowVLCV_orPW1gqg+lMJ~d zL>2`*I3nIJdH4XH#%@kj6$iM88(rvspE#ErZsqWi*!eqMvaS|g3DUtnyVe-O9~#NRYXk~c-* zS)LrCzWSGZhsulkF|j=1B3!ib)QjjbEoG|^!KHo;+O4h#AN`iTu1n67n_Q7Jom@Yk zWj=2^$>(a6x-~h`Y}WyWc9t{!N;Rro$~U&PGsdQ-acftp!Plw5_fx@vu1zkXwR*GC zF=3J_S&&hu#<>P3+DxA7G~;TKkB!JvBxa;V!nHicv2Urs`}tlo**BikF_(`OcVUqs(hOJql^ zAeo(8hfX&-m#r>c{G@eY%gUIG^1>(&#q!+u__S-5Wk*g1;_QKn6RdL$V0PFJt`uGD z(e&|mg=U%4Z)gDW{+ivED*9OKW}||Khmt;YK$MTaf2(0dny}hmvR}w+^sY*{H>j5Y z@4>hbug2E+nJ3<6GB8r}&u6V|%Kn2Y5^_bOhnF2!&j{})%Xc8j#9cdkcuVls^LEfzl|_i89Y-i+QR>o zG-1BzF}J_A50R&m>`^?X+Li78&1CB3^!vIx8p8Su&vo)i? 
zJNHr$$)&m@0Kic=0p-ci`tX1((Z&*43;XU*@;`dPi0 zxk1DFqVyA`Wz}8j?d9aN0B*ep+OV?54R!R+i=>`ZuosQ=L!*Lp|E%hEVNJ(vYX^Er zk$R4^aE3KtQJtrxce{J(VSnl2I_J^1%?_)Sp|eVS!06S|g+9%*?=tug*K^sOQ&0KL zHu(&aI-dWitu$^vPV1{Z5>nZj4oWT3xTp9rWq#4|hkKFC5-!7^yrji zwhKiH1B3rtZR%fRU31xg7n5t~?rU>`x!#xxJ!Q}tAYFEf_hzS34uY&!ms?+pWPZn& z9TP+e81yu+n4SB+2KdCiQ@zk5l*UNaU+iGuvGir~e=PiyvUB4=If8!VRqDlYI6P6+ zVYYLS(a?@>nk7ElQF8>iGxELU6xkI>UbeV>zk`1Cr65x4_!WuBFsRVmvBM?(ux&f& z6;|*uDz~sm@TrKP*_%4$SBcR?+kHDZck8Uu_uK0Drp(8>7Usczf7NFdY) zi^?BVl}(zx9)yXK;;>8Sh=PCYkN(DTn;Mf%}5KKSr5NIfg3sUwPjX9i*y1>4pXAapyKpC&LPCHiic@+;{I ztV@>pRgF#A;bYhF08ZkPx>FzOW3)7O`W=T{5eLHKMj$q=vGumz%b*Fk7wyJ`GZ z(z|`FQJ4>BQbc19B!1r`I=f_f`n8AIN@#Dn=vb&08A)5;S zs3H21E7TG^BwXiO>e_4dcZYAA9dsDTbkTmT@wJ5i0?y!pm<_cR?S3`t`bi*j<;8#e zsP`<07#;i{i~dcx`_wHa`TtVKKR_3%%x?7mk81ilAMrg7LRDv1iQfYN0A>#beGh93 z53slu1dMtE@bmJCaq;qT@rh{j3y2E|i1UkZ^75e$x{los|)TQTwLko zw1ntXEv)REJzU&BsBlvPQ8cQ5(>y+SI$1hf*g1;Jx%{`Ggs;J_dMF&*f53q>6yE)7 zR8}7>oNd8xT_8>t9smI`!N3R)dK3rv|CjSWt;jq1y5a|lNBbXmnqYSqN6&wroQ|^{ z%2>=YrepsHsQ!Or0V4c7e6x|{|BUoM)oSp+K|Kzm+fY#Ue>z}iWhHLq;_3^rv;E)! zko&JsbZ=Zy69Gu4x*MC`A&dwR^WI?AWPbX*L zXlIGCD%cvMRHPAw|JNk`rx7O$XHOdoD-TZy7y|fDgpr>#4k!Z6e>H%b!zjc5C!~rg Srws}TP*PBnuaq?p{l5T_X1IF* literal 0 HcmV?d00001 From 66f2556d530547dcd85c6ea67ee35fe901bb5f49 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Thu, 24 Jan 2019 19:21:12 +0100 Subject: [PATCH 115/129] image --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 95bafa7..073c734 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,6 @@ H. Wouters, I. Y. Petrova, C. C. van Heerwaarden, J. 
Vilà-Guerau de Arellano, A # Get started: -see class4gl.eu/#getstarted +see https://class4gl.eu/#getstarted From ddc922972da174f49ad80db18a311379ebe50e89 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Thu, 24 Jan 2019 19:22:57 +0100 Subject: [PATCH 116/129] image --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 073c734..2eb73e1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![CLASS4GL Logo](https://class4gl.eu/wp-content/uploads/2019/01/cropped-class4gl_small-1.png)](https://class4gl.eu) -_CLASS4GL_ (Chemistry Land-surface Atmosphere Soil Slab model for Global Studies) is a fast and easy interface to investigate the dynamics of the atmospheric boundary layer from weather balloons worldwide. General info and tutorials for using CLASS4GL are available at class4gl.eu, and video clips about the atmospheric boundary layer physics can be found on the [website of the original CLASS model](classmodel.github.io/). +_CLASS4GL_ (Chemistry Land-surface Atmosphere Soil Slab model for Global Studies) is a fast and easy interface to investigate the dynamics of the atmospheric boundary layer from weather balloons worldwide. General info and tutorials for using CLASS4GL are available at https://class4gl.eu, and video clips about the atmospheric boundary layer physics can be found on the [website of the original CLASS model](classmodel.github.io/). 
# Features - _Mine_ appropriate observations from global radio soundings, satellite data, reanalysis and climate models From 7c72dfa06dda0b42db36400c876ab4e562d15ccd Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Tue, 29 Jan 2019 19:14:41 +0100 Subject: [PATCH 117/129] fix next record --- class4gl/interface/interface_new_koeppen.py | 38 +++++++------- class4gl/interface/interface_stations.py | 39 +++++++------- class4gl/interface_functions.py | 2 + class4gl/interface_multi.py | 57 ++++++++++++++------- 4 files changed, 78 insertions(+), 58 deletions(-) diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py index f9a2a3e..a950d86 100644 --- a/class4gl/interface/interface_new_koeppen.py +++ b/class4gl/interface/interface_new_koeppen.py @@ -311,10 +311,10 @@ def brightness(rrggbb): for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]): # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc'] # clearsky = (cc < 0.05) - # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt'] - # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt'] - mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'] - obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'] + # mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].loc[clearsky]['d'+varkey+'dt'] + # obs = c4gldata[key].frames['stats']['records_all_stations_obs_end_obs_stats'].loc[clearsky]['d'+varkey+'dt'] + mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'] + obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] print ('filtering classes that have sufficient samples: ', include_koeppen) filter_classes = 
(c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen)) @@ -387,10 +387,10 @@ def brightness(rrggbb): if koeppen.amount >= 200: print(ikoeppen,':',koeppen) kgc_select = (c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] == koeppen['KGCID']) - koeppen_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'][kgc_select] - koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'][kgc_select] + koeppen_end_mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'][kgc_select] + koeppen_obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'][kgc_select] - #axes[varkey].scatter(koeppen_obs,koeppen_mod,marker=symbols[ikoeppen],color=colors[ikey]) + #axes[varkey].scatter(koeppen_obs,koeppen_end_mod,marker=symbols[ikoeppen],color=colors[ikey]) # label=key+", "+\ # 'R = '+str(round(PR[0],3))+', '+\ # 'RMSE = '+str(round(RMSE,5))+', '+\ @@ -403,8 +403,8 @@ def brightness(rrggbb): # # 'RMSE = '+str(round(RMSE,5))+', '+\ # # 'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey]) - dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(), - pearsonr(koeppen_mod,koeppen_obs)[0], + dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), + pearsonr(koeppen_end_mod,koeppen_obs)[0], marker='o',linewidth=0.5, mfc=koeppen.color,mec='black',#koeppen.color, zorder=300+icolor, @@ -412,8 +412,8 @@ def brightness(rrggbb): # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} ) - dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(), - pearsonr(koeppen_mod,koeppen_obs)[0], + dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), + pearsonr(koeppen_end_mod,koeppen_obs)[0], marker='o',linewidth=0.5, mfc=koeppen.color,mec='black',#koeppen.color, zorder=301+icolor, ms=1 @@ 
-422,8 +422,8 @@ def brightness(rrggbb): ) - # dias[varkey].add_sample(koeppen_mod.std()/koeppen_obs.std(), - # pearsonr(koeppen_mod,koeppen_obs)[0], + # dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), + # pearsonr(koeppen_end_mod,koeppen_obs)[0], # marker='o',linewidth=0.5, mfc='none',mec=str(koeppen.color), # zorder=600+icolor, # ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) @@ -526,11 +526,11 @@ def brightness(rrggbb): # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc'] # clearsky = (cc < 0.05) - # mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats'].loc[clearsky]['d'+varkey+'dt'] - # obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].loc[clearsky]['d'+varkey+'dt'] + # mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].loc[clearsky]['d'+varkey+'dt'] + # obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats'].loc[clearsky]['d'+varkey+'dt'] - mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'] - obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'] + mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'] + obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] print ('filtering classes that have sufficient samples: ', include_koeppen) filter_classess = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen)) mod = mod.loc[filter_classes] @@ -748,7 +748,7 @@ def brightness(rrggbb): key = list(args.experiments.strip().split(' '))[ikey] data_all = pd.DataFrame() - tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats'].copy()) + tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_end_obs_stats'].copy()) 
tempdatamodstats["source"] = "soundings" @@ -780,7 +780,7 @@ def brightness(rrggbb): for ikey,key in enumerate(list(args.experiments.strip().split(' '))): keylabel = keylabels[ikey] - tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_mod_stats'].copy()) + tempdatamodstats = pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].copy()) tempdataini_this= pd.DataFrame(c4gldata[key].frames['stats']['records_all_stations_ini'].copy()) tempdatamodstats['dates']= tempdataini_this.ldatetime.dt.date diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py index a7dbd21..a5bbbac 100644 --- a/class4gl/interface/interface_stations.py +++ b/class4gl/interface/interface_stations.py @@ -146,11 +146,15 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu #axes_taylor[varkey] = fig.add_subplot(2,3,i+3) #print(obs.std()) - obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'] + obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] STD_OBS = obs.std() dias[varkey] = TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference') + dias[varkey].add_grid(zorder=-100.) 
dias[varkey]._ax.axis["left"].label.set_text(\ "Normalized standard deviation") + i += 1 + i = 1 + for varkey in ['h','theta','q']: if i == 1: axes[varkey].annotate('Normalized standard deviation',\ xy= (0.05,0.36), @@ -175,16 +179,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f') #dia._ax.set_title(season.capitalize()) + i += 1 - dias[varkey].add_grid() - - - #dia.ax.plot(x99,y99,color='k') - - for ikey,key in enumerate(args.experiments.strip(' ').split(' ')): - mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'] - obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'] + mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'] + obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] x, y = obs.values,mod.values print(key,len(obs.values)) @@ -268,7 +267,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu marker='o', ms=5, ls='', #mfc='k', mec='k', # B&W mfc=colors[ikey], mec=colors[ikey], # Colors - label=key) + label=key,zorder=100) # put ticker position, see # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html @@ -284,10 +283,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu istation = 0 for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows(): indices = (c4gldata[key].frames['stats']['records_all_stations_index'].get_level_values('STNID') == current_station.name) - station_mod = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].iloc[indices] - station_obs = c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].iloc[indices] + station_end_mod = 
c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'].iloc[indices] + station_obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'].iloc[indices] - axes[varkey].scatter(station_obs,station_mod,marker=symbols[istation],color=colors[ikey]) + axes[varkey].scatter(station_obs,station_end_mod,marker=symbols[istation],color=colors[ikey]) # label=key+", "+\ # 'R = '+str(round(PR[0],3))+', '+\ # 'RMSE = '+str(round(RMSE,5))+', '+\ @@ -300,12 +299,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # # 'RMSE = '+str(round(RMSE,5))+', '+\ # # 'BIAS = '+str(round(BIAS,5)),s=1.,color=colors[ikey]) - dias[varkey].add_sample(station_mod.std()/station_obs.std(), - pearsonr(station_mod,station_obs)[0],#annotate=symbols[istation], + dias[varkey].add_sample(station_end_mod.std()/station_obs.std(), + pearsonr(station_end_mod,station_obs)[0],#annotate=symbols[istation], marker=symbols[istation], ms=5, ls='', mfc='k', mec='k', # B&W #mfc=colors[ikey], mec=colors[ikey], # Colors - label=key) + label=key,zorder=100) istation += 1 @@ -332,11 +331,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu i +=1 axes[varkey].set_aspect('equal') - low = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].min() - high = c4gldata[key].frames['stats']['records_all_stations_mod_stats']['d'+varkey+'dt'].max() + low = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'].min() + high = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'].max() - low = np.min([low,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].min()]) - high = np.max([high,c4gldata[key].frames['stats']['records_all_stations_obs_afternoon_stats']['d'+varkey+'dt'].max()]) + low = 
np.min([low,c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'].min()]) + high = np.max([high,c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'].max()]) low = low - (high - low)*0.1 high = high + (high - low)*0.1 diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index 630439e..6e79de6 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -59,6 +59,8 @@ def __next__(self,jump=1): self.ix = (self.ix+jump) if self.ix >= len(self.records.index): raise StopIteration + if self.ix < 0: + raise StopIteration return self.records.index[self.ix], self.records.iloc[self.ix] def __prev__(self): diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index 69a2cf9..2f44fe4 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -254,23 +254,23 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal print('exclude exceptional observations') print('exclude unrealistic model output -> should be investigated!') valid = (\ - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.250) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.250) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > 0.25000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 1.8000) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 350.) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 400.) 
& - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.00055) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.00055) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.00055) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & - # filter 'extreme' model output -> should be investigated! - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) & + # # filter 'extreme' model output -> should be investigated! + # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) 
& # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & @@ -331,8 +331,10 @@ def sel_station(self,STNID=None,rownumber=None): self.update_station() + def next_station_event(self,event=None,**kwargs): + self.next_station(**kwargs) - def next_station(self,event=None,jump=1): + def next_station(self,jump=1): with suppress(StopIteration): self.frames['worldmap']['STNID'],\ self.frames['worldmap']['current_station'] \ @@ -347,8 +349,11 @@ def next_station(self,event=None,jump=1): self.update_station() - def prev_station(self,event=None): - self.next_station(jump = -1,event=event) + def prev_station_event(self,event=None,**kwargs): + self.prev_station(**kwargs) + + def prev_station(self): + self.next_station(jump = -1) def update_station(self): for key in ['STNID','current_station','stations_iterator']: self.frames['stats'][key] = self.frames['worldmap'][key] @@ -416,7 +421,10 @@ def update_station(self): self.update_record() - def next_record(self,event=None,jump=1): + def next_record_event(self,event=None,**kwargs): + self.next_record(**kwargs) + + def next_record(self,jump=1): old_chunk = self.frames['profiles']['current_record_chunk'] @@ -465,8 +473,11 @@ def next_record(self,event=None,jump=1): self.update_record() + def prev_record_event(self,event=None,**kwargs): + self,prev_record(**kwargs) + def prev_record(self,event=None): - self.next_record(jump=-1,event=event) + self.next_record(jump=-1) def update_record(self): self.frames['profiles']['current_record_ini'] = \ @@ -686,10 +697,10 @@ def plot(self): ]) if button_type !='dataset': btns[label] = Button(axes[label], 'Previous '+button_type) - btns[label].on_clicked(getattr(self, 'prev_'+button_type)) + btns[label].on_clicked(getattr(self, 'prev_'+button_type+'_event')) else: btns[label] = Button(axes[label], 'Previous input var') - 
btns[label].on_clicked(getattr(self, 'prev_'+button_type)) + btns[label].on_clicked(getattr(self, 'prev_'+button_type+'_event')) label='bnext'+button_type @@ -701,10 +712,10 @@ def plot(self): ]) if button_type !='dataset': btns[label] = Button(axes[label], 'Next '+button_type) - btns[label].on_clicked(getattr(self, 'next_'+button_type)) + btns[label].on_clicked(getattr(self, 'next_'+button_type+'_event')) else: btns[label] = Button(axes[label], 'Next input var') - btns[label].on_clicked(getattr(self, 'next_'+button_type)) + btns[label].on_clicked(getattr(self, 'next_'+button_type+'_event')) # label = 'bprev_dataset' @@ -907,6 +918,8 @@ def goto_datetime_worldmap(self,DT,shift=None): #else: # self.frames['worldmap'].pop('DT') + def next_datetime_event(self,event=None,**kwargs): + self.next_datetime(**kwargs) def next_datetime(self,event=None): if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims: # for now we don't go to different files, so we cannot go to @@ -916,6 +929,8 @@ def next_datetime(self,event=None): if "fig" in self.__dict__.keys(): self.refresh_plot_interface(only='worldmap') + def prev_datetime_event(self,event=None,**kwargs): + self.prev_datetime(**kwargs) def prev_datetime(self,event=None): if 'time' in self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.variables[self.frames['worldmap']['inputkey']].dims: # for now we don't go to different files, so we cannot go to @@ -994,6 +1009,8 @@ def sel_level(self,level): print('seldata7') + def next_level_event(self,event=None,**kwargs): + self.next_level(self,**kwargs) def next_level(self,event=None,jump=1): if 'lev' not in list(self.globaldata.datasets[self.frames['worldmap']['inputkey']].page.dims.keys()): raise ValueError('lev dimension not in dataset'+self.frames['worldmap']['inputkey']) @@ -1002,7 +1019,9 @@ def next_level(self,event=None,jump=1): level = ((level + jump - min(levels)) % 
(max(levels)-min(levels))) + min(levels) self.sel_level(level) - def prev_level(self,event=None): + def prev_level_event(self,event=None,**kwargs): + self.prev_level(self,**kwargs) + def prev_level(self): self.next_level(jump=-1) #self.frames['worldmap']['level'] = level: From d5dd32ab0ca3b77d23edfdf2729fc83e60ded270 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Tue, 29 Jan 2019 19:15:11 +0100 Subject: [PATCH 118/129] fix next record etc. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 25784e7..9e06a36 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='0.1.20', + version='0.9.1', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From d1aede505169831a3976e8c6dffb01baa1d8ec70 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Fri, 1 Feb 2019 19:00:41 +0100 Subject: [PATCH 119/129] fix interface --- class4gl/interface/interface_show_profiles.py | 2 - class4gl/interface_functions.py | 12 +- class4gl/interface_multi.py | 24 +- class4gl/setup/setup_goamazon.py | 172 ++-- class4gl/simulations/simulations.py | 716 +++++++------- class4gl/simulations/simulations_iter.py | 918 ++++++++++-------- 6 files changed, 968 insertions(+), 876 deletions(-) diff --git a/class4gl/interface/interface_show_profiles.py b/class4gl/interface/interface_show_profiles.py index 734e8b9..c15c5ea 100644 --- a/class4gl/interface/interface_show_profiles.py +++ b/class4gl/interface/interface_show_profiles.py @@ -1,4 +1,3 @@ -''' import numpy as np import pandas as pd @@ -212,7 +211,6 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu 
#axes[varkey].set_title(latex['d'+varkey+'dt']+' '+units_final,fontsize=12.) -''' diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index 6e79de6..dd9a007 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -77,6 +77,8 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): #print('going to next observation',filename) yaml_file.seek(index_start) + print('index_start',index_start) + print('index_end',index_end) buf = yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','') os.system('mkdir -p '+TEMPDIR) @@ -241,13 +243,19 @@ class stations_iterator(object): def __init__(self,stations): self.stations = stations self.ix = -1 + print('self.ix',self.ix) + print(stations.table) def __iter__(self): return self def __next__(self,jump=1): - self.ix = (self.ix+jump) - if ((self.ix >= len(self.stations.table.index)) or (self.ix < 0 )): + print('next jump',jump) + print('self.ix',self.ix) + if ((self.ix+jump >= len(self.stations.table.index)) or (self.ix+jump < 0 )): raise StopIteration + self.ix = (self.ix+jump) + print('self.ix',self.ix) self.ix = np.mod(self.ix,len(self.stations.table)) + print('self.ix',self.ix) return self.stations.table.index[self.ix], self.stations.table.iloc[self.ix] def set_row(self,row): self.ix = row diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index 2f44fe4..f2a1a2e 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -399,10 +399,10 @@ def update_station(self): self.frames['profiles']['current_station_file_end_mod'].close() self.frames['profiles']['current_station_file_end_mod'] = \ open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r') - if 'current_station_file_end' in self.frames['profiles'].keys(): - self.frames['profiles']['current_station_file_end'].close() + if 'current_station_file_end_obs' in self.frames['profiles'].keys(): + 
self.frames['profiles']['current_station_file_end_obs'].close() if self.path_forcing is not None: - self.frames['profiles']['current_station_file_end'] = \ + self.frames['profiles']['current_station_file_end_obs'] = \ open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r') # for the profiles we make a distinct record iterator, so that the @@ -466,9 +466,9 @@ def next_record(self,jump=1): open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r') if self.path_forcing is not None: - if 'current_station_file_end' in self.frames['profiles'].keys(): - self.frames['profiles']['current_station_file_end'].close() - self.frames['profiles']['current_station_file_end'] = \ + if 'current_station_file_end_obs' in self.frames['profiles'].keys(): + self.frames['profiles']['current_station_file_end_obs'].close() + self.frames['profiles']['current_station_file_end_obs'] = \ open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r') self.update_record() @@ -545,7 +545,7 @@ def update_record(self): self.frames['profiles']['record_yaml_end_obs'] = \ get_record_yaml( - self.frames['profiles']['current_station_file_end'], \ + self.frames['profiles']['current_station_file_end_obs'], \ record_end.index_start, record_end.index_end, mode='model_input') @@ -940,10 +940,14 @@ def prev_datetime(self,event=None): if "fig" in self.__dict__.keys(): self.refresh_plot_interface(only='worldmap') + def next_dataset_event(self,event=None,**kwargs): + self.next_dataset(**kwargs) def next_dataset(self,event=None): ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey']) ikey = (ikey + 1) % len(self.frames['worldmap']['inputkeys']) self.sel_dataset(self.frames['worldmap']['inputkeys'][ikey]) + def prev_dataset_event(self,event=None,**kwargs): + self.prev_dataset(**kwargs) def prev_dataset(self,event=None): ikey = self.frames['worldmap']['inputkeys'].index(self.frames['worldmap']['inputkey']) ikey = (ikey - 1) % 
len(self.frames['worldmap']['inputkeys']) @@ -1798,9 +1802,9 @@ def on_pick(self,event): self.frames['profiles']['current_station_file_end_mod'] = \ open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r') if self.path_forcing is not None: - if 'current_station_file_end' in self.frames['profiles'].keys(): - self.frames['profiles']['current_station_file_end'].close() - self.frames['profiles']['current_station_file_end'] = \ + if 'current_station_file_end_obs' in self.frames['profiles'].keys(): + self.frames['profiles']['current_station_file_end_obs'].close() + self.frames['profiles']['current_station_file_end_obs'] = \ open(self.path_forcing+'/'+format(STNID,"05d")+'_end.yaml','r') # go to hovered record of current station diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py index 2315353..e0e83aa 100644 --- a/class4gl/setup/setup_goamazon.py +++ b/class4gl/setup/setup_goamazon.py @@ -70,11 +70,11 @@ def replace_iter(iterable, search, replace): HOUR_FILES = {} for iDT, DT in enumerate(DTS): morning_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf') + possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*cdf') if len(possible_files)>0: morning_file= possible_files[0] afternoon_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*.cdf') + possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*cdf') if len(possible_files)>0: afternoon_file= possible_files[0] @@ -291,6 +291,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # only select samples monotonically increasing with height air_ap_tail_orig = pd.DataFrame(air_ap_tail) air_ap_tail = pd.DataFrame() + print(air_ap_tail_orig) air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) for ibottom in 
range(1,len(air_ap_tail_orig)): if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.: @@ -343,7 +344,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # this is the real longitude that will be used to extract ground data dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour) - dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=-4) + dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=+4) dpars['doy'] = dpars['datetime'].timetuple().tm_yday dpars['SolarAltitude'] = \ @@ -415,7 +416,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): print('updating...') print(column) - c4gli.update(source='humppa',\ + c4gli.update(source='goamazon',\ # pars=pars, pars=dpars,\ air_balloon=air_balloon,\ @@ -445,28 +446,38 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): return c4gli -path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' +# path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' +path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GOAMAZON2/' - -file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') -for date,pair in HOUR_FILES.items(): - print(pair['morning']) - humpafn =pair['morning'][1] - print(humpafn) - balloon_file = xr.open_dataset(humpafn) - - c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0]) - print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) -file_morning.close() - -file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') +file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_end.yaml','w') +file_morning = open(path_soundings+format(current_station.name,'05d')+'_ini.yaml','w') +ipair = 0 for date,pair in HOUR_FILES.items(): + humpafn = pair['afternoon'][1] - balloon_file = xr.open_dataset(humpafn) - - c4gli_afternoon = 
humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0]) - print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime) + balloon_file_afternoon = xr.open_dataset(humpafn) + balloon_file_morning = xr.open_dataset(humpafn) + print(ipair) + if (\ + (balloon_file_morning.pres.shape[0] > 10) and \ + (balloon_file_afternoon.pres.shape[0] > 10)\ + ): + + c4gli_afternoon = humppa_parser(balloon_file_afternoon,file_afternoon,date,pair['afternoon'][0]) + print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime) + ipair += 1 + + print(pair['morning']) + humpafn =pair['morning'][1] + print(humpafn) + + c4gli_morning = humppa_parser(balloon_file_morning,file_morning,date,pair['morning'][0]) + print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) + +print(ipair) file_afternoon.close() +file_morning.close() + # file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') @@ -492,17 +503,16 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # # file_model = open(fnout_model+ format(current_station.name,'05d')+'.yaml','w') - records_morning = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='morning', + subset='ini', refetch_records=True, ) print('records_morning_ldatetime',records_morning.ldatetime) records_afternoon = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='afternoon', + subset='end', refetch_records=True, ) @@ -512,62 +522,62 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): records_afternoon.index = records_morning.index path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/' -os.system('mkdir -p '+path_exp) -file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') -file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') -file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') -file_mod = 
open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') - -for (STNID,chunk,index),record_morning in records_morning.iterrows(): - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='ini') - #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) - - - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='ini') - - c4gli_morning.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds())}) - c4gli_morning.update(source='manual', - pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) - c4gli_morning.dump(file_ini) - - c4gl = class4gl(c4gli_morning) - c4gl.run() - - c4gl.dump(file_mod,\ - include_input=False,\ - timeseries_only=timeseries_only) -file_ini.close() -file_mod.close() -file_morning.close() -file_afternoon.close() - -records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='ini', - refetch_records=True, - ) -records_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='mod', - refetch_records=True, - ) - -records_mod.index = records_ini.index - -# align afternoon records with initial records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] -records_afternoon.index = records_ini.index +# os.system('mkdir -p '+path_exp) +# file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_ini.yaml') +# file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_end.yaml') +# file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') +# file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') +# +# for (STNID,chunk,index),record_morning in 
records_morning.iterrows(): +# record_afternoon = records_afternoon.loc[(STNID,chunk,index)] +# +# c4gli_morning = get_record_yaml(file_morning, +# record_morning.index_start, +# record_morning.index_end, +# mode='ini') +# #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) +# +# +# c4gli_afternoon = get_record_yaml(file_afternoon, +# record_afternoon.index_start, +# record_afternoon.index_end, +# mode='ini') +# +# c4gli_morning.update(source='pairs',pars={'runtime' : \ +# int((c4gli_afternoon.pars.datetime_daylight - +# c4gli_morning.pars.datetime_daylight).total_seconds())}) +# c4gli_morning.update(source='manual', +# pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) +# c4gli_morning.dump(file_ini) +# +# c4gl = class4gl(c4gli_morning) +# c4gl.run() +# +# c4gl.dump(file_mod,\ +# include_input=False,\ +# timeseries_only=timeseries_only) +# file_ini.close() +# file_mod.close() +# file_morning.close() +# file_afternoon.close() +# +# records_ini = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='ini', +# refetch_records=True, +# ) +# records_mod = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='mod', +# refetch_records=True, +# ) +# +# records_mod.index = records_ini.index +# +# # align afternoon records with initial records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] +# records_afternoon.index = records_ini.index """ diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 3087fe3..8d9a652 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -74,378 +74,378 @@ def __init__(self,**kwargs): print(args.__dict__) -def execute(**kwargs): - # note that with args, we actually mean the same as those specified with - # the argparse module above - - # overwrite the args according to the kwargs when the procedure is called - # as 
module function - for key,value in kwargs.items(): - args.__dict__[key] = value - - print("-- begin arguments --") - for key,value in args.__dict__.items(): - print(key,': ',value) - print("-- end arguments ----") - - # load specified class4gl library - if args.c4gl_path_lib is not None: - sys.path.insert(0, args.c4gl_path_lib) - - from class4gl import class4gl_input, data_global,class4gl - from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records - from class4gl import blh,class4gl_input - - # this is a variant of global run in which the output of runs are still written - # out even when the run crashes. - - # #only include the following timeseries in the model output - # timeseries_only = \ - # ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', - # 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', - # 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', - # 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', - # 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] - - - EXP_DEFS =\ - { - 'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - - 'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, +# def execute(**kwargs): +# note that with args, we actually mean the same as those specified with +# the argparse module above + +# overwrite the args according to the kwargs when the procedure is called +# as module function +# for key,value in kwargs.items(): +# args.__dict__[key] = value + +print("-- begin arguments --") +for key,value in args.__dict__.items(): + print(key,': ',value) +print("-- end arguments ----") + +# load specified class4gl library +if args.c4gl_path_lib is not None: + sys.path.insert(0, args.c4gl_path_lib) + +from class4gl import class4gl_input, data_global,class4gl +from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records +from class4gl 
import blh,class4gl_input + +# this is a variant of global run in which the output of runs are still written +# out even when the run crashes. + +# #only include the following timeseries in the model output +# timeseries_only = \ +# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', +# 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', +# 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', +# 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', +# 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] + + +EXP_DEFS =\ +{ + 'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + + 'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + + 'ERA_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True}, + 'GLOBAL_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + 'IOPS_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'IOPS_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, +} + +# ======================== +print("getting a list of stations") +# ======================== + +# these are all the 
stations that are found in the input dataset +all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) + +# ==================================== +print('defining all_stations_select') +# ==================================== + +# these are all the stations that are supposed to run by the whole batch (all +# chunks). We narrow it down according to the station(s) specified. + + + +if args.station_id is not None: + print("Selecting station by ID") + stations_iter = stations_iterator(all_stations) + STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) + all_stations_select = pd.DataFrame([run_station]) +else: + print("Selecting stations from a row range in the table") + all_stations_select = pd.DataFrame(all_stations.table) + if args.last_station_row is not None: + all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] + if args.first_station_row is not None: + all_stations_select = all_station_select.iloc[int(args.first_station):] +print("station numbers included in the whole batch "+\ + "(all chunks):",list(all_stations_select.index)) + +print(all_stations_select) +print("getting all records of the whole batch") +all_records_morning_select = get_records(all_stations_select,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + +# only run a specific chunck from the selection +if args.global_chunk_number is not None: + if args.station_chunk_number is not None: + raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') + + if (args.split_by is None) or (args.split_by <= 0): + raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") + + run_station_chunk = None + print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') + totalchunks = 0 + stations_iter = 
all_stations_select.iterrows() + in_current_chunk = False + try: + while not in_current_chunk: + istation,current_station = stations_iter.__next__() + all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) + chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) + print('chunks_current_station',chunks_current_station) + in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - 'ERA_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True}, - 'GLOBAL_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'IOPS_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - } - - # ======================== - print("getting a list of stations") - # ======================== - - # these are all the stations that are found in the input dataset - all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) - - # ==================================== - print('defining all_stations_select') - # 
==================================== - - # these are all the stations that are supposed to run by the whole batch (all - # chunks). We narrow it down according to the station(s) specified. - - - - if args.station_id is not None: - print("Selecting station by ID") - stations_iter = stations_iterator(all_stations) - STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) - all_stations_select = pd.DataFrame([run_station]) - else: - print("Selecting stations from a row range in the table") - all_stations_select = pd.DataFrame(all_stations.table) - if args.last_station_row is not None: - all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] - if args.first_station_row is not None: - all_stations_select = all_station_select.iloc[int(args.first_station):] - print("station numbers included in the whole batch "+\ - "(all chunks):",list(all_stations_select.index)) - - print(all_stations_select) - print("getting all records of the whole batch") - all_records_morning_select = get_records(all_stations_select,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) - - # only run a specific chunck from the selection - if args.global_chunk_number is not None: - if args.station_chunk_number is not None: - raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') - - if (args.split_by is None) or (args.split_by <= 0): - raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") - - run_station_chunk = None - print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') - totalchunks = 0 - stations_iter = all_stations_select.iterrows() - in_current_chunk = False - try: - while not in_current_chunk: - istation,current_station = stations_iter.__next__() - all_records_morning_station_select = all_records_morning_select.query('STNID == 
'+str(current_station.name)) - chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) - print('chunks_current_station',chunks_current_station) - in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - - if in_current_chunk: - run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] - run_station_chunk = int(args.global_chunk_number) - totalchunks - - totalchunks +=chunks_current_station - - - except StopIteration: - raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') - print("station = ",list(run_stations.index)) - print("station chunk number:",run_station_chunk) - - # if no global chunk is specified, then run the whole station selection in one run, or - # a specific chunk for each selected station according to # args.station_chunk_number - else: - run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] - if args.station_chunk_number is not None: - run_station_chunk = int(args.station_chunk_number) - print("station(s) that is processed.",list(run_stations.index)) - print("chunk number: ",run_station_chunk) - else: - if args.split_by is not None: - raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.") - run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) - - - #print(all_stations) - print('Fetching initial/forcing records') - records_morning = get_records(run_stations,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) - - # note that if runtime is an integer number, we don't need to get the afternoon - # profiles. 
- if args.runtime == 'from_profile_pair': - print('Fetching afternoon records for determining the simulation runtimes') - records_afternoon = get_records(run_stations,\ - args.path_forcing,\ - subset='end', - refetch_records=False, - ) + if in_current_chunk: + run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] + run_station_chunk = int(args.global_chunk_number) - totalchunks - # print(records_morning.index) - # print(records_afternoon.index) - # align afternoon records with the noon records, and set same index - print('hello') - print(len(records_afternoon)) - print(len(records_morning)) - - print("aligning morning and afternoon records") - records_morning['dates'] = records_morning['ldatetime'].dt.date - records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date - records_afternoon.set_index(['STNID','dates'],inplace=True) - ini_index_dates = records_morning.set_index(['STNID','dates']).index - records_afternoon = records_afternoon.loc[ini_index_dates] - records_afternoon.index = records_morning.index - - experiments = args.experiments.strip(' ').split(' ') - if args.experiments_names is not None: - experiments_names = args.experiments_names.strip(' ').split(' ') - if len(experiments_names) != len(experiments): - raise ValueError('Lenght of --experiments_names is different from --experiments') - + totalchunks +=chunks_current_station + + + except StopIteration: + raise ValueError("Could not determine station chunk number. 
--global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') + print("station = ",list(run_stations.index)) + print("station chunk number:",run_station_chunk) + +# if no global chunk is specified, then run the whole station selection in one run, or +# a specific chunk for each selected station according to # args.station_chunk_number +else: + run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] + if args.station_chunk_number is not None: + run_station_chunk = int(args.station_chunk_number) + print("station(s) that is processed.",list(run_stations.index)) + print("chunk number: ",run_station_chunk) else: - experiments_names = experiments - - for iexpname,expid in enumerate(experiments): - expname = experiments_names[iexpname] - exp = EXP_DEFS[expid] - path_exp = args.path_experiments+'/'+expname+'/' - - os.system('mkdir -p '+path_exp) - for istation,current_station in run_stations.iterrows(): - print(istation,current_station) - records_morning_station = records_morning.query('STNID == '+str(current_station.name)) - start_record = run_station_chunk*args.split_by if run_station_chunk is not 0 else 0 - end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None - if start_record >= (len(records_morning_station)): - print("warning: outside of profile number range for station "+\ - str(current_station)+". Skipping chunk number for this station.") + if args.split_by is not None: + raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. 
Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.") + run_station_chunk = 0 + print("stations that are processed.",list(run_stations.index)) + + +#print(all_stations) +print('Fetching initial/forcing records') +records_morning = get_records(run_stations,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + +# note that if runtime is an integer number, we don't need to get the afternoon +# profiles. +if args.runtime == 'from_profile_pair': + print('Fetching afternoon records for determining the simulation runtimes') + records_afternoon = get_records(run_stations,\ + args.path_forcing,\ + subset='end', + refetch_records=False, + ) + + # print(records_morning.index) + # print(records_afternoon.index) + # align afternoon records with the noon records, and set same index + print('hello') + print(len(records_afternoon)) + print(len(records_morning)) + + print("aligning morning and afternoon records") + records_morning['dates'] = records_morning['ldatetime'].dt.date + records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date + records_afternoon.set_index(['STNID','dates'],inplace=True) + ini_index_dates = records_morning.set_index(['STNID','dates']).index + records_afternoon = records_afternoon.loc[ini_index_dates] + records_afternoon.index = records_morning.index + +experiments = args.experiments.strip(' ').split(' ') +if args.experiments_names is not None: + experiments_names = args.experiments_names.strip(' ').split(' ') + if len(experiments_names) != len(experiments): + raise ValueError('Lenght of --experiments_names is different from --experiments') + +else: + experiments_names = experiments + +for iexpname,expid in enumerate(experiments): + expname = experiments_names[iexpname] + exp = EXP_DEFS[expid] + path_exp = args.path_experiments+'/'+expname+'/' + + os.system('mkdir -p '+path_exp) + for istation,current_station in run_stations.iterrows(): + print(istation,current_station) + 
records_morning_station = records_morning.query('STNID == '+str(current_station.name)) + start_record = run_station_chunk*args.split_by if run_station_chunk is not 0 else 0 + end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None + if start_record >= (len(records_morning_station)): + print("warning: outside of profile number range for station "+\ + str(current_station)+". Skipping chunk number for this station.") + else: + fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' + if os.path.isfile(fn_morning): + file_morning = open(fn_morning) else: - fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' - if os.path.isfile(fn_morning): - file_morning = open(fn_morning) - else: - fn_morning = \ - args.path_forcing+'/'+format(current_station.name,'05d')+\ - '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' - file_morning = open(fn_morning) - - if args.runtime == 'from_profile_pair': - file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml') - fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_ini.yaml' - fn_end_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_end.yaml' - file_ini = open(fn_ini,'w') - file_end_mod = open(fn_end_mod,'w') - - #iexp = 0 - onerun = False - print('starting station chunk number: '\ - +str(run_station_chunk)+' (chunk size:',args.split_by,')') - - records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] - - isim = 0 - for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): - print('starting '+str(isim+1)+' out of '+\ - str(len(records_morning_station_chunk) )+\ - ' (station total: ',str(len(records_morning_station)),')') - + fn_morning = \ + 
args.path_forcing+'/'+format(current_station.name,'05d')+\ + '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' + file_morning = open(fn_morning) + + if args.runtime == 'from_profile_pair': + file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml') + fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_ini.yaml' + fn_end_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_end.yaml' + file_ini = open(fn_ini,'w') + file_end_mod = open(fn_end_mod,'w') + + #iexp = 0 + onerun = False + print('starting station chunk number: '\ + +str(run_station_chunk)+' (chunk size:',args.split_by,')') + + records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] + + isim = 0 + for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): + print('starting '+str(isim+1)+' out of '+\ + str(len(records_morning_station_chunk) )+\ + ' (station total: ',str(len(records_morning_station)),')') - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, + + c4gli_morning = get_record_yaml(file_morning, + record_morning.index_start, + record_morning.index_end, + mode='model_input') + if args.diag_tropo is not None: + print('add tropospheric parameters on advection and subsidence (for diagnosis)') + seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) + profile_tropo = c4gli_morning.air_ac[seltropo] + for var in args.diag_tropo:#['t','q','u','v',]: + if var[:3] == 'adv': + mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) + c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) + else: + print("warning: tropospheric variable "+var+" not recognized") + + + if args.runtime == 'from_profile_pair': + 
record_afternoon = records_afternoon.loc[(STNID,chunk,index)] + c4gli_afternoon = get_record_yaml(file_afternoon, + int(record_afternoon.index_start), + int(record_afternoon.index_end), mode='model_input') - if args.diag_tropo is not None: - print('add tropospheric parameters on advection and subsidence (for diagnosis)') - seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) - profile_tropo = c4gli_morning.air_ac[seltropo] - for var in args.diag_tropo:#['t','q','u','v',]: - if var[:3] == 'adv': - mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) - c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) - else: - print("warning: tropospheric variable "+var+" not recognized") + runtime = int((c4gli_afternoon.pars.datetime_daylight - + c4gli_morning.pars.datetime_daylight).total_seconds()) + elif args.runtime == 'from_input': + runtime = c4gli_morning.pars.runtime + else: + runtime = int(args.runtime) + + + c4gli_morning.update(source='pairs',pars={'runtime' : \ + runtime}) + c4gli_morning.update(source=expname, pars=exp) + + c4gl = class4gl(c4gli_morning) + + if args.error_handling == 'dump_always': + try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') + except: + print('run not succesful') + onerun = True + + print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') + c4gli_morning.dump(file_ini) - if args.runtime == 'from_profile_pair': - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='model_input') - runtime = int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds()) - elif args.runtime == 'from_input': - runtime = c4gli_morning.pars.runtime - else: - runtime = int(args.runtime) - 
- - c4gli_morning.update(source='pairs',pars={'runtime' : \ - runtime}) - c4gli_morning.update(source=expname, pars=exp) - - c4gl = class4gl(c4gli_morning) - - if args.error_handling == 'dump_always': - try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') - except: - print('run not succesful') - onerun = True - - print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') + c4gl.dump(file_end_mod,\ + include_input=False,\ + #timeseries_only=timeseries_only,\ + ) + onerun = True + # in this case, only the file will dumped if the runs were + # successful + elif args.error_handling == 'dump_on_success': + try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') c4gli_morning.dump(file_ini) + print("dumping to "+str(file_ini)) c4gl.dump(file_end_mod,\ include_input=False,\ #timeseries_only=timeseries_only,\ ) onerun = True - # in this case, only the file will dumped if the runs were - # successful - elif args.error_handling == 'dump_on_success': - try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') - c4gli_morning.dump(file_ini) - - - print("dumping to "+str(file_ini)) - c4gl.dump(file_end_mod,\ - include_input=False,\ - #timeseries_only=timeseries_only,\ - ) - onerun = True - except: - print('run not succesful') - isim += 1 - - - file_ini.close() - file_end_mod.close() - file_morning.close() - if args.runtime == 'from_profile_pair': - file_afternoon.close() - - if onerun: - records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - getchunk = int(run_station_chunk),\ - subset='ini', - refetch_records=True, - ) - records_end_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - 
getchunk = int(run_station_chunk),\ - subset='end',\ - refetch_records=True,\ - ) - else: - # remove empty files - os.system('rm '+fn_ini) - os.system('rm '+fn_end_mod) - - # # align afternoon records with initial records, and set same index - # records_afternoon.index = records_afternoon.ldatetime.dt.date - # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] - # records_afternoon.index = records_ini.index - - # stations_for_iter = stations(path_exp) - # for STNID,station in stations_iterator(stations_for_iter): - # records_current_station_index = \ - # (records_ini.index.get_level_values('STNID') == STNID) - # file_current_station_end_mod = STNID - # - # with \ - # open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ - # open(path_exp+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \ - # open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: - # for (STNID,index),record_ini in records_iterator(records_ini): - # c4gli_ini = get_record_yaml(file_station_ini, - # record_ini.index_start, - # record_ini.index_end, - # mode='ini') - # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) - # - # record_end_mod = records_end_mod.loc[(STNID,index)] - # c4gl_end_mod = get_record_yaml(file_station_end_mod, - # record_end_mod.index_start, - # record_end_mod.index_end, - # mode='mod') - # record_afternoon = records_afternoon.loc[(STNID,index)] - # c4gl_afternoon = get_record_yaml(file_station_afternoon, - # record_afternoon.index_start, - # record_afternoon.index_end, - # mode='ini') - + except: + print('run not succesful') + isim += 1 -if __name__ == '__main__': - #execute(**vars(args)) - execute() + + file_ini.close() + file_end_mod.close() + file_morning.close() + if args.runtime == 'from_profile_pair': + file_afternoon.close() + + if onerun: + records_ini = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + 
subset='ini', + refetch_records=True, + ) + records_end_mod = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + subset='end',\ + refetch_records=True,\ + ) + else: + # remove empty files + os.system('rm '+fn_ini) + os.system('rm '+fn_end_mod) + + # # align afternoon records with initial records, and set same index + # records_afternoon.index = records_afternoon.ldatetime.dt.date + # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] + # records_afternoon.index = records_ini.index + + # stations_for_iter = stations(path_exp) + # for STNID,station in stations_iterator(stations_for_iter): + # records_current_station_index = \ + # (records_ini.index.get_level_values('STNID') == STNID) + # file_current_station_end_mod = STNID + # + # with \ + # open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ + # open(path_exp+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \ + # open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: + # for (STNID,index),record_ini in records_iterator(records_ini): + # c4gli_ini = get_record_yaml(file_station_ini, + # record_ini.index_start, + # record_ini.index_end, + # mode='ini') + # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) + # + # record_end_mod = records_end_mod.loc[(STNID,index)] + # c4gl_end_mod = get_record_yaml(file_station_end_mod, + # record_end_mod.index_start, + # record_end_mod.index_end, + # mode='mod') + # record_afternoon = records_afternoon.loc[(STNID,index)] + # c4gl_afternoon = get_record_yaml(file_station_afternoon, + # record_afternoon.index_start, + # record_afternoon.index_end, + # mode='ini') + + +# if __name__ == '__main__': +# #execute(**vars(args)) +# execute() diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py index 5c1bd2b..8b60b86 100644 --- a/class4gl/simulations/simulations_iter.py +++ 
b/class4gl/simulations/simulations_iter.py @@ -9,382 +9,385 @@ import pytz import math -import argparse -#if __name__ == '__main__': -parser = argparse.ArgumentParser() -#parser.add_argument('--timestamp') -parser.add_argument('--path_forcing')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/') -parser.add_argument('--path_experiments')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') -parser.add_argument('--first_station_row') -parser.add_argument('--last_station_row') -parser.add_argument('--station_id') # run a specific station id -parser.add_argument('--error_handling',default='dump_on_success') -parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv']) -parser.add_argument('--subset_forcing',default='morning') # this tells which yaml subset - # to initialize with. - # Most common options are - # 'morning' and 'ini'. +arguments = [] +#parser.add_argument('--timestamp') +arguments.append(dict(arg='--path_forcing',\ + help='directory of forcing data to initialize and constrain the ABL model simulations')) +arguments.append(dict(arg='--path_experiments', + help='output directory in which the experiments as subdirectories are stored'))#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') +arguments.append(dict(arg='--first_station_row',\ + help='starting row number of stations table')) +arguments.append(dict(arg='--last_station_row',\ + help='ending row number of stations table')) +arguments.append(dict(arg='--station_id',\ + help="process a specific station id")) +arguments.append(dict(arg='--error_handling',\ + default='dump_on_success',\ + help="type of error handling: either\n - 'dump_on_success' (default)\n - 'dump_always'")) +arguments.append(dict(arg='--diag_tropo',\ + default=['advt','advq','advu','advv'],\ + help="field to diagnose the mean in the troposphere (<= 3000m)")) +arguments.append(dict(arg='--subset_forcing', + default='ini', + help="This indicates which yaml subset to initialize with. 
Most common options are 'ini' (default) and 'morning'.")) # Tuntime is usually specified from the afternoon profile. You can also just # specify the simulation length in seconds -parser.add_argument('--runtime',default='from_afternoon_profile') - -parser.add_argument('--experiments') -parser.add_argument('--split_by',default=-1)# station soundings are split - # up in chunks - -#parser.add_argument('--station-chunk',default=0) -parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') -parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations -parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations -args = parser.parse_args() - -sys.path.insert(0, args.c4gl_path_lib) -from class4gl import class4gl_input, data_global,class4gl -from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records -from class4gl import blh,class4gl_input - -# this is a variant of global run in which the output of runs are still written -# out even when the run crashes. 
- -# #only include the following timeseries in the model output -# timeseries_only = \ -# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', -# 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', -# 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', -# 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', -# 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] - - -EXP_DEFS =\ -{ - 'ERA_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'W_ITER': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SHR_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True}, - 'GLOBAL_W_ITER': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'IOPS_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_W_ITER': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'IOPS_AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, -} - +arguments.append(dict(arg='--runtime', + default='from_input', + help="set the runtime of the simulation in seconds, or get it from the daytime difference in the profile pairs 'from_input' (default)")) -# #SET = 'GLOBAL' -# SET = args.dataset +arguments.append(dict(arg='--experiments', help="IDs of experiments, as a space-seperated list (default: 'BASE')")) +arguments.append(dict(arg='--split_by',\ + type=int, + help="the maxmimum number of soundings that are contained in each output file of a station. 
-1 means unlimited (default). In case of arrays experiments, this is usually overwritten by 50.")) -# ======================== -print("getting a list of stations") -# ======================== +#arguments.append(dict(arg='--station-chunk',default=0) +arguments.append(dict(arg='--c4gl_path_lib',help="the path of the CLASS4GL program"))#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') +arguments.append(dict(arg='--global_chunk_number',help="this is the batch number of the expected series of experiments according to split_by")) +arguments.append(dict(arg='--station_chunk_number',help="this is the batch number according to split_by in case of considering one station")) +arguments.append(dict(arg='--experiments_names', help="Alternative output names that are given to the experiments. By default, these are the same as --experiments") ) -# these are all the stations that are found in the input dataset -all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) -# ==================================== -print('defining all_stations_select') -# ==================================== -# these are all the stations that are supposed to run by the whole batch (all -# chunks). We narrow it down according to the station(s) specified. 
+if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + #parser.add_argument('--timestamp') + for argument in arguments: + name = argument.pop('arg') + parser.add_argument(name,**argument) - - -if args.station_id is not None: - print("Selecting station by ID") - stations_iter = stations_iterator(all_stations) - STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) - all_stations_select = pd.DataFrame([run_station]) + args = parser.parse_args() else: - print("Selecting stations from a row range in the table") - all_stations_select = pd.DataFrame(all_stations.table) - if args.last_station_row is not None: - all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] - if args.first_station_row is not None: - all_stations_select = all_station_select.iloc[int(args.first_station):] -print("station numbers included in the whole batch "+\ - "(all chunks):",list(all_stations_select.index)) - -print(all_stations_select) -print("getting all records of the whole batch") -all_records_morning_select = get_records(all_stations_select,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) + class Namespace: + def __init__(self,**kwargs): + self.__dict__.update(kwargs) + + args = Namespace() + for argument in arguments: + if 'default' in argument.keys(): + args.__dict__[argument['arg'].strip('-')] = argument['default'] + else: + args.__dict__[argument['arg'].strip('-')] = None + print(args.__dict__) + -# only run a specific chunck from the selection -if args.global_chunk_number is not None: - if args.station_chunk_number is not None: - raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') +def execute(**kwargs): + # note that with args, we actually mean the same as those specified with + # the argparse module above + # overwrite the args according to the kwargs when the procedure is called + # as module function + for key,value in 
kwargs.items(): + args.__dict__[key] = value - if not (int(args.split_by) > 0) : - raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.") + print("-- begin arguments --") + for key,value in args.__dict__.items(): + print(key,': ',value) + print("-- end arguments ----") - run_station_chunk = None - print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') - totalchunks = 0 - stations_iter = all_stations_select.iterrows() - in_current_chunk = False - try: - while not in_current_chunk: - istation,current_station = stations_iter.__next__() - all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) - chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) - print('chunks_current_station',chunks_current_station) - in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - - if in_current_chunk: - run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] - run_station_chunk = int(args.global_chunk_number) - totalchunks - - totalchunks +=chunks_current_station + # load specified class4gl library + if args.c4gl_path_lib is not None: + sys.path.insert(0, args.c4gl_path_lib) + + from class4gl import class4gl_input, data_global,class4gl + from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records + from class4gl import blh,class4gl_input + + # this is a variant of global run in which the output of runs are still written + # out even when the run crashes. 
+ + # #only include the following timeseries in the model output + # timeseries_only = \ + # ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', + # 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', + # 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', + # 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', + # 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] + + + EXP_DEFS =\ + { + 'BASE_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + + 'NOADV_ITER':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - - except StopIteration: - raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') - print("station = ",list(run_stations.index)) - print("station chunk number:",run_station_chunk) - -# if no global chunk is specified, then run the whole station selection in one run, or -# a specific chunk for each selected station according to # args.station_chunk_number -else: - run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] - if args.station_chunk_number is not None: - run_station_chunk = int(args.station_chunk_number) - print("station(s) that is processed.",list(run_stations.index)) - print("chunk number: ",run_station_chunk) + 'ERA_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'W_ITER': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'AC_ITER': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + } + + + # #SET = 'GLOBAL' + # SET = args.dataset + + # ======================== + print("getting a list of stations") + # 
======================== + + # these are all the stations that are found in the input dataset + all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) + + # ==================================== + print('defining all_stations_select') + # ==================================== + + # these are all the stations that are supposed to run by the whole batch (all + # chunks). We narrow it down according to the station(s) specified. + + + + if args.station_id is not None: + print("Selecting station by ID") + stations_iter = stations_iterator(all_stations) + STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) + all_stations_select = pd.DataFrame([run_station]) + else: + print("Selecting stations from a row range in the table") + all_stations_select = pd.DataFrame(all_stations.table) + if args.last_station_row is not None: + all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] + if args.first_station_row is not None: + all_stations_select = all_station_select.iloc[int(args.first_station):] + print("station numbers included in the whole batch "+\ + "(all chunks):",list(all_stations_select.index)) + + print(all_stations_select) + print("getting all records of the whole batch") + all_records_morning_select = get_records(all_stations_select,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + + # only run a specific chunck from the selection + if args.global_chunk_number is not None: + if args.station_chunk_number is not None: + raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') + + + if not (int(args.split_by) > 0) : + raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.") + + run_station_chunk = None + print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') + 
totalchunks = 0 + stations_iter = all_stations_select.iterrows() + in_current_chunk = False + try: + while not in_current_chunk: + istation,current_station = stations_iter.__next__() + all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) + chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) + print('chunks_current_station',chunks_current_station) + in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) + + if in_current_chunk: + run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] + run_station_chunk = int(args.global_chunk_number) - totalchunks + + totalchunks +=chunks_current_station + + + except StopIteration: + raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') + print("station = ",list(run_stations.index)) + print("station chunk number:",run_station_chunk) + + # if no global chunk is specified, then run the whole station selection in one run, or + # a specific chunk for each selected station according to # args.station_chunk_number else: - if args.split_by != -1: - raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.") - run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) + run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] + if args.station_chunk_number is not None: + run_station_chunk = int(args.station_chunk_number) + print("station(s) that is processed.",list(run_stations.index)) + print("chunk number: ",run_station_chunk) + else: + if args.split_by != -1: + raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. 
Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.") + run_station_chunk = 0 + print("stations that are processed.",list(run_stations.index)) + + + #print(all_stations) + print('Fetching initial/forcing records') + records_morning = get_records(run_stations,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + + # note that if runtime is an integer number, we don't need to get the afternoon + # profiles. + if args.runtime == 'from_profile_pair': + print('Fetching afternoon records for determining the simulation runtimes') + records_afternoon = get_records(run_stations,\ + args.path_forcing,\ + subset='end', + refetch_records=False, + ) + # print(records_morning.index) + # print(records_afternoon.index) + # align afternoon records with the noon records, and set same index + print(len(records_afternoon)) + print(len(records_morning)) + + print("aligning morning and afternoon records") + records_morning['dates'] = records_morning['ldatetime'].dt.date + records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date + records_afternoon.set_index(['STNID','dates'],inplace=True) + ini_index_dates = records_morning.set_index(['STNID','dates']).index + records_afternoon = records_afternoon.loc[ini_index_dates] + records_afternoon.index = records_morning.index + -#print(all_stations) -print('Fetching initial/forcing records') -records_morning = get_records(run_stations,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) - -# note that if runtime is an integer number, we don't need to get the afternoon -# profiles. 
-if args.runtime == 'from_afternoon_profile': - print('Fetching afternoon records for determining the simulation runtimes') - records_afternoon = get_records(run_stations,\ - args.path_forcing,\ - subset='afternoon', - refetch_records=False, - ) - - # print(records_morning.index) - # print(records_afternoon.index) - # align afternoon records with the noon records, and set same index - print('hello') - print(len(records_afternoon)) - print(len(records_morning)) - - print("aligning morning and afternoon records") - records_morning['dates'] = records_morning['ldatetime'].dt.date - records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date - records_afternoon.set_index(['STNID','dates'],inplace=True) - ini_index_dates = records_morning.set_index(['STNID','dates']).index - records_afternoon = records_afternoon.loc[ini_index_dates] - records_afternoon.index = records_morning.index - -experiments = args.experiments.strip(' ').split(' ') -for expname in experiments: - exp = EXP_DEFS[expname] - path_exp = args.path_experiments+'/'+expname+'/' - os.system('mkdir -p '+path_exp) - records_morning_station = records_morning.query('STNID == '+str(current_station.name)) - for istation,current_station in run_stations.iterrows(): - if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)): - print("warning: outside of profile number range for station "+\ - str(current_station)+". 
Skipping chunk number for this station.") - else: - - fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' - if os.path.isfile(fn_morning): - file_morning = open(fn_morning) + experiments = args.experiments.strip(' ').split(' ') + if args.experiments_names is not None: + experiments_names = args.experiments_names.strip(' ').split(' ') + if len(experiments_names) != len(experiments): + raise ValueError('Lenght of --experiments_names is different from --experiments') + + else: + experiments_names = experiments + + for iexpname,expid in enumerate(experiments): + expname = experiments_names[iexpname] + exp = EXP_DEFS[expid] + path_exp = args.path_experiments+'/'+expname+'/' + + os.system('mkdir -p '+path_exp) + records_morning_station = records_morning.query('STNID == '+str(current_station.name)) + for istation,current_station in run_stations.iterrows(): + if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)): + print("warning: outside of profile number range for station "+\ + str(current_station)+". 
Skipping chunk number for this station.") else: - fn_morning = \ - args.path_forcing+'/'+format(current_station.name,'05d')+\ - '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' - file_morning = open(fn_morning) - - if args.runtime == 'from_afternoon_profile': - file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_afternoon.yaml') - fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_ini.yaml' - fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_mod.yaml' - file_ini = open(fn_ini,'w') - file_mod = open(fn_mod,'w') - - #iexp = 0 - onerun = False - print('starting station chunk number: '\ - +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)') - - - isim = 0 - records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] - for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): - #if iexp == 11: - - - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='ini') - if args.diag_tropo is not None: - seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) - profile_tropo = c4gli_morning.air_ac[seltropo] - for var in args.diag_tropo:#['t','q','u','v',]: - if var[:3] == 'adv': - mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) - c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) - else: - print("warning: tropospheric variable "+var+" not recognized") - - #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) - + + fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' + print('fn_morning',fn_morning) + if os.path.isfile(fn_morning): + 
file_morning = open(fn_morning) + else: + fn_morning = \ + args.path_forcing+'/'+format(current_station.name,'05d')+\ + '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' + file_morning = open(fn_morning) + + fn_afternoon = args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml' + print(fn_afternoon) + if args.runtime == 'from_profile_pair': + file_afternoon = open(fn_afternoon) + fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_ini.yaml' + print('fn_ini',fn_ini) + fn_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_end.yaml' + file_ini = open(fn_ini,'w') + file_mod = open(fn_mod,'w') + + #iexp = 0 + onerun = False + print('starting station chunk number: '\ + +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)') + + + isim = 0 + records_morning_station_chunk = records_morning_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] + for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): + #if iexp == 11: - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='ini') - - c4gli_morning.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds())}) - c4gli_morning.update(source=expname, pars=exp) + + c4gli_morning = get_record_yaml(file_morning, + record_morning.index_start, + record_morning.index_end, + mode='model_input') + if args.diag_tropo is not None: + seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) + profile_tropo = c4gli_morning.air_ac[seltropo] + for var in args.diag_tropo:#['t','q','u','v',]: + if var[:3] == 'adv': + 
mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) + c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) + else: + print("warning: tropospheric variable "+var+" not recognized") + + #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) + + if args.runtime == 'from_profile_pair': + record_afternoon = records_afternoon.loc[(STNID,chunk,index)] + c4gli_afternoon = get_record_yaml(file_afternoon, + int(record_afternoon.index_start), + int(record_afternoon.index_end), + mode='model_input') + runtime = int((c4gli_afternoon.pars.datetime_daylight - + c4gli_morning.pars.datetime_daylight).total_seconds()) + elif args.runtime == 'from_input': + runtime = c4gli_morning.pars.runtime + else: + runtime = int(args.runtime) + + + c4gli_morning.update(source='pairs',pars={'runtime' : \ + runtime}) - c4gl = class4gl(c4gli_morning) - - #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.) - EFobs = c4gli_morning.pars.EF - - b = c4gli_morning.pars.wwilt - c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01) - - - try: - #fb = f(b) - c4gli_morning.pars.wg = b - c4gli_morning.pars.w2 = b + c4gli_morning.update(source=expname, pars=exp) + c4gl = class4gl(c4gli_morning) - c4gl.run() - EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - fb = EFmod - EFobs - EFmodb = EFmod - c4glb = c4gl - c4gli_morningb = c4gli_morning - #fc = f(c) - c4gli_morning.pars.wg = c - c4gli_morning.pars.w2 = c - c4gl = class4gl(c4gli_morning) - c4gl.run() - EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - fc = EFmod - EFobs - print (EFmodb,EFobs,fb) - print (EFmod,EFobs,fc) - c4glc = c4gl - c4gli_morningc = c4gli_morning - i=0 + #EFobs = c4gli_morning.pars.BR /(c4gli_morning.pars.BR+1.) + EFobs = c4gli_morning.pars.EF - - if fc*fb > 0.: - if abs(fb) < abs(fc): - c4gl = c4glb - c4gli_morning = c4gli_morningb - else: - c4gl = c4glc - c4gli_morning = c4gli_morningc - print("Warning!!! 
function value of the boundaries have the same sign, so I will not able to find a root") + b = c4gli_morning.pars.wwilt + c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01) - else: - print('starting ITERATION!!!') - cn = c - fc/(fc-fb)*(c-b) - - - #fcn = f(cn) - c4gli_morning.pars.wg = np.asscalar(cn) - c4gli_morning.pars.w2 = np.asscalar(cn) + + try: + #fb = f(b) + c4gli_morning.pars.wg = b + c4gli_morning.pars.w2 = b c4gl = class4gl(c4gli_morning) c4gl.run() - fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs + EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) + fb = EFmod - EFobs + EFmodb = EFmod + c4glb = c4gl + c4gli_morningb = c4gli_morning - tol = 0.02 - ftol = 10. - maxiter = 10 + #fc = f(c) + c4gli_morning.pars.wg = c + c4gli_morning.pars.w2 = c + c4gl = class4gl(c4gli_morning) + c4gl.run() + EFmod = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) + fc = EFmod - EFobs + print (EFmodb,EFobs,fb) + print (EFmod,EFobs,fc) + c4glc = c4gl + c4gli_morningc = c4gli_morning + i=0 - is1=0 - is1max=1 - while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter): - if fc * fcn > 0: - temp = c - c = b - b = temp - - a = b - fa = fb - b = c - fb = fc - c = cn - fc = fcn - - print(i,a,b,c,fcn) - - s1 = c - fc/(fc-fb)*(c-b) - s2 = c - fc/(fc-fa)*(c-a) - - - # take the one that is closest to the border (opposite to the previous border), making the chance that the border is eliminated is bigger - - - if (abs(s1-b) < abs(s2-b)): - is1 = 0 - else: - is1 +=1 - - # we prefer s1, but only allow it a few times to not provide the opposite boundary - if is1 < is1max: - s = s1 - print('s1') + + if fc*fb > 0.: + if abs(fb) < abs(fc): + c4gl = c4glb + c4gli_morning = c4gli_morningb else: - is1 = 0 - s = s2 - print('s2') + c4gl = c4glc + c4gli_morning = c4gli_morningc + print("Warning!!! 
function value of the boundaries have the same sign, so I will not able to find a root") + + else: + print('starting ITERATION!!!') + cn = c - fc/(fc-fb)*(c-b) - if c > b: - l = b - r = c - else: - l = c - r = b - - m = (b+c)/2. - - if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)): - cn = s - print('midpoint') - else: - cn = m - print('bissection') - #fcn = f(cn) c4gli_morning.pars.wg = np.asscalar(cn) @@ -393,84 +396,153 @@ c4gl.run() fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs - - i+=1 + tol = 0.02 + ftol = 10. + maxiter = 10 - if i == maxiter: - raise StopIteration('did not converge') - - - - - #c4gl = class4gl(c4gli_morning) - #c4gl.run() - - c4gli_morning.pars.itersteps = i - c4gli_morning.dump(file_ini) - - - c4gl.dump(file_mod,\ - include_input=False,\ - # timeseries_only=timeseries_only,\ - ) - onerun = True - except: - print('run not succesfull') - - #iexp = iexp +1 - file_ini.close() - file_mod.close() - file_morning.close() - file_afternoon.close() - - if onerun: - records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - getchunk = int(run_station_chunk),\ - subset='ini', - refetch_records=True, - ) - records_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - getchunk = int(run_station_chunk),\ - subset='mod',\ - refetch_records=True,\ - ) - else: - # remove empty files - os.system('rm '+fn_ini) - os.system('rm '+fn_mod) - - # # align afternoon records with initial records, and set same index - # records_afternoon.index = records_afternoon.ldatetime.dt.date - # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] - # records_afternoon.index = records_ini.index - - # stations_for_iter = stations(path_exp) - # for STNID,station in stations_iterator(stations_for_iter): - # records_current_station_index = \ - # (records_ini.index.get_level_values('STNID') == STNID) - # file_current_station_mod = STNID - # - # with \ - # 
open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ - # open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \ - # open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: - # for (STNID,index),record_ini in records_iterator(records_ini): - # c4gli_ini = get_record_yaml(file_station_ini, - # record_ini.index_start, - # record_ini.index_end, - # mode='ini') - # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) - # - # record_mod = records_mod.loc[(STNID,index)] - # c4gl_mod = get_record_yaml(file_station_mod, - # record_mod.index_start, - # record_mod.index_end, - # mode='mod') - # record_afternoon = records_afternoon.loc[(STNID,index)] - # c4gl_afternoon = get_record_yaml(file_station_afternoon, - # record_afternoon.index_start, - # record_afternoon.index_end, - # mode='ini') - + is1=0 + is1max=1 + while (( abs(cn-c) > tol) or ( abs(fcn) > ftol)) and (fcn != 0) and (i < maxiter): + if fc * fcn > 0: + temp = c + c = b + b = temp + + a = b + fa = fb + b = c + fb = fc + c = cn + fc = fcn + + print(i,a,b,c,fcn) + + s1 = c - fc/(fc-fb)*(c-b) + s2 = c - fc/(fc-fa)*(c-a) + + + # take the one that is closest to the border (opposite to the previous border), making the chance that the border is eliminated is bigger + + + if (abs(s1-b) < abs(s2-b)): + is1 = 0 + else: + is1 +=1 + + # we prefer s1, but only allow it a few times to not provide the opposite boundary + if is1 < is1max: + s = s1 + print('s1') + else: + is1 = 0 + s = s2 + print('s2') + + if c > b: + l = b + r = c + else: + l = c + r = b + + m = (b+c)/2. 
+ + if ((s > l) and (s < r)):# and (abs(m-b) < abs(s - b)): + cn = s + print('midpoint') + else: + cn = m + print('bissection') + + + #fcn = f(cn) + c4gli_morning.pars.wg = np.asscalar(cn) + c4gli_morning.pars.w2 = np.asscalar(cn) + c4gl = class4gl(c4gli_morning) + c4gl.run() + fcn = c4gl.out.LE.sum()/(c4gl.out.H.sum() + c4gl.out.LE.sum()) - EFobs + + + i+=1 + + if i == maxiter: + raise StopIteration('did not converge') + + + + + #c4gl = class4gl(c4gli_morning) + #c4gl.run() + + c4gli_morning.pars.itersteps = i + c4gli_morning.dump(file_ini) + + + c4gl.dump(file_mod,\ + include_input=False,\ + # timeseries_only=timeseries_only,\ + ) + onerun = True + except: + print('run not succesfull') + + #iexp = iexp +1 + file_ini.close() + file_mod.close() + file_morning.close() + file_afternoon.close() + + if onerun: + records_ini = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + subset='ini', + refetch_records=True, + ) + records_mod = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + subset='end',\ + refetch_records=True,\ + ) + else: + # remove empty files + os.system('rm '+fn_ini) + os.system('rm '+fn_mod) + + # # align afternoon records with initial records, and set same index + # records_afternoon.index = records_afternoon.ldatetime.dt.date + # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] + # records_afternoon.index = records_ini.index + + # stations_for_iter = stations(path_exp) + # for STNID,station in stations_iterator(stations_for_iter): + # records_current_station_index = \ + # (records_ini.index.get_level_values('STNID') == STNID) + # file_current_station_mod = STNID + # + # with \ + # open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ + # open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \ + # open(path_soundings+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: 
+ # for (STNID,index),record_ini in records_iterator(records_ini): + # c4gli_ini = get_record_yaml(file_station_ini, + # record_ini.index_start, + # record_ini.index_end, + # mode='ini') + # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) + # + # record_mod = records_mod.loc[(STNID,index)] + # c4gl_mod = get_record_yaml(file_station_mod, + # record_mod.index_start, + # record_mod.index_end, + # mode='mod') + # record_afternoon = records_afternoon.loc[(STNID,index)] + # c4gl_afternoon = get_record_yaml(file_station_afternoon, + # record_afternoon.index_start, + # record_afternoon.index_end, + # mode='ini') + +if __name__ == '__main__': + #execute(**vars(args)) + execute() From 98a36a8fdedd9e3faca2f243a81b084ad834c840 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Sat, 2 Feb 2019 11:43:34 +0100 Subject: [PATCH 120/129] fix prev record. --- class4gl/interface_multi.py | 2 +- class4gl/setup/setup_goamazon.py | 4 +--- setup.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index f2a1a2e..8643964 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -474,7 +474,7 @@ def next_record(self,jump=1): self.update_record() def prev_record_event(self,event=None,**kwargs): - self,prev_record(**kwargs) + self.prev_record(**kwargs) def prev_record(self,event=None): self.next_record(jump=-1) diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py index e0e83aa..9716f69 100644 --- a/class4gl/setup/setup_goamazon.py +++ b/class4gl/setup/setup_goamazon.py @@ -456,6 +456,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): humpafn = pair['afternoon'][1] balloon_file_afternoon = xr.open_dataset(humpafn) + humpafn = pair['morning'][1] balloon_file_morning = xr.open_dataset(humpafn) print(ipair) if (\ @@ -467,9 +468,6 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): print('c4gli_afternoon_ldatetime 
0',c4gli_afternoon.pars.ldatetime) ipair += 1 - print(pair['morning']) - humpafn =pair['morning'][1] - print(humpafn) c4gli_morning = humppa_parser(balloon_file_morning,file_morning,date,pair['morning'][0]) print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) diff --git a/setup.py b/setup.py index 9e06a36..f3832ba 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='0.9.1', + version='0.9.3', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From 1273fb5906243e6166f247557324c310b85a9db4 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Tue, 19 Feb 2019 13:58:35 +0100 Subject: [PATCH 121/129] fixes in the advection rates and profile estimates for igra and goamazon --- class4gl/class4gl.py | 27 +- class4gl/interface/interface_new_koeppen.py | 8 +- class4gl/interface/interface_stations.py | 22 +- class4gl/interface_functions.py | 128 ++-- class4gl/interface_multi.py | 56 +- class4gl/model.py | 189 +++-- class4gl/setup/batch_setup_igra.py | 2 +- class4gl/setup/setup_bllast.py | 152 +++-- class4gl/setup/setup_goamazon.py | 147 +++- class4gl/setup/setup_goamazon_noon.py | 298 ++++---- class4gl/setup/setup_humppa.py | 148 ++-- class4gl/setup/setup_igra.py | 199 +++--- class4gl/simulations/batch_simulations.py | 3 +- class4gl/simulations/simulations.py | 720 ++++++++++---------- class4gl/simulations/simulations_iter.py | 19 +- setup.py | 2 +- 16 files changed, 1193 insertions(+), 927 deletions(-) diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py index a0de860..0173222 100644 --- a/class4gl/class4gl.py +++ b/class4gl/class4gl.py @@ -183,6 +183,7 @@ def set_pars_defaults(self): theta = 288. 
, # initial mixed-layer potential temperature [K] dtheta = 1. , # initial temperature jump at h [K] gammatheta = 0.006 , # free atmosphere potential temperature lapse rate [K m-1] + gammatheta_lower_limit = 0.002, advtheta = 0. , # advection of heat [K s-1] beta = 0.2 , # entrainment ratio for virtual heat [-] wtheta = 0.1 , # surface kinematic heat flux [K m s-1] @@ -599,16 +600,30 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): #is_valid = (air_balloon.z >= 0) # # this is an alternative pipe/numpy method # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0] - valid_indices = air_balloon.index[is_valid].values + valid_indices = air_balloon.index[is_valid] #print(valid_indices) - dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]] + dpars['Ps'] = air_balloon.p.loc[[valid_indices[0]]][0] air_balloon['t'] = air_balloon['TEMP']+273.15 air_balloon['theta'] = (air_balloon.t) * \ (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) air_balloon['thetav'] = air_balloon['theta']*(1. + 0.61 * air_balloon['q']) + + + i = 1 + t_cut_off = 2.0 + if t_cut_off is not None: + + while (air_balloon.thetav.loc[[valid_indices[0]]][0] - \ + air_balloon.thetav.loc[valid_indices[i:i+1]][0] ) > t_cut_off: + #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 + air_balloon.thetav.loc[valid_indices[0:i]] = \ + air_balloon.thetav.loc[valid_indices[i:i+1]][0] + t_cutoff + + i +=1 + if len(valid_indices) > 0: #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) @@ -635,13 +650,13 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... 
and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values + is_valid_below_h = (air_balloon.loc[valid_indices].z < dpars['h']) + valid_indices_below_h = air_balloon.loc[valid_indices].index[is_valid_below_h].values if len(valid_indices) > 1: if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() + ml_mean = air_balloon.loc[valid_indices][is_valid_below_h].mean() else: - ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() + ml_mean = air_balloon.loc[valid_indices[0:2]].mean() elif len(valid_indices) == 1: ml_mean = (air_balloon.iloc[0:1]).mean() else: diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py index a950d86..dddb816 100644 --- a/class4gl/interface/interface_new_koeppen.py +++ b/class4gl/interface/interface_new_koeppen.py @@ -124,8 +124,6 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu sns.reset_orig() - - lookup_symbols= { 'A':'equatorial', 'B':'arid', @@ -535,6 +533,7 @@ def brightness(rrggbb): filter_classess = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen)) mod = mod.loc[filter_classes] obs = obs.loc[filter_classes] + nbins=40 x, y = obs.values,mod.values @@ -702,7 +701,7 @@ def brightness(rrggbb): fig.show() koeppenlookuptable = koeppenlookuptable.sort_index() - if bool(args.show_control_parameters): + if args.show_control_parameters == 'True': pkmn_type_colors = [ @@ -797,9 +796,7 @@ def brightness(rrggbb): tempdataini = tempdataini.set_index(['source_index','STNID','dates']) - #print('hello2') index_intersect = tempdataini.index.intersection(tempdatamodstats.index) - #print('hello3') tempdataini = tempdataini.loc[index_intersect] #print('hello4') @@ -905,6 +902,7 @@ def brightness(rrggbb): #gridspec_kw=dict(height_ratios=(1, 3), 
gridspec_kw=dict(hspace=0.20,wspace=0.08,top=0.94,bottom=0.06,left=0.15,right=0.99)) + varkey = 'q' data_all['d'+varkey+'dt ['+units[varkey]+'/h]'] *= 1000. icol = 0 diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py index a5bbbac..e98bbee 100644 --- a/class4gl/interface/interface_stations.py +++ b/class4gl/interface/interface_stations.py @@ -146,8 +146,6 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu #axes_taylor[varkey] = fig.add_subplot(2,3,i+3) #print(obs.std()) - obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] - STD_OBS = obs.std() dias[varkey] = TaylorDiagram(1., srange=[0.0,1.7],fig=fig, rect=(230+i+3),label='Reference') dias[varkey].add_grid(zorder=-100.) dias[varkey]._ax.axis["left"].label.set_text(\ @@ -155,6 +153,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu i += 1 i = 1 for varkey in ['h','theta','q']: + obs = c4gldata[args.experiments.strip().split()[0]].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] + STD_OBS = obs.std() if i == 1: axes[varkey].annotate('Normalized standard deviation',\ xy= (0.05,0.36), @@ -192,7 +192,16 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu RMSE = rmse(obs,mod) BIAS = np.mean(mod) - np.mean(obs) STD = mod.std() - + + # print(STD) + # print(PR) + print(varkey,STD,STD_OBS,STD/STD_OBS,PR) + dias[varkey].add_sample(STD/STD_OBS, PR, + marker='o', ms=5, ls='', + #mfc='k', mec='k', # B&W + mfc=colors[ikey], mec=colors[ikey], # Colors + label=key,zorder=101) + fit = np.polyfit(x,y,deg=1) if varkey == 'q': @@ -261,13 +270,6 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu - # print(STD) - # print(PR) - dias[varkey].add_sample(STD/STD_OBS, PR, - marker='o', ms=5, ls='', - #mfc='k', mec='k', # B&W - mfc=colors[ikey], mec=colors[ikey], # Colors - 
label=key,zorder=100) # put ticker position, see # https://matplotlib.org/examples/ticks_and_spines/tick-locators.html diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index dd9a007..547e6f8 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -14,6 +14,7 @@ from interface_functions import * #from data_soundings import wyoming import yaml +from yaml import CLoader import glob import pandas as pd import json @@ -81,29 +82,34 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): print('index_end',index_end) buf = yaml_file.read(index_end- index_start).replace('inf','9e19').replace('nan','9e19').replace('---','') - os.system('mkdir -p '+TEMPDIR) - filebuffer = open(TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w') - filebuffer.write(buf) - filebuffer.close() - # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start)) - - if which('ruby') is None: - raise RuntimeError ('ruby is not found. Aborting...') - command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)+' ' + # # Ruby way -> fast + # os.system('mkdir -p '+TEMPDIR) + # filebuffer = open(TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start),'w') + # filebuffer.write(buf) + # filebuffer.close() + # # print("HHHEEELOOOO",filename+'.buffer.yaml'+str(index_start)) + # + # if which('ruby') is None: + # raise RuntimeError ('ruby is not found. 
Aborting...') + # command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+shortfn+".buffer.yaml."+str(index_start)+"').to_json"+'" > '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)+' ' + + # #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"' + # print(command) + # os.system(command) + # jsonstream = open(TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) + # record_dict = json.load(jsonstream) + # jsonstream.close() + # os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start)) - #command = '/apps/gent/CO7/sandybridge/software/Ruby/2.4.2-foss-2017b/bin/ruby -rjson -ryaml -e "'+"puts YAML.load(ARGF.read()).to_json"+'"' - print(command) - os.system(command) - jsonstream = open(TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) - record_dict = json.load(jsonstream) - jsonstream.close() - os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.yaml.'+str(index_start)) + record_dict = yaml.load(buf,Loader=CLoader) if mode =='model_output': modelout = class4gl() modelout.load_yaml_dict(record_dict) - os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) + + # # needed in case of Ruby + # os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) return modelout elif mode == 'model_input': @@ -112,11 +118,12 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): # datetimes are incorrectly converted to strings. 
We need to convert them # again to datetimes for key,value in record_dict['pars'].items(): + # # needed in case of ruby # we don't want the key with columns that have none values - if value is not None: - if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str): - # elif (type(value) == str): - record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z") + # if value is not None: + # if key in ['lSunrise','lSunset','datetime','ldatetime','ldatetime_daylight','datetime_daylight',]:#(type(value) == str): + # # elif (type(value) == str): + # record_dict['pars'][key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z") if (value == 0.9e19) or (value == '.9e19'): record_dict['pars'][key] = np.nan @@ -127,12 +134,13 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): for datakey,datavalue in record_dict[key].items(): record_dict[key][datakey] = [ np.nan if (x =='.9e19') else x for x in record_dict[key][datakey]] - #os.system('rm '+filename+'.buffer.json.'+str(index_start)) c4gli = class4gl_input() #print(c4gli.logger,'hello') c4gli.load_yaml_dict(record_dict) - os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) + + # # needed in case of ruby + # os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) return c4gli @@ -222,7 +230,7 @@ def get_stations(self,suffix): stations_list = [] for stations_list_file in stations_list_files: thisfile = open(stations_list_file,'r') - yamlgen = yaml.load_all(thisfile) + yamlgen = yaml.load_all(thisfile,Loader=CLoader) try: first_record = yamlgen.__next__() except: @@ -477,16 +485,15 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor print('pkl file does not exist. I generate "'+\ path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...') generate_pkl = True + elif refetch_records: + print('refetch_records flag is True. 
I regenerate "'+\ + path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...') + generate_pkl = True elif not (os.path.getmtime(path_yaml+'/'+yamlfilename) < \ os.path.getmtime(path_yaml+'/'+pklfilename)): print('pkl file older than yaml file, so I regenerate "'+\ path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...') generate_pkl = True - - if refetch_records: - print('refetch_records flag is True. I regenerate "'+\ - path_yaml+'/'+pklfilename+'" from "'+path_yaml+'/'+yamlfilename+'"...') - generate_pkl = True if not generate_pkl: records_station_chunk = pd.read_pickle(path_yaml+'/'+pklfilename) records_station = pd.concat([records_station,records_station_chunk]) @@ -496,6 +503,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor dictout = {} + #initialization (go to file position just after "---") next_record_found = False end_of_file = False while (not next_record_found) and (not end_of_file): @@ -504,39 +512,54 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor end_of_file = (linebuffer == '') next_tell = yaml_file.tell() + # loop over different yaml profile records while not end_of_file: print(' next record:',next_tell) current_tell = next_tell next_record_found = False yaml_file.seek(current_tell) - os.system('mkdir -p '+TEMPDIR) - filebuffer = open(TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w') + + # # needed for Ruby + # os.system('mkdir -p '+TEMPDIR) + # filebuffer = open(TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell),'w') + linebuffer = '' + stringbuffer = '' while ( (not next_record_found) and (not end_of_file)): - filebuffer.write(linebuffer.replace('inf','0').replace('nan','0')) + # # needed for Ruby + # filebuffer.write(linebuffer.replace('inf','0').replace('nan','0')) + stringbuffer += linebuffer linebuffer = yaml_file.readline() next_record_found = (linebuffer == '---\n') end_of_file = (linebuffer == '') - filebuffer.close() + 
# # needed for Ruby + # filebuffer.close() next_tell = yaml_file.tell() index_start = current_tell index_end = next_tell - - if which('ruby') is None: - raise RuntimeError ('ruby is not found. Aborting...') - #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) : - command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' - print(command) - - os.system(command) - #jsonoutput = subprocess.check_output(command,shell=True) - #print(jsonoutput) - #jsonstream = io.StringIO(jsonoutput) - jsonstream = open(TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)) - record = json.load(jsonstream) + # start direct yaml way -> slow + record = yaml.load(stringbuffer,Loader=CLoader) + # end direct way + + # # # start json ruby way -> much faster + # if which('ruby') is None: + # raise RuntimeError ('ruby is not found. Aborting...') + # #if ((irecord >= start) and (np.mod(irecord - start,2) == 0.) 
: + # command = 'ruby -rjson -ryaml -e "'+"puts YAML.load_file('"+TEMPDIR+'/'+yamlfilename+".buffer.yaml."+str(current_tell)+"').to_json"+'" > '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)+' ' + # print(command) + # + # os.system(command) + # #jsonoutput = subprocess.check_output(command,shell=True) + # #print(jsonoutput) + # #jsonstream = io.StringIO(jsonoutput) + # jsonstream = open(TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)) + # record = json.load(jsonstream) + # # end json ruby way + + dictouttemp = {} for key,value in record['pars'].items(): # we don't want the key with columns that have none values @@ -545,22 +568,25 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor if (type(value) in regular_numeric_types): dictouttemp[key] = value elif key in ['lSunrise','lSunset','datetime','ldatetime','datetime_daylight','datetime_daylight','ldatetime_daylight','ldatetime_daylight']:#(type(value) == str): - #print (key,value) # dictouttemp[key] = dt.datetime.strptime(value[:-6],"%Y-%m-%d %H:%M:%S") - dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z") + dictouttemp[key] = value + # # needed for Ruby + # dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z") + # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!! 
dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC) recordindex = record['index'] dictouttemp['chunk'] = chunk dictouttemp['index_start'] = index_start dictouttemp['index_end'] = index_end - os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)) + # os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.json.'+str(current_tell)) for key,value in dictouttemp.items(): if key not in dictout.keys(): dictout[key] = {} dictout[key][(STNID,chunk,recordindex)] = dictouttemp[key] print(' obs record registered') - jsonstream.close() - os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell)) + # # needed for Ruby + # jsonstream.close() + # os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell)) records_station_chunk = pd.DataFrame.from_dict(dictout) records_station_chunk.index.set_names(('STNID','chunk','index'),inplace=True) print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\ diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index 8643964..1795592 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -59,7 +59,7 @@ os.system('module load Ruby') class c4gl_interface_soundings(object): - def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=False,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False,tendencies_revised=False): + def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=False,end_state=True,refetch_stations=True,inputkeys = ['cveg','wg','w2','cc','sp','wwilt','Tsoil','T2','z0m','alpha','LAI',],obs_filter=False,tendencies_revised=False): """ creates an interactive interface for analysing class4gl experiments INPUT: @@ -83,6 +83,7 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal self.tendencies_revised = tendencies_revised self.path_exp = path_exp self.path_forcing = path_forcing + self.end_state = 
end_state self.exp_files = glob.glob(self.path_exp+'/?????.yaml') # # get the list of stations @@ -122,7 +123,10 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal subset='ini',\ refetch_records=refetch_records ) + print(self.frames['stats']['records_all_stations_ini']) # get its records and load it into the stats frame + + self.frames['stats']['records_all_stations_end_mod'] =\ get_records(self.frames['stats']['stations'].table,\ self.path_exp,\ @@ -138,9 +142,15 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal subset='end',\ refetch_records=refetch_records ) + if self.end_state: + if len(self.frames['stats']['records_all_stations_end_mod']) == 0: + raise IOError ('No end state records found. If you want to ignore end states, please use the option end_state = False') + self.frames['stats']['records_all_stations_end_mod'].index = \ + self.frames['stats']['records_all_stations_ini'].index - self.frames['stats']['records_all_stations_end_mod'].index = \ - self.frames['stats']['records_all_stations_ini'].index + else: + self.frames['stats']['records_all_stations_end_mod'] = \ + self.frames['stats']['records_all_stations_ini'] if len(self.frames['stats']['records_all_stations_ini']) ==0: @@ -254,22 +264,21 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal print('exclude exceptional observations') print('exclude unrealistic model output -> should be investigated!') valid = (\ - # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.250) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > 0.25000) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 1.8000) & + #(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.00) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25000) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 3.0000) & # 
(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 350.) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 400.) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.00055) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.00055) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -0.0006) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < 0.0003) & # # filter 'extreme' model output -> should be investigated! - # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & - # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & - # (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & # (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) 
& # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & @@ -390,15 +399,28 @@ def update_station(self): STNID = self.frames['profiles']['STNID'] chunk = self.frames['profiles']['current_record_chunk'] + print(chunk) if 'current_station_file_ini' in self.frames['profiles'].keys(): self.frames['profiles']['current_station_file_ini'].close() + + + fn_ini = format(STNID,"05d")+'_ini.yaml' + if not os.path.isfile(self.path_exp+'/'+fn_ini): + fn_ini = format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml' + self.frames['profiles']['current_station_file_ini'] = \ - open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_ini.yaml','r') + open(self.path_exp+'/'+fn_ini,'r') if 'current_station_file_end_mod' in self.frames['profiles'].keys(): self.frames['profiles']['current_station_file_end_mod'].close() - self.frames['profiles']['current_station_file_end_mod'] = \ - open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r') + + + if self.end_state: + self.frames['profiles']['current_station_file_end_mod'] = \ + open(self.path_exp+'/'+format(STNID,"05d")+'_'+str(chunk)+'_end.yaml','r') + else: + self.frames['profiles']['current_station_file_end_mod'] = \ + open(self.path_exp+'/'+fn_ini,'r') if 'current_station_file_end_obs' in self.frames['profiles'].keys(): self.frames['profiles']['current_station_file_end_obs'].close() if self.path_forcing is not None: @@ -546,8 +568,8 @@ def update_record(self): self.frames['profiles']['record_yaml_end_obs'] = \ get_record_yaml( self.frames['profiles']['current_station_file_end_obs'], \ - record_end.index_start, - record_end.index_end, + int(record_end.index_start), + int(record_end.index_end), mode='model_input') diff --git a/class4gl/model.py b/class4gl/model.py index 21e86c3..87dbd99 100644 --- a/class4gl/model.py +++ b/class4gl/model.py @@ -130,14 +130,31 @@ def __init__(self, model_input = None,debug_level=None): # correct pressure 
of levels according to surface pressure # error (so that interpolation is done in a consistent way) - p_e = self.input.Ps - self.input.sp - for irow in self.input.air_ac.index[::-1]: - self.input.air_ac.p.iloc[irow] =\ - self.input.air_ac.p.iloc[irow] + p_e - p_e = p_e -\ - (self.input.air_ac.p.iloc[irow]+p_e)/\ - self.input.air_ac.p.iloc[irow] *\ - self.input.air_ac.delpdgrav.iloc[irow]*grav + p_prev = self.input.sp + p_corr_prev = self.input.Ps + #p_corr_prev = self.input.Ps - self.input.sp + air_ac_index = self.input.air_ac.index + for irow,indexrow in list(enumerate(air_ac_index))[::-1]: + p_corr =self.input.air_ac.p.iloc[indexrow]/p_prev*p_corr_prev + p_prev = self.input.air_ac.p.iloc[indexrow] + + self.input.air_ac.p.iloc[indexrow] = p_corr + p_corr_prev = p_corr + + # p_old = self.input.air_ac.p.iloc[indexrow] + + # p_new = self.input.air_ac.p.iloc[indexrow] + p_corr + + # p_corr_next = np.log(self.input.air_ac.p.iloc[air_ac_index[irow-1]]/\ + # self.input.air_ac.p.iloc[indexrow]*p_corr) + # self.input.air_ac.p.iloc[irow]=p_new + + # self.input.air_ac.p.iloc[irow] =\ + # self.input.air_ac.p.iloc[irow] + p_e + # p_e = p_e -\ + # (self.input.air_ac.p.iloc[irow]+p_e)/\ + # self.input.air_ac.p.iloc[irow] *\ + # self.input.air_ac.delpdgrav.iloc[irow]*grav @@ -268,6 +285,8 @@ def init(self): self.dtheta = self.input.dtheta # initial temperature jump at h [K] self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1] + self.gammatheta_lower_limit = \ + self.input.gammatheta_lower_limit # free atmosphere potential temperature lapse rate lower limit to avoid crashes [K m-1] self.advtheta = self.input.advtheta # advection of heat [K s-1] self.beta = self.input.beta # entrainment ratio for virtual heat [-] self.wtheta = self.input.wtheta # surface kinematic heat flux [K m s-1] @@ -454,6 +473,7 @@ def init(self): self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar}) + # gammatheta, gammaq, gammau, gammav are updated here. 
self.__dict__['gamma'+var] = \ self.air_ap['gamma'+var][np.where(self.h >= \ self.air_ap.z)[0][-1]] @@ -509,35 +529,54 @@ def init(self): found. We just take the bottom one.") in_ml = self.air_ac.index == (len(self.air_ac) - 1) - for var in ['t','q','u','v']: + for var in ['theta','q','u','v']: # calculation of the advection variables for the mixed layer # we weight by the hydrostatic thickness of each layer and # divide by the total thickness - self.__dict__['adv'+var] = \ - ((self.air_ac['adv'+var+'_x'][in_ml] \ - + \ - self.air_ac['adv'+var+'_y'][in_ml])* \ - self.air_ac['delpdgrav'][in_ml]).sum()/ \ - self.air_ac['delpdgrav'][in_ml].sum() + if var == 'theta': + self.__dict__['adv'+var] = \ + ((self.air_ac['advt_x'][in_ml] \ + + \ + self.air_ac['advt_y'][in_ml])* \ + self.air_ac['delpdgrav'][in_ml]).sum()/ \ + self.air_ac['delpdgrav'][in_ml].sum() + else: + self.__dict__['adv'+var] = \ + ((self.air_ac['adv'+var+'_x'][in_ml] \ + + \ + self.air_ac['adv'+var+'_y'][in_ml])* \ + self.air_ac['delpdgrav'][in_ml]).sum()/ \ + self.air_ac['delpdgrav'][in_ml].sum() # calculation of the advection variables for the profile above # (lowest 3 values are not used by class) self.air_ap = self.air_ap.assign(**{'adv'+var : 0.}) - self.air_ap['adv'+var] = \ - np.interp(self.air_ap.p,\ - self.air_ac.p,\ - self.air_ac['adv'+var+'_x']) \ - + \ - np.interp(self.air_ap.p, \ - self.air_ac.p, \ - self.air_ac['adv'+var+'_y']) + + if var == 'theta': + self.air_ap['adv'+var] = \ + np.interp(self.air_ap.p,\ + self.air_ac.p,\ + self.air_ac['advt_x']) \ + + \ + np.interp(self.air_ap.p, \ + self.air_ac.p, \ + self.air_ac['advt_y']) + else: + self.air_ap['adv'+var] = \ + np.interp(self.air_ap.p,\ + self.air_ac.p,\ + self.air_ac['adv'+var+'_x']) \ + + \ + np.interp(self.air_ap.p, \ + self.air_ac.p, \ + self.air_ac['adv'+var+'_y']) # as an approximation, we consider that advection of theta in the # mixed layer is equal to advection of t. 
This is a sufficient # approximation since theta and t are very similar at the surface # pressure. - self.__dict__['advtheta'] = self.__dict__['advt'] + # self.__dict__['advtheta'] = self.__dict__['advt'] # # # STRANGE, THIS DOESN'T GIVE REALISTIC VALUES, IT NEEDS TO BE @@ -559,16 +598,22 @@ def init(self): # self.air_ach['wrho']) # self.ws = self.air_ap.w.iloc[1] - if (self.sw_ac is not None) and ('w' in self.sw_ac): - self.air_ap = self.air_ap.assign(wp = 0.) - self.air_ap['wp'] = np.interp(self.air_ap.p, \ - self.air_ac.p, \ - self.air_ac['wp']) + if (self.sw_ac is not None) and \ + (('w' in self.sw_ac) or ('adv' in self.sw_ac)): self.air_ap = self.air_ap.assign(R = 0.) self.air_ap['R'] = (self.Rd*(1.-self.air_ap.q) + \ self.Rv*self.air_ap.q) self.air_ap = self.air_ap.assign(rho = 0.) + self.air_ap['t'] = \ + self.air_ap.theta * \ + (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp) self.air_ap['rho'] = self.air_ap.p /self.air_ap.R/ self.air_ap.t + + if (self.sw_ac is not None) and ('w' in self.sw_ac): + self.air_ap = self.air_ap.assign(wp = 0.) + self.air_ap['wp'] = np.interp(self.air_ap.p, \ + self.air_ac.p, \ + self.air_ac['wp']) self.air_ap = self.air_ap.assign(w = 0.) 
self.air_ap['w'] = -self.air_ap['wp'] /self.air_ap['rho']/self.g @@ -625,6 +670,7 @@ def init(self): self.tstart = self.input.tstart # time of the day [-] self.cc = self.input.cc # cloud cover fraction [-] self.Swin = None # incoming short wave radiation [W m-2] + self.Swin_cs = None # incoming short wave radiation clearsky [W m-2] self.Swout = None # outgoing short wave radiation [W m-2] self.Lwin = None # incoming long wave radiation [W m-2] self.Lwout = None # outgoing long wave radiation [W m-2] @@ -1070,7 +1116,7 @@ def integrate_mixed_layer(self): if (self.sw_ac is not None) and ('adv' in self.sw_ac): - for var in ['t','q','u','v']: + for var in ['theta','q','u','v']: #if ((self.z_pro is not None) and (self.__dict__['adv'+var+'_pro'] is not None)): # take into account advection for the whole profile @@ -1101,9 +1147,9 @@ def integrate_mixed_layer(self): self.Rv*self.air_ap.q) # air_aptheta_old = pd.Series(self.air_ap['theta']) - self.air_ap['theta'] = \ - self.air_ap.t * \ - (self.Ps/self.air_ap.p)**(self.air_ap['R']/self.cp) + self.air_ap['t'] = \ + self.air_ap.theta * \ + (self.air_ap.p/self.Ps)**(self.air_ap['R']/self.cp) if (self.sw_ac is not None) and ('w' in self.sw_ac): zidx_first = np.where(self.air_ap.z > self.h)[0][0] self.air_ap.z[zidx_first:] = self.air_ap.z[zidx_first:] + \ @@ -1198,14 +1244,14 @@ def integrate_mixed_layer(self): # quantities of moisture and temperature. The limit is set by trial # and error. The numerics behind the crash should be investigated # so that a cleaner solution can be provided. 
- gammatheta_lower_limit = 0.002 + # self.gammatheta_lower_limit = 0.002 while ((itop in range(0,1)) or (itop != ibottom)): theta_mean = air_ap_tail_orig.theta.iloc[ibottom:(itop+1)].mean() z_mean = air_ap_tail_orig.z.iloc[ibottom:(itop+1)].mean() if ( #(z_mean > (z_low+0.2)) and \ #(theta_mean > (theta_low+0.02) ) and \ - (((theta_mean - theta_low)/(z_mean - z_low)) > gammatheta_lower_limit)) or \ + (((theta_mean - theta_low)/(z_mean - z_low)) > self.gammatheta_lower_limit)) or \ (itop >= (len(air_ap_tail_orig)-1)) \ : @@ -1264,29 +1310,48 @@ def integrate_mixed_layer(self): if in_ml.sum() == 0: warnings.warn(" no circulation points in the mixed layer found. We just take the bottom one.") in_ml = self.air_ac.index == (len(self.air_ac) - 1) - for var in ['t','q','u','v']: + for var in ['theta','q','u','v']: # calculation of the advection variables for the mixed-layer # these will be used for the next timestep # Warning: w is excluded for now. - self.__dict__['adv'+var] = \ - ((self.air_ac['adv'+var+'_x'][in_ml] \ - + \ - self.air_ac['adv'+var+'_y'][in_ml])* \ - self.air_ac['delpdgrav'][in_ml]).sum()/ \ - self.air_ac['delpdgrav'][in_ml].sum() + if var == 'theta': + self.__dict__['adv'+var] = \ + ((self.air_ac['advt_x'][in_ml] \ + + \ + self.air_ac['advt_y'][in_ml])* \ + self.air_ac['delpdgrav'][in_ml]).sum()/ \ + self.air_ac['delpdgrav'][in_ml].sum() + else: + self.__dict__['adv'+var] = \ + ((self.air_ac['adv'+var+'_x'][in_ml] \ + + \ + self.air_ac['adv'+var+'_y'][in_ml])* \ + self.air_ac['delpdgrav'][in_ml]).sum()/ \ + self.air_ac['delpdgrav'][in_ml].sum() # calculation of the advection variables for the profile above # the mixed layer (also for the next timestep) - self.air_ap['adv'+var] = \ - np.interp(self.air_ap.p,\ - self.air_ac.p,\ - self.air_ac['adv'+var+'_x']) \ - + \ - np.interp(self.air_ap.p,\ - self.air_ac.p, \ - self.air_ac['adv'+var+'_y']) + + if var == 'theta': + self.air_ap['adv'+var] = \ + np.interp(self.air_ap.p,\ + self.air_ac.p,\ + 
self.air_ac['advt_x']) \ + + \ + np.interp(self.air_ap.p,\ + self.air_ac.p, \ + self.air_ac['advt_y']) + else: + self.air_ap['adv'+var] = \ + np.interp(self.air_ap.p,\ + self.air_ac.p,\ + self.air_ac['adv'+var+'_x']) \ + + \ + np.interp(self.air_ap.p,\ + self.air_ac.p, \ + self.air_ac['adv'+var+'_y']) # if var == 't': # print(self.air_ap['adv'+var]) # stop @@ -1296,7 +1361,7 @@ def integrate_mixed_layer(self): # approximation since theta and t are very similar at the surface # pressure. - self.__dict__['advtheta'] = self.__dict__['advt'] + # self.__dict__['advtheta'] = self.__dict__['advt'] if (self.sw_ac is not None) and ('w' in self.sw_ac): # update the vertical wind profile @@ -1364,6 +1429,7 @@ def integrate_mixed_layer(self): # Based on the above, update the gamma value at the mixed-layer # top + # gammatheta, gammaq, gammau, gammav are updated here. self.__dict__['gamma'+var] = self.air_ap['gamma'+var][np.where(self.h >= self.air_ap.z)[0][-1]] @@ -1378,6 +1444,11 @@ def run_radiation(self): Tr = (0.6 + 0.2 * sinlea) * (1. - 0.4 * self.cc) self.Swin = self.S0 * Tr * sinlea + self.Swin_cs = self.S0 * (0.6 + 0.2 * sinlea) * sinlea + + # Swin/Swin_cs = (1. 
- 0.4*self.cc) + # self.cc = (1.-Swin/Swin_cs)/0.4 + self.Swout = self.alpha * self.S0 * Tr * sinlea @@ -1814,6 +1885,7 @@ def store(self): self.out.Rib[t] = self.Rib self.out.Swin[t] = self.Swin + self.out.Swin_cs[t] = self.Swin_cs self.out.Swout[t] = self.Swout self.out.Lwin[t] = self.Lwin self.out.Lwout[t] = self.Lwout @@ -1830,6 +1902,12 @@ def store(self): self.out.LEref[t] = self.LEref self.out.G[t] = self.G + self.out.ws[t] = self.ws + self.out.advtheta[t] = self.advtheta + self.out.advu[t] = self.advu + self.out.advv[t] = self.advv + self.out.advq[t] = self.advq + self.out.zlcl[t] = self.lcl self.out.RH_h[t] = self.RH_h @@ -1934,6 +2012,7 @@ def exitmodel(self): del(self.tstart) del(self.Swin) + del(self.Swin_cs) del(self.Swout) del(self.Lwin) del(self.Lwout) @@ -2068,6 +2147,7 @@ def __init__(self, tsteps): # radiation variables self.Swin = np.zeros(tsteps) # incoming short wave radiation [W m-2] + self.Swin_cs = np.zeros(tsteps) # incoming short wave radiation clearsky [W m-2] self.Swout = np.zeros(tsteps) # outgoing short wave radiation [W m-2] self.Lwin = np.zeros(tsteps) # incoming long wave radiation [W m-2] self.Lwout = np.zeros(tsteps) # outgoing long wave radiation [W m-2] @@ -2085,6 +2165,13 @@ def __init__(self, tsteps): self.LEref = np.zeros(tsteps) # reference evaporation at rs = rsmin / LAI [W m-2] self.G = np.zeros(tsteps) # ground heat flux [W m-2] + self.ws = np.zeros(tsteps) + self.advtheta = np.zeros(tsteps) + self.advu = np.zeros(tsteps) + self.advv = np.zeros(tsteps) + self.advq = np.zeros(tsteps) + + # Mixed-layer top variables self.zlcl = np.zeros(tsteps) # lifting condensation level [m] self.RH_h = np.zeros(tsteps) # mixed-layer top relative humidity [-] diff --git a/class4gl/setup/batch_setup_igra.py b/class4gl/setup/batch_setup_igra.py index 5a33bb9..4f1e432 100644 --- a/class4gl/setup/batch_setup_igra.py +++ b/class4gl/setup/batch_setup_igra.py @@ -26,7 +26,7 @@ parser = argparse.ArgumentParser() 
#parser.add_argument('--timestamp') parser.add_argument('--exec') # chunk simulation script -parser.add_argument('--pbs_string',default=' -l walltime=30:0:0') +parser.add_argument('--pbs_string',default=' -l walltime=50:0:0') parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/') parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') # parser.add_argument('--first_YYYYMMDD',default="19810101") diff --git a/class4gl/setup/setup_bllast.py b/class4gl/setup/setup_bllast.py index 0de6fc4..bf779d6 100644 --- a/class4gl/setup/setup_bllast.py +++ b/class4gl/setup/setup_bllast.py @@ -122,6 +122,14 @@ def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) valid_indices = air_balloon.index[is_valid].values + i = 1 + while (air_balloon.thetav.iloc[valid_indices[0]] - \ + air_balloon.thetav.iloc[valid_indices[i]] ) > 0.5: + #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 + air_balloon.thetav.iloc[valid_indices[0:i]] = \ + air_balloon.thetav.iloc[valid_indices[i]] + 0.5 + + i +=1 air_ap_mode='b' @@ -151,11 +159,11 @@ def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... 
and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values + is_valid_below_h = (air_balloon.iloc[valid_indices].z < dpars['h']) + valid_indices_below_h = air_balloon.iloc[valid_indices].index[is_valid_below_h].values if len(valid_indices) > 1: if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() + ml_mean = air_balloon.iloc[valid_indices][is_valid_below_h].mean() else: ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() elif len(valid_indices) == 1: @@ -423,7 +431,7 @@ def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' -file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') +file_morning = open(path_soundings+format(current_station.name,'05d')+'_ini.yaml','w') for date,pair in HOUR_FILES.items(): print(pair['morning']) humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['morning'][1] @@ -435,7 +443,7 @@ def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) file_morning.close() -file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') +file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_end.yaml','w') for date,pair in HOUR_FILES.items(): humpafn ='/user/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/BLLAST/MODEM Radiosoundings/'+pair['afternoon'][1] balloon_file = open(humpafn,'r',encoding='latin-1') @@ -471,82 +479,82 @@ def bllast_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): records_morning = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='morning', + subset='ini', refetch_records=True, ) print('records_morning_ldatetime',records_morning.ldatetime) records_afternoon = 
get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='afternoon', + subset='end', refetch_records=True, ) -# align afternoon records with noon records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date] -records_afternoon.index = records_morning.index -path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/' - -os.system('mkdir -p '+path_exp) -file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') -file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') -file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') -file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') - -for (STNID,chunk,index),record_morning in records_morning.iterrows(): - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='ini') - #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) - - - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='ini') - - c4gli_morning.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds())}) - - - c4gli_morning.pars.sw_ac = [] - c4gli_morning.pars.sw_ap = True - c4gli_morning.pars.sw_lit = False - c4gli_morning.dump(file_ini) - - c4gl = class4gl(c4gli_morning) - c4gl.run() - - c4gl.dump(file_mod,\ - include_input=False,\ - timeseries_only=timeseries_only) -file_ini.close() -file_mod.close() -file_morning.close() -file_afternoon.close() - -records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='ini', - refetch_records=True, - ) -records_mod = get_records(pd.DataFrame([current_station]),\ - 
path_exp,\ - subset='mod', - refetch_records=True, - ) - -records_mod.index = records_ini.index - -# align afternoon records with initial records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] -records_afternoon.index = records_ini.index +# # align afternoon records with noon records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date] +# records_afternoon.index = records_morning.index +# path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/' +# +# os.system('mkdir -p '+path_exp) +# file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') +# file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') +# file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') +# file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') +# +# for (STNID,chunk,index),record_morning in records_morning.iterrows(): +# record_afternoon = records_afternoon.loc[(STNID,chunk,index)] +# +# c4gli_morning = get_record_yaml(file_morning, +# record_morning.index_start, +# record_morning.index_end, +# mode='ini') +# #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) +# +# +# c4gli_afternoon = get_record_yaml(file_afternoon, +# record_afternoon.index_start, +# record_afternoon.index_end, +# mode='ini') +# +# c4gli_morning.update(source='pairs',pars={'runtime' : \ +# int((c4gli_afternoon.pars.datetime_daylight - +# c4gli_morning.pars.datetime_daylight).total_seconds())}) +# +# +# c4gli_morning.pars.sw_ac = [] +# c4gli_morning.pars.sw_ap = True +# c4gli_morning.pars.sw_lit = False +# c4gli_morning.dump(file_ini) +# +# c4gl = class4gl(c4gli_morning) +# c4gl.run() +# +# c4gl.dump(file_mod,\ +# include_input=False,\ +# 
timeseries_only=timeseries_only) +# file_ini.close() +# file_mod.close() +# file_morning.close() +# file_afternoon.close() +# +# records_ini = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='ini', +# refetch_records=True, +# ) +# records_mod = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='mod', +# refetch_records=True, +# ) +# +# records_mod.index = records_ini.index +# +# # align afternoon records with initial records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] +# records_afternoon.index = records_ini.index diff --git a/class4gl/setup/setup_goamazon.py b/class4gl/setup/setup_goamazon.py index 9716f69..18f5612 100644 --- a/class4gl/setup/setup_goamazon.py +++ b/class4gl/setup/setup_goamazon.py @@ -23,7 +23,7 @@ Rv = 461.5 # gas constant for moist air [J kg-1 K-1] epsilon = Rd/Rv # or mv/md -path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/' +path_soundings_in = '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/radio_all/' def replace_iter(iterable, search, replace): for value in iterable: @@ -70,19 +70,50 @@ def replace_iter(iterable, search, replace): HOUR_FILES = {} for iDT, DT in enumerate(DTS): morning_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*cdf') - if len(possible_files)>0: - morning_file= possible_files[0] + possible_files_morning =\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.11??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.10??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.09??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.08??00.*cdf')+\ + 
glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.12??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.13??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.14??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.15??00.*cdf') + # glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.07??00.*cdf')+\ + # glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.06??00.*cdf')+\ + # glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*cdf')+\ + if len(possible_files_morning)>0: + ix = 0 + while ((ix < (len(possible_files_morning))) and (morning_file is None)): + # print (xr.open_dataset(possible_files_morning[ix]).pres.shape) + # print(xr.open_dataset(possible_files_morning[ix]).pres.shape[0] >= 10) + if (xr.open_dataset(possible_files_morning[ix]).pres.shape[0] >= 500) and\ + (possible_files_morning[ix].split('/')[-1] != 'maosondewnpnM1.b1.20150103.115200.custom.cdf') and \ + (possible_files_morning[ix].split('/')[-1] != 'maosondewnpnM1.b1.20150103.115200.custom.cdf'): + morning_file = possible_files_morning[ix] + ix +=1 + afternoon_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*cdf') - if len(possible_files)>0: - afternoon_file= possible_files[0] + possible_files_afternoon =\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.20??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.19??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.18??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.17??00.*cdf')+\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.16??00.*cdf') + if len(possible_files_afternoon)>0: + ix = 0 + while ((ix < 
(len(possible_files_afternoon))) and (afternoon_file is None)): + # print (xr.open_dataset(possible_files_afternoon[ix]).pres.shape) + # print(xr.open_dataset(possible_files_afternoon[ix]).pres.shape[0] >= 10) + if (xr.open_dataset(possible_files_afternoon[ix]).pres.shape[0] >= 300) and \ + (possible_files_afternoon[ix].split('/')[-1] != '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/maosondewnpnM1.b1.20140823.143600.cdf'): + afternoon_file = possible_files_afternoon[ix] + ix +=1 if (morning_file is not None) and (afternoon_file is not None): - HOUR_FILES[DT] = {'morning':[5.5,morning_file], - 'afternoon':[17.5,afternoon_file]} - -print(HOUR_FILES) + HOUR_FILES[DT] = {'morning': [int(morning_file.split('/')[-1].split('.')[3])/10000.,morning_file], + 'afternoon':[ int(afternoon_file.split('/')[-1].split('.')[3])/10000. ,afternoon_file]} + print(HOUR_FILES[DT]) # HOUR_FILES = \ # { @@ -162,12 +193,13 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): if 'alt' in xrin: air_balloon['z'] = xrin.alt.values else: + g = 9.81 # gravity acceleration [m s-2] air_balloon['z'] = 0. for irow,row in air_balloon.iloc[1:].iterrows(): air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \ - 2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \ + 2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow])/g * \ (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1]) - + for varname,lfunction in rowmatches.items(): air_balloon[varname] = lfunction(air_balloon) @@ -189,15 +221,38 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1) air_balloon = air_balloon.iloc[:].reset_index().drop(['index'],axis=1) + # if air_balloon.z.max() > 100000.: + # air_balloon.z = air_balloon.z/10. 
+ is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) valid_indices = air_balloon.index[is_valid].values air_ap_mode='b' + i = 1 + while (air_balloon.thetav.iloc[valid_indices[0]] - \ + air_balloon.thetav.iloc[valid_indices[i]] ) > 0.5: + #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 + air_balloon.thetav.iloc[valid_indices[0:i]] = \ + air_balloon.thetav.iloc[valid_indices[i]] + 0.5 + + i +=1 + + + # while ((len(valid_indices) > 10) and + # ((air_balloon.theta.iloc[valid_indices[0]] - + # air_balloon.theta.iloc[valid_indices[1]]) > 0.5)): + # valid_indices = valid_indices[1:] + + #theta_vs_first_inconsistent = True + # while theta_vs_first_inconsistent: + if len(valid_indices) > 0: - print(air_balloon.z.shape,air_balloon.thetav.shape,) + air_balloon_temp = air_balloon.iloc[valid_indices] + print(air_balloon_temp) + print(air_balloon_temp.z.shape,air_balloon_temp.thetav.shape,) dpars['h'],dpars['h_u'],dpars['h_l'] =\ - blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) + blh(air_balloon_temp.z.values,air_balloon_temp.thetav.values,air_balloon_temp.WSPD.values) dpars['h_b'] = np.max((dpars['h'],10.)) dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height @@ -221,11 +276,11 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... 
and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values + is_valid_below_h = (air_balloon.iloc[valid_indices].z < dpars['h']) + valid_indices_below_h = air_balloon.iloc[valid_indices].index[is_valid_below_h].values if len(valid_indices) > 1: if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() + ml_mean = air_balloon.iloc[valid_indices][is_valid_below_h].mean() else: ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() elif len(valid_indices) == 1: @@ -235,15 +290,26 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): temp.iloc[0] = np.nan ml_mean = temp + + dpars['theta']= ml_mean.theta dpars['q'] = ml_mean.q dpars['u'] = ml_mean.u dpars['v'] = ml_mean.v + # theta_vs_first_inconsistent = \ + # ((air_balloon.theta.iloc[valid_indices[0]] - air_balloon.theta.iloc[valid_indices[1]]) > 0.2) + # theta_vs_first_inconsistent = \ + # ((air_balloon.theta.iloc[valid_indices[0]] - dpars['theta']) > 0.1) + # if theta_vs_first_inconsistent: + # valid_indices = valid_indices[1:] + # print("warning! too large difference between near surface value and abl value of theta. I'm taking the next one as near surface vlue") else: dpars['theta'] = np.nan dpars['q'] = np.nan dpars['u'] = np.nan dpars['v'] = np.nan + # theta_bl_inconsistent = False + air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns) # All other data points above the mixed-layer fit @@ -343,8 +409,14 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): dpars['lon'] = 0. 
# this is the real longitude that will be used to extract ground data - dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour) - dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=+4) + # dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour) + # dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=+4) + + + dpars['datetime'] = ldate+dt.timedelta(hours=lhour) + dpars['ldatetime'] = dpars['datetime'] + dt.timedelta(hours=dpars['longitude']/360.*24.) + + dpars['doy'] = dpars['datetime'].timetuple().tm_yday dpars['SolarAltitude'] = \ @@ -409,6 +481,9 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): air_balloon[column] = air_balloon[column].round(decimal) air_ap[column] = air_ap[column].round(decimal) + + + dpars['gammatheta_lower_limit'] = 0.0001 updateglobal = False if c4gli is None: c4gli = class4gl_input() @@ -427,7 +502,9 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # if profile_ini: # c4gli.runtime = 10 * 3600 - c4gli.dump(file_sounding) + # if not ((dpars['ldatetime'].hour <=12) or\ + # ((dpars['lSunset'].hour - dpars['ldatetime'].hour) >= (2.))): + # c4gli = None # if profile_ini: # c4gl = class4gl(c4gli) @@ -447,30 +524,38 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' -path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GOAMAZON2/' +path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GOAMAZON6/' +if os.path.isdir(path_soundings): + print("Warning, I'm removing "+path_soundings+" in 10 seconds. 
Press ctrl-c to cancel") + os.system('rm '+path_soundings) file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_end.yaml','w') file_morning = open(path_soundings+format(current_station.name,'05d')+'_ini.yaml','w') ipair = 0 for date,pair in HOUR_FILES.items(): + print(date,ipair,pair) humpafn = pair['afternoon'][1] balloon_file_afternoon = xr.open_dataset(humpafn) humpafn = pair['morning'][1] balloon_file_morning = xr.open_dataset(humpafn) - print(ipair) if (\ (balloon_file_morning.pres.shape[0] > 10) and \ (balloon_file_afternoon.pres.shape[0] > 10)\ ): - + print('filename',pair['afternoon'][1],date,pair['afternoon'][0]) c4gli_afternoon = humppa_parser(balloon_file_afternoon,file_afternoon,date,pair['afternoon'][0]) - print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime) ipair += 1 + - + print('filename',pair['morning'][1],date,pair['afternoon'][0]) c4gli_morning = humppa_parser(balloon_file_morning,file_morning,date,pair['morning'][0]) - print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) + if (c4gli_morning is not None) and (c4gli_afternoon is not None): + print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) + print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime) + c4gli_morning.dump(file_morning) + c4gli_afternoon.dump(file_afternoon) + print(ipair) file_afternoon.close() @@ -503,15 +588,15 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): records_morning = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='ini', - refetch_records=True, + subset='ini',\ + refetch_records=True,\ ) print('records_morning_ldatetime',records_morning.ldatetime) records_afternoon = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='end', - refetch_records=True, + subset='end',\ + refetch_records=True,\ ) # align afternoon records with noon records, and set same index diff --git a/class4gl/setup/setup_goamazon_noon.py 
b/class4gl/setup/setup_goamazon_noon.py index c99e913..efd9333 100644 --- a/class4gl/setup/setup_goamazon_noon.py +++ b/class4gl/setup/setup_goamazon_noon.py @@ -62,38 +62,31 @@ def replace_iter(iterable, search, replace): 'Range[m]', ] -#DTSTART = dt.datetime(2014,1,1,0,0,0,0,pytz.UTC) -#DTEND = dt.datetime(2015,5,16,0,0,0,0,pytz.UTC) -#DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))] - -DTS = [dt.datetime(2014,11,15,0,0,0,0,pytz.UTC), - dt.datetime(2014,12,29,0,0,0,0,pytz.UTC), - dt.datetime(2015,1,5,0,0,0,0,pytz.UTC), - dt.datetime(2015,5,7,0,0,0,0,pytz.UTC) - ] - - - +DTSTART = dt.datetime(2014,9,1,0,0,0,0,pytz.UTC) +DTEND = dt.datetime(2014,10,1,0,0,0,0,pytz.UTC) +DTS = [DTSTART+dt.timedelta(days=day) for day in range(0, int((DTEND-DTSTART).total_seconds()/3600./24.))] HOUR_FILES = {} for iDT, DT in enumerate(DTS): morning_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.05??00.*.cdf') - if len(possible_files)>0: - morning_file= possible_files[0] - afternoon_file = None - possible_files = glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.12??00.*.cdf')+\ - glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.11??00.*.cdf')+\ - glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.10??00.*.cdf') - if len(possible_files)>0: - afternoon_file= possible_files[-1] - hour_afternoon = int(afternoon_file[-17:-15])+float(afternoon_file[-15:-13])/60. 
- if (morning_file is not None) and (afternoon_file is not None): - HOUR_FILES[DT] = {'morning':[5.5,morning_file], - 'afternoon':[hour_afternoon,afternoon_file]} - -print(HOUR_FILES) + possible_files_morning =\ + glob.glob(path_soundings_in+'/maosondewnpnM1.b1.'+DT.strftime("%Y%m%d")+'.23??00.*cdf') + if len(possible_files_morning)>0: + ix = 0 + while ((ix < (len(possible_files_morning))) and (morning_file is None)): + # print (xr.open_dataset(possible_files_morning[ix]).pres.shape) + # print(xr.open_dataset(possible_files_morning[ix]).pres.shape[0] >= 10) + if (xr.open_dataset(possible_files_morning[ix]).pres.shape[0] >= 500) : + # and\ + # (possible_files_morning[ix] != '/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/GOAMAZON/Radiosounding_ARM/maosondewnpnM1.b1.20150103.115200.custom.cdf'): + morning_file = possible_files_morning[ix] + ix +=1 + + + if (morning_file is not None): + HOUR_FILES[DT] = {'morning': [int(morning_file.split('/')[-1].split('.')[3])/10000.,morning_file], } + print(HOUR_FILES[DT]) # HOUR_FILES = \ # { @@ -127,8 +120,6 @@ def efrom_rh100_T(rh100,T): def qfrom_e_p(e,p): return epsilon * e/(p - (1.-epsilon)*e) - - def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): print(balloon_file) @@ -157,38 +148,37 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # header=None, # names = columns, # na_values='-----') - - - rowmatches = { + print(air_balloon['p'][0]) + varcalc = { 'R' : lambda x: (Rd*(1.-x.q) + Rv*x.q), + 'pp': lambda x: (x['p'][0]/x['p'])**(x['R']/cp), 'theta': lambda x: (x['t']) * (x['p'][0]/x['p'])**(x['R']/cp), 'thetav': lambda x: x.theta + 0.61 * x.theta * x.q, 'rho': lambda x: x.p /x.t / x.R , } - for varname,lfunction in rowmatches.items(): + for varname,lfunction in varcalc.items(): air_balloon[varname] = lfunction(air_balloon) print('alt in xrin?:','alt' in xrin) if 'alt' in xrin: air_balloon['z'] = xrin.alt.values else: + g = 9.81 # gravity acceleration [m s-2] air_balloon['z'] = 
0. for irow,row in air_balloon.iloc[1:].iterrows(): air_balloon['z'].iloc[irow] = air_balloon['z'].iloc[irow-1] - \ - 2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow]) * \ + 2./(air_balloon['rho'].iloc[irow-1]+air_balloon['rho'].iloc[irow])/g * \ (air_balloon['p'].iloc[irow] - air_balloon['p'].iloc[irow-1]) - - for varname,lfunction in rowmatches.items(): - air_balloon[varname] = lfunction(air_balloon) + # for varname,lfunction in varcakc.items(): + # air_balloon[varname] = lfunction(air_balloon) dpars = {} dpars['longitude'] = current_station['longitude'] dpars['latitude'] = current_station['latitude'] dpars['STNID'] = current_station.name - # # there are issues with the lower measurements in the HUMPPA campaign, # # for which a steady decrease of potential temperature is found, which @@ -200,15 +190,29 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # air_balloon = air_balloon.iloc[ifirst:].reset_index().drop(['index'],axis=1) air_balloon = air_balloon.iloc[:].reset_index().drop(['index'],axis=1) + # if air_balloon.z.max() > 100000.: + # air_balloon.z = air_balloon.z/10. 
+ is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) valid_indices = air_balloon.index[is_valid].values air_ap_mode='b' + + + while ((len(valid_indices) > 10) and + ((air_balloon.theta.iloc[valid_indices[0]] - + air_balloon.theta.iloc[valid_indices[1]]) > 0.5)): + valid_indices = valid_indices[1:] + + #theta_vs_first_inconsistent = True + # while theta_vs_first_inconsistent: if len(valid_indices) > 0: - print(air_balloon.z.shape,air_balloon.thetav.shape,) + air_balloon_temp = air_balloon.iloc[valid_indices] + print(air_balloon_temp) + print(air_balloon_temp.z.shape,air_balloon_temp.thetav.shape,) dpars['h'],dpars['h_u'],dpars['h_l'] =\ - blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) + blh(air_balloon_temp.z.values,air_balloon_temp.thetav.values,air_balloon_temp.WSPD.values) dpars['h_b'] = np.max((dpars['h'],10.)) dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height dpars['h_l'] = np.max((dpars['h_l'],10.)) #low limit of mixed layer height @@ -220,8 +224,6 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): dpars['h_e'] =np.nan dpars['h'] =np.nan - - if ~np.isnan(dpars['h']): dpars['Ps'] = air_balloon.p.iloc[valid_indices[0]] else: @@ -232,11 +234,11 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... 
and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values + is_valid_below_h = (air_balloon.iloc[valid_indices].z < dpars['h']) + valid_indices_below_h = air_balloon.iloc[valid_indices].index[is_valid_below_h].values if len(valid_indices) > 1: if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() + ml_mean = air_balloon.iloc[valid_indices][is_valid_below_h].mean() else: ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() elif len(valid_indices) == 1: @@ -245,23 +247,29 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): temp = pd.DataFrame(air_balloon) temp.iloc[0] = np.nan ml_mean = temp - + dpars['theta']= ml_mean.theta dpars['q'] = ml_mean.q dpars['u'] = ml_mean.u dpars['v'] = ml_mean.v + # theta_vs_first_inconsistent = \ + # ((air_balloon.theta.iloc[valid_indices[0]] - air_balloon.theta.iloc[valid_indices[1]]) > 0.2) + # theta_vs_first_inconsistent = \ + # ((air_balloon.theta.iloc[valid_indices[0]] - dpars['theta']) > 0.1) + # if theta_vs_first_inconsistent: + # valid_indices = valid_indices[1:] + # print("warning! too large difference between near surface value and abl value of theta. 
I'm taking the next one as near surface vlue") else: dpars['theta'] = np.nan dpars['q'] = np.nan dpars['u'] = np.nan dpars['v'] = np.nan - + # theta_bl_inconsistent = False + air_ap_head = air_balloon[0:0] #pd.DataFrame(columns = air_balloon.columns) # All other data points above the mixed-layer fit air_ap_tail = air_balloon[air_balloon.z > dpars['h']] - - air_ap_head.z = pd.Series(np.array([2.,dpars['h'],dpars['h']])) jump = air_ap_head.iloc[0] * np.nan @@ -297,17 +305,15 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) - - # only select samples monotonically increasing with height air_ap_tail_orig = pd.DataFrame(air_ap_tail) air_ap_tail = pd.DataFrame() + print(air_ap_tail_orig) air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[0],ignore_index=True) for ibottom in range(1,len(air_ap_tail_orig)): if air_ap_tail_orig.iloc[ibottom].z > air_ap_tail.iloc[-1].z +10.: air_ap_tail = air_ap_tail.append(air_ap_tail_orig.iloc[ibottom],ignore_index=True) - # make theta increase strong enough to avoid numerical # instability air_ap_tail_orig = pd.DataFrame(air_ap_tail) @@ -347,16 +353,20 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): air_ap['p'].iloc[1] =(dpars['Ps'] - rho * g * dpars['h']) air_ap['p'].iloc[2] =(dpars['Ps'] - rho * g * dpars['h'] -0.1) - dpars['lat'] = dpars['latitude'] # this is set to zero because we use local (sun) time as input (as if we were in Greenwhich) dpars['lon'] = 0. 
# this is the real longitude that will be used to extract ground data - print('ldate',ldate) - print('lhour',lhour) - dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour) - dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=-4) + # dpars['ldatetime'] = ldate+dt.timedelta(hours=lhour) + # dpars['datetime'] = dpars['ldatetime'] + dt.timedelta(hours=+4) + + dpars['datetime'] = ldate+dt.timedelta(hours=lhour) + dpars['ldatetime'] = dpars['datetime'] + dt.timedelta(hours=-4) + + + + dpars['doy'] = dpars['datetime'].timetuple().tm_yday dpars['SolarAltitude'] = \ @@ -371,13 +381,12 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): dpars['datetime']\ ) - dpars['lSunrise'], dpars['lSunset'] \ = Pysolar.util.GetSunriseSunset(dpars['latitude'], 0., dpars['ldatetime'],0.) - # Warning!!! Unfortunately!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). + # Warning!!! Unfortunatly!!!! WORKAROUND!!!! Even though we actually write local solar time, we need to assign the timezone to UTC (which is WRONG!!!). Otherwise ruby cannot understand it (it always converts tolocal computer time :( ). 
dpars['lSunrise'] = pytz.utc.localize(dpars['lSunrise']) dpars['lSunset'] = pytz.utc.localize(dpars['lSunset']) @@ -421,6 +430,9 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): air_balloon[column] = air_balloon[column].round(decimal) air_ap[column] = air_ap[column].round(decimal) + + + dpars['gammatheta_lower_limit'] = 0.0001 updateglobal = False if c4gli is None: c4gli = class4gl_input() @@ -428,7 +440,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): print('updating...') print(column) - c4gli.update(source='humppa',\ + c4gli.update(source='goamazon',\ # pars=pars, pars=dpars,\ air_balloon=air_balloon,\ @@ -439,7 +451,9 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # if profile_ini: # c4gli.runtime = 10 * 3600 - c4gli.dump(file_sounding) + if not ((dpars['ldatetime'].hour <=12) or\ + ((dpars['lSunset'].hour - dpars['ldatetime'].hour) >= (2.))): + c4gli = None # if profile_ini: # c4gl = class4gl(c4gli) @@ -458,29 +472,25 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): return c4gli -path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS_NOON/' - - +# path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' +path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/GOAMAZON_EVENING/' os.system('mkdir -p '+path_soundings) -file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') -for date,pair in HOUR_FILES.items(): - print(pair['morning']) - humpafn =pair['morning'][1] - print(humpafn) - balloon_file = xr.open_dataset(humpafn) - - c4gli_morning = humppa_parser(balloon_file,file_morning,date,pair['morning'][0]) - print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) -file_morning.close() -file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') +file_morning = open(path_soundings+format(current_station.name,'05d')+'_ini.yaml','w') 
+ipair = 0 for date,pair in HOUR_FILES.items(): - humpafn = pair['afternoon'][1] - balloon_file = xr.open_dataset(humpafn) + + humpafn = pair['morning'][1] + balloon_file_morning = xr.open_dataset(humpafn) + if (balloon_file_morning.pres.shape[0] > 10): + c4gli_morning = humppa_parser(balloon_file_morning,file_morning,date,pair['morning'][0]) + if (c4gli_morning is not None) : + c4gli_morning.dump(file_morning) + ipair += 1 + +print(ipair) +file_morning.close() - c4gli_afternoon = humppa_parser(balloon_file,file_afternoon,date,pair['afternoon'][0]) - print('c4gli_afternoon_ldatetime 0',c4gli_afternoon.pars.ldatetime) -file_afternoon.close() # file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') @@ -506,82 +516,70 @@ def humppa_parser(balloon_file,file_sounding,ldate,lhour,c4gli=None): # # file_model = open(fnout_model+ format(current_station.name,'05d')+'.yaml','w') - records_morning = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='morning', - refetch_records=True, - ) -print('records_morning_ldatetime',records_morning.ldatetime) - -records_afternoon = get_records(pd.DataFrame([current_station]),\ - path_soundings,\ - subset='afternoon', - refetch_records=True, + subset='ini',\ + refetch_records=True,\ ) # align afternoon records with noon records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date] -records_afternoon.index = records_morning.index -path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS_NOON/' - -os.system('mkdir -p '+path_exp) -file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') -file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') -file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') -file_mod = 
open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') - -for (STNID,chunk,index),record_morning in records_morning.iterrows(): - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='ini') - #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) - - - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='ini') - - c4gli_morning.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds())}) - c4gli_morning.update(source='manual', - pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) - c4gli_morning.dump(file_ini) - - c4gl = class4gl(c4gli_morning) - c4gl.run() - - c4gl.dump(file_mod,\ - include_input=False,\ - timeseries_only=timeseries_only) -file_ini.close() -file_mod.close() -file_morning.close() -file_afternoon.close() - -records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='ini', - refetch_records=True, - ) -records_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='mod', - refetch_records=True, - ) -records_mod.index = records_ini.index - -# align afternoon records with initial records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] -records_afternoon.index = records_ini.index +# os.system('mkdir -p '+path_exp) +# file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_ini.yaml') +# file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_end.yaml') +# file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') +# file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') +# +# for (STNID,chunk,index),record_morning in 
records_morning.iterrows(): +# record_afternoon = records_afternoon.loc[(STNID,chunk,index)] +# +# c4gli_morning = get_record_yaml(file_morning, +# record_morning.index_start, +# record_morning.index_end, +# mode='ini') +# #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) +# +# +# c4gli_afternoon = get_record_yaml(file_afternoon, +# record_afternoon.index_start, +# record_afternoon.index_end, +# mode='ini') +# +# c4gli_morning.update(source='pairs',pars={'runtime' : \ +# int((c4gli_afternoon.pars.datetime_daylight - +# c4gli_morning.pars.datetime_daylight).total_seconds())}) +# c4gli_morning.update(source='manual', +# pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) +# c4gli_morning.dump(file_ini) +# +# c4gl = class4gl(c4gli_morning) +# c4gl.run() +# +# c4gl.dump(file_mod,\ +# include_input=False,\ +# timeseries_only=timeseries_only) +# file_ini.close() +# file_mod.close() +# file_morning.close() +# file_afternoon.close() +# +# records_ini = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='ini', +# refetch_records=True, +# ) +# records_mod = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='mod', +# refetch_records=True, +# ) +# +# records_mod.index = records_ini.index +# +# # align afternoon records with initial records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] +# records_afternoon.index = records_ini.index """ diff --git a/class4gl/setup/setup_humppa.py b/class4gl/setup/setup_humppa.py index b7bcc7c..273e039 100644 --- a/class4gl/setup/setup_humppa.py +++ b/class4gl/setup/setup_humppa.py @@ -161,6 +161,14 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) valid_indices = air_balloon.index[is_valid].values + i = 1 + while (air_balloon.thetav.iloc[valid_indices[0]] - \ + 
air_balloon.thetav.iloc[valid_indices[i]] ) > 0.5: + #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 + air_balloon.thetav.iloc[valid_indices[0:i]] = \ + air_balloon.thetav.iloc[valid_indices[i]] + 0.5 + + i +=1 air_ap_mode='b' @@ -190,11 +198,11 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... and those of the mixed layer - is_valid_below_h = is_valid & (air_balloon.z < dpars['h']) - valid_indices_below_h = air_balloon.index[is_valid_below_h].values + is_valid_below_h = (air_balloon.iloc[valid_indices].z < dpars['h']) + valid_indices_below_h = air_balloon.iloc[valid_indices].index[is_valid_below_h].values if len(valid_indices) > 1: if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon[is_valid_below_h].mean() + ml_mean = air_balloon.iloc[valid_indices][is_valid_below_h].mean() else: ml_mean = air_balloon.iloc[valid_indices[0]:valid_indices[1]].mean() elif len(valid_indices) == 1: @@ -203,7 +211,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): temp = pd.DataFrame(air_balloon) temp.iloc[0] = np.nan ml_mean = temp - + dpars['theta']= ml_mean.theta dpars['q'] = ml_mean.q dpars['u'] = ml_mean.u @@ -441,7 +449,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): path_soundings = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS/' -file_morning = open(path_soundings+format(current_station.name,'05d')+'_morning.yaml','w') +file_morning = open(path_soundings+format(current_station.name,'05d')+'_ini.yaml','w') for date,pair in HOUR_FILES.items(): print(pair['morning']) humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['morning'][1] @@ -452,7 +460,7 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): print('c4gli_morning_ldatetime 0',c4gli_morning.pars.ldatetime) file_morning.close() -file_afternoon = 
open(path_soundings+format(current_station.name,'05d')+'_afternoon.yaml','w') +file_afternoon = open(path_soundings+format(current_station.name,'05d')+'_end.yaml','w') for date,pair in HOUR_FILES.items(): humpafn ='/kyukon/data/gent/gvo000/gvo00090/EXT/data/SOUNDINGS/HUMPPA/'+pair['afternoon'][1] balloon_file = open(humpafn,'r',encoding='latin-1') @@ -488,79 +496,79 @@ def humppa_parser(balloon_file,file_sounding,ldate,hour,c4gli=None): records_morning = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='morning', + subset='ini', refetch_records=True, ) print('records_morning_ldatetime',records_morning.ldatetime) records_afternoon = get_records(pd.DataFrame([current_station]),\ path_soundings,\ - subset='afternoon', + subset='end', refetch_records=True, ) -# align afternoon records with noon records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date] -records_afternoon.index = records_morning.index -path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/C4GL/IOPS/' - -os.system('mkdir -p '+path_exp) -file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_morning.yaml') -file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_afternoon.yaml') -file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') -file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_mod.yaml','w') - -for (STNID,chunk,index),record_morning in records_morning.iterrows(): - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='ini') - #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) - - - c4gli_afternoon = get_record_yaml(file_afternoon, - record_afternoon.index_start, - record_afternoon.index_end, - mode='ini') - - 
c4gli_morning.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds())}) - c4gli_morning.update(source='manual', - pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) - c4gli_morning.dump(file_ini) - - c4gl = class4gl(c4gli_morning) - c4gl.run() - - c4gl.dump(file_mod,\ - include_input=False,\ - timeseries_only=timeseries_only) -file_ini.close() -file_mod.close() -file_morning.close() -file_afternoon.close() - -records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='ini', - refetch_records=True, - ) -records_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - subset='mod', - refetch_records=True, - ) - -records_mod.index = records_ini.index - -# align afternoon records with initial records, and set same index -records_afternoon.index = records_afternoon.ldatetime.dt.date -records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] -records_afternoon.index = records_ini.index +# # align afternoon records with noon records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_morning.ldatetime.dt.date] +# records_afternoon.index = records_morning.index +# path_exp = '/kyukon/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/IOPS_ALL/' +# +# os.system('mkdir -p '+path_exp) +# file_morning = open(path_soundings+'/'+format(current_station.name,'05d')+'_ini.yaml') +# file_afternoon = open(path_soundings+'/'+format(current_station.name,'05d')+'_ini.yaml') +# file_ini = open(path_exp+'/'+format(current_station.name,'05d')+'_ini.yaml','w') +# file_mod = open(path_exp+'/'+format(current_station.name,'05d')+'_end.yaml','w') +# +# for (STNID,chunk,index),record_morning in records_morning.iterrows(): +# record_afternoon = records_afternoon.loc[(STNID,chunk,index)] +# +# c4gli_morning = get_record_yaml(file_morning, +# record_morning.index_start, +# 
record_morning.index_end, +# mode='ini') +# #print('c4gli_morning_ldatetime',c4gli_morning.pars.ldatetime) +# +# +# c4gli_afternoon = get_record_yaml(file_afternoon, +# record_afternoon.index_start, +# record_afternoon.index_end, +# mode='ini') +# +# c4gli_morning.update(source='pairs',pars={'runtime' : \ +# int((c4gli_afternoon.pars.datetime_daylight - +# c4gli_morning.pars.datetime_daylight).total_seconds())}) +# c4gli_morning.update(source='manual', +# pars={'sw_ac' : [],'sw_ap': True,'sw_lit': False}) +# c4gli_morning.dump(file_ini) +# +# c4gl = class4gl(c4gli_morning) +# c4gl.run() +# +# c4gl.dump(file_mod,\ +# include_input=False,\ +# timeseries_only=timeseries_only) +# file_ini.close() +# file_mod.close() +# file_morning.close() +# file_afternoon.close() +# +# records_ini = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='ini', +# refetch_records=True, +# ) +# records_mod = get_records(pd.DataFrame([current_station]),\ +# path_exp,\ +# subset='end', +# refetch_records=True, +# ) +# +# records_mod.index = records_ini.index +# +# # align afternoon records with initial records, and set same index +# records_afternoon.index = records_afternoon.ldatetime.dt.date +# records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] +# records_afternoon.index = records_ini.index # stations_for_iter = stations(path_exp) # for STNID,station in stations_iterator(stations_for_iter): diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index 3c71599..f1516c2 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -153,8 +153,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu one_run = False # for iSTN,STN in STNlist[5:]: - fnout = args.path_output+"/"+format(STN.name,'05d')+"_morning.yaml" - fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_afternoon.yaml" + fnout = args.path_output+"/"+format(STN.name,'05d')+"_ini.yaml" + fnout_afternoon = 
args.path_output+"/"+format(STN.name,'05d')+"_end.yaml" # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \ @@ -199,7 +199,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # we take 3000 because previous analysis (ie., HUMPPA) has # focussed towards such altitude le3000 = (c4gli.air_balloon.z <= 3000.) - logic['10measurements'] = (np.sum(le3000) >= 10) + logic['10measurements'] = (np.sum(le3000) >= 7) leh = (c4gli.air_balloon.z <= c4gli.pars.h) @@ -207,9 +207,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu (len(np.where(leh)[0]) > 0) and \ # in cases where humidity is not defined, the mixed-layer # values get corr - (not np.isnan(c4gli.pars.theta)) and \ - (rmse(c4gli.air_balloon.theta[leh] , \ - c4gli.pars.theta,filternan_actual=True) < 1.)\ + (not np.isnan(c4gli.pars.theta))\ + #and \ + #(rmse(c4gli.air_balloon.theta[leh] , \ + # c4gli.pars.theta,filternan_actual=True) < 1.0)\ ) @@ -237,118 +238,120 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu c4gli.pars.ldatetime.day) c4gli_afternoon.clear() print('AFTERNOON PROFILE CLEARED') - try: + # try: + c4gli_afternoon.get_profile_wyoming(wy_strm) + print('AFTERNOON PROFILE OK') + + if wy_strm.current is not None: + current_date_afternoon = \ + dt.date(c4gli_afternoon.pars.ldatetime.year, \ + c4gli_afternoon.pars.ldatetime.month, \ + c4gli_afternoon.pars.ldatetime.day) + else: + # a dummy date: this will be ignored anyway + current_date_afternoon = dt.date(1900,1,1) + + # we will dump the latest afternoon sounding that fits the + # minimum criteria specified by logic_afternoon + print(current_date,current_date_afternoon) + c4gli_afternoon_for_dump = None + while ((current_date_afternoon == current_date) and \ + (wy_strm.current is not None)): + logic_afternoon =dict() + + logic_afternoon['afternoon'] = \ + (c4gli_afternoon.pars.ldatetime.hour >= 12.) 
+ # the sounding should have taken place before 1 hours + # before sunset. This is to minimize the chance that a + # stable boundary layer (yielding very low mixed layer + # heights) is formed which can not be represented by + # class. + logic_afternoon['daylight'] = \ + ((c4gli_afternoon.pars.ldatetime - \ + c4gli_afternoon.pars.lSunset \ + ).total_seconds()/3600. <= -1.) + + + le3000_afternoon = \ + (c4gli_afternoon.air_balloon.z <= 3000.) + logic_afternoon['5measurements'] = \ + (np.sum(le3000_afternoon) >= 7) + + # we only store the last afternoon sounding that fits these + # minimum criteria + + afternoon_ok = np.mean(list(logic_afternoon.values())) + + print('logic_afternoon: ',logic_afternoon) + print(afternoon_ok,c4gli_afternoon.pars.ldatetime) + if afternoon_ok == 1.: + # # doesn't work :( + # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon) + + # so we just create a new one from the same wyoming profile + c4gli_afternoon_for_dump = class4gl_input() + c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm) + + wy_strm.find_next() + c4gli_afternoon.clear() c4gli_afternoon.get_profile_wyoming(wy_strm) - print('AFTERNOON PROFILE OK') if wy_strm.current is not None: current_date_afternoon = \ - dt.date(c4gli_afternoon.pars.ldatetime.year, \ - c4gli_afternoon.pars.ldatetime.month, \ - c4gli_afternoon.pars.ldatetime.day) + dt.date(c4gli_afternoon.pars.ldatetime.year, \ + c4gli_afternoon.pars.ldatetime.month, \ + c4gli_afternoon.pars.ldatetime.day) else: # a dummy date: this will be ignored anyway current_date_afternoon = dt.date(1900,1,1) - # we will dump the latest afternoon sounding that fits the - # minimum criteria specified by logic_afternoon - print(current_date,current_date_afternoon) - c4gli_afternoon_for_dump = None - while ((current_date_afternoon == current_date) and \ - (wy_strm.current is not None)): - logic_afternoon =dict() - - logic_afternoon['afternoon'] = \ - (c4gli_afternoon.pars.ldatetime.hour >= 12.) 
- # the sounding should have taken place before 1 hours - # before sunset. This is to minimize the chance that a - # stable boundary layer (yielding very low mixed layer - # heights) is formed which can not be represented by - # class. - logic_afternoon['daylight'] = \ - ((c4gli_afternoon.pars.ldatetime - \ - c4gli_afternoon.pars.lSunset \ - ).total_seconds()/3600. <= 1.) - - - le3000_afternoon = \ - (c4gli_afternoon.air_balloon.z <= 3000.) - logic_afternoon['5measurements'] = \ - (np.sum(le3000_afternoon) >= 5) - - # we only store the last afternoon sounding that fits these - # minimum criteria - - afternoon_ok = np.mean(list(logic_afternoon.values())) - - print('logic_afternoon: ',logic_afternoon) - print(afternoon_ok,c4gli_afternoon.pars.ldatetime) - if afternoon_ok == 1.: - # # doesn't work :( - # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon) - - # so we just create a new one from the same wyoming profile - c4gli_afternoon_for_dump = class4gl_input() - c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm) - - wy_strm.find_next() - c4gli_afternoon.clear() - c4gli_afternoon.get_profile_wyoming(wy_strm) - - if wy_strm.current is not None: - current_date_afternoon = \ - dt.date(c4gli_afternoon.pars.ldatetime.year, \ - c4gli_afternoon.pars.ldatetime.month, \ - c4gli_afternoon.pars.ldatetime.day) - else: - # a dummy date: this will be ignored anyway - current_date_afternoon = dt.date(1900,1,1) - - # Only in the case we have a good pair of soundings, we - # dump them to disk - if c4gli_afternoon_for_dump is not None: - c4gli.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon_for_dump.pars.datetime_daylight - - c4gli.pars.datetime_daylight).total_seconds())}) + # Only in the case we have a good pair of soundings, we + # dump them to disk + if c4gli_afternoon_for_dump is not None: + c4gli.update(source='pairs',pars={'runtime' : \ + int((c4gli_afternoon_for_dump.pars.datetime_daylight - + c4gli.pars.datetime_daylight).total_seconds())}) - 
print('ALMOST...') - if c4gli.pars.runtime > 18000.: # more than 5 hours simulation - + print('ALMOST...') + if c4gli.pars.runtime > 3600*4.: # more than 4 hours simulation + - c4gli.get_global_input(globaldata) - print('VERY CLOSE...') - if c4gli.check_source_globaldata() and \ - (c4gli.check_source(source='wyoming',\ - check_only_sections='pars')): - c4gli.dump(fileout) - - c4gli_afternoon_for_dump.dump(fileout_afternoon) - - - # for keyEXP,dictEXP in experiments.items(): - # - # c4gli.update(source=keyEXP,pars = dictEXP) - # c4gl = class4gl(c4gli) - # # c4gl.run() - # - # c4gl.dump(c4glfiles[key]) - - print('HIT!!!') - one_run = True - except: - print('get profile failed') + c4gli.get_global_input(globaldata) + print('VERY CLOSE...') + if c4gli.check_source_globaldata() and \ + (c4gli.check_source(source='wyoming',\ + check_only_sections='pars')): + print('starting dumps') + c4gli.dump(fileout) + print('file morning dumped') + c4gli_afternoon_for_dump.dump(fileout_afternoon) + print('file afternoon dumped') + + + # for keyEXP,dictEXP in experiments.items(): + # + # c4gli.update(source=keyEXP,pars = dictEXP) + # c4gl = class4gl(c4gli) + # # c4gl.run() + # + # c4gl.dump(c4glfiles[key]) + + print('HIT!!!') + one_run = True + # except: + # print('get profile failed') if one_run: #STN.name = STN.name all_records_morning = get_records(pd.DataFrame([STN]),\ args.path_output,\ - subset='morning', + subset='ini', refetch_records=True, ) all_records_afternoon = get_records(pd.DataFrame([STN]),\ args.path_output,\ - subset='afternoon', + subset='end', refetch_records=True, ) else: diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py index 61c7da2..0762cf8 100644 --- a/class4gl/simulations/batch_simulations.py +++ b/class4gl/simulations/batch_simulations.py @@ -175,8 +175,7 @@ # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path import importlib.util print(batch_args.exec) - spec = 
importlib.util.spec_from_file_location("module.name", - batch_args.exec) + spec = importlib.util.spec_from_file_location("module.name", batch_args.exec) task_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(task_module) print('hello') diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 8d9a652..3639c2d 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -74,378 +74,384 @@ def __init__(self,**kwargs): print(args.__dict__) -# def execute(**kwargs): -# note that with args, we actually mean the same as those specified with -# the argparse module above - -# overwrite the args according to the kwargs when the procedure is called -# as module function -# for key,value in kwargs.items(): -# args.__dict__[key] = value - -print("-- begin arguments --") -for key,value in args.__dict__.items(): - print(key,': ',value) -print("-- end arguments ----") - -# load specified class4gl library -if args.c4gl_path_lib is not None: - sys.path.insert(0, args.c4gl_path_lib) - -from class4gl import class4gl_input, data_global,class4gl -from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records -from class4gl import blh,class4gl_input - -# this is a variant of global run in which the output of runs are still written -# out even when the run crashes. 
- -# #only include the following timeseries in the model output -# timeseries_only = \ -# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', -# 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', -# 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', -# 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', -# 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] - - -EXP_DEFS =\ -{ - 'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - - 'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - - 'ERA_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True}, - 'GLOBAL_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'IOPS_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, -} - -# ======================== -print("getting a list of stations") -# ======================== - -# these are all the stations that are found in the input dataset -all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) - -# 
==================================== -print('defining all_stations_select') -# ==================================== - -# these are all the stations that are supposed to run by the whole batch (all -# chunks). We narrow it down according to the station(s) specified. - - - -if args.station_id is not None: - print("Selecting station by ID") - stations_iter = stations_iterator(all_stations) - STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) - all_stations_select = pd.DataFrame([run_station]) -else: - print("Selecting stations from a row range in the table") - all_stations_select = pd.DataFrame(all_stations.table) - if args.last_station_row is not None: - all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] - if args.first_station_row is not None: - all_stations_select = all_station_select.iloc[int(args.first_station):] -print("station numbers included in the whole batch "+\ - "(all chunks):",list(all_stations_select.index)) - -print(all_stations_select) -print("getting all records of the whole batch") -all_records_morning_select = get_records(all_stations_select,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) - -# only run a specific chunck from the selection -if args.global_chunk_number is not None: - if args.station_chunk_number is not None: - raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') - - if (args.split_by is None) or (args.split_by <= 0): - raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") - - run_station_chunk = None - print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') - totalchunks = 0 - stations_iter = all_stations_select.iterrows() - in_current_chunk = False - try: - while not in_current_chunk: - istation,current_station = stations_iter.__next__() - 
all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) - chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) - print('chunks_current_station',chunks_current_station) - in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - - if in_current_chunk: - run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] - run_station_chunk = int(args.global_chunk_number) - totalchunks - - totalchunks +=chunks_current_station +def execute(**kwargs): + # note that with args, we actually mean the same as those specified with + # the argparse module above + + # overwrite the args according to the kwargs when the procedure is called + # as module function + for key,value in kwargs.items(): + args.__dict__[key] = value + + print("-- begin arguments --") + for key,value in args.__dict__.items(): + print(key,': ',value) + print("-- end arguments ----") + + # load specified class4gl library + if args.c4gl_path_lib is not None: + sys.path.insert(0, args.c4gl_path_lib) + + from class4gl import class4gl_input, data_global,class4gl + from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records + from class4gl import blh,class4gl_input + + # this is a variant of global run in which the output of runs are still written + # out even when the run crashes. 
+ + # #only include the following timeseries in the model output + # timeseries_only = \ + # ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', + # 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', + # 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', + # 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', + # 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] + + + EXP_DEFS =\ + { + 'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + + 'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - - except StopIteration: - raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') - print("station = ",list(run_stations.index)) - print("station chunk number:",run_station_chunk) - -# if no global chunk is specified, then run the whole station selection in one run, or -# a specific chunk for each selected station according to # args.station_chunk_number -else: - run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] - if args.station_chunk_number is not None: - run_station_chunk = int(args.station_chunk_number) - print("station(s) that is processed.",list(run_stations.index)) - print("chunk number: ",run_station_chunk) + 'ERA_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'IOPS_ADV_SM2':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_ERA_NEW':{'sw_ac' 
: ['adv',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shearwe':True}, + 'GLOBAL_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + 'IOPS_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'IOPS_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, + 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, + } + + # ======================== + print("getting a list of stations") + # ======================== + + # these are all the stations that are found in the input dataset + all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) + + # ==================================== + print('defining all_stations_select') + # ==================================== + + # these are all the stations that are supposed to run by the whole batch (all + # chunks). We narrow it down according to the station(s) specified. + + + + if args.station_id is not None: + print("Selecting station by ID") + stations_iter = stations_iterator(all_stations) + STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) + all_stations_select = pd.DataFrame([run_station]) else: - if args.split_by is not None: - raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.") - run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) - - -#print(all_stations) -print('Fetching initial/forcing records') -records_morning = get_records(run_stations,\ - args.path_forcing,\ - subset=args.subset_forcing, - refetch_records=False, - ) - -# note that if runtime is an integer number, we don't need to get the afternoon -# profiles. 
-if args.runtime == 'from_profile_pair': - print('Fetching afternoon records for determining the simulation runtimes') - records_afternoon = get_records(run_stations,\ - args.path_forcing,\ - subset='end', - refetch_records=False, - ) - - # print(records_morning.index) - # print(records_afternoon.index) - # align afternoon records with the noon records, and set same index - print('hello') - print(len(records_afternoon)) - print(len(records_morning)) - - print("aligning morning and afternoon records") - records_morning['dates'] = records_morning['ldatetime'].dt.date - records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date - records_afternoon.set_index(['STNID','dates'],inplace=True) - ini_index_dates = records_morning.set_index(['STNID','dates']).index - records_afternoon = records_afternoon.loc[ini_index_dates] - records_afternoon.index = records_morning.index - -experiments = args.experiments.strip(' ').split(' ') -if args.experiments_names is not None: - experiments_names = args.experiments_names.strip(' ').split(' ') - if len(experiments_names) != len(experiments): - raise ValueError('Lenght of --experiments_names is different from --experiments') - -else: - experiments_names = experiments - -for iexpname,expid in enumerate(experiments): - expname = experiments_names[iexpname] - exp = EXP_DEFS[expid] - path_exp = args.path_experiments+'/'+expname+'/' - - os.system('mkdir -p '+path_exp) - for istation,current_station in run_stations.iterrows(): - print(istation,current_station) - records_morning_station = records_morning.query('STNID == '+str(current_station.name)) - start_record = run_station_chunk*args.split_by if run_station_chunk is not 0 else 0 - end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None - if start_record >= (len(records_morning_station)): - print("warning: outside of profile number range for station "+\ - str(current_station)+". 
Skipping chunk number for this station.") + print("Selecting stations from a row range in the table") + all_stations_select = pd.DataFrame(all_stations.table) + if args.last_station_row is not None: + all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] + if args.first_station_row is not None: + all_stations_select = all_station_select.iloc[int(args.first_station):] + print("station numbers included in the whole batch "+\ + "(all chunks):",list(all_stations_select.index)) + + print(all_stations_select) + print("getting all records of the whole batch") + all_records_morning_select = get_records(all_stations_select,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + + # only run a specific chunck from the selection + if args.global_chunk_number is not None: + if args.station_chunk_number is not None: + raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') + + if (args.split_by is None) or (args.split_by <= 0): + raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") + + run_station_chunk = None + print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') + totalchunks = 0 + stations_iter = all_stations_select.iterrows() + in_current_chunk = False + try: + while not in_current_chunk: + istation,current_station = stations_iter.__next__() + all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) + chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) + print('chunks_current_station',chunks_current_station) + in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) + + if in_current_chunk: + run_stations = pd.DataFrame([current_station])# 
run_stations.loc[(int(args.__dict__['last_station'])] + run_station_chunk = int(args.global_chunk_number) - totalchunks + + totalchunks +=chunks_current_station + + + except StopIteration: + raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') + print("station = ",list(run_stations.index)) + print("station chunk number:",run_station_chunk) + + # if no global chunk is specified, then run the whole station selection in one run, or + # a specific chunk for each selected station according to # args.station_chunk_number + else: + run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] + if args.station_chunk_number is not None: + run_station_chunk = int(args.station_chunk_number) + print("station(s) that is processed.",list(run_stations.index)) + print("chunk number: ",run_station_chunk) else: - fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' - if os.path.isfile(fn_morning): - file_morning = open(fn_morning) - else: - fn_morning = \ - args.path_forcing+'/'+format(current_station.name,'05d')+\ - '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' - file_morning = open(fn_morning) - - if args.runtime == 'from_profile_pair': - file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml') - fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_ini.yaml' - fn_end_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_end.yaml' - file_ini = open(fn_ini,'w') - file_end_mod = open(fn_end_mod,'w') - - #iexp = 0 - onerun = False - print('starting station chunk number: '\ - +str(run_station_chunk)+' (chunk size:',args.split_by,')') - - records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] # 
[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] - - isim = 0 - for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): - print('starting '+str(isim+1)+' out of '+\ - str(len(records_morning_station_chunk) )+\ - ' (station total: ',str(len(records_morning_station)),')') - + if args.split_by is not None: + raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.") + run_station_chunk = 0 + print("stations that are processed.",list(run_stations.index)) - c4gli_morning = get_record_yaml(file_morning, - record_morning.index_start, - record_morning.index_end, - mode='model_input') - if args.diag_tropo is not None: - print('add tropospheric parameters on advection and subsidence (for diagnosis)') - seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) - profile_tropo = c4gli_morning.air_ac[seltropo] - for var in args.diag_tropo:#['t','q','u','v',]: - if var[:3] == 'adv': - mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) - c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) - else: - print("warning: tropospheric variable "+var+" not recognized") - + + #print(all_stations) + print('Fetching initial/forcing records') + records_morning = get_records(run_stations,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + if len(records_morning) == 0: + raise IOError("No initialization records records found in "+\ + args.path_forcing+' (subset: '+args_subset_forcing+')') + + # note that if runtime is an integer number, we don't need to get the afternoon + # profiles. 
+ if args.runtime == 'from_profile_pair': + print('Fetching afternoon records for determining the simulation runtimes') + records_afternoon = get_records(run_stations,\ + args.path_forcing,\ + subset='end', + refetch_records=False, + ) + if len(records_afternoon) == 0: + raise IOError("No final state records found in "+\ + args.path_forcing+' (subset: '+args_subset_forcing+')') + + # print(records_morning.index) + # print(records_afternoon.index) + # align afternoon records with the noon records, and set same index + print('hello') + print(len(records_afternoon)) + print(len(records_morning)) + + print("aligning morning and afternoon records") + records_morning['dates'] = records_morning['ldatetime'].dt.date + records_afternoon['dates'] = records_afternoon['ldatetime'].dt.date + records_afternoon.set_index(['STNID','dates'],inplace=True) + ini_index_dates = records_morning.set_index(['STNID','dates']).index + records_afternoon = records_afternoon.loc[ini_index_dates] + records_afternoon.index = records_morning.index + + experiments = args.experiments.strip(' ').split(' ') + if args.experiments_names is not None: + experiments_names = args.experiments_names.strip(' ').split(' ') + if len(experiments_names) != len(experiments): + raise ValueError('Lenght of --experiments_names is different from --experiments') + + else: + experiments_names = experiments + + for iexpname,expid in enumerate(experiments): + expname = experiments_names[iexpname] + exp = EXP_DEFS[expid] + path_exp = args.path_experiments+'/'+expname+'/' + + os.system('mkdir -p '+path_exp) + for istation,current_station in run_stations.iterrows(): + print(istation,current_station) + records_morning_station = records_morning.query('STNID == '+str(current_station.name)) + start_record = run_station_chunk*args.split_by if run_station_chunk is not 0 else 0 + end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None + if start_record >= (len(records_morning_station)): + 
print("warning: outside of profile number range for station "+\ + str(current_station)+". Skipping chunk number for this station.") + else: + fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' + if os.path.isfile(fn_morning): + file_morning = open(fn_morning) + else: + fn_morning = \ + args.path_forcing+'/'+format(current_station.name,'05d')+\ + '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' + file_morning = open(fn_morning) + + if args.runtime == 'from_profile_pair': + file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml') + fn_ini = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_ini.yaml' + fn_end_mod = path_exp+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_end.yaml' + file_ini = open(fn_ini,'w') + file_end_mod = open(fn_end_mod,'w') + + #iexp = 0 + onerun = False + print('starting station chunk number: '\ + +str(run_station_chunk)+' (chunk size:',args.split_by,')') + + records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] + + isim = 0 + for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): + print('starting '+str(isim+1)+' out of '+\ + str(len(records_morning_station_chunk) )+\ + ' (station total: ',str(len(records_morning_station)),')') - if args.runtime == 'from_profile_pair': - record_afternoon = records_afternoon.loc[(STNID,chunk,index)] - c4gli_afternoon = get_record_yaml(file_afternoon, - int(record_afternoon.index_start), - int(record_afternoon.index_end), + + c4gli_morning = get_record_yaml(file_morning, + record_morning.index_start, + record_morning.index_end, mode='model_input') - runtime = int((c4gli_afternoon.pars.datetime_daylight - - c4gli_morning.pars.datetime_daylight).total_seconds()) - elif args.runtime == 'from_input': - runtime = 
c4gli_morning.pars.runtime - else: - runtime = int(args.runtime) - - - c4gli_morning.update(source='pairs',pars={'runtime' : \ - runtime}) - c4gli_morning.update(source=expname, pars=exp) - - c4gl = class4gl(c4gli_morning) - - if args.error_handling == 'dump_always': - try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') - except: - print('run not succesful') - onerun = True - - print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') - c4gli_morning.dump(file_ini) + if args.diag_tropo is not None: + print('add tropospheric parameters on advection and subsidence (for diagnosis)') + seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) + profile_tropo = c4gli_morning.air_ac[seltropo] + for var in args.diag_tropo:#['t','q','u','v',]: + if var[:3] == 'adv': + mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) + c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) + else: + print("warning: tropospheric variable "+var+" not recognized") - c4gl.dump(file_end_mod,\ - include_input=False,\ - #timeseries_only=timeseries_only,\ - ) - onerun = True - # in this case, only the file will dumped if the runs were - # successful - elif args.error_handling == 'dump_on_success': - try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') + if args.runtime == 'from_profile_pair': + record_afternoon = records_afternoon.loc[(STNID,chunk,index)] + c4gli_afternoon = get_record_yaml(file_afternoon, + int(record_afternoon.index_start), + int(record_afternoon.index_end), + mode='model_input') + runtime = int((c4gli_afternoon.pars.datetime_daylight - + c4gli_morning.pars.datetime_daylight).total_seconds()) + elif args.runtime == 'from_input': + runtime = 
c4gli_morning.pars.runtime + else: + runtime = int(args.runtime) + + + c4gli_morning.update(source='pairs',pars={'runtime' : \ + runtime}) + c4gli_morning.update(source=expname, pars=exp) + + c4gl = class4gl(c4gli_morning) + + if args.error_handling == 'dump_always': + try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') + except: + print('run not succesful') + onerun = True + + print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') c4gli_morning.dump(file_ini) - print("dumping to "+str(file_ini)) c4gl.dump(file_end_mod,\ include_input=False,\ #timeseries_only=timeseries_only,\ ) onerun = True - except: - print('run not succesful') - isim += 1 - - - file_ini.close() - file_end_mod.close() - file_morning.close() - if args.runtime == 'from_profile_pair': - file_afternoon.close() - - if onerun: - records_ini = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - getchunk = int(run_station_chunk),\ - subset='ini', - refetch_records=True, - ) - records_end_mod = get_records(pd.DataFrame([current_station]),\ - path_exp,\ - getchunk = int(run_station_chunk),\ - subset='end',\ - refetch_records=True,\ - ) - else: - # remove empty files - os.system('rm '+fn_ini) - os.system('rm '+fn_end_mod) - - # # align afternoon records with initial records, and set same index - # records_afternoon.index = records_afternoon.ldatetime.dt.date - # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] - # records_afternoon.index = records_ini.index - - # stations_for_iter = stations(path_exp) - # for STNID,station in stations_iterator(stations_for_iter): - # records_current_station_index = \ - # (records_ini.index.get_level_values('STNID') == STNID) - # file_current_station_end_mod = STNID - # - # with \ - # open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ - # 
open(path_exp+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \ - # open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: - # for (STNID,index),record_ini in records_iterator(records_ini): - # c4gli_ini = get_record_yaml(file_station_ini, - # record_ini.index_start, - # record_ini.index_end, - # mode='ini') - # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) - # - # record_end_mod = records_end_mod.loc[(STNID,index)] - # c4gl_end_mod = get_record_yaml(file_station_end_mod, - # record_end_mod.index_start, - # record_end_mod.index_end, - # mode='mod') - # record_afternoon = records_afternoon.loc[(STNID,index)] - # c4gl_afternoon = get_record_yaml(file_station_afternoon, - # record_afternoon.index_start, - # record_afternoon.index_end, - # mode='ini') + # in this case, only the file will dumped if the runs were + # successful + elif args.error_handling == 'dump_on_success': + try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') + c4gli_morning.dump(file_ini) + + + print("dumping to "+str(file_ini)) + c4gl.dump(file_end_mod,\ + include_input=False,\ + #timeseries_only=timeseries_only,\ + ) + onerun = True + except: + print('run not succesful') + isim += 1 + + + file_ini.close() + file_end_mod.close() + file_morning.close() + if args.runtime == 'from_profile_pair': + file_afternoon.close() + + if onerun: + records_ini = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + subset='ini', + refetch_records=True, + ) + records_end_mod = get_records(pd.DataFrame([current_station]),\ + path_exp,\ + getchunk = int(run_station_chunk),\ + subset='end',\ + refetch_records=True,\ + ) + else: + # remove empty files + os.system('rm '+fn_ini) + os.system('rm '+fn_end_mod) + + # # align afternoon records with initial records, and set same index + # 
records_afternoon.index = records_afternoon.ldatetime.dt.date + # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] + # records_afternoon.index = records_ini.index + + # stations_for_iter = stations(path_exp) + # for STNID,station in stations_iterator(stations_for_iter): + # records_current_station_index = \ + # (records_ini.index.get_level_values('STNID') == STNID) + # file_current_station_end_mod = STNID + # + # with \ + # open(path_exp+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ + # open(path_exp+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \ + # open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: + # for (STNID,index),record_ini in records_iterator(records_ini): + # c4gli_ini = get_record_yaml(file_station_ini, + # record_ini.index_start, + # record_ini.index_end, + # mode='ini') + # #print('c4gli_in_ldatetime 3',c4gli_ini.pars.ldatetime) + # + # record_end_mod = records_end_mod.loc[(STNID,index)] + # c4gl_end_mod = get_record_yaml(file_station_end_mod, + # record_end_mod.index_start, + # record_end_mod.index_end, + # mode='mod') + # record_afternoon = records_afternoon.loc[(STNID,index)] + # c4gl_afternoon = get_record_yaml(file_station_afternoon, + # record_afternoon.index_start, + # record_afternoon.index_end, + # mode='ini') -# if __name__ == '__main__': -# #execute(**vars(args)) -# execute() +if __name__ == '__main__': + #execute(**vars(args)) + execute() diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py index 8b60b86..bf0da9d 100644 --- a/class4gl/simulations/simulations_iter.py +++ b/class4gl/simulations/simulations_iter.py @@ -110,9 +110,11 @@ def execute(**kwargs): EXP_DEFS =\ { - 'BASE_ITER':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'BASE_ITER':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, + 'BASE_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, + 'BASE_ITER_W_ADV':{'sw_ac' 
: ['adv',"w"],'sw_ap': True,'sw_lit': False}, + 'BASE_ITER_W':{'sw_ac' : ["w"],'sw_ap': True,'sw_lit': False}, - 'NOADV_ITER':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, 'ERA_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, 'NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, @@ -222,6 +224,11 @@ def execute(**kwargs): subset=args.subset_forcing, refetch_records=False, ) + if len(records_morning) == 0: + raise IOError("No initialization records records found in "+\ + args.path_forcing+' (subset: '+args_subset_forcing+')') + + # note that if runtime is an integer number, we don't need to get the afternoon # profiles. @@ -232,6 +239,9 @@ def execute(**kwargs): subset='end', refetch_records=False, ) + if len(records_afternoon) == 0: + raise IOError("No final state records found in "+\ + args.path_forcing+' (subset: '+args_subset_forcing+')') # print(records_morning.index) # print(records_afternoon.index) @@ -348,7 +358,6 @@ def execute(**kwargs): b = c4gli_morning.pars.wwilt c = c4gli_morning.pars.wfc #max(c4gli_morning.pars.wfc,c4gli_morning.pars.wsat-0.01) - try: #fb = f(b) c4gli_morning.pars.wg = b @@ -374,7 +383,6 @@ def execute(**kwargs): c4gli_morningc = c4gli_morning i=0 - if fc*fb > 0.: if abs(fb) < abs(fc): c4gl = c4glb @@ -490,7 +498,8 @@ def execute(**kwargs): file_ini.close() file_mod.close() file_morning.close() - file_afternoon.close() + if args.runtime == 'from_profile_pair': + file_afternoon.close() if onerun: records_ini = get_records(pd.DataFrame([current_station]),\ diff --git a/setup.py b/setup.py index f3832ba..3ea0222 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='0.9.3', + version='0.9.4', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # 
Give a short description author = 'Hendrik Wouters', # Type in your name From f57ed7e0237e02aff03be5c25f9459260e7c5efa Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Mon, 4 Mar 2019 16:36:51 +0100 Subject: [PATCH 122/129] multiple updates --- class4gl/class4gl.py | 60 +++--- class4gl/interface_functions.py | 13 +- class4gl/interface_multi.py | 27 +-- class4gl/model.py | 10 +- class4gl/setup/batch_setup_igra.py | 2 +- class4gl/setup/setup_igra.py | 272 ++++++++++++++-------------- class4gl/simulations/simulations.py | 16 +- 7 files changed, 204 insertions(+), 196 deletions(-) diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py index 0173222..bebf496 100644 --- a/class4gl/class4gl.py +++ b/class4gl/class4gl.py @@ -601,30 +601,32 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): # # this is an alternative pipe/numpy method # (~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0)).pipe(np.where)[0] valid_indices = air_balloon.index[is_valid] + air_balloon = air_balloon[is_valid].reset_index() #print(valid_indices) - dpars['Ps'] = air_balloon.p.loc[[valid_indices[0]]][0] - air_balloon['t'] = air_balloon['TEMP']+273.15 - air_balloon['theta'] = (air_balloon.t) * \ - (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) - air_balloon['thetav'] = air_balloon['theta']*(1. + 0.61 * air_balloon['q']) + if len(air_balloon) > 2: + dpars['Ps'] = air_balloon.p.values[0] + air_balloon['t'] = air_balloon['TEMP']+273.15 + air_balloon['theta'] = (air_balloon.t) * \ + (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) + air_balloon['thetav'] = air_balloon['theta']*(1. 
+ 0.61 * air_balloon['q']) + + # t_cut_off = 1.5 + # i = 1 + # if t_cut_off is not None: + # + # while ((i < len(air_balloon)) and \ + # ((air_balloon.thetav[0] - air_balloon.thetav[i] ) > t_cut_off)): + # #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 + # air_balloon.thetav[0:i] = \ + # air_balloon.thetav[i] + t_cut_off + # + # i +=1 - i = 1 - t_cut_off = 2.0 - if t_cut_off is not None: - - while (air_balloon.thetav.loc[[valid_indices[0]]][0] - \ - air_balloon.thetav.loc[valid_indices[i:i+1]][0] ) > t_cut_off: - #diff = (air_balloon.theta.iloc[valid_indices[i]] -air_balloon.theta.iloc[valid_indices[i+1]])- 0.5 - air_balloon.thetav.loc[valid_indices[0:i]] = \ - air_balloon.thetav.loc[valid_indices[i:i+1]][0] + t_cutoff - - i +=1 - if len(valid_indices) > 0: #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) @@ -641,28 +643,22 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): dpars['h_l'] =np.nan dpars['h_e'] =np.nan dpars['h'] =np.nan - - - if np.isnan(dpars['h']): dpars['Ps'] = np.nan + air_balloon['t'] = np.nan + air_balloon['theta'] = np.nan + air_balloon['thetav'] = np.nan if ~np.isnan(dpars['h']): # determine mixed-layer properties (moisture, potential temperature...) from profile # ... 
and those of the mixed layer - is_valid_below_h = (air_balloon.loc[valid_indices].z < dpars['h']) - valid_indices_below_h = air_balloon.loc[valid_indices].index[is_valid_below_h].values - if len(valid_indices) > 1: - if len(valid_indices_below_h) >= 3.: - ml_mean = air_balloon.loc[valid_indices][is_valid_below_h].mean() - else: - ml_mean = air_balloon.loc[valid_indices[0:2]].mean() - elif len(valid_indices) == 1: - ml_mean = (air_balloon.iloc[0:1]).mean() + is_valid_below_h = (air_balloon.z < dpars['h']) + valid_indices_below_h = air_balloon.index[is_valid_below_h].values + if len(valid_indices_below_h) >= 3.: + ml_mean = air_balloon[is_valid_below_h].mean() else: - temp = pd.DataFrame(air_balloon) - temp.iloc[0] = np.nan - ml_mean = temp + ml_mean = air_balloon[0:2].mean() + dpars['theta']= ml_mean.theta dpars['q'] = ml_mean.q diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index 547e6f8..d3e8829 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -588,11 +588,14 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor # jsonstream.close() # os.system('rm '+TEMPDIR+'/'+yamlfilename+'.buffer.yaml.'+str(current_tell)) records_station_chunk = pd.DataFrame.from_dict(dictout) - records_station_chunk.index.set_names(('STNID','chunk','index'),inplace=True) - print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\ - +str(STNID)+', chunk number '+str(chunk)) - records_station_chunk.to_pickle(path_yaml+'/'+pklfilename) - records_station = pd.concat([records_station,records_station_chunk]) + if len(records_station_chunk) > 0: + records_station_chunk.index.set_names(('STNID','chunk','index'),inplace=True) + print('writing table file ('+path_yaml+'/'+pklfilename+') for station '\ + +str(STNID)+', chunk number '+str(chunk)) + records_station_chunk.to_pickle(path_yaml+'/'+pklfilename) + records_station = pd.concat([records_station,records_station_chunk]) + else: + 
print('Warning. No records found in ',yaml_file) # else: # os.system('rm '+pklfilename) if (getchunk == 'all') and (pklfilename_unified not in pklchunks): diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index 1795592..e4fd4e9 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -264,22 +264,25 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal print('exclude exceptional observations') print('exclude unrealistic model output -> should be investigated!') valid = (\ - #(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.00) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25000) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 3.0000) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & + # (self.frames['stats']['records_all_stations_ini'].lat >-2. ) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.00) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25000) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 3.0000) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 350.) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 400.) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -0.0006) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < 0.0003) & - - # # filter 'extreme' model output -> should be investigated! 
- (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & - # (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -0.0005) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < 0.0003) & + # ((self.frames['stats']['records_all_stations_ini'].ldatetime- + # self.frames['stats']['records_all_stations_ini'].lSunset).total_seconds() + # <= -2.*3600.) & + # filter 'extreme' model output -> should be investigated! + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0005) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) 
& # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & diff --git a/class4gl/model.py b/class4gl/model.py index 87dbd99..8a85c23 100644 --- a/class4gl/model.py +++ b/class4gl/model.py @@ -285,7 +285,11 @@ def init(self): self.dtheta = self.input.dtheta # initial temperature jump at h [K] self.gammatheta = self.input.gammatheta # free atmosphere potential temperature lapse rate [K m-1] - self.gammatheta_lower_limit = \ + + if 'gammatheta_lower_limit' not in self.input.__dict__.keys(): + self.gammatheta_lower_limit = 0.002 + else: + self.gammatheta_lower_limit = \ self.input.gammatheta_lower_limit # free atmosphere potential temperature lapse rate lower limit to avoid crashes [K m-1] self.advtheta = self.input.advtheta # advection of heat [K s-1] self.beta = self.input.beta # entrainment ratio for virtual heat [-] @@ -418,6 +422,10 @@ def init(self): # we will update the original variables afterwards #self.air_ap['q'] = self.air_ap.QABS/1000. 
+ #work around for corrupt input + if 'level_0' in self.air_ap.columns: + self.air_ap = self.air_ap.drop(columns=['level_0']) + self.air_ap = \ self.air_ap.assign(R= lambda x: self.Rd*(1.-x.q) + self.Rv*x.q) # we require the temperature fields, since we need to consider diff --git a/class4gl/setup/batch_setup_igra.py b/class4gl/setup/batch_setup_igra.py index 4f1e432..4950dcf 100644 --- a/class4gl/setup/batch_setup_igra.py +++ b/class4gl/setup/batch_setup_igra.py @@ -26,7 +26,7 @@ parser = argparse.ArgumentParser() #parser.add_argument('--timestamp') parser.add_argument('--exec') # chunk simulation script -parser.add_argument('--pbs_string',default=' -l walltime=50:0:0') +parser.add_argument('--pbs_string',default=' -l walltime=70:00:00') parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/') parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') # parser.add_argument('--first_YYYYMMDD',default="19810101") diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index f1516c2..ad0fa43 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -176,54 +176,51 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu while wy_strm.current is not None: c4gli.clear() - try: - c4gli.get_profile_wyoming(wy_strm) - #print(STN['ID'],c4gli.pars.datetime) - #c4gli.get_global_input(globaldata) - - print(c4gli.pars.STNID, c4gli.pars.ldatetime) - - logic = dict() - logic['morning'] = (c4gli.pars.ldatetime.hour <= 12.) - - # Sounding should have taken place after 3 hours before sunrise. - # Note that the actual simulation only start at sunrise - # (specified by ldatetime_daylight), so the ABL cooling af the time - # before sunrise is ignored by the simulation. - logic['daylight'] = \ - ((c4gli.pars.ldatetime - - c4gli.pars.lSunrise).total_seconds()/3600. >= -3.) - - logic['springsummer'] = (c4gli.pars.theta > 278.) 
- - # we take 3000 because previous analysis (ie., HUMPPA) has - # focussed towards such altitude - le3000 = (c4gli.air_balloon.z <= 3000.) - logic['10measurements'] = (np.sum(le3000) >= 7) - - leh = (c4gli.air_balloon.z <= c4gli.pars.h) - - logic['mlerrlow'] = (\ - (len(np.where(leh)[0]) > 0) and \ - # in cases where humidity is not defined, the mixed-layer - # values get corr - (not np.isnan(c4gli.pars.theta))\ - #and \ - #(rmse(c4gli.air_balloon.theta[leh] , \ - # c4gli.pars.theta,filternan_actual=True) < 1.0)\ - ) + c4gli.get_profile_wyoming(wy_strm) + #print(STN['ID'],c4gli.pars.datetime) + #c4gli.get_global_input(globaldata) + + print(c4gli.pars.STNID, c4gli.pars.ldatetime) + + logic = dict() + logic['morning'] = (c4gli.pars.ldatetime.hour <= 12.) + + # Sounding should have taken place after 3 hours before sunrise. + # Note that the actual simulation only start at sunrise + # (specified by ldatetime_daylight), so the ABL cooling af the time + # before sunrise is ignored by the simulation. + logic['daylight'] = \ + ((c4gli.pars.ldatetime - + c4gli.pars.lSunrise).total_seconds()/3600. >= -4.) + + logic['springsummer'] = (c4gli.pars.theta > 278.) + + # we take 3000 because previous analysis (ie., HUMPPA) has + # focussed towards such altitude + le3000 = (c4gli.air_balloon.z <= 3000.) + logic['10measurements'] = (np.sum(le3000) >= 7) + + leh = (c4gli.air_balloon.z <= c4gli.pars.h) + + logic['mlerrlow'] = (\ + (len(np.where(leh)[0]) > 0) and \ + # in cases where humidity is not defined, the mixed-layer + # values get corr + (not np.isnan(c4gli.pars.theta)) and \ + (rmse(c4gli.air_balloon.theta[leh] , c4gli.pars.theta,filternan_actual=True) < 1.8)\ + ) - logic['mlherrlow'] = (c4gli.pars.h_e <= 150.) - - print('logic:', logic) - # the result - morning_ok = np.mean(list(logic.values())) - print(morning_ok,c4gli.pars.ldatetime) + logic['mlherrlow'] = (c4gli.pars.h_e <= 150.) 
+ + print('logic:', logic) + # the result + morning_ok = np.mean(list(logic.values())) + print(morning_ok,c4gli.pars.ldatetime) - except: - morning_ok =False - print('obtain morning not good') + # except: + # morning_ok =False + # print('obtain morning not good') # the next sounding will be used either for an afternoon sounding # or for the morning sounding of the next day. @@ -238,109 +235,109 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu c4gli.pars.ldatetime.day) c4gli_afternoon.clear() print('AFTERNOON PROFILE CLEARED') - # try: - c4gli_afternoon.get_profile_wyoming(wy_strm) - print('AFTERNOON PROFILE OK') - - if wy_strm.current is not None: - current_date_afternoon = \ - dt.date(c4gli_afternoon.pars.ldatetime.year, \ - c4gli_afternoon.pars.ldatetime.month, \ - c4gli_afternoon.pars.ldatetime.day) - else: - # a dummy date: this will be ignored anyway - current_date_afternoon = dt.date(1900,1,1) - - # we will dump the latest afternoon sounding that fits the - # minimum criteria specified by logic_afternoon - print(current_date,current_date_afternoon) - c4gli_afternoon_for_dump = None - while ((current_date_afternoon == current_date) and \ - (wy_strm.current is not None)): - logic_afternoon =dict() - - logic_afternoon['afternoon'] = \ - (c4gli_afternoon.pars.ldatetime.hour >= 12.) - # the sounding should have taken place before 1 hours - # before sunset. This is to minimize the chance that a - # stable boundary layer (yielding very low mixed layer - # heights) is formed which can not be represented by - # class. - logic_afternoon['daylight'] = \ - ((c4gli_afternoon.pars.ldatetime - \ - c4gli_afternoon.pars.lSunset \ - ).total_seconds()/3600. <= -1.) - - - le3000_afternoon = \ - (c4gli_afternoon.air_balloon.z <= 3000.) 
- logic_afternoon['5measurements'] = \ - (np.sum(le3000_afternoon) >= 7) - - # we only store the last afternoon sounding that fits these - # minimum criteria - - afternoon_ok = np.mean(list(logic_afternoon.values())) - - print('logic_afternoon: ',logic_afternoon) - print(afternoon_ok,c4gli_afternoon.pars.ldatetime) - if afternoon_ok == 1.: - # # doesn't work :( - # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon) - - # so we just create a new one from the same wyoming profile - c4gli_afternoon_for_dump = class4gl_input() - c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm) - - wy_strm.find_next() - c4gli_afternoon.clear() + try: c4gli_afternoon.get_profile_wyoming(wy_strm) + print('AFTERNOON PROFILE OK') if wy_strm.current is not None: current_date_afternoon = \ - dt.date(c4gli_afternoon.pars.ldatetime.year, \ - c4gli_afternoon.pars.ldatetime.month, \ - c4gli_afternoon.pars.ldatetime.day) + dt.date(c4gli_afternoon.pars.ldatetime.year, \ + c4gli_afternoon.pars.ldatetime.month, \ + c4gli_afternoon.pars.ldatetime.day) else: # a dummy date: this will be ignored anyway current_date_afternoon = dt.date(1900,1,1) - # Only in the case we have a good pair of soundings, we - # dump them to disk - if c4gli_afternoon_for_dump is not None: - c4gli.update(source='pairs',pars={'runtime' : \ - int((c4gli_afternoon_for_dump.pars.datetime_daylight - - c4gli.pars.datetime_daylight).total_seconds())}) + # we will dump the latest afternoon sounding that fits the + # minimum criteria specified by logic_afternoon + print(current_date,current_date_afternoon) + c4gli_afternoon_for_dump = None + while ((current_date_afternoon == current_date) and \ + (wy_strm.current is not None)): + logic_afternoon =dict() + + logic_afternoon['afternoon'] = \ + (c4gli_afternoon.pars.ldatetime.hour >= 12.) + # the sounding should have taken place before 1 hours + # before sunset. 
This is to minimize the chance that a + # stable boundary layer (yielding very low mixed layer + # heights) is formed which can not be represented by + # class. + logic_afternoon['daylight'] = \ + ((c4gli_afternoon.pars.ldatetime - \ + c4gli_afternoon.pars.lSunset \ + ).total_seconds()/3600. <= -2.) + + + le3000_afternoon = \ + (c4gli_afternoon.air_balloon.z <= 3000.) + logic_afternoon['5measurements'] = \ + (np.sum(le3000_afternoon) >= 7) + + # we only store the last afternoon sounding that fits these + # minimum criteria + + afternoon_ok = np.mean(list(logic_afternoon.values())) + + print('logic_afternoon: ',logic_afternoon) + print(afternoon_ok,c4gli_afternoon.pars.ldatetime) + if afternoon_ok == 1.: + # # doesn't work :( + # c4gli_afternoon_for_dump = cp.deepcopy(c4gli_afternoon) + + # so we just create a new one from the same wyoming profile + c4gli_afternoon_for_dump = class4gl_input() + c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm) + + wy_strm.find_next() + c4gli_afternoon.clear() + c4gli_afternoon.get_profile_wyoming(wy_strm) + + if wy_strm.current is not None: + current_date_afternoon = \ + dt.date(c4gli_afternoon.pars.ldatetime.year, \ + c4gli_afternoon.pars.ldatetime.month, \ + c4gli_afternoon.pars.ldatetime.day) + else: + # a dummy date: this will be ignored anyway + current_date_afternoon = dt.date(1900,1,1) + + # Only in the case we have a good pair of soundings, we + # dump them to disk + if c4gli_afternoon_for_dump is not None: + c4gli.update(source='pairs',pars={'runtime' : \ + int((c4gli_afternoon_for_dump.pars.datetime_daylight - + c4gli.pars.datetime_daylight).total_seconds())}) - print('ALMOST...') - if c4gli.pars.runtime > 3600*4.: # more than 4 hours simulation - + print('ALMOST...') + if c4gli.pars.runtime > 3600*4.: # more than 4 hours simulation + - c4gli.get_global_input(globaldata) - print('VERY CLOSE...') - if c4gli.check_source_globaldata() and \ - (c4gli.check_source(source='wyoming',\ - check_only_sections='pars')): - 
print('starting dumps') - c4gli.dump(fileout) - print('file morning dumped') - c4gli_afternoon_for_dump.dump(fileout_afternoon) - print('file afternoon dumped') - - - # for keyEXP,dictEXP in experiments.items(): - # - # c4gli.update(source=keyEXP,pars = dictEXP) - # c4gl = class4gl(c4gli) - # # c4gl.run() - # - # c4gl.dump(c4glfiles[key]) - - print('HIT!!!') - one_run = True - # except: - # print('get profile failed') + c4gli.get_global_input(globaldata) + print('VERY CLOSE...') + if c4gli.check_source_globaldata() and \ + (c4gli.check_source(source='wyoming',\ + check_only_sections='pars')): + print('starting dumps') + c4gli.dump(fileout) + print('file morning dumped') + c4gli_afternoon_for_dump.dump(fileout_afternoon) + print('file afternoon dumped') + + + # for keyEXP,dictEXP in experiments.items(): + # + # c4gli.update(source=keyEXP,pars = dictEXP) + # c4gl = class4gl(c4gli) + # # c4gl.run() + # + # c4gl.dump(c4glfiles[key]) + + print('HIT!!!') + one_run = True + except: + print('get profile failed') if one_run: #STN.name = STN.name @@ -355,6 +352,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu refetch_records=True, ) else: + print('no valid record found. 
Removing files', fnout, fnout_afternoon) os.system('rm '+fnout) os.system('rm '+fnout_afternoon) diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 3639c2d..d759e28 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -352,14 +352,14 @@ def execute(**kwargs): c4gl = class4gl(c4gli_morning) if args.error_handling == 'dump_always': - try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') - except: - print('run not succesful') + # try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') + # except: + # print('run not succesful') onerun = True print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') From 4b48c0c109c68e404e8598907d5fc1280634764b Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Mon, 4 Mar 2019 16:37:35 +0100 Subject: [PATCH 123/129] multiple updates --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3ea0222..b82c7c4 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='0.9.4', + version='0.9.5', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From 98b1a223ee8db1c243035cc5669222db3ecb9b2c Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Wed, 29 May 2019 14:31:12 +0200 Subject: [PATCH 124/129] multiple updates --- class4gl/class4gl.py | 65 +-- class4gl/data_global.py | 11 +- class4gl/interface/interface_stations.py | 21 +- 
class4gl/interface/taylorDiagram.py | 7 +- class4gl/interface_functions.py | 8 +- class4gl/interface_multi.py | 62 ++- class4gl/model.py | 10 +- class4gl/setup/setup_igra.py | 115 ++-- class4gl/setup/update_input.py | 8 +- class4gl/setup/update_setup.py | 327 ------------ class4gl/simulations/batch_simulations.pbs | 6 +- class4gl/simulations/copy_update.py | 584 +++++++++++---------- class4gl/simulations/simulations.py | 7 + class4gl/simulations/simulations_iter.py | 1 + setup.py | 2 +- 15 files changed, 511 insertions(+), 723 deletions(-) delete mode 100644 class4gl/setup/update_setup.py diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py index bebf496..cdb85c8 100644 --- a/class4gl/class4gl.py +++ b/class4gl/class4gl.py @@ -554,7 +554,7 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): string = '\n'.join(string) columns = [ 'PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR', 'DRCT','SKNT' , 'THTA','THTE', 'THTV'] - air_balloon = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1] + air_balloon_in = pd.read_fwf(io.StringIO(str(string)),widths=[7]*11,names=columns,skiprows=5,dtype=np.float,skipfooter=0)#.iloc[5:-1] #ONE_COLUMN = pd.read_table(io.StringIO(str(string)),sep=r"\s*",skiprows=[0,1,3,4]) #string = soup.pre.next_sibling.next_sibling @@ -573,17 +573,18 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): dpars['STNID'] = dpars['Station number'] # altitude above ground level - air_balloon['z'] = air_balloon.HGHT -dpars['Station elevation'] + air_balloon = pd.DataFrame() + air_balloon['z'] = air_balloon_in.HGHT -dpars['Station elevation'] # absolute humidity in g/kg - air_balloon['q']= (air_balloon.MIXR/1000.) \ + air_balloon['q']= (air_balloon_in.MIXR/1000.) \ / \ - (air_balloon.MIXR/1000.+1.) + (air_balloon_in.MIXR/1000.+1.) 
# convert wind speed from knots to m/s - air_balloon['WSPD'] = 0.51444 * air_balloon.SKNT - angle_x = (90.-air_balloon.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees. + air_balloon['V'] = 0.51444 * air_balloon_in.SKNT + angle_x = (90.-air_balloon_in.DRCT)/180.*np.pi # assuming that wind in direction of the south is 0 degrees. - air_balloon['u'] = air_balloon.WSPD * np.sin(angle_x) - air_balloon['v'] = air_balloon.WSPD * np.cos(angle_x) + air_balloon['u'] = air_balloon.V * np.sin(angle_x) + air_balloon['v'] = air_balloon.V * np.cos(angle_x) @@ -592,8 +593,9 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): Rv = 461.5 # gas constant for moist air [J kg-1 K-1] air_balloon['R'] = (Rd*(1.-air_balloon.q) + Rv*air_balloon.q) - air_balloon['p'] = air_balloon.PRES*100. + air_balloon['p'] = air_balloon_in.PRES*100. + air_balloon['t'] = air_balloon_in['TEMP']+273.15 # Therefore, determine the sounding that are valid for 'any' column is_valid = ~np.isnan(air_balloon).any(axis=1) & (air_balloon.z >= 0) @@ -608,9 +610,8 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): if len(air_balloon) > 2: dpars['Ps'] = air_balloon.p.values[0] - air_balloon['t'] = air_balloon['TEMP']+273.15 air_balloon['theta'] = (air_balloon.t) * \ - (dpars['Ps']/(air_balloon.PRES*100.))**(air_balloon['R']/cp) + (dpars['Ps']/(air_balloon.p))**(air_balloon['R']/cp) air_balloon['thetav'] = air_balloon['theta']*(1. 
+ 0.61 * air_balloon['q']) # t_cut_off = 1.5 @@ -628,7 +629,7 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): #calculated mixed-layer height considering the critical Richardson number of the virtual temperature profile - dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.WSPD) + dpars['h'],dpars['h_u'],dpars['h_l'] = blh(air_balloon.z,air_balloon.thetav,air_balloon.V) dpars['h_b'] = np.max((dpars['h'],10.)) dpars['h_u'] = np.max((dpars['h_u'],10.)) #upper limit of mixed layer height @@ -654,10 +655,10 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): # ... and those of the mixed layer is_valid_below_h = (air_balloon.z < dpars['h']) valid_indices_below_h = air_balloon.index[is_valid_below_h].values - if len(valid_indices_below_h) >= 3.: + if len(valid_indices_below_h) >= 2.: ml_mean = air_balloon[is_valid_below_h].mean() else: - ml_mean = air_balloon[0:2].mean() + ml_mean = air_balloon.iloc[0:1].mean() dpars['theta']= ml_mean.theta @@ -679,9 +680,9 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): #calculate mixed-layer jump ( this should be larger than 0.1) air_ap_head['z'] = pd.Series(np.array([2.,dpars['h'],dpars['h']])) - air_ap_head['HGHT'] = air_ap_head['z'] \ - + \ - np.round(dpars[ 'Station elevation'],1) + # air_ap_head['HGHT'] = air_ap_head['z'] \ + # + \ + # np.round(dpars[ 'Station elevation'],1) # make a row object for defining the jump jump = air_ap_head.iloc[0] * np.nan @@ -716,7 +717,7 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): air_ap_head[column][2] += jump[column] - air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) + air_ap_head.V = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) @@ -838,9 +839,7 @@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): # we round the columns to a specified decimal, so that we get a clean # output format for yaml - decimals = {'p':0,'HGHT':1,'t':2,'DWPT':2,'RELH':2,'MIXR':2,\ - 'DRCT':2 ,'SKNT':2, 'theta':4, 'THTE':2, 
'THTV':2,\ - 'z':2, 'q':5, 'WSPD':2, 'u':4, 'v':4} + decimals = {'p':0, 't':2, 'theta':4, 'z':2, 'q':5, 'V':2, 'u':4, 'v':4} # for column,decimal in decimals.items(): air_balloon[column] = air_balloon[column].round(decimal) @@ -1078,9 +1077,9 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None): if globaldata.datasets[key].page.variables['lon'].values[ilonmin] > globaldata.datasets[key].page.variables['lon'].values[ilon]: ilonmin = ilon - # for the koeppen climate classification we just take nearest + # for the koeppen climate classification and midsummermonth, we just take nearest print(key) - if key == 'KGC': + if key in ['KGC','midsummermonth','AI']: ilatrange = range(ilat,ilat+1) ilonrange = range(ilon,ilon+1) else: @@ -1099,7 +1098,8 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None): idatetime = np.where((DIST) == np.min(DIST))[0][0] #print('idatetime',idatetime,globaldata.datasets[key].variables['time'].values[idatetime],classdatetime) - if key not in ['Tsoil','T2']: + + if key not in ['Tsoil','T2','blptb_daymax','t2m_daymax','t2m','blpt','blpt_afternoon','blh','t2m_afternoon','blh_afternoon','blq','blq_afternoon','blptb','blptb_afternoon','blptw','blptw_afternoon','HI','HI_afternoon','rh100','rh100_afternoon']: if ((globaldata.datasets[key].page.variables['time'].values[idatetime] < classdatetime) ): idatetime += 1 @@ -1118,9 +1118,9 @@ def get_global_input(self, globaldata,only_keys=None,exclude_keys=None): idatetime = idatetime - 1 idatetimeend = idatetimeend - 1 - # in case of soil temperature, we take the exact + # in case of soil temperature or maximum daytime temperature, we take the exact # timing (which is the morning) - if key in ['Tsoil','T2']: + if key in ['Tsoil','T2','t2m_daymax','t2m','blpt','blpt_afternoon','blh','t2m_afternoon','blh_afternoon','blq','blq_afternoon','blptb','blptb_afternoon','blptw','blptw_afternoon','HI','HI_afternoon','rh100','rh100_afternoon']: idatetimeend = idatetime idts = 
range(idatetime,idatetimeend+1) @@ -1469,13 +1469,6 @@ def check_source_globaldata(self): # and now we can get the surface values #class_settings = class4gl_input() #class_settings.set_air_input(input_atm) - - # we only allow non-polar stations - if not (self.pars.lat <= 60.): - source_globaldata_ok = False - self.logger.warning('cveg is invalid: ('+str(self.pars.cveg)+')') - - # check lat and lon if (pd.isnull(self.pars.lat)) or (pd.isnull(self.pars.lon)): source_globaldata_ok = False self.logger.warning('lat is invalid: ('+str(self.pars.lat)+')') @@ -1596,7 +1589,6 @@ def mixed_layer_fit(self,air_ap,source,mode): mlvalues['v'] = np.nan - self.update(source='fit_from_'+source,pars=mlvalues) # First 3 data points of the mixed-layer fit. We create a empty head @@ -1644,10 +1636,11 @@ def mixed_layer_fit(self,air_ap,source,mode): jump.theta = np.max((0.1,jump.theta)) air_ap_head[column][2] += jump[column] + mlvalues['d'+column] = jump[column] - air_ap_head.WSPD = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) - + self.update(source='fit_from_'+source,pars=mlvalues) + air_ap_head.V = np.sqrt(air_ap_head.u**2 +air_ap_head.v**2) # make theta increase strong enough to avoid numerical # instability diff --git a/class4gl/data_global.py b/class4gl/data_global.py index 73f0dda..c46b65e 100644 --- a/class4gl/data_global.py +++ b/class4gl/data_global.py @@ -151,6 +151,11 @@ def set_page(self,ipage,page=None): if 'level' in self.page.dims: self.page = self.page.rename({'level':'lev'}) + lon = self.page.lon.values + lon[lon > 180.] -= 360. 
+ self.page.lon.values = lon[:] + + self.page = self.page.rename(self.renames) self.page = self.page.squeeze(drop=True) @@ -213,7 +218,6 @@ def browse_page(self,rewind=2,**args): else: self.logger.debug("I'm now at page "+ str(self.ipage)) - class data_global(object): def __init__(self,sources= { 'KOEPPEN:KGC' : '/user/data/gent/gvo000/gvo00090/EXT/data/KOEPPEN/Koeppen-Geiger.nc', @@ -259,6 +263,11 @@ def __init__(self,sources= { #"ERAINT:divU_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__", "ERAINT:sp" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc", "ERAINT:wp" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w', + "MSWEPGLEAM:AI" : '/data/gent/vo/000/gvo00090/D2D/data/Aridity//Ep_1981_2017_MO_meanhottestmonth.nc', + "ERA5:t2m_daymax" : '/data/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/t2m_1hourly_for_t2m_daymax.nc:t2m', + "ERA5:blptb_daymax" : '/scratch/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/blptb_1hourly_for_blptb_daymax.nc:blptb', + # "ERA5:slhf" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/slhf_1hourly/slhf_*_1hourly.nc', + # "ERA5:sshf" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/sshf_1hourly/sshf_*_1hourly.nc', #"MSWEP:pr" :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation" },debug_level=None): self.library = {} #unique references to data sources being used. They can be files that are original on the disks or some unambiguous xarray virtual sources. These references are used in other variables. This way, a file or source cannot be loaded twice (a warning is made if one would try it). 
diff --git a/class4gl/interface/interface_stations.py b/class4gl/interface/interface_stations.py index e98bbee..9fc2162 100644 --- a/class4gl/interface/interface_stations.py +++ b/class4gl/interface/interface_stations.py @@ -176,8 +176,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # Q95 = obs.quantile(0.95) # Q95 = obs.quantile(0.90) # Add RMS contours, and label them - contours = dias[varkey].add_contours(levels=5, colors='0.5') # 5 levels - dias[varkey].ax.clabel(contours, inline=1, fontsize=10, fmt='%.1f') + contours = dias[varkey].add_contours(levels=5, colors='0.7') # 5 levels + dias[varkey].ax.clabel(contours, inline=1, fontsize=10,fmt='%.1f') #dia._ax.set_title(season.capitalize()) i += 1 @@ -197,7 +197,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # print(PR) print(varkey,STD,STD_OBS,STD/STD_OBS,PR) dias[varkey].add_sample(STD/STD_OBS, PR, - marker='o', ms=5, ls='', + marker='o', ms=7, ls='', #mfc='k', mec='k', # B&W mfc=colors[ikey], mec=colors[ikey], # Colors label=key,zorder=101) @@ -234,6 +234,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu 'Bias = '+format((BIAS*1000.),'0.2f')+r'$\, \mathrm{g\, kg^{-1}\, h^{-1}}$'+' \n'+\ r'$R$ = '+format(PR,'0.2f') ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9, + # ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9, horizontalalignment='right', verticalalignment='bottom' , bbox={'edgecolor':'black', 'fc':'white', @@ -245,8 +246,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu 'RMSE = '+format(RMSE,'0.1f')+r'$\, \mathrm{m\, h^{-1}}$'+'\n'+\ 'Bias = '+format(BIAS,'0.1f')+r'$\, \mathrm{m\, h^{-1}}$'+'\n'+\ r'$R$ = '+format(PR,'0.2f') - ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9, - horizontalalignment='left', 
verticalalignment='top' , + ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9, + horizontalalignment='right', verticalalignment='bottom' , bbox={'edgecolor':'black', 'fc':'white', 'boxstyle':'square', @@ -258,8 +259,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu 'Bias = '+format(BIAS,'0.3f')+r'$\, \mathrm{K\, h^{-1}}$'+'\n'+\ r'$R$ = '+format(PR,'0.2f') - ann = axes[varkey].annotate(annotate_text, xy=(0.05, .97 ), xycoords='axes fraction',fontsize=9, - horizontalalignment='left', verticalalignment='top' , + ann = axes[varkey].annotate(annotate_text, xy=(0.95, .05 ), xycoords='axes fraction',fontsize=9, + horizontalalignment='right', verticalalignment='bottom' , bbox={'edgecolor':'black', 'fc':'white', 'boxstyle':'square', @@ -303,7 +304,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu dias[varkey].add_sample(station_end_mod.std()/station_obs.std(), pearsonr(station_end_mod,station_obs)[0],#annotate=symbols[istation], - marker=symbols[istation], ms=5, ls='', + marker=symbols[istation], ms=7, ls='', mfc='k', mec='k', # B&W #mfc=colors[ikey], mec=colors[ikey], # Colors label=key,zorder=100) @@ -367,12 +368,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu leg = [] isymbol = 0 for icurrent_station,current_station in c4gldata[key].frames['worldmap']['stations'].table.iterrows(): - leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=10) + leg1, = ax.plot([],'k'+symbols[isymbol] ,markersize=14) leg.append(leg1) isymbol += 1 # symbol for all stations - leg1, = ax.plot([],'ko',markersize=10) + leg1, = ax.plot([],'ko',markersize=14) leg.append(leg1) diff --git a/class4gl/interface/taylorDiagram.py b/class4gl/interface/taylorDiagram.py index 5ece652..8d3e72d 100644 --- a/class4gl/interface/taylorDiagram.py +++ b/class4gl/interface/taylorDiagram.py @@ -192,7 +192,8 @@ def test1(): dia.add_grid() # Add RMS contours, and 
label them - contours = dia.add_contours(colors='0.5') + print('BLABLA') + # contours = dia.add_contours(colors='lightgrey') PLT.clabel(contours, inline=1, fontsize=10, fmt='%.2f') # Add a figure legend @@ -235,7 +236,7 @@ def test2(): label=name) # Add RMS contours, and label them - contours = dia.add_contours(levels=5, colors='0.5') # 5 levels in grey + # contours = dia.add_contours(levels=5, colors='lightgrey') # 5 levels in grey PLT.clabel(contours, inline=1, fontsize=10, fmt='%.0f') dia.add_grid() # Add grid @@ -245,7 +246,7 @@ def test2(): fig.legend(dia.samplePoints, [ p.get_label() for p in dia.samplePoints ], numpoints=1, prop=dict(size='small'), loc='upper right') - fig.suptitle("Taylor diagram", size='x-large') # Figure title + # fig.suptitle("Taylor diagram", size='x-large') # Figure title return dia diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index d3e8829..240f246 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -496,7 +496,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor generate_pkl = True if not generate_pkl: records_station_chunk = pd.read_pickle(path_yaml+'/'+pklfilename) - records_station = pd.concat([records_station,records_station_chunk]) + records_station = pd.concat([records_station,records_station_chunk],sort=True) # irecord = 0 else: with open(path_yaml+'/'+yamlfilename) as yaml_file: @@ -572,8 +572,8 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor # # needed for Ruby # dictouttemp[key] = dt.datetime.strptime(value,"%Y-%m-%d %H:%M:%S %z") - # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!! - dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC) + # # Workaround. Unfortunately, Ruby puts it in local time of the computer. Turn it back to UTC (note that UTC means actually local time)!!! 
+ # dictouttemp[key] = dictouttemp[key].astimezone(pytz.UTC) recordindex = record['index'] dictouttemp['chunk'] = chunk dictouttemp['index_start'] = index_start @@ -604,7 +604,7 @@ def get_records(stations,path_yaml,getchunk='all',subset='morning',refetch_recor +str(STNID)) records_station.to_pickle(path_yaml+'/'+pklfilename_unified) - records = pd.concat([records,records_station]) + records = pd.concat([records,records_station],sort=True) return records def stdrel(mod,obs,columns): diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index e4fd4e9..8931172 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -264,30 +264,58 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal print('exclude exceptional observations') print('exclude unrealistic model output -> should be investigated!') valid = (\ - # (self.frames['stats']['records_all_stations_ini'].lat >-2. ) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.00) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25000) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 3.0000) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.) & + #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > 0.25000) & + #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 1.8000) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 2.0000) & #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 350.) 
& - (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 400.) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -0.0005) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < 0.0003) & - # ((self.frames['stats']['records_all_stations_ini'].ldatetime- - # self.frames['stats']['records_all_stations_ini'].lSunset).total_seconds() - # <= -2.*3600.) & - # filter 'extreme' model output -> should be investigated! - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0005) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 400.) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 350.) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.0006) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + + # filter 'extreme' model output -> should be investigated! + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.3) & # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & ~np.isnan(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt) & ~np.isnan(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt)) +# +# +# +# +# # (self.frames['stats']['records_all_stations_ini'].lat >-2. 
) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.00) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25000) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 3.0000) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 1.8000) & +# #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & +# #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 350.) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 450.) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -0.0006) & +# (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < 0.0003) & +# # ((self.frames['stats']['records_all_stations_ini'].ldatetime- +# # self.frames['stats']['records_all_stations_ini'].lSunset).total_seconds() +# # <= -2.*3600.) & +# # filter 'extreme' model output -> should be investigated! +# (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0005) & +# (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & +# (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & +# (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.) & +# # (self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 400.) 
& +# # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & +# # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & +# # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & +# ~np.isnan(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt) & +# ~np.isnan(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt)) for key in self.frames['stats'].keys(): if (type(self.frames['stats'][key]) == pd.DataFrame) and \ diff --git a/class4gl/model.py b/class4gl/model.py index 8a85c23..30387f0 100644 --- a/class4gl/model.py +++ b/class4gl/model.py @@ -208,6 +208,7 @@ def run(self): self.exitmodel() def init(self): + self.dtmax = +np.inf # assign variables from input data # initialize constants self.Lv = 2.5e6 # heat of vaporization [J kg-1] @@ -460,7 +461,7 @@ def init(self): value_new = self.air_ap[var][indexh[0][0]] if ((value_old is not None) & (value_old != value_new)): - warnings.warn("Warning: input was provided ("+str(value_old)+ "kg kg-1), but it is now overwritten by the first level (index 0) of air_ap.var which is different (" +str(value_new)+"kg kg-1).") + warnings.warn("Warning: input was provided ("+str(value_old)+ "), but it is now overwritten by the first level (index 0) of air_ap.var which is different (" +str(value_new)+").") self.__dict__[var] = value_new # make a profile of the stratification @@ -480,6 +481,11 @@ def init(self): gammavar = np.array(gammavar) self.air_ap = self.air_ap.assign(**{'gamma'+var : gammavar}) + value_old = self.__dict__['d'+var] + value_new = self.air_ap[var][2] - self.air_ap[var][1] + if ((value_old is not None) & (value_old != value_new)): + warnings.warn("Warning: input was provided ("+str(value_old)+ "), but it is now overwritten by the first level (index 0) of air_ap.dvar which is different (" +str(value_new)+").") + self.__dict__['d'+var] = value_new # gammatheta, gammaq, gammau, gammav are updated here. 
self.__dict__['gamma'+var] = \ @@ -782,8 +788,8 @@ def init(self): self.run_mixed_layer() def timestep(self): - self.dtmax = +np.inf + self.logger.debug('before stats') self.statistics() diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index ad0fa43..b37225a 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -156,10 +156,12 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu fnout = args.path_output+"/"+format(STN.name,'05d')+"_ini.yaml" fnout_afternoon = args.path_output+"/"+format(STN.name,'05d')+"_end.yaml" + fnout_diag = args.path_output+"/"+format(STN.name,'05d')+"_diag.pkl" # c4glfiles = dict([(EXP,odirexperiments[EXP]+'/'+format(STN['ID'],'05d')+'.yaml') \ # for EXP in experiments.keys()]) + dict_diag_station = {} with open(fnout,'w') as fileout, \ open(fnout_afternoon,'w') as fileout_afternoon: wy_strm = wyoming(PATH=args.path_input, STNM=STN.name) @@ -191,23 +193,25 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # before sunrise is ignored by the simulation. logic['daylight'] = \ ((c4gli.pars.ldatetime - - c4gli.pars.lSunrise).total_seconds()/3600. >= -4.) + c4gli.pars.lSunrise).total_seconds()/3600. >= -3) logic['springsummer'] = (c4gli.pars.theta > 278.) + print(c4gli.pars.theta) # we take 3000 because previous analysis (ie., HUMPPA) has # focussed towards such altitude le3000 = (c4gli.air_balloon.z <= 3000.) 
- logic['10measurements'] = (np.sum(le3000) >= 7) + logic['7measurements'] = (np.sum(le3000) >= 7) - leh = (c4gli.air_balloon.z <= c4gli.pars.h) + leh = (c4gli.air_balloon.z < c4gli.pars.h) logic['mlerrlow'] = (\ (len(np.where(leh)[0]) > 0) and \ # in cases where humidity is not defined, the mixed-layer # values get corr - (not np.isnan(c4gli.pars.theta)) and \ - (rmse(c4gli.air_balloon.theta[leh] , c4gli.pars.theta,filternan_actual=True) < 1.8)\ + (not np.isnan(c4gli.pars.theta)) and ( + # in cases where humidity is not defined, the mixed-layer + (rmse(c4gli.air_balloon.theta[leh] , c4gli.pars.theta,filternan_actual=True) < 1.5))\ ) @@ -216,8 +220,11 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu print('logic:', logic) # the result morning_ok = np.mean(list(logic.values())) + logic['morning_ok'] = morning_ok print(morning_ok,c4gli.pars.ldatetime) + + # except: # morning_ok =False # print('obtain morning not good') @@ -227,6 +234,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu wy_strm.find_next() # If the morning is ok, then we try to find a decent afternoon # sounding + logic_afternoon_def =dict() + if morning_ok == 1.: print('MORNING OK!') # we get the current date @@ -252,6 +261,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # minimum criteria specified by logic_afternoon print(current_date,current_date_afternoon) c4gli_afternoon_for_dump = None + afternoon_first = True while ((current_date_afternoon == current_date) and \ (wy_strm.current is not None)): logic_afternoon =dict() @@ -263,22 +273,36 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # stable boundary layer (yielding very low mixed layer # heights) is formed which can not be represented by # class. 
- logic_afternoon['daylight'] = \ + logic_afternoon['afternoon_daylight'] = \ ((c4gli_afternoon.pars.ldatetime - \ c4gli_afternoon.pars.lSunset \ - ).total_seconds()/3600. <= -2.) - + ).total_seconds()/3600. <= -1.) le3000_afternoon = \ (c4gli_afternoon.air_balloon.z <= 3000.) - logic_afternoon['5measurements'] = \ + logic_afternoon['afternoon_7measurements'] = \ (np.sum(le3000_afternoon) >= 7) + leh = (c4gli_afternoon.air_balloon.z < c4gli_afternoon.pars.h) + logic_afternoon['afternoon_mlerrlow'] = (\ + (len(np.where(leh)[0]) > 0) and \ + # in cases where humidity is not defined, the mixed-layer + # values get corr + (not np.isnan(c4gli_afternoon.pars.theta)) and ( + # in cases where humidity is not defined, the mixed-layer + (rmse(c4gli_afternoon.air_balloon.theta[leh] , c4gli_afternoon.pars.theta,filternan_actual=True) < 1.5))\ + ) + # we only store the last afternoon sounding that fits these # minimum criteria afternoon_ok = np.mean(list(logic_afternoon.values())) + #we set the definitive first afternoon logic only the first time. We only set a new one later if we find a good one. 
+ if afternoon_first: + logic_afternoon_def = {**logic_afternoon,**dict()} + afternoon_first = False + print('logic_afternoon: ',logic_afternoon) print(afternoon_ok,c4gli_afternoon.pars.ldatetime) if afternoon_ok == 1.: @@ -288,7 +312,10 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # so we just create a new one from the same wyoming profile c4gli_afternoon_for_dump = class4gl_input() c4gli_afternoon_for_dump.get_profile_wyoming(wy_strm) + logic_afternoon_def = {**logic_afternoon,**dict()} + afternoon_set = True + wy_strm.find_next() c4gli_afternoon.clear() c4gli_afternoon.get_profile_wyoming(wy_strm) @@ -301,43 +328,65 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu else: # a dummy date: this will be ignored anyway current_date_afternoon = dt.date(1900,1,1) + if not afternoon_set: + logic_afternoon_def = {**logic_afternoon,**dict()} # Only in the case we have a good pair of soundings, we # dump them to disk + logic_afternoon_def['afternoon_ok'] = False if c4gli_afternoon_for_dump is not None: + logic_afternoon_def['afternoon_ok'] = True c4gli.update(source='pairs',pars={'runtime' : \ int((c4gli_afternoon_for_dump.pars.datetime_daylight - c4gli.pars.datetime_daylight).total_seconds())}) + logic_afternoon_def['runtime_ok'] = False print('ALMOST...') - if c4gli.pars.runtime > 3600*4.: # more than 4 hours simulation - + if c4gli.pars.runtime >= 3600*4.: # more than 4 hours simulation + + logic_afternoon_def['runtime_ok'] = True + logic_afternoon_def['not_polar'] = False + if abs(c4gli.pars.lat) < 70.: + logic_afternoon_def['not_polar'] = True + logic_afternoon_def['global_parameters_ok'] = False - c4gli.get_global_input(globaldata) - print('VERY CLOSE...') - if c4gli.check_source_globaldata() and \ - (c4gli.check_source(source='wyoming',\ - check_only_sections='pars')): - print('starting dumps') - c4gli.dump(fileout) - print('file morning dumped') - 
c4gli_afternoon_for_dump.dump(fileout_afternoon) - print('file afternoon dumped') - - - # for keyEXP,dictEXP in experiments.items(): - # - # c4gli.update(source=keyEXP,pars = dictEXP) - # c4gl = class4gl(c4gli) - # # c4gl.run() - # - # c4gl.dump(c4glfiles[key]) - - print('HIT!!!') - one_run = True + c4gli.get_global_input(globaldata) + print('VERY CLOSE...') + if c4gli.check_source_globaldata() and \ + (c4gli.check_source(source='wyoming',\ + check_only_sections='pars')): + + logic_afternoon_def['global_parameters_ok'] = True + print('starting dumps') + c4gli.dump(fileout) + print('file morning dumped') + c4gli_afternoon_for_dump.dump(fileout_afternoon) + print('file afternoon dumped') + + + # for keyEXP,dictEXP in experiments.items(): + # + # c4gli.update(source=keyEXP,pars = dictEXP) + # c4gl = class4gl(c4gli) + # # c4gl.run() + # + # c4gl.dump(c4glfiles[key]) + + print('HIT!!!') + one_run = True except: print('get profile failed') + dict_diag_day = {} + + dict_diag_day = {**dict_diag_day , **c4gli.pars.__dict__} + dict_diag_day = {**dict_diag_day , **logic} + dict_diag_day = {**dict_diag_day , **logic_afternoon_def} + for key,value in dict_diag_day.items(): + if key not in dict_diag_station.keys(): + dict_diag_station[key] = {} + dict_diag_station[key][(c4gli.pars.STNID,c4gli.pars.ldatetime)] = dict_diag_day[key] + pd.DataFrame.from_dict(dict_diag_station).to_pickle(fnout_diag) if one_run: #STN.name = STN.name diff --git a/class4gl/setup/update_input.py b/class4gl/setup/update_input.py index 635e1c0..9f1a894 100644 --- a/class4gl/setup/update_input.py +++ b/class4gl/setup/update_input.py @@ -23,11 +23,11 @@ parser.add_argument('--station_id') # run a specific station id parser.add_argument('--error_handling',default='dump_on_success') parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv']) -parser.add_argument('--subset_input',default='morning') # this tells which yaml subset +parser.add_argument('--subset_input',default='ini') # this 
tells which yaml subset # to initialize with. # Most common options are # 'morning' and 'ini'. -parser.add_argument('--subset_output',default='morning') +parser.add_argument('--subset_output',default='ini') # Tuntime is usually specified from the afternoon profile. You can also just @@ -58,6 +58,10 @@ "ERAINT:q" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc", "ERAINT:u" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc", "ERAINT:v" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc", + "ERAINT:v" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc", + "ERAINT:v" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc", + "ERA5:sshf" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/slhf_1hourly/slhf_*_1hourly.nc', + "ERA5:slhf" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/sshf_1hourly/sshf_*_1hourly.nc', }} # ... and load initial data pages diff --git a/class4gl/setup/update_setup.py b/class4gl/setup/update_setup.py deleted file mode 100644 index a36b60a..0000000 --- a/class4gl/setup/update_setup.py +++ /dev/null @@ -1,327 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Purpose: - update variables in class4gl yaml files, eg., when you need new categorical - values in the table. 
- - -""" -import pandas as pd -import io -import os -import numpy as np -import datetime as dt -import sys -import pytz -import math -import dateutil.parser - -import argparse - - -#if __name__ == '__main__': -parser = argparse.ArgumentParser() -parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') -parser.add_argument('--first_station_row') -parser.add_argument('--last_station_row') -parser.add_argument('--path_output') -parser.add_argument('--diag_tropo',default=None)#['advt','advq','advu','advv']) -parser.add_argument('--station_id') # run a specific station id -parser.add_argument('--mode',default='ini') # run a specific station id -# this is the type of the yaml that needs to be updated. Can be 'ini' or 'mod' -parser.add_argument('--updates') -parser.add_argument('--subset_input',default='morning') # this tells which yaml subset -parser.add_argument('--subset_output',default='morning') # this tells which yaml subset - # to update in the yaml - # dataset. - # Most common options are - # 'morning' and 'ini'. 
- -parser.add_argument('--split_by',default=-1)# station soundings are split - -#parser.add_argument('--station-chunk',default=0) -parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') -parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations -parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations -parser.add_argument('--global_keys') -args = parser.parse_args() - -sys.path.insert(0, args.c4gl_path_lib) -from class4gl import class4gl_input, data_global,class4gl -from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records -from class4gl import blh,class4gl_input - -# iniitialize global data -globaldata = data_global() -if 'era_profiles' in args.updates.strip().split(" "): - globaldata.sources = {**globaldata.sources,**{ - "ERAINT:t" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/t_6hourly/t_*_6hourly.nc", - "ERAINT:q" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/q_6hourly/q_*_6hourly.nc", - "ERAINT:u" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/u_6hourly/u_*_6hourly.nc", - "ERAINT:v" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/v_6hourly/v_*_6hourly.nc", - }} - -# ... and load initial data pages -globaldata.load_datasets(recalc=0) - - -print("getting stations") -# these are all the stations that are found in the input dataset -all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False) - -print('defining all_stations_select') -# these are all the stations that are supposed to run by the whole batch (all -# chunks). We narrow it down according to the station(s) specified. 
-if args.station_id is not None: - print("Selecting station by ID") - stations_iter = stations_iterator(all_stations) - STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) - all_stations_select = pd.DataFrame([run_station]) -else: - print("Selecting stations from a row range in the table") - all_stations_select = pd.DataFrame(all_stations.table) - if args.last_station_row is not None: - all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] - if args.first_station_row is not None: - all_stations_select = all_station_select.iloc[int(args.first_station):] -print("station numbers included in the whole batch "+\ - "(all chunks):",list(all_stations_select.index)) - -print("getting all records of the whole batch") -all_records_morning_select = get_records(all_stations_select,\ - args.path_input,\ - subset=args.subset_input, - refetch_records=False, - ) - -# only run a specific chunck from the selection -if args.global_chunk_number is not None: - if args.station_chunk_number is not None: - raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') - - - # if not (int(args.split_by) > 0) : - # raise ValueError("global_chunk_number is specified, but --split-by is not a strict positive number, so I don't know how to split the batch into chunks.") - - run_station_chunk = None - print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') - totalchunks = 0 - stations_iter = all_stations_select.iterrows() - in_current_chunk = False - try: - while not in_current_chunk: - istation,current_station = stations_iter.__next__() - all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) - #chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) - - chunks_current_station = len(all_records_morning_station_select.query('STNID == 
'+str(current_station.name)).chunk.unique()) - print('chunks_current_station',chunks_current_station) - - in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - - if in_current_chunk: - run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] - run_station_chunk =all_records_morning_station_select.query('STNID == '+str(current_station.name)).chunk.unique()[int(args.global_chunk_number) - totalchunks ] - - totalchunks +=chunks_current_station - - - except StopIteration: - raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') - print("station = ",list(run_stations.index)) - print("station chunk number:",run_station_chunk) - -# if no global chunk is specified, then run the whole station selection in one run, or -# a specific chunk for each selected station according to # args.station_chunk_number -else: - run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] - if args.station_chunk_number is not None: - run_station_chunk = int(args.station_chunk_number) - print("station(s) that is processed.",list(run_stations.index)) - print("chunk number: ",run_station_chunk) - else: - if args.split_by != -1: - raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. 
Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.") - run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) - - -#print(all_stations) -print('Fetching current records') -records_input = get_records(run_stations,\ - args.path_input,\ - subset=args.subset_input, - refetch_records=False, - ) - -# if args.timestamp is None: -# backupdir = args.path_input+'/'+dt.datetime.now().isoformat()+'/' -# else: -# backupdir = args.path_input+'/'+args.timestamp+'/' -# print('creating backup dir: '+backupdir) -# os.system('mkdir -p "'+backupdir+'"') - - -os.system('mkdir -p '+args.path_output) - -for istation,current_station in run_stations.iterrows(): - records_input_station = records_input.query('STNID == ' +\ - str(current_station.name)) - - records_input_station_chunk = records_input_station.query('STNID == ' +\ - str(current_station.name)+\ - '& chunk == '+str(run_station_chunk)) - print('lenrecords_input_station_chunk: ',len(records_input_station_chunk)) - print('split_by*run_station_chunk',int(args.split_by) * int(run_station_chunk)) - print('split_by*run_station_chunk+1',int(args.split_by) * int(run_station_chunk+1)) - - # if (int(args.split_by) * int(run_station_chunk)) >= (len(records_forcing_station)): - # print("warning: outside of profile number range for station "+\ - # str(current_station)+". Skipping chunk number for this station.") - if len(records_input_station_chunk) == 0: - print("warning: outside of profile number range for station "+\ - str(current_station)+". 
Skipping chunk number for this station.") - else: - # normal case - if ((int(args.split_by) > 0) or \ - (os.path.isfile(args.path_input+'/'+format(current_station.name,'05d')+'_'+\ - str(run_station_chunk)+'_'+args.subset_input+'.yaml'))): - fn_input = \ - args.path_input+'/'+format(current_station.name,'05d')+'_'+\ - str(run_station_chunk)+'_'+args.subset_input+'.yaml' - file_input = \ - open(fn_input,'r') - fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\ - str(run_station_chunk)+'_'+args.subset_output+'.yaml' - file_output = \ - open(fn_output,'w') - # fn_forcing_pkl = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+\ - # str(run_station_chunk)+'_'+args.subset_forcing+'.pkl' - - # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\ - # str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' - # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\ - # str(run_station_chunk)+'_'+args.subset_forcing+'.pkl' - else: - print("\ -Warning. We are choosing chunk 0 without specifying it in filename. 
\ - No-chunk naming will be removed in the future."\ - ) - - fn_input = \ - args.path_input+'/'+format(current_station.name,'05d')+'_'+\ - args.subset_input+'.yaml' - file_input = \ - open(fn_input,'r') - fn_output = args.path_output+'/'+'/'+format(current_station.name,'05d')+'_'+\ - str(run_station_chunk)+'_'+args.subset_output+'.yaml' - file_output = \ - open(fn_output,'w') - # fn_forcing_pkl = args.path_forcing+format(current_station.name,'05d')+'_'+\ - # str(run_station_chunk)+'_'+args.subset_forcing+'.pkl' - - # fn_backup = backupdir+format(current_station.name,'05d')+'_'+\ - # str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' - # fn_backup_pkl = backupdir+format(current_station.name,'05d')+'_'+\ - # args.subset_forcing+'.pkl' - - onerun = False - print('starting station chunk number: '\ - +str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)') - - #records_forcing_station_chunk = records_forcing_station[(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] - - # records_forcing_station_chunk = records_forcing.query('STNID == ' +\ - # str(current_station.name)+\ - # '& chunk == '+str(run_station_chunk)) - isim = 0 - for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows(): - print('starting '+str(isim+1)+' out of '+\ - str(len(records_input_station_chunk) )+\ - ' (station total: ',str(len(records_input_station)),')') - - c4gli_output = get_record_yaml(file_input, - record_input.index_start, - record_input.index_end, - mode=args.mode) - if args.diag_tropo is not None: - seltropo = (c4gli_input.air_ac.p > c4gli_input.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) - profile_tropo = c4gli_input.air_ac[seltropo] - for var in args.diag_tropo: - if var[:3] == 'adv': - mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) - c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) - else: - print("warning: tropospheric variable "+var+" not recognized") - if 
'era_profiles' in args.updates.strip().split(" "): - c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp']) - - c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp}) - - cp = 1005. # specific heat of dry air [J kg-1 K-1] - Rd = 287. # gas constant for dry air [J kg-1 K-1] - Rv = 461.5 # gas constant for moist air [J kg-1 K-1] - R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q) - rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t - dz = c4gli_output.air_ac.delpdgrav/rho - z = [dz.iloc[-1]/2.] - for idz in list(reversed(range(0,len(dz)-1,1))): - z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.) - z = list(reversed(z)) - - theta = c4gli_output.air_ac.t * \ - (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp) - thetav = theta*(1. + 0.61 * c4gli_output.air_ac.q) - - - c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z), - 'theta':list(theta), - 'thetav':list(thetav), - })) - air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1) - air_ap_mode = 'b' - air_ap_input_source = c4gli_output.query_source('air_ac:theta') - - - c4gli_output.mixed_layer_fit(air_ap=air_ap_input, - source=air_ap_input_source, - mode=air_ap_mode) - - if not c4gli_output.check_source_globaldata(): - print('Warning: some input sources appear invalid') - - - - #print('c4gli_forcing_ldatetime',c4gli_forcing.pars.ldatetime) - - # if args.global_keys is not None: - # print(args.global_keys.strip(' ').split(' ')) - # c4gli_forcing.get_global_input( - # globaldata, - # only_keys=args.global_keys.strip(' ').split(' ') - # ) - - c4gli_output.dump(file_output) - - - onerun = True - isim += 1 - - - file_input.close() - file_output.close() - - if onerun: - # os.system('mv "'+fn_forcing+'" "'+fn_backup+'"') - # if os.path.isfile(fn_forcing_pkl): - # os.system('mv "'+fn_forcing_pkl+'" "'+fn_backup_pkl+'"') - # os.system('mv "'+fn_experiment+'" "'+fn_forcing+'"') - # print('mv "'+fn_experiment+'" "'+fn_forcing+'"') - 
records_forcing_current_cache = get_records(pd.DataFrame([current_station]),\ - args.path_output+'/'+'/',\ - getchunk = int(run_station_chunk),\ - subset=args.subset_output, - refetch_records=True, - ) - diff --git a/class4gl/simulations/batch_simulations.pbs b/class4gl/simulations/batch_simulations.pbs index 4f12ccc..2426063 100644 --- a/class4gl/simulations/batch_simulations.pbs +++ b/class4gl/simulations/batch_simulations.pbs @@ -8,10 +8,10 @@ #PBS -N c4gl_sim module purge -source ~/.bashrc -echo loading modules: $LOADDEPSCLASS4GL -$LOADDEPSCLASS4GL +# echo loading modules: $LOADDEPSCLASS4GL +# $LOADDEPSCLASS4GL +source ~/load_anaconda.sh EXEC_ALL="python $C4GLJOB_exec --global_chunk_number $PBS_ARRAYID" diff --git a/class4gl/simulations/copy_update.py b/class4gl/simulations/copy_update.py index 2410d2c..8dcea02 100644 --- a/class4gl/simulations/copy_update.py +++ b/class4gl/simulations/copy_update.py @@ -9,308 +9,324 @@ import pytz import math -import argparse - -#if __name__ == '__main__': -parser = argparse.ArgumentParser() -#parser.add_argument('--timestamp') -parser.add_argument('--path_input')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/SOUNDINGS/') -parser.add_argument('--path_output')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') -parser.add_argument('--first_station_row') -parser.add_argument('--last_station_row') -parser.add_argument('--updates') -parser.add_argument('--station_id') # run a specific station id -parser.add_argument('--error_handling',default='dump_on_success') -parser.add_argument('--diag_tropo',default=['advt','advq','advu','advv']) -parser.add_argument('--subset_input',default='morning') # this tells which yaml subset - # to initialize with. - # Most common options are - # 'morning' and 'ini'. 
-parser.add_argument('--subset_output',default='morning') +arguments = [] +#parser.add_argument('--timestamp') +arguments.append(dict(arg='--path_forcing',\ + help='directory of forcing data to initialize and constrain the ABL model simulations')) +arguments.append(dict(arg='--path_output', + help='output directory in which the output as subdirectories are stored'))#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') +arguments.append(dict(arg='--first_station_row',\ + help='starting row number of stations table')) +arguments.append(dict(arg='--last_station_row',\ + help='ending row number of stations table')) +arguments.append(dict(arg='--global_vars',\ + help="global vars to update")) +arguments.append(dict(arg='--station_id',\ + help="process a specific station id")) +arguments.append(dict(arg='--error_handling',\ + default='dump_on_success',\ + help="type of error handling: either\n - 'dump_on_success' (default)\n - 'dump_always'")) +arguments.append(dict(arg='--diag_tropo',\ + default=['advt','advq','advu','advv'],\ + help="field to diagnose the mean in the troposphere (<= 3000m)")) +arguments.append(dict(arg='--subset_forcing', + default='ini', + help="This indicates which yaml subset to initialize with. Most common options are 'ini' (default) and 'morning'.")) # Tuntime is usually specified from the afternoon profile. 
You can also just # specify the simulation length in seconds -parser.add_argument('--experiments') -parser.add_argument('--split_by',default=-1)# station soundings are split - # up in chunks - -#parser.add_argument('--station-chunk',default=0) -parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') -parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations -parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations -args = parser.parse_args() - -sys.path.insert(0, args.c4gl_path_lib) -from class4gl import class4gl_input, data_global,class4gl -from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records -from class4gl import blh,class4gl_input - -# this is a variant of global run in which the output of runs are still written -# out even when the run crashes. - -# #only include the following timeseries in the model output -# timeseries_only = \ -# ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', -# 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', -# 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', -# 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', -# 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] - +arguments.append(dict(arg='--split_by',\ + type=int, + help="the maxmimum number of soundings that are contained in each output file of a station. -1 means unlimited (default). 
In case of arrays experiments, this is usually overwritten by 50.")) -EXP_DEFS =\ -{ - 'ERA_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_ERA_NEW':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_ADV_SHR':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'sw_shr':True}, - 'GLOBAL_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'GLOBAL_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, - 'IOPS_NOAC': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, - 'IOPS_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, - 'IOPS_W': {'sw_ac' : ['w',],'sw_ap': True,'sw_lit': False}, - 'IOPS_AC': {'sw_ac' : ['adv','w'],'sw_ap': True,'sw_lit': False}, -} +#arguments.append(dict(arg='--station-chunk',default=0) +arguments.append(dict(arg='--c4gl_path_lib',help="the path of the CLASS4GL program"))#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') +arguments.append(dict(arg='--global_chunk_number',help="this is the batch number of the expected series of experiments according to split_by")) +arguments.append(dict(arg='--station_chunk_number',help="this is the batch number according to split_by in case of considering one station")) -# ======================== -print("getting a list of stations") -# ======================== -# these are all the stations that are found in the input dataset -all_stations = stations(args.path_input,suffix=args.subset_input,refetch_stations=False) +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + #parser.add_argument('--timestamp') + for argument in arguments: + name = argument.pop('arg') 
+ parser.add_argument(name,**argument) -# ==================================== -print('defining all_stations_select') -# ==================================== - -# these are all the stations that are supposed to run by the whole batch (all -# chunks). We narrow it down according to the station(s) specified. - - - -if args.station_id is not None: - print("Selecting station by ID") - stations_iter = stations_iterator(all_stations) - STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) - all_stations_select = pd.DataFrame([run_station]) + args = parser.parse_args() else: - print("Selecting stations from a row range in the table") - all_stations_select = pd.DataFrame(all_stations.table) - if args.last_station_row is not None: - all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] - if args.first_station_row is not None: - all_stations_select = all_station_select.iloc[int(args.first_station):] -print("station numbers included in the whole batch "+\ - "(all chunks):",list(all_stations_select.index)) - -print(all_stations_select) -print("getting all records of the whole batch") -all_records_input_select = get_records(all_stations_select,\ - args.path_input,\ - subset=args.subset_input, - refetch_records=False, - ) - -# only run a specific chunck from the selection -if args.global_chunk_number is not None: - if args.station_chunk_number is not None: - raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') - - - if not (int(args.split_by) > 0) : - raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") - - run_station_chunk = None - print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') - totalchunks = 0 - stations_iter = all_stations_select.iterrows() - in_current_chunk = False - try: - while not in_current_chunk: - 
istation,current_station = stations_iter.__next__() - all_records_input_station_select = all_records_input_select.query('STNID == '+str(current_station.name)) - chunks_current_station = math.ceil(float(len(all_records_input_station_select))/float(args.split_by)) - print('chunks_current_station',chunks_current_station) - in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) - - if in_current_chunk: - run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] - run_station_chunk = int(args.global_chunk_number) - totalchunks - - totalchunks +=chunks_current_station + class Namespace: + def __init__(self,**kwargs): + self.__dict__.update(kwargs) + + args = Namespace() + for argument in arguments: + if 'default' in argument.keys(): + args.__dict__[argument['arg'].strip('-')] = argument['default'] + else: + args.__dict__[argument['arg'].strip('-')] = None + print(args.__dict__) - except StopIteration: - raise ValueError("Could not determine station chunk number. 
--global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') - print("station = ",list(run_stations.index)) - print("station chunk number:",run_station_chunk) - -# if no global chunk is specified, then run the whole station selection in one run, or -# a specific chunk for each selected station according to # args.station_chunk_number -else: - run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] - if args.station_chunk_number is not None: - run_station_chunk = int(args.station_chunk_number) - print("station(s) that is processed.",list(run_stations.index)) - print("chunk number: ",run_station_chunk) +def execute(**kwargs): + # note that with args, we actually mean the same as those specified with + # the argparse module above + + # overwrite the args according to the kwargs when the procedure is called + # as module function + for key,value in kwargs.items(): + args.__dict__[key] = value + + print("-- begin arguments --") + for key,value in args.__dict__.items(): + print(key,': ',value) + print("-- end arguments ----") + + # load specified class4gl library + if args.c4gl_path_lib is not None: + sys.path.insert(0, args.c4gl_path_lib) + + from class4gl import class4gl_input, data_global,class4gl + from interface_multi import stations,stations_iterator, records_iterator,get_record_yaml,get_records + from class4gl import blh,class4gl_input + + # this is a variant of global run in which the output of runs are still written + # out even when the run crashes. 
+ + # #only include the following timeseries in the model output + # timeseries_only = \ + # ['Cm', 'Cs', 'G', 'H', 'L', 'LE', 'LEpot', 'LEref', 'LEsoil', 'LEveg', 'Lwin', + # 'Lwout', 'Q', 'RH_h', 'Rib', 'Swin', 'Swout', 'T2m', 'dq', 'dtheta', + # 'dthetav', 'du', 'dv', 'esat', 'gammaq', 'gammatheta', 'h', 'q', 'qsat', + # 'qsurf', 'ra', 'rs', 'theta', 'thetav', 'time', 'u', 'u2m', 'ustar', 'uw', + # 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] + + if (args.global_vars is not None): + globaldata = data_global() + globaldata.load_datasets(recalc=0) + + + # ======================== + print("getting a list of stations") + # ======================== + + # these are all the stations that are found in the input dataset + all_stations = stations(args.path_forcing,suffix=args.subset_forcing,refetch_stations=False) + + # ==================================== + print('defining all_stations_select') + # ==================================== + + # these are all the stations that are supposed to run by the whole batch (all + # chunks). We narrow it down according to the station(s) specified. + + if args.station_id is not None: + print("Selecting station by ID") + stations_iter = stations_iterator(all_stations) + STNID,run_station = stations_iter.set_STNID(STNID=int(args.station_id)) + all_stations_select = pd.DataFrame([run_station]) else: - if args.split_by != -1: - raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. 
Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.") - run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) - - -#print(all_stations) -print('Fetching initial/forcing records') -records_input = get_records(run_stations,\ - args.path_input,\ - subset=args.subset_input, - refetch_records=False, - ) - - - -os.system('mkdir -p '+args.path_output) -for istation,current_station in run_stations.iterrows(): - print(istation,current_station) - records_input_station = records_input.query('STNID == '+str(current_station.name)) - if (int(args.split_by) * int(run_station_chunk)) >= (len(records_input_station)): - print("warning: outside of profile number range for station "+\ - str(current_station)+". Skipping chunk number for this station.") + print("Selecting stations from a row range in the table") + all_stations_select = pd.DataFrame(all_stations.table) + if args.last_station_row is not None: + all_stations_select = all_station_select.iloc[:(int(args.last_station)+1)] + if args.first_station_row is not None: + all_stations_select = all_station_select.iloc[int(args.first_station):] + print("station numbers included in the whole batch "+\ + "(all chunks):",list(all_stations_select.index)) + + print(all_stations_select) + print("getting all records of the whole batch") + all_records_morning_select = get_records(all_stations_select,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + + # only run a specific chunck from the selection + if args.global_chunk_number is not None: + if args.station_chunk_number is not None: + raise ValueError('You need to specify either global-chunk-number or station-chunk-number, not both.') + + if (args.split_by is None) or (args.split_by <= 0): + raise ValueError("global_chunk_number is specified, but --split_by is not a strict positive number, so I don't know how to split the batch into chunks.") + + run_station_chunk = None + 
print('determining the station and its chunk number according global_chunk_number ('+args.global_chunk_number+')') + totalchunks = 0 + stations_iter = all_stations_select.iterrows() + in_current_chunk = False + try: + while not in_current_chunk: + istation,current_station = stations_iter.__next__() + all_records_morning_station_select = all_records_morning_select.query('STNID == '+str(current_station.name)) + chunks_current_station = math.ceil(float(len(all_records_morning_station_select))/float(args.split_by)) + print('chunks_current_station',chunks_current_station) + in_current_chunk = (int(args.global_chunk_number) < (totalchunks+chunks_current_station)) + + if in_current_chunk: + run_stations = pd.DataFrame([current_station])# run_stations.loc[(int(args.__dict__['last_station'])] + run_station_chunk = int(args.global_chunk_number) - totalchunks + + totalchunks +=chunks_current_station + + + except StopIteration: + raise ValueError("Could not determine station chunk number. --global_chunk_number ("+args.global_chunk_number+") outside of range [0,"+ str(totalchunks)+'[') + print("station = ",list(run_stations.index)) + print("station chunk number:",run_station_chunk) + + # if no global chunk is specified, then run the whole station selection in one run, or + # a specific chunk for each selected station according to # args.station_chunk_number else: - fn_input = args.path_input+'/'+format(current_station.name,'05d')+'_'+args.subset_input+'.yaml' - if os.path.isfile(fn_input): - file_input = open(fn_input) + run_stations = pd.DataFrame(all_stations_select)# run_stations.loc[(int(args.__dict__['last_station'])] + if args.station_chunk_number is not None: + run_station_chunk = int(args.station_chunk_number) + print("station(s) that is processed.",list(run_stations.index)) + print("chunk number: ",run_station_chunk) else: - fn_input = \ - args.path_input+'/'+format(current_station.name,'05d')+\ - '_'+str(run_station_chunk)+'_'+args.subset_input+'.yaml' - file_input = 
open(fn_input) - - fn_output = args.path_output+'/'+format(current_station.name,'05d')+'_'+\ - str(int(run_station_chunk))+'_'+args.subset_output+'.yaml' - file_output = open(fn_output,'w') - + if args.split_by is not None: + raise ValueError("Chunks are defined by --split_by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split_by.") + run_station_chunk = 0 + print("stations that are processed.",list(run_stations.index)) + + + #print(all_stations) + print('Fetching initial/forcing records') + records_morning = get_records(run_stations,\ + args.path_forcing,\ + subset=args.subset_forcing, + refetch_records=False, + ) + if len(records_morning) == 0: + raise IOError("No initialization records records found in "+\ + args.path_forcing+' (subset: '+args_subset_forcing+')') + + # note that if runtime is an integer number, we don't need to get the afternoon + # profiles. + + + + path_output = args.path_output + + os.system('mkdir -p '+path_output) + for istation,current_station in run_stations.iterrows(): + # records_morning_station = records_morning.query('STNID == '+str(current_station.name)) + records_morning_station = records_morning.loc[(current_station.name):(current_station.name)] + + fn_morning = args.path_forcing+'/'+format(current_station.name,'05d')+'_'+args.subset_forcing+'.yaml' + if os.path.isfile(fn_morning): + file_morning = open(fn_morning) + else: + fn_morning = \ + args.path_forcing+'/'+format(current_station.name,'05d')+\ + '_'+str(run_station_chunk)+'_'+args.subset_forcing+'.yaml' + file_morning = open(fn_morning) + + # if args.runtime == 'from_profile_pair': + # file_afternoon = open(args.path_forcing+'/'+format(current_station.name,'05d')+'_end.yaml') + fn_ini = path_output+'/'+format(current_station.name,'05d')+'_'+\ + str(int(run_station_chunk))+'_ini.yaml' + file_ini = open(fn_ini,'w') + #iexp = 0 onerun = False print('starting station chunk number: '\ - 
+str(run_station_chunk)+'(size: '+str(args.split_by)+' soundings)') - - records_input_station_chunk = records_input_station.iloc[((run_station_chunk)*int(args.split_by)):((run_station_chunk+1)*int(args.split_by))] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] - - isim = 0 - for (STNID,chunk,index),record_input in records_input_station_chunk.iterrows(): - print('starting '+str(isim+1)+' out of '+\ - str(len(records_input_station_chunk) )+\ - ' (station total: ',str(len(records_input_station)),')') - - - c4gli_output = get_record_yaml(file_input, - record_input.index_start, - record_input.index_end, - mode='ini') - if args.diag_tropo is not None: - print('add tropospheric parameters on advection and subsidence (for diagnosis)') - seltropo = (c4gli_input.air_ac.p > c4gli_input.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) - profile_tropo = c4gli_input.air_ac[seltropo] - for var in args.diag_tropo:#['t','q','u','v',]: - if var[:3] == 'adv': - mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) - c4gli_output.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) - else: - print("warning: tropospheric variable "+var+" not recognized") - - - if 'era_profiles' in args.updates.strip().split(" "): - c4gli_output.get_global_input(globaldata,only_keys=['t','u','v','q','sp']) - - c4gli_output.update(source='era-interim',pars={'Ps' : c4gli_output.pars.sp}) - - cp = 1005. # specific heat of dry air [J kg-1 K-1] - Rd = 287. # gas constant for dry air [J kg-1 K-1] - Rv = 461.5 # gas constant for moist air [J kg-1 K-1] - R = (Rd*(1.-c4gli_output.air_ac.q) + Rv*c4gli_output.air_ac.q) - rho = c4gli_output.air_ac.p/R/c4gli_output.air_ac.t - dz = c4gli_output.air_ac.delpdgrav/rho - z = [dz.iloc[-1]/2.] - for idz in list(reversed(range(0,len(dz)-1,1))): - z.append(z[-1]+ (dz[idz+1]+dz[idz])/2.) 
- z = list(reversed(z)) - - theta = c4gli_output.air_ac.t * \ - (c4gli_output.pars.sp/(c4gli_output.air_ac.p))**(R/cp) - thetav = theta*(1. + 0.61 * c4gli_output.air_ac.q) - - - c4gli_output.update(source='era-interim',air_ac=pd.DataFrame({'z':list(z), - 'theta':list(theta), - 'thetav':list(thetav), - })) - air_ap_input = c4gli_output.air_ac[::-1].reset_index().drop('index',axis=1) - air_ap_mode = 'b' - air_ap_input_source = c4gli_output.query_source('air_ac:theta') - - - c4gli_output.mixed_layer_fit(air_ap=air_ap_input, - source=air_ap_input_source, - mode=air_ap_mode) - - - + +str(run_station_chunk)+' (chunk size:',args.split_by,')') + + skip_chunk = False + if 'chunk' in records_morning.index.names: + records_morning_station_chunk = records_morning_station.loc[(current_station.name,run_station_chunk):(current_station.name,run_station_chunk)] + else: + start_record = run_station_chunk*args.split_by if run_station_chunk is not 0 else 0 + end_record = (run_station_chunk+1)*args.split_by if args.split_by is not None else None + if start_record >= (len(records_morning_station)): + print("warning: outside of profile number range for station "+\ + str(current_station)+". 
Skipping chunk number for this station.") + skip_chunk = True + records_morning_station_chunk = None + else: + records_morning_station_chunk = records_morning_station.iloc[start_record:end_record] # [(int(args.split_by)*run_station_chunk):(int(args.split_by)*(run_station_chunk+1))] + + if not skip_chunk: + + isim = 0 + for (STNID,chunk,index),record_morning in records_morning_station_chunk.iterrows(): + print('starting '+str(isim+1)+' out of '+\ + str(len(records_morning_station_chunk) )+\ + ' (station total: ',str(len(records_morning_station)),')') - c4gli_output.dump(file_output) + + c4gli_morning = get_record_yaml(file_morning, + record_morning.index_start, + record_morning.index_end, + mode='model_input') - file_output.close() - file_input.close() - - if onerun: - records_output = get_records(pd.DataFrame([current_station]),\ - args.path_output,\ - getchunk = int(run_station_chunk),\ - subset=args.subset_output, - refetch_records=True, - ) - else: - # remove empty files - os.system('rm '+fn_output) - -# # align afternoon records with initial records, and set same index -# records_afternoon.index = records_afternoon.ldatetime.dt.date -# records_afternoon = records_afternoon.loc[records_output.ldatetime.dt.date] -# records_afternoon.index = records_output.index - -# stations_for_iter = stations(path_exp) -# for STNID,station in stations_iterator(stations_for_iter): -# records_current_station_index = \ -# (records_output.index.get_level_values('STNID') == STNID) -# file_current_station_mod = STNID -# -# with \ -# open(path_exp+'/'+format(STNID,"05d")+'_output.yaml','r') as file_station_output, \ -# open(path_exp+'/'+format(STNID,"05d")+'_mod.yaml','r') as file_station_mod, \ -# open(path_input+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: -# for (STNID,index),record_output in records_iterator(records_output): -# c4gli_output = get_record_yaml(file_station_output, -# record_output.index_start, -# record_output.index_end, -# mode='ini') 
-# #print('c4gli_in_ldatetime 3',c4gli_output.pars.ldatetime) -# -# record_mod = records_mod.loc[(STNID,index)] -# c4gl_mod = get_record_yaml(file_station_mod, -# record_mod.index_start, -# record_mod.index_end, -# mode='mod') -# record_afternoon = records_afternoon.loc[(STNID,index)] -# c4gl_afternoon = get_record_yaml(file_station_afternoon, -# record_afternoon.index_start, -# record_afternoon.index_end, -# mode='ini') - + + + if args.global_vars is not None: + c4gli_morning.get_global_input(globaldata,only_keys=args.global_vars.strip().split(',')) + + onerun = True + + print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') + c4gli_morning.dump(file_ini) + + + isim += 1 + + + file_ini.close() + file_morning.close() + + if onerun: + records_ini = get_records(pd.DataFrame([current_station]),\ + path_output,\ + getchunk = int(run_station_chunk),\ + subset='ini', + refetch_records=True, + ) + else: + # remove empty files + os.system('rm '+fn_ini) + + # # align afternoon records with initial records, and set same index + # records_afternoon.index = records_afternoon.ldatetime.dt.date + # records_afternoon = records_afternoon.loc[records_ini.ldatetime.dt.date] + # records_afternoon.index = records_ini.index + + # stations_for_iter = stations(path_output) + # for STNID,station in stations_iterator(stations_for_iter): + # records_current_station_index = \ + # (records_ini.index.get_level_values('STNID') == STNID) + # file_current_station_end_mod = STNID + # + # with \ + # open(path_output+'/'+format(STNID,"05d")+'_ini.yaml','r') as file_station_ini, \ + # open(path_output+'/'+format(STNID,"05d")+'_end_mod.yaml','r') as file_station_end_mod, \ + # open(path_forcing+'/'+format(STNID,"05d")+'_afternoon.yaml','r') as file_station_afternoon: + # for (STNID,index),record_ini in records_iterator(records_ini): + # c4gli_ini = get_record_yaml(file_station_ini, + # record_ini.index_start, + # record_ini.index_end, + # mode='ini') + # #print('c4gli_in_ldatetime 
3',c4gli_ini.pars.ldatetime) + # + # record_end_mod = records_end_mod.loc[(STNID,index)] + # c4gl_end_mod = get_record_yaml(file_station_end_mod, + # record_end_mod.index_start, + # record_end_mod.index_end, + # mode='mod') + # record_afternoon = records_afternoon.loc[(STNID,index)] + # c4gl_afternoon = get_record_yaml(file_station_afternoon, + # record_afternoon.index_start, + # record_afternoon.index_end, + # mode='ini') + + +if __name__ == '__main__': + #execute(**vars(args)) + execute() diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index d759e28..9cefd61 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -108,8 +108,15 @@ def execute(**kwargs): # 'v', 'v2m', 'vw', 'wq', 'wtheta', 'wthetae', 'wthetav', 'wthetae', 'zlcl'] + # for iEXP in range(4): + # EXPKEY = 'LCZ'+str(iEXP) + # EXP_DEFS[EXPKEY] = {'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'urban':'LCZ'+str(iEXP)} + + EXP_DEFS =\ { + 'LCZ':{'sw_ac' : [],'sw_ap': True,'sw_lit': False, 'urban':'lcw1'}, + 'BASE':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, 'NOADV':{'sw_ac' : [],'sw_ap': True,'sw_lit': False}, diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py index bf0da9d..3ebe456 100644 --- a/class4gl/simulations/simulations_iter.py +++ b/class4gl/simulations/simulations_iter.py @@ -114,6 +114,7 @@ def execute(**kwargs): 'BASE_ITER_ADV':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False}, 'BASE_ITER_W_ADV':{'sw_ac' : ['adv',"w"],'sw_ap': True,'sw_lit': False}, 'BASE_ITER_W':{'sw_ac' : ["w"],'sw_ap': True,'sw_lit': False}, + 'BASE_ITER_ADV_B05':{'sw_ac' : ['adv',],'sw_ap': True,'sw_lit': False,'beta':0.2}, 'ERA_NOAC_ITER': {'sw_ac' : [],'sw_ap': True,'sw_lit': False}, diff --git a/setup.py b/setup.py index b82c7c4..a536320 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 
setup( name='class4gl', - version='0.9.5', + version='1.0.0', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From e07b40c3198cfe1d43ee463f573c3e9a841b096d Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Wed, 29 May 2019 14:35:04 +0200 Subject: [PATCH 125/129] assign next version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a536320..1506d2d 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='1.0.0', + version='1.0.1', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From 05fecf5340f033493e8450177c6093f207f5b4ed Mon Sep 17 00:00:00 2001 From: woutersh Date: Fri, 12 Feb 2021 22:33:48 +0100 Subject: [PATCH 126/129] Compatibility fixes --- class4gl/class4gl.py | 37 +++++++----- class4gl/data_global.py | 30 +++++---- class4gl/interface/interface_new_koeppen.py | 45 ++++++++------ class4gl/interface_functions.py | 5 ++ class4gl/interface_multi.py | 40 ++++++------ class4gl/model.py | 67 +++++++++++++++------ class4gl/setup/setup_igra.py | 2 + class4gl/simulations/batch_simulations.py | 11 ++-- class4gl/simulations/copy_update.py | 4 +- class4gl/simulations/simulations.py | 16 ++--- class4gl/simulations/simulations_iter.py | 9 +++ 11 files changed, 167 insertions(+), 99 deletions(-) diff --git a/class4gl/class4gl.py b/class4gl/class4gl.py index cdb85c8..f54db96 100644 --- a/class4gl/class4gl.py +++ b/class4gl/class4gl.py @@ -563,7 +563,7 
@@ def get_profile_wyoming(self,wy_strm,air_ap_mode = 'b'): # this crazy long line just loads the sounding parameter table into parameters object (using amongst others the pandas internal engine to detect the right value types (int, float, np.Datetime64 etc.)). dpars = {**dpars, - **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.convert_objects(convert_numeric=True).iloc[0].to_dict() + **pd.read_fwf(io.StringIO(str(string)),widths=[43,1,20],names=['descr','dummy','value']).iloc[1:-1].drop("dummy",1).set_index("descr").T.apply(pd.to_numeric,errors='ignore').iloc[0].to_dict() } # we get weird output when it's a numpy Timestamp, so we convert it to @@ -1972,22 +1972,27 @@ def dump(self,file,include_input=False,timeseries_only=None): dictout = {} dictoutlast = {} - if timeseries_only == None: - outvars = self.__dict__['out'].__dict__.keys() + if not 'out' in self.__dict__.keys(): + print('Warning: no timeseries section found in output.') else: - outvars = timeseries_only - for key in outvars: - dictout[key] = self.__dict__['out'].__dict__[key] - dictoutlast[key] = dictout[key][-1] - - if type(dictoutlast[key]).__module__ == 'numpy': - dictoutlast[key] = dictoutlast[key].item() - # convert numpy types to native python data types. This - # provides cleaner data IO with yaml: - if type(dictout[key]).__module__ == 'numpy': - dictout[key] = [ a.item() for a in \ - self.__dict__['out'].__dict__[key]] - #dictout[key] = list(dictout[key] ) + outvars = [] + if timeseries_only == None: + outvars = self.__dict__['out'].__dict__.keys() + else: + outvars = timeseries_only + + for key in outvars: + dictout[key] = self.__dict__['out'].__dict__[key] + dictoutlast[key] = dictout[key][-1] + + if type(dictoutlast[key]).__module__ == 'numpy': + dictoutlast[key] = dictoutlast[key].item() + # convert numpy types to native python data types. 
This + # provides cleaner data IO with yaml: + if type(dictout[key]).__module__ == 'numpy': + dictout[key] = [ a.item() for a in \ + self.__dict__['out'].__dict__[key]] + #dictout[key] = list(dictout[key] ) yaml.dump({'pars' : {**dictoutlast,**dictpars}},file) diff --git a/class4gl/data_global.py b/class4gl/data_global.py index c46b65e..564f0f1 100644 --- a/class4gl/data_global.py +++ b/class4gl/data_global.py @@ -153,7 +153,7 @@ def set_page(self,ipage,page=None): lon = self.page.lon.values lon[lon > 180.] -= 360. - self.page.lon.values = lon[:] + self.page.lon.values[:] = lon[:] self.page = self.page.rename(self.renames) @@ -226,10 +226,10 @@ def __init__(self,sources= { # 'GLEAM:w2' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/SMroot_*_GLEAM_v3.1a.nc:SMroot', # 'GLEAM:BR' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/BR_*_GLEAM_v3.1a.nc:BR', # 'GLEAM:EF' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/v3.1a/????/EF_*_GLEAM_v3.1a.nc:EF', - 'GLEAM:wg' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf', - 'GLEAM:w2' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc:SMroot', + # 'GLEAM:wg' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMsurf_*_GLEAM_v3.2a.nc:SMsurf', + # 'GLEAM:w2' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/SMroot_*_GLEAM_v3.2a.nc:SMroot', #'GLEAM:BR' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a/????/BR_*_GLEAM_v3.2a.nc:BR', - 'GLEAM:EF' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc:EF', + # 'GLEAM:EF' : '/user/data/gent/gvo000/gvo00090/GLEAM/data/GLEAM_v3.2/v3.2a_OUTPUT/????/EF_*_GLEAM_v3.2a.nc:EF', "IGBPDIS:alpha" : "/user/data/gent/gvo000/gvo00090/EXT/data/IGBP-DIS/FRACTIONS_GLEAMv31a.nc", "GLAS:z0m" : "/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1", "GLAS:z0h" : 
"/user/data/gent/gvo000/gvo00090/EXT/data/GLAS/global_canopy_height_0.25.nc:Band1", @@ -251,21 +251,25 @@ def __init__(self,sources= { "GIMMS:LAIpixel": "/user/data/gent/gvo000/gvo00090/EXT/data/GIMMS/v2/LAI/gimms-3g.v2.lai.1981-2015_monmean_remapcon_0.25.nc:LAI", #'CERES.low': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_*.nc%cldarea_low_1h', #'CERES.cc%20000301%20100101': '/user/data/gent/gvo000/gvo00090/vsc42247/EXT/data/CERES/CERES_SYN1deg-1H_Terra-Aqua-MODIS_Ed4A_Subset_$YYYYMMDD_CERES_START-$YYYYMMDD_CERES_END.nc.cldarea_total_1h%cldarea_total_1h' - "ERAINT:advt_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x", - "ERAINT:advt_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y", - "ERAINT:advq_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc", - "ERAINT:advq_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc", - "ERAINT:advu_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc", - "ERAINT:advu_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc", - "ERAINT:advv_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc", - "ERAINT:advv_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc", +# "ERAINT:advt_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_x_6hourly/advt_x*_6hourly.nc:advt_x", +# "ERAINT:advt_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advt_y_6hourly/advt_y*_6hourly.nc:advt_y", +# "ERAINT:advq_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_x_6hourly/advq_x*_6hourly.nc", +# "ERAINT:advq_y" : 
"/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advq_y_6hourly/advq_y*_6hourly.nc", +# "ERAINT:advu_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_x_6hourly/advu_x*_6hourly.nc", +# "ERAINT:advu_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advu_y_6hourly/advu_y*_6hourly.nc", +# "ERAINT:advv_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_x_6hourly/advv_x*_6hourly.nc", +# "ERAINT:advv_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/advv_y_6hourly/advv_y*_6hourly.nc", #"ERAINT:divU_x" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_x_6hourly/divU_x*_6hourly.nc:__xarray_dataarray_variable__", #"ERAINT:divU_y" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/divU_y_6hourly/divU_y*_6hourly.nc:__xarray_dataarray_variable__", "ERAINT:sp" : "/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/sp_6hourly/sp_*_6hourly.nc", "ERAINT:wp" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA-INTERIM/by_var_nc/w_6hourly_xarray/w*_6hourly.nc:w', "MSWEPGLEAM:AI" : '/data/gent/vo/000/gvo00090/D2D/data/Aridity//Ep_1981_2017_MO_meanhottestmonth.nc', - "ERA5:t2m_daymax" : '/data/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/t2m_1hourly_for_t2m_daymax.nc:t2m', +# "ERA5:t2m_daymax" : '/data/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/t2m_1hourly_for_t2m_daymax.nc:t2m', + "ERA5:blpt_daymax" : '/scratch/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/blpt_1hourly_for_blpt_daymax.nc:blpt', "ERA5:blptb_daymax" : '/scratch/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/blptb_1hourly_for_blptb_daymax.nc:blptb', + "ERA5:blptb_daymean" : '/scratch/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/blptb_1hourly_daymean.nc:blptb', + "ERA5:blptb_daymean_ge90" : '/scratch/gent/vo/000/gvo00090/D2D/data/ERA5/by_var_nc/blptb_1hourly_daymean_for_blptb_daymean_mean3month_is_max_ge90.nc:blptb', + # "ERA5:slhf" : 
'/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/slhf_1hourly/slhf_*_1hourly.nc', # "ERA5:sshf" : '/user/data/gent/gvo000/gvo00090/EXT/data/ERA5/by_var_nc/sshf_1hourly/sshf_*_1hourly.nc', #"MSWEP:pr" :"/user/data/gent/gvo000/gvo00090/EXT/data/MSWEP/MSWEP_v1.2_precip_1979-2015/3hr/raw_data/globe/*.nc:precipitation" diff --git a/class4gl/interface/interface_new_koeppen.py b/class4gl/interface/interface_new_koeppen.py index dddb816..3dbe77f 100644 --- a/class4gl/interface/interface_new_koeppen.py +++ b/class4gl/interface/interface_new_koeppen.py @@ -246,6 +246,9 @@ def brightness(rrggbb): koeppenlookuptable = koeppenlookuptable.sort_values('amount',ascending=False) include_koeppen = list(koeppenlookuptable.KGCID) +for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:2]): + c4gldata[key].frames['stats']['records_all_stations_ini']['KGCname'] = \ + c4gldata[key].frames['stats']['records_all_stations_ini']['KGC'].map(koeppenlookuptable['KGCID']) if args.make_figures: fig = plt.figure(figsize=(11,8.0)) #width,height @@ -303,10 +306,11 @@ def brightness(rrggbb): #dia.ax.plot(x99,y99,color='k') i += 1 + i = 1 for varkey in ['h','theta','q']: #for ikey,key in enumerate(args.experiments.strip(' ').split(' ')): - for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]): + for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:2]): # cc = c4gldata[key].frames['stats']['records_all_stations_ini']['cc'] # clearsky = (cc < 0.05) # mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats'].loc[clearsky]['d'+varkey+'dt'] @@ -327,6 +331,7 @@ def brightness(rrggbb): RMSE = rmse(obs,mod) BIAS = np.mean(mod) - np.mean(obs) STD = mod.std() + markers=['o','^'] # fit = np.polyfit(x,y,deg=1) # axes[varkey].plot(x, fit[0] * x + fit[1],\ @@ -340,23 +345,23 @@ def brightness(rrggbb): # print(STD) # print(PR) dias[varkey].add_sample(STD/STD_OBS, PR,\ - marker='o',ls='', mfc='white',mec='black', + marker=markers[ikey],ls='', 
mfc='white',mec='black', zorder=-100, - ms=3.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\ + ms=2.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\ np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} ) dias[varkey].add_sample(STD/STD_OBS, PR,\ - marker='o',ls='', mfc='none',mec='black', + marker=markers[ikey],ls='', mfc='none',mec='black', zorder=700, - ms=3.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\ + ms=2.5*np.sqrt(np.sum(np.array(koeppenlookuptable.amount.values,dtype=np.float)))/\ np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} ) dias[varkey].add_sample(STD/STD_OBS, PR,\ - marker='o',ls='', mfc='none',mec='black', + marker=markers[ikey],ls='', mfc='none',mec='black', zorder=700, ms=1. 
# annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ @@ -379,7 +384,7 @@ def brightness(rrggbb): i = 1 for varkey in ['h','theta','q']: - for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:1]): + for ikey,key in enumerate(args.experiments.strip(' ').split(' ')[:2]): icolor = 0 for ikoeppen,koeppen in koeppenlookuptable.iterrows(): if koeppen.amount >= 200: @@ -403,26 +408,26 @@ def brightness(rrggbb): dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), pearsonr(koeppen_end_mod,koeppen_obs)[0], - marker='o',linewidth=0.5, + marker=markers[ikey],linewidth=0.5,alpha=0.7, mfc=koeppen.color,mec='black',#koeppen.color, zorder=300+icolor, - ms=3.5*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) - # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ - # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} - ) - dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), - pearsonr(koeppen_end_mod,koeppen_obs)[0], - marker='o',linewidth=0.5, - mfc=koeppen.color,mec='black',#koeppen.color, - zorder=301+icolor, ms=1 + ms=2.5*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} ) + # dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), + # pearsonr(koeppen_end_mod,koeppen_obs)[0], + # marker=markers[ikey],linewidth=0.5, + # mfc=koeppen.color,mec='black',#koeppen.color, + # zorder=301+icolor, ms=1 + # # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ + # # bbox={'edgecolor':'black','boxstyle':'circle','fc':koeppen.color,'alpha':0.7} + # ) # dias[varkey].add_sample(koeppen_end_mod.std()/koeppen_obs.std(), # pearsonr(koeppen_end_mod,koeppen_obs)[0], - # marker='o',linewidth=0.5, 
mfc='none',mec=str(koeppen.color), + # marker=markers[ikey],linewidth=0.5, mfc='none',mec=str(koeppen.color), # zorder=600+icolor, # ms=10.*np.sqrt(koeppen.amount)/np.mean(np.sqrt(np.array(koeppenlookuptable.amount.values,dtype=np.float))) # # annotate=koeppen.KGCID, color=koeppen.textcolor,weight='bold',fontsize=5.,\ @@ -530,7 +535,7 @@ def brightness(rrggbb): mod = c4gldata[key].frames['stats']['records_all_stations_end_mod_stats']['d'+varkey+'dt'] obs = c4gldata[key].frames['stats']['records_all_stations_end_obs_stats']['d'+varkey+'dt'] print ('filtering classes that have sufficient samples: ', include_koeppen) - filter_classess = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen)) + filter_classes = (c4gldata[key].frames['stats']['records_all_stations_ini'].KGCname.isin(include_koeppen)) mod = mod.loc[filter_classes] obs = obs.loc[filter_classes] @@ -664,7 +669,7 @@ def brightness(rrggbb): # ax = fig.add_axes([0.05,0.00,0.15,0.15]) #[*left*, *bottom*, *width*, *height*] # leg = [] # for ikey,key in enumerate(args.experiments.strip().split(' ')): - # leg1, = ax.plot([],colors[ikey]+'o' ,markersize=10) + # leg1, = ax.plot([],colors[ikey]+markers[ikey] ,markersize=10) # leg.append(leg1) # ax.axis('off') # #leg1 = diff --git a/class4gl/interface_functions.py b/class4gl/interface_functions.py index 240f246..fbc335a 100644 --- a/class4gl/interface_functions.py +++ b/class4gl/interface_functions.py @@ -110,6 +110,7 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): # # needed in case of Ruby # os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) + print('hello') return modelout elif mode == 'model_input': @@ -142,6 +143,9 @@ def get_record_yaml(yaml_file,index_start,index_end,mode='model_output'): # # needed in case of ruby # os.system('rm '+TEMPDIR+'/'+shortfn+'.buffer.json.'+str(index_start)) return c4gli + else: + print('Warning. Mode '+mode+' not recorgnized. 
Returning None') + return None @@ -205,6 +209,7 @@ def __init__(self,path,suffix='ini',refetch_stations=True): self.table = self.table.set_index('STNID') def get_stations(self,suffix): + print(suffix) stations_list_files = glob.glob(self.path+'/?????_*_'+suffix+'.yaml') if len(stations_list_files) == 0: stations_list_files = glob.glob(self.path+'/?????_'+suffix+'.yaml') diff --git a/class4gl/interface_multi.py b/class4gl/interface_multi.py index 8931172..0c5a0dd 100644 --- a/class4gl/interface_multi.py +++ b/class4gl/interface_multi.py @@ -264,24 +264,28 @@ def __init__(self,path_exp,path_forcing=None,globaldata=None,refetch_records=Fal print('exclude exceptional observations') print('exclude unrealistic model output -> should be investigated!') valid = (\ - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > 0.25000) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 1.8000) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 2.0000) & - #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 40.0000) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 400.) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 350.) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & - (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.0006) & - # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & - - # filter 'extreme' model output -> should be investigated! 
- (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .2) & - (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.3) & + (self.frames['stats']['records_all_stations_end_mod'].theta > 273.) & + (self.frames['stats']['records_all_stations_end_obs'].theta > 273.) & + #(self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.25) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt > 0.) & + #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > 0.25000) & + #(self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 1.8000) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dthetadt < 2.0000) & + #(self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 50.0000) & + (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt > 0.0000) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dhdt < 400.) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dhdt < 350.) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt > -.0006) & + # (self.frames['stats']['records_all_stations_end_obs_stats'].dqdt < .0003) & + + # filter 'extreme' model output -> should be investigated! + # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0006) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt > -.0006) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .0) & + # (self.frames['stats']['records_all_stations_end_mod_stats'].dhdt > 40.) 
& + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt > .0) & + (self.frames['stats']['records_all_stations_end_mod_stats'].dthetadt < 2.3) & # (self.frames['stats']['records_all_stations_end_mod_stats'].dqdt < .0003) & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Cwb') & # (self.frames['stats']['records_all_stations_ini'].KGC != 'Dfc') & diff --git a/class4gl/model.py b/class4gl/model.py index 30387f0..b2359d5 100644 --- a/class4gl/model.py +++ b/class4gl/model.py @@ -156,6 +156,10 @@ def __init__(self, model_input = None,debug_level=None): # self.input.air_ac.p.iloc[irow] *\ # self.input.air_ac.delpdgrav.iloc[irow]*grav + # if 'ts' in model_input.__dict__.keys(): + # self.input.__dict__['ts'] = model_input.__dict__['ts'] + if 'ts' in model_input.__dict__.keys(): + self.input.__dict__['ts'] = model_input.__dict__['ts'] # 2. Air circulation data @@ -250,6 +254,7 @@ def init(self): self.R10 = 0.23; # respiration at 10 C [mg CO2 m-2 s-1] self.E0 = 53.3e3; # activation energy [53.3 kJ kmol-1] + # Read switches self.sw_ml = self.input.sw_ml # mixed-layer model switch self.sw_shearwe = self.input.sw_shearwe # shear growth ABL switch @@ -982,8 +987,20 @@ def run_mixed_layer(self): # Calculate entrainment fluxes self.wthetae = -self.we * self.dtheta + + #ent2 + #self.wthetae = self.input.ts.wthetae[self.t] #-self.we * self.dq + self.wqe = -self.we * self.dq + #ent and ent2 + #self.wqe = self.input.ts.wqe[self.t] #-self.we * self.dq self.wCO2e = -self.we * self.dCO2 + + # none(bot entrianment on) - ent2 (only dry air entrainment off): dry air entrainment effect + # ent2(only dry air entrainment off ) - ent (both theta and q entrainment off): dry air entrainment effect + # none - ent: full entrainment + + htend_pre = self.we + self.ws + self.wf - self.M @@ -1000,15 +1017,15 @@ def run_mixed_layer(self): thetatend_pre + w_th_ft dtheta_pre = float(self.dtheta + dthetatend_pre *self.dt) - l_entrainment = True + self.l_entrainment = True if 
(self.dtheta <= 0.1) and (dthetatend_pre < 0.): - l_entrainment = False + self.l_entrainment = False warnings.warn(str(self.t)+"/"+str(self.tsteps)+\ " Warning! temperature jump is at the lower limit and is not growing: entrainment is disabled for this (sub)timestep.") elif dtheta_pre < 0.1: dtmax_new = float((0.1 - self.dtheta)/dthetatend_pre) - l_entrainment = True + self.l_entrainment = True warnings.warn(str(self.t)+"/"+str(self.tsteps)+\ " Warning! Potential temperature jump at mixed-layer height would become too low. So I'm limiting the timestep from "+ str(self.dtmax)+' to '+str(dtmax_new)) self.dtmax = min(self.dtmax,dtmax_new) @@ -1021,18 +1038,22 @@ def run_mixed_layer(self): # when entrainment is disabled, we just use the simplified formulation # as in Wouters et al., 2013 (section 2.2.1) - self.dthetatend = l_entrainment*dthetatend_pre + \ - (1.-l_entrainment)*0. - self.thetatend = l_entrainment*thetatend_pre + \ - (1.-l_entrainment)*((self.wtheta ) / self.h + self.advtheta) - self.htend = l_entrainment*htend_pre + \ - (1.-l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta) + self.dthetatend = self.l_entrainment*dthetatend_pre + \ + (1.-self.l_entrainment)*0. 
+ self.thetatend = self.l_entrainment*thetatend_pre + \ + (1.-self.l_entrainment)*((self.wtheta ) / self.h + self.advtheta) + self.htend = self.l_entrainment*htend_pre + \ + (1.-self.l_entrainment)*((self.ws - self.M)+ self.thetatend/self.gammatheta) #print(l_entrainment,htend_pre,self.ws,self.M,self.thetatend,self.gammatheta) #stop + # qtend_pre = (self.wq - l_entrainment*self.wqe - self.wqM ) / self.h + self.advq + # q_pre = float(self.q + dtend_pre *self.dt) + # if q_pre < 0: + # self.qtend = - self.qtend = (self.wq - l_entrainment*self.wqe - self.wqM ) / self.h + self.advq - self.CO2tend = (self.wCO2 - l_entrainment*self.wCO2e - self.wCO2M) / self.h + self.advCO2 + self.qtend = (self.wq - self.l_entrainment*self.wqe - self.wqM ) / self.h + self.advq + self.CO2tend = (self.wCO2 - self.l_entrainment*self.wCO2e - self.wCO2M) / self.h + self.advCO2 # self.qtend = l_entrainment*qtend_pre + \ @@ -1064,16 +1085,16 @@ def run_mixed_layer(self): # self.dthetatend = dthetatend_pre # self.thetatend = thetatend_pre - self.dqtend = self.gammaq * (self.we*l_entrainment + self.wf - self.M) - self.qtend + w_q_ft - self.dCO2tend = self.gammaCO2 * (self.we*l_entrainment + self.wf - self.M) - self.CO2tend + w_CO2_ft + self.dqtend = self.gammaq * (self.we*self.l_entrainment + self.wf - self.M) - self.qtend + w_q_ft + self.dCO2tend = self.gammaCO2 * (self.we*self.l_entrainment + self.wf - self.M) - self.CO2tend + w_CO2_ft # assume u + du = ug, so ug - u = du if(self.sw_wind): - self.utend = -self.fc * self.dv + (self.uw + l_entrainment*self.we * self.du) / self.h + self.advu - self.vtend = self.fc * self.du + (self.vw + l_entrainment*self.we * self.dv) / self.h + self.advv + self.utend = -self.fc * self.dv + (self.uw + self.l_entrainment*self.we * self.du) / self.h + self.advu + self.vtend = self.fc * self.du + (self.vw + self.l_entrainment*self.we * self.dv) / self.h + self.advv - self.dutend = self.gammau * (l_entrainment*self.we + self.wf - self.M) - self.utend - self.dvtend = 
self.gammav * (l_entrainment*self.we + self.wf - self.M) - self.vtend + self.dutend = self.gammau * (self.l_entrainment*self.we + self.wf - self.M) - self.utend + self.dvtend = self.gammav * (self.l_entrainment*self.we + self.wf - self.M) - self.vtend # tendency of the transition layer thickness if(self.ac > 0 or self.lcl - self.h < 300): @@ -1111,6 +1132,18 @@ def integrate_mixed_layer(self): self.theta = theta0 + self.dtcur * self.thetatend #print(dtheta0,self.dtcur,self.dthetatend) self.dtheta = dtheta0 + self.dtcur * self.dthetatend + + + # qtend_pre = (self.wq - l_entrainment*self.wqe - self.wqM ) / self.h + self.advq + # q_pre = float(self.q + dtend_pre *self.dt) + # if q_pre < 0: + # self.qtend = + + q_pre = q0 + self.dtcur * self.qtend + if q_pre < 0.001: + self.qtend = (0.001-q0)/self.dtcur + self.dqtend = self.gammaq * (self.we*self.l_entrainment + self.wf - self.M) - self.qtend + w_q_ft + self.q = q0 + self.dtcur * self.qtend self.dq = dq0 + self.dtcur * self.dqtend self.CO2 = CO20 + self.dtcur * self.CO2tend diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index b37225a..9414220 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -179,6 +179,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu c4gli.clear() c4gli.get_profile_wyoming(wy_strm) + #import pdb;pdb.set_trace() #print(STN['ID'],c4gli.pars.datetime) #c4gli.get_global_input(globaldata) @@ -343,6 +344,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu logic_afternoon_def['runtime_ok'] = False print('ALMOST...') + if c4gli.pars.runtime >= 3600*4.: # more than 4 hours simulation logic_afternoon_def['runtime_ok'] = True diff --git a/class4gl/simulations/batch_simulations.py b/class4gl/simulations/batch_simulations.py index 0762cf8..fec9032 100644 --- a/class4gl/simulations/batch_simulations.py +++ b/class4gl/simulations/batch_simulations.py @@ -26,10 +26,12 @@ 
parser.add_argument('--multi_processing_mode',default='pythonpool') parser.add_argument('--cpu_count',type=int,default=2) parser.add_argument('--subset_forcing',default='ini') - # this tells which yaml subset - # to initialize with. - # Most common options are - # 'morning' and 'ini'. +# parser.add_argument('--path_timeseries_forcing',default=False) +# parser.add_argument('--timeseries_forcing',default=False) +# # this tells which yaml subset +# # to initialize with. +# # Most common options are +# # 'morning' and 'ini'. # Tuntime is usually specified from the afternoon profile. You can also just # specify the simulation length in seconds @@ -56,7 +58,6 @@ help='output directory in which the experiments as subdirectories are stored')#,default='/user/data/gent/gvo000/gvo00090/D2D/data/C4GL/') - #arguments only used for update_yaml.py #parser.add_argument('--path_dataset') #parser.add_argument('--global_keys') diff --git a/class4gl/simulations/copy_update.py b/class4gl/simulations/copy_update.py index 8dcea02..defa8c3 100644 --- a/class4gl/simulations/copy_update.py +++ b/class4gl/simulations/copy_update.py @@ -22,7 +22,7 @@ arguments.append(dict(arg='--last_station_row',\ help='ending row number of stations table')) arguments.append(dict(arg='--global_vars',\ - help="global vars to update")) + help="global vars to update ( ':'-seperated list) ")) arguments.append(dict(arg='--station_id',\ help="process a specific station id")) arguments.append(dict(arg='--error_handling',\ @@ -268,7 +268,7 @@ def execute(**kwargs): if args.global_vars is not None: - c4gli_morning.get_global_input(globaldata,only_keys=args.global_vars.strip().split(',')) + c4gli_morning.get_global_input(globaldata,only_keys=args.global_vars.strip().split(':')) onerun = True diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 9cefd61..93fbb20 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -359,14 +359,14 @@ def 
execute(**kwargs): c4gl = class4gl(c4gli_morning) if args.error_handling == 'dump_always': - # try: - print('checking data sources') - if not c4gli_morning.check_source_globaldata(): - print('Warning: some input sources appear invalid') - c4gl.run() - print('run succesful') - # except: - # print('run not succesful') + try: + print('checking data sources') + if not c4gli_morning.check_source_globaldata(): + print('Warning: some input sources appear invalid') + c4gl.run() + print('run succesful') + except: + print('run not succesful') onerun = True print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') diff --git a/class4gl/simulations/simulations_iter.py b/class4gl/simulations/simulations_iter.py index 3ebe456..1270145 100644 --- a/class4gl/simulations/simulations_iter.py +++ b/class4gl/simulations/simulations_iter.py @@ -276,6 +276,15 @@ def execute(**kwargs): os.system('mkdir -p '+path_exp) records_morning_station = records_morning.query('STNID == '+str(current_station.name)) + # records_afternoon_station = records_afternoon.query('STNID == '+str(current_station.name)) + # for varkey in ['h','theta']: + # records_morning_station['d'+varkey+'dt'] = \ + # (records_afternoon_station[sourcekey][varkey] - records_morning_station[sourcekey][varkey])/\ + # (records_afternoon_station[sourcekey].ldatetime - records_morning_station[sourcekey].ldatetime).dt.seconds*3600. 
+ # select_loc = records_morning_station.query( '(dthetadt > 0) & (dhdt > 0.)').index + # records_morning_station = records_morning_station.loc[select_loc] + # records_afternoon_station = records_afternoon_station.loc[select_loc] + for istation,current_station in run_stations.iterrows(): if (int(args.split_by) * int(run_station_chunk)) >= (len(records_morning_station)): print("warning: outside of profile number range for station "+\ From 43e374b76511eb522e81f100c77383c66f50a289 Mon Sep 17 00:00:00 2001 From: woutersh Date: Mon, 22 Feb 2021 14:20:28 +0100 Subject: [PATCH 127/129] features (manual input parameters) bugfixes in the era-based data generation allow user to specify logging level --debug_level --- class4gl/model.py | 12 +++++++++++- class4gl/setup/setup_era.py | 14 ++++++++++---- class4gl/setup/setup_igra.py | 26 ++++++++++++++++++++++---- class4gl/simulations/simulations.py | 19 ++++++++++++++----- 4 files changed, 57 insertions(+), 14 deletions(-) diff --git a/class4gl/model.py b/class4gl/model.py index b2359d5..155b619 100644 --- a/class4gl/model.py +++ b/class4gl/model.py @@ -97,7 +97,7 @@ def __init__(self, model_input = None,debug_level=None): self.logger = logging.getLogger('model') if debug_level is not None: - self.logger.setLevel(debug_level) + self.logger.setLevel(logging._nameToLevel[debug_level]) """ initialize the different components of the model """ @@ -204,6 +204,7 @@ def run(self): # time integrate model #for self.t in range(self.tsteps): while self.t < self.tsteps: + print(self.t, '/', self.tsteps) # time integrate components self.timestep() @@ -1139,6 +1140,15 @@ def integrate_mixed_layer(self): # if q_pre < 0: # self.qtend = + # calculate compensation to fix the free troposphere in case of subsidence + if(self.sw_fixft): + w_th_ft = self.gammatheta * self.ws + w_q_ft = self.gammaq * self.ws + w_CO2_ft = self.gammaCO2 * self.ws + else: + w_th_ft = 0. + w_q_ft = 0. + w_CO2_ft = 0. 
q_pre = q0 + self.dtcur * self.qtend if q_pre < 0.001: self.qtend = (0.001-q0)/self.dtcur diff --git a/class4gl/setup/setup_era.py b/class4gl/setup/setup_era.py index 1674d80..6fa6b90 100644 --- a/class4gl/setup/setup_era.py +++ b/class4gl/setup/setup_era.py @@ -23,6 +23,8 @@ import argparse +# purpose: create CLASS4GL input data from era-interim +# example: python $CLASS4GL/setup/setup_era.py --split_by 50 --path_forcing /data/gent/vo/000/gvo00090/D2D/data/SOUNDINGS/ERA_JOAO/ --path_experiments /user/data/gent/gvo000/gvo00090/$USER/data/MY_FIRST_CLASS4GL_EXPERIMENTS/ERA_TEST_JOAO/ --c4gl_path_lib $CLASS4GL --station_chunk_number 0 --station_id 83779 --force_pars "b=7.12,CGsat=3.670e-06,p=6,a=0.135,C2ref=0.8,C1sat=0.213" --first_YYYYMMDD "20140101" #if __name__ == '__main__': parser = argparse.ArgumentParser() #parser.add_argument('--timestamp') @@ -54,6 +56,7 @@ parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') parser.add_argument('--global_chunk_number') # this is the batch number according to split-by in case of considering all stations parser.add_argument('--station_chunk_number') # this is the batch number according to split-by in case of considering all stations +parser.add_argument('--force_pars',default='') # run a specific station id args = parser.parse_args() sys.path.insert(0, args.c4gl_path_lib) @@ -141,8 +144,8 @@ print("station numbers included in the whole batch "+\ "(all chunks):",list(all_stations_select.index)) -dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d",) -dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d",) +dtfirst = dt.datetime.strptime(args.first_YYYYMMDD,"%Y%m%d").astimezone(dt.timezone.utc) +dtlast = dt.datetime.strptime(args.last_YYYYMMDD,"%Y%m%d").astimezone(dt.timezone.utc) # =============================== print("Creating daily timeseries from", dtfirst," to ", dtlast) # =============================== @@ -165,14 +168,14 @@ if args.split_by != -1: 
raise ValueError("Chunks are defined by --split-by, but I don't know which chunk to run. Please provide --global_chunk_number or --station_chunk_number, or leave out --split-by.") run_station_chunk = 0 - print("stations that are processed.",list(run_stations.index)) + #print("stations that are processed.",list(run_stations.index)) DTS_chunk = DTS[(int(run_station_chunk)*int(args.split_by)):\ (int(run_station_chunk)+1)*int(args.split_by)] # for the current implementation we only consider one station. Let's upgrade it # later for more stations. -run_station_chunk = int(args.global_chunk_number) +#run_station_chunk = int(args.global_chunk_number) # =============================== print('start looping over chunk') @@ -215,6 +218,7 @@ datetime_daylight = datetime, \ doy = datetime.timetuple().tm_yday,\ runtime = runtime,\ + tstart = ldatetime.hour + ldatetime.minute/60. + ldatetime.second/3600.,\ )) c4gli.get_global_input(globaldata) @@ -250,6 +254,8 @@ source=air_ap_input_source, mode=air_ap_mode) + for parkey, parvalue in [par.split('=') for par in args.force_pars.split(',')]: + c4gli.update(source='user_specified', pars={parkey: float(parvalue)}) if not c4gli.check_source_globaldata(): print('Warning: some input sources appear invalid') diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index 9414220..d53934f 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -49,6 +49,8 @@ parser.add_argument('--last_station_row') parser.add_argument('--c4gl_path_lib')#,default='/user/data/gent/gvo000/gvo00090/D2D/software/CLASS/class4gl/lib') parser.add_argument('--station_id') # run a specific station id +parser.add_argument('--check_inputdata',default='True') # run a specific station id +parser.add_argument('--force_pars',default='') # run a specific station id # parser.add_argument('--error_handling',default='dump_on_success') # parser.add_argument('--subset_output',default='morning') # this tells which yaml subset @@ -61,6 
+63,13 @@ fn_stations = args.path_input+'/igra-stations.txt' +def isfloat(value): + try: + float(value) + return True + except ValueError: + return False + #calculate the root mean square error @@ -162,6 +171,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # for EXP in experiments.keys()]) dict_diag_station = {} + df_logic_morning = pd.DataFrame() + df_logic_afternoon = pd.DataFrame() with open(fnout,'w') as fileout, \ open(fnout_afternoon,'w') as fileout_afternoon: wy_strm = wyoming(PATH=args.path_input, STNM=STN.name) @@ -216,7 +227,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu ) - logic['mlherrlow'] = (c4gli.pars.h_e <= 150.) + logic['mlherrlow'] = (c4gli.pars.h_e <= 350.) print('logic:', logic) # the result @@ -236,7 +247,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu # If the morning is ok, then we try to find a decent afternoon # sounding logic_afternoon_def =dict() - + df_logic_morning = df_logic_morning.append(logic,ignore_index=True) if morning_ok == 1.: print('MORNING OK!') # we get the current date @@ -305,6 +316,9 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu afternoon_first = False print('logic_afternoon: ',logic_afternoon) + df_logic_afternoon = df_logic_afternoon.append(logic_afternoon, ignore_index=True) + if len(df_logic_afternoon)> 30: + stop print(afternoon_ok,c4gli_afternoon.pars.ldatetime) if afternoon_ok == 1.: # # doesn't work :( @@ -354,10 +368,14 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu logic_afternoon_def['global_parameters_ok'] = False c4gli.get_global_input(globaldata) + #saopaulo:sandyclayloam -> --force_pars "LAI=3.5,b=7.12,CGsat=3.670,p=6,a=0.135,C2ref=0.8,C1sat=0.213" + #curitiba:clay -> --force_pars "LAI=3.5,b=11.40,CGsat=3.6,p=12,a=0.083,C2ref=0.3,C1sat=0.342" + for parkey,parvalue in [par.split('=') for par in 
args.force_pars.split(',')]: + c4gli.update(source='user_specified', pars={parkey:float(parvalue)}) print('VERY CLOSE...') - if c4gli.check_source_globaldata() and \ + if (args.check_inputdata == 'False') or (c4gli.check_source_globaldata() and \ (c4gli.check_source(source='wyoming',\ - check_only_sections='pars')): + check_only_sections='pars'))): logic_afternoon_def['global_parameters_ok'] = True print('starting dumps') diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 93fbb20..035f8e0 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -8,6 +8,7 @@ import sys import pytz import math +import logging arguments = [] @@ -27,7 +28,7 @@ default='dump_on_success',\ help="type of error handling: either\n - 'dump_on_success' (default)\n - 'dump_always'")) arguments.append(dict(arg='--diag_tropo',\ - default=['advt','advq','advu','advv'],\ + default='',#\'advt,advq,advu,advv',\ help="field to diagnose the mean in the troposphere (<= 3000m)")) arguments.append(dict(arg='--subset_forcing', default='ini', @@ -48,7 +49,15 @@ arguments.append(dict(arg='--global_chunk_number',help="this is the batch number of the expected series of experiments according to split_by")) arguments.append(dict(arg='--station_chunk_number',help="this is the batch number according to split_by in case of considering one station")) arguments.append(dict(arg='--experiments_names', help="Alternative output names that are given to the experiments. By default, these are the same as --experiments") ) - +arguments.append(dict(arg='--debug_level', type=str,default=None,help="Debug level according to the standard python logging module. 
") ) +# {'CRITICAL': 50, +# 'FATAL': 50, +# 'ERROR': 40, +# 'WARN': 30, +# 'WARNING': 30, +# 'INFO': 20, +# 'DEBUG': 10, +# 'NOTSET': 0} if __name__ == '__main__': @@ -326,11 +335,11 @@ def execute(**kwargs): record_morning.index_start, record_morning.index_end, mode='model_input') - if args.diag_tropo is not None: + if args.diag_tropo is not '': print('add tropospheric parameters on advection and subsidence (for diagnosis)') seltropo = (c4gli_morning.air_ac.p > c4gli_morning.air_ac.p.iloc[-1]+ 3000.*(- 1.2 * 9.81 )) profile_tropo = c4gli_morning.air_ac[seltropo] - for var in args.diag_tropo:#['t','q','u','v',]: + for var in args.diag_tropo.split(','):#['t','q','u','v',]: if var[:3] == 'adv': mean_adv_tropo = np.mean(profile_tropo[var+'_x']+profile_tropo[var+'_y'] ) c4gli_morning.update(source='era-interim',pars={var+'_tropo':mean_adv_tropo}) @@ -356,7 +365,7 @@ def execute(**kwargs): runtime}) c4gli_morning.update(source=expname, pars=exp) - c4gl = class4gl(c4gli_morning) + c4gl = class4gl(c4gli_morning,debug_level=args.debug_level) if args.error_handling == 'dump_always': try: From c50c60a8ecd071944f3c0990ec45715c1632ca9b Mon Sep 17 00:00:00 2001 From: woutersh Date: Mon, 22 Feb 2021 14:42:49 +0100 Subject: [PATCH 128/129] features (manual input parameters) bugfixes in the era-based data generation allow user to specify logging level --debug_level revert test setting (h_e=150 instead of 350) to previous default for igra-sounding --- class4gl/setup/setup_igra.py | 6 +++--- setup.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/class4gl/setup/setup_igra.py b/class4gl/setup/setup_igra.py index d53934f..f8e72f4 100644 --- a/class4gl/setup/setup_igra.py +++ b/class4gl/setup/setup_igra.py @@ -227,7 +227,7 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu ) - logic['mlherrlow'] = (c4gli.pars.h_e <= 350.) + logic['mlherrlow'] = (c4gli.pars.h_e <= 150.) 
###350 print('logic:', logic) # the result @@ -317,8 +317,8 @@ def rmse(y_actual,y_predicted,z_actual = None, z_predicted = None,filternan_actu print('logic_afternoon: ',logic_afternoon) df_logic_afternoon = df_logic_afternoon.append(logic_afternoon, ignore_index=True) - if len(df_logic_afternoon)> 30: - stop + # if len(df_logic_afternoon)> 30: + # stop print(afternoon_ok,c4gli_afternoon.pars.ldatetime) if afternoon_ok == 1.: # # doesn't work :( diff --git a/setup.py b/setup.py index 1506d2d..8bfffd9 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ # repository: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56 setup( name='class4gl', - version='1.0.1', + version='1.0.2', license='gpl-3.0', # https://help.github.com/articles/licensing-a-repository description = 'a framework to investigate the dynamics of the atmospheric boundary layer weather balloons worldwide', # Give a short description author = 'Hendrik Wouters', # Type in your name From 324f76718d868ee8b8c8b7b24bd6f70b605bcd21 Mon Sep 17 00:00:00 2001 From: hendrikwout Date: Sat, 26 Jun 2021 22:48:52 +0200 Subject: [PATCH 129/129] write out error messages on job failure in sumulations.py --- class4gl/simulations/simulations.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/class4gl/simulations/simulations.py b/class4gl/simulations/simulations.py index 035f8e0..08ec611 100644 --- a/class4gl/simulations/simulations.py +++ b/class4gl/simulations/simulations.py @@ -374,8 +374,9 @@ def execute(**kwargs): print('Warning: some input sources appear invalid') c4gl.run() print('run succesful') - except: - print('run not succesful') + except Exception as inst: + print('run not succesful. Error message is:') + print(inst.args) onerun = True print("dumping to "+str(file_ini)+ ' ('+fn_ini+')') @@ -405,7 +406,9 @@ def execute(**kwargs): #timeseries_only=timeseries_only,\ ) onerun = True - except: + except Exception as inst: + print('run not succesful. 
Error message is:') + print(inst.args) print('run not succesful') isim += 1

>KLEhTcJUtOzgb};Lt~!{>_EST{88IDlY<4!A79;2#HHfux z=JbHFQoMOBf2itWb;hVr6B_``rB7$bUZBPw|}LC2`L03 zEf}=q5%h0Um3T$$8y!gdJyG+rLdl{v+N-Mh0D_#<(??Bbe&Xlk7CaiaI*4 z!s}OIpo$4O_FSqAWbEr4dK*f<7hUaVs}>e7h{GUdSrV;U1?DFa!~*+44$(>)#*v59 zcS<@QMXdsU$a{oaz2_#qt$WcI7Z-IQ*INJq?c6Vmw?g)=s ze5=*tvCY1?MJBQGE6JMiXdxNMPV>*1_@}EvPijyF!3V7OE@XTSw8%7jI*Vr;Z)TG3 zUzDJQBu`2PK=W2|;nb3q~yH=v&IsecZw39zzm zZ(t@WWu&F~Va7*J7$CdTSK6u4>{vW&_4czt22tVe1ylLjx52%?YXq@&()mptA>!^Q z7m_d0u|{FqI<$8_3o|~OV@Ul+!z4vmidiME4#xNC4+wrNBKH>EA?lY1U zJ*~4d0l&?GczS%Ttn_|)fKusb^0`7xZmE2RW3Iunej3XpkSJ;+_{LhwPyqLWC~)H( zQ*kecWPLgMYU&I>#jF&)AA!2qy7x9m7ozwSPOD$ZUSvj2m6Wa~SfoBuxk<6Kotq5f z=5&bzfv~{9#0$$jrh>SuXSH2J5l3k|4Em5BUNLAyG77^PMG$rr8acPJYoM7fYO&<~ zPMVMmv?nwQF$yZ;9Zr6Gdoa7^{cB7b=v|BxDSOkLzDE1Q&v8X@bc|jaF;W`f)HlMM zoCpv|CovMa1Xi-Ckc@|u@;O0Zbd{hzXNLvoMY@M-qZ*NpqI2xs&w#4Sro#kV%4N%` z)G)a4W8}|=4W{HtTT{U+lJC8-@>lzkHC&qd0`nB*CTwm~&w-{S*604|2lIj&#fDmE z%30l6eL3Ie#>ipMRP-u*Jmbs2Nef=eVZ|hzk@y=DunAjWEejI=)22)V2H$et|H1J1RxB;2^^@RqVhBYMuJ8EQEItb- zE#5r6W#7me)#Ck_lRXYPhz1qSNhJY(1Dj<>hK;VF4M#W+V(-veH@*P3c<$OrHMDVl zsE@E4D~cOD{&-kbmc7liXw=X@!I&5`{t&G0!fXSkLv{1URX_zx;o2`j;B%LE`7}}E ze7Rcf2*&sy)Z4Hc6y4@()i-WvBhi3#^pBF!8nl-^ik$fa!sNBdvQRxVQIfx8FuR34 z2>qal^C!A^u!%qZ&VpQ~+yc)l0Us;}edQnnC{BI(_j&^wC5T-fVpFV$^BB=Jo3cd3 zi&Y|*l`;|R#5CS!I7-p~^q>hK^i2C)O$uC1fHYS11!HKzL;4y&G8GhN)i1$bW-=-z z)k&Y!QqIjqhxRR1HZ%h&(D-$9xt1^+PQpgA0W|cEGKtXh49n zxY!A*LYaAjLa>|T)61um>nUEjw)Uo#2jQ)(-f{P19NZKtWcgY{kNhvWYC^nfdQC2o z{8j} zh^go8*pU5u<#vs=p71#TO4RB8!mr2X@Hf%ye}bHPA#NlL+)W)M% z_p{%#9F}z)CbH~ATs*y*a6D6;IJqO8<5UZa2;(*YD{>83E->){`E;ec5Ngi%El{O+ z?HP-()Kg&7JH$x>qEXbViW~2@r6ys0@4PBruo`znPW3=oTBK2}F~3>R97VVjcZ;1J z%=r8P=z*nTlp2Dnf>=W&kgE?iO+sP-t}4l$c4?lQmYfcF?@;ag*>YkRH+dasdhY=` zd1QbHu#bOTuD&?d;O*=4KfC-A+SYwJowv-z+j6=vZ#s1{SVv^L1KEG)T85w9qBt~W0?_mHTAms+ZTpkT z+*BM*bMF5=G&MCvPdcCu&iy^H8REq{SCw@%+DMSYPy@mct5(&iQ3^6#IC?$td@x3!fgI5SiM|No``O0DoE+Qm%%$f z-SfG8dL8{lM}wRMk7QMh+wLi7>W4tP0hfUIxh7RTboRE$-~`mn%&nC^&rxRXd&h*1 
z+R0}CI`{W(#)~V`dL}R~z}2wx%v>KucXSm`FSL!`4ybJ|LpotR%ssKeQ@l2BSkzOs= z$O{|ntjT=t-@-S)RTH4|OOlF!HBg)$B0fKq65$syIP2si$-?wsZ@aKf!eBPQw-Asp z=xFe=fH99BS7&lkN z`PF!%n|}7Ru0SSON&|qr3#JlXdc*}ls|;Ui5t?XHg7KR-))vlBv_|B!#7jh3+_c?oG=otV;(Z+8M}h$ z4o|P%ql8YQqo3!(?+$mPvpC*`?1Wryuw6`5JGC~vHdysv!?2z{*f{kI!L3OaPCR7` z{u^<0^UuExa!d&i#)(*`x0z9Gz7m}Bc?61@GWk;DY{1Op#e|EW?eJZ(sww+adv}zX zo3RuNy*f#L7J6WO!+myhEqk>nWN|$@WVkY*79hyl9;Rc-{SP!(s)fO-!%eCOa9cXTyYI7F{6rh6poq&irXen2_x6BWlDzS?wNXW$wkkc$41?J^ci)|7Amvd(t z*_qJD3AAMTCuaWp-o1}L8lwQ{e5rkcJC-9t;mN+fapX3DflO%ZcH91_9S4k9US-g1 zbeX$!t|C#Ocf*)D1ztsXds}^au2+JGahV5WK2MhO0t~0u4u~MAnK%>Fog5k?75}Sh zeI{A>)}D8)ff};-sYqW(Kj!#DLI~5tu3WQLh*J~wg!=jLGUEaR_qVofZ#kKvG_|FF zhuJL`zfikBiTD1Z`x&smAfZ=m&&#4W(bwn5jmW*k|F$~T}Wh_~ewb%`w)06Gi%cDziR*T3y*{ZY9ZzE>6p2;v7 z4Zc=C&P*}{n%_RpcrDx?<~3*8>zd2Ie&5%E{3Wjz5B%F?L*386cWROtT>aXf$cw8| z?_lZ}TmSlUagjsa3{hevv#`G^&2osnt=KXcz9rQAZ*TWKDn5#V*WDDWQuZ*r(Rq$O zx^w>dn#DmB&~YJoqo;P(ZA_o#QLQd=D~aKMzzVE(Y*DpY4c}QOt0(Aq$!wlIBcUH( zD&YikGJ7h>vfe8}dUcz+MVy8IsaThIY+}~V?X+;c53?Vy{*Y=B&p;oXXJO+16!eut(;>8-pDmF{OZCJVKti>Oep_0->yW36F z3_y-Ea`az%N$lx7ELK~x$oYA9*?jasj}Z3U;!>UQ)DAY-y3>Wj-Ql(CK# zhuaxvWBgY9L)uoImqmhev!n!LvJrFP6gAjP{ytFWM8&~MnCCZxd_nHg#e8Z3-C1H#dvT)T$+vU@>RBp}V=Tp~u zv)T6VCEK&grFM7mhadiVA|Hz&N>3UBILR4hXB%i!{xe=o(|Kg|&p*{p9sgLA1yvcY<)lOGKu{04>lo3AB0g4w)_I5%zojh-u5~w z#Yval&384oMIVKy#DU(~s=Zfo`0EdIUziB7T&q=S-{$=FT{8@`qP(Lfs^;APYd zZHlDMNg(-BTqCZ`LprUe1RWFyMc%rl@W7Iy%Bdb$2a-%75=g~~E9B#O;B?g7B+pdH z6<2lRR_8+_8*)gkpqxggkXjl}iZ9MCk54Mok?C)<7wZL(;8YN^(twK1Z;H+BI0Pvv zkv6*`znzjzb|k!nhMg&|D&6#%apaHBOd49v6?6XKI`)loc}IC!Y#YZZ+l2|T-*1o4 z36;v$`ND7q5O{Fo(a6?7#zcY;@w@inzTdOKOeMAXM16f%k|FNhE0Yh>y#t!NyF2gW ztxUck2-%v>X06>zZ1;u?TeETH^!WJClvj$L@$Di#N4}ky&0BN%HkbcSPxd_hLKp7K z?4+suk=D{*KlIiP3zsUR9qVF>TF%Qm3jW)@N?2Lvz+UvMbq53onqimU%J{d4>_5sI z0W{n{Ymj*;A4S2|JOmzp(aGhb=Gbli{;QLFJ~R95RC{va3u80qp=?0iID75C^oI|q zV7;X>19I0dMMB(9`k!Y}G%&+Q9SYx!tHD&&?8Y~4Ps;9vDQ@OnJhyRS?*Udl=UPK` zS&kvT9#%P%I6ws4*GMnqiFI?e(DZ1@56VtjnVS6gv2{rLFcgsMv&uPP?nm$b{+&j} 
zna$SI))UmmX2U}Hur!WZeH{1Ap700s#V5pAT9o?;a8=%^QxFHwkCxI*QV-GbfY-{lNOeF=uiOQiPuW7D&f^){84sR*H4$Ds9th7?( z4>u;7S3tjapr6?fSMJjj2UYXQ$*^kDn?K?STJ!KwR|TNx)$3PRs~lUV?|6t>R=pJ_ zKyt}#n|6WONG`_>i58{CDlFlRQD*LMVanuwW_@_mICLAyCz_d?0Ii;t9hEHYuYck!V7TlWEBBfb%UHl5%Yb=}832X#Cw! zn3PXxE=R@q4hEZ1pV-HuTf8<%qp%!$)s-JU=`#$N=2ATs8`Ql)28lck4y5(Bw8|%( zfrNish`np+5chWA%16Bth}CXgJ`vN%JD8O$bxaC-HJ6?{)8%dU)G~R<2K*MuIPLeL zBw>Lw$f|Sc`v89o5P=)M18L>pi=(v_QBfpKpwsbvmWf6|5M$CAX$jD7!jY*loYEEy zPL0Rv>s52|7W3A>21B^*KE%a{17K+PXAbUBvkd>0l{qIsqqGN%v%Ky(>*|B8#y}|> z(kNhJ444l`ZR8Y*ZOWSX)foaS)3|VYVDJbiOwl}#&WJJG4-5Lh@Ic>Ut*71SqWOsG zb(&xS=liiLi4Cwro6yH82km3b?NGvOVe06~$%$n7OkUpA z%2oXRf_kcq?INX`ZIOKXQT5B!_=&nY*N~vj^PTgPm6b=?85Y%o@pI$vppNtBrn z0uLg(d)r-foBG}~fx%#aq-yHyJZz~6B)IxY3XG07d z_T6^2?46dUwPQH{Lf3gK6mM*NVT}+i(TlZ5M?)Kx+qR!fFU+%k!fZ2UJitT|Sg_XF zh~KJ_RNgdQfmSXp8C8PePl z)HVrJlQfgXKMtOJ8uY8qc7XGX59QJx)8aO~pKxyEO9{?{%S&x-Z_r72S}- z_hMvu?Zwx-CzUf4522F7g|crvVJtnXnihAyrWR(9uUhv)xz$bVd(2XJL0w)&*YSJ^ zur}9Ya(!Mf*fL@I#btsaGedzv&R;Qy-`n0sDlIq?IZK7Nl%(0r3Ntsw^G8Sc-ip=a z+pF^z|M>KhX*hsXbND0~h(8ZqOp|8Sq#0FR(VO0P+QhQ6wYDm7#(VcP@1SaV)+X0N z^VhdXl*G->1x{9aYFpO0psOf2C#jDtZ0B@+wFoekWCB&=yAIa{0Ara z>$KO!f7~S*^#V}42c{BQxQp{|b!12XF0MS7hNh-xPu`cxF-RQ|>j;(#(|{83yKZQf3QOA~pK@6C+f5ns4~-$)IEW$>&Ih|M&+2 z)k$iZV;FP3wV}NRl}pJ$O{!t_b44x`b;@(SujhT>VzYN1spF zIF>=UtfcJ~lmwNkSRbb%ctFem?mjB9fBi^#z|SNVpqxHxjSUwkOSa{GE_;S>6(&LX(sQki}O&nV052`WmDgC@L=b&zPK4$8hAFhMZ0=s6&u68vDAj4Izr2NDKzHN?z67Gs~N zbnkt2y4q)86nFWcB#IWMlem2ZoN6y4Rv81Gi6=CCDYA{j^mIfwBQrkBDPkyiDl4{- zIc@7*39>i}`{_&+bj zLZ@O=y1P8=pj!-ZJp>m`?h7zaJkc-B8XY~W%G~kiabwGLqSYXVN^*X37owFOVmT68 zBJwF7YNz+!`g;Gjx4fa-6B;VLCHEcywj9>iZ@g9c@ZBw5Z7(`&C6FS>c_i%LUmMSd zZ~kz0dG+lq^HaTn(-A?$PJX*|$8Qtp35Luti$6kjDs`G}=Dfht^_7K%aYxZa`1{OB zNT>Rxy$od*U%>U{A_a#x{rxkWQI*vsm|YG^phYG3Nlk~xBkJyeHs+2VxtIMv9zlBp zPX?}zUq50jmSn|ChyK}TR4|62ijem#Sno{=`sRhiKo2L&U3b`euIxh>BuulR_FnbR zS&|(K)FADDjJ?&s*` zS>(-fG?DIJ9Y6l~qkib7>714v*sfQrk1jqaNitJP<@xjHF{LlQ{o3_iu;F}n{Lb?a 
zr)j#mx%q3q|9fA4{nh?_T1x7OR@<#{VSha2oIRTmd@|g+#&Mp5b-VTT#de*RnB%fq ztrX!!H%!a?)1QA)wXQ?oJ_N1lEOSYCK8>Z^t=3)F`6U&?+*+vXegNk{q0}7uZkp$q z5;HS#&MC(PqG?%La)Wb@A9IF9L_pM93kbC;lkveZxn^W}_xbzwI&7l?0svw(YCxnp z0GL@s5xFT(1z@!{Xi!8R_IG{g92?{PvP`)|K&$u}Mc2`)Rso;NEr{*Xx`v{Q#D$Xhi^!i_Nz0`=R+$_xqGHfH>!tWm&J+ zEl^>arWW$4s+}2nAKDtD@B0u!N{NY(OwKvy=$ocE=Vowh?3(tz8jpyGuCK4BY3jPJ zox6t9d6$dZI6|eAlCv>}7)mK=KkfGiLbqOTyRJK(PUAQ>b$pv%v@4;BVPlMlNCaKi z2x}uk0SE?`BP^5>eVY}C^)~QRAa2FeK6B9UtL{wUH8Qo zUjV?1ryn5EX19H~KRD-d&dm}UW6U`rVy=&`hncn3Wmy;>6QOIZhr{796x!gn+de!z zG#P#i86HBg-p%ve`loH$)Uv6Y=GZxBjB9zS?MSvwWQRX1lC#<)lx~p8EQnt zcJD+6M~=F1+V7b;F8QO+KQBvi!N+BChy-i0e3+-Z=_I8NL-y9H4EHDV^5y>H54v5C z<#g2KpI<#AqVw@|`SkkBFTOq=kJndMyY;R}{prtt7F@W#zK$^>!1urN-Irf{g~*|6 zxFq=TkA4!T`B#7C2Tv|P@ZSIACqEhbZnIhECANSNlWD5elDA9Y0K8iFA^f|4?Z0(9 zjcJ*N&Rc7>iZeE)BvKS~S(bK4z4sT+SA{v|!`-d7me(5uoDZk(e)x$o#-o|)d^(+5 znpY|H*|TTOt1-tJnNrDLeDQ@3_H;T8!%$T2-rThN;^~to#uza)PVwsUGAI^R5xoe* zYP&fe_jeEZ#fwi(=wgg%UaaHYZa1CBoXhES0)UR~#d?EPI_Q|fGM(=arx&{?w-5Jo zoY0YT4uCA%CC#Su%v4paP%K#)to6(!B0vP7X^CQtQGO(<8Gdqk)et>Ws))2LUI>nm za!ffUa*hGLv6hW7^e&A3U6chPKJ1Sngcft+oU`UJ&h5$W+OdHu8N>mshgBk&bClD3 z%Exv+gW=W1_VwMZ9Rla9wWhR`8kba8n?98|&tobj=CtIjs#9D{Esg&sm6*{vV+@Ii zXfeigUDqphUEkYIE0iKYs8A8LVfqvS5JUwL84Xx;=pJ2b0Da#Bz{A7CIF9)*<){-; z^Y_$RO|YPpb8TBWYi-rCEK8bXwOZK_mg&AODf9y(F6R-H0A+o-CI)6COZ$h@l9vsU zR4|0D@3l(%gk`pl5QK=;Ij0?TLtiqaC0a7xdt{?Ax90}{Id7@L6z6#!L7|!Xrs>@G z-D;zAg;|wyvD)>3OkyidH}T=}2f6=zz{oHs9?<4WN@`=3m<@~lHkr*i*{(h%dJ{gI z`wcK#XKaBiRm!MwRt5re#fnJix%Oner$X`z>fnl^A_}MiC^7;m0JD|dR-LCN!D3)S zR|RtY#k|V*_0L!D#{=){ef<@xzwFo|MMh-f6yaMl8DpFes=8#ebkBHrhKTu--<=lA z4`1*0|B1$L-z?>07z3#sG-gmx#clYr1Jsif9s^pvk!yp5PpZw$|osexHn&)Ukhs4gg&H7oqy;)KuOKi)q z4Qx#m#!7F#er=2y`gN%($Akz(XdR8`wArlO8Wg7c+b?q#475L=hoL*oaZXv78ZOpY z>w0T}QY}o(WEx2Z07@yl%_`S2rwE8uWg4fLQ?4xv5)j~cJ^{dLwL<3O`NT+uxRlbi z4NcAi08$@2fQXMMd*>`tU`FRG7Hks*LUD$<1P-|@?mLe4mXQ@gU z$EZ-PpS<`@@hRX{P$T|C+OF)nk8CFd!#q0RB=oed=ys$h&GV^vd5il$VNSgNYBv&<}3q>7$>9UjztWAL{;z3}Fl>KiKY;`%~F?(dIr 
z$vNi+Jun=A!O8)X{ABw-e6#&6|N4*d{)-T%&*+d%oXa_Bb|DPiWtw3*MR{QW6cXDS zNMV8HluOy(MW|J)W+AX-h#y(~87k&c6Z+i@`-!;ghV$~#pZ$A7{-V@Wp8oa`uR2p4q)tlI6lqJg zU_pb6R)H#@LJGuy6#)RdDo6>?fB{4RMO9T{c*IbMiXsTgW@s;sOBfsAtda37}`a=qB@RP~3S ze_CrD)3iT~b)NDxzC0Z*6F#}z41FYharK0eM#;lwHI(9<|MLC~v01NHF-F_d=n zuT~f9SUX-}iaO4tF$QvuF>17vT4&jN-17;6kT*ZONQMokIc z;_Sz9wALB>E?ribOoG8u!MKx1i(#z?GCMMX*} zl1tZhA%v83$@wvT8-R!`@uPHuNkmj02LtU{3;@nKqQ@3$1MI}K)LQ8=xPw##5e*R% z13-<12?0O=3?eg&frWLo$V}zSZ*Prvo7MVywRP4W4u@)umd6;)3VNK!^YLIQ9ATR5 z_9pS;iDas^LI@qGw{DuwK%~x8#&6CK|Mx%pr@!<04|f-vQc|h; zVz=p6EBL0MVX~W4@4GlZdtyU)cAQ=x9!lT8pfV1wx7K=c`@8+a!^5*@&$hcu!sbRR z38MGj)VeIoo7b1PFmiaud{LtI(&D}jJ>^9p{Mnipft)61(ic}h$4y?HmOYMWvOZ6mJrdL2SIpU(}& zCbhPkt1+*Vrj)WEfT016V;JYjT5G$oviA1=ejLwh->GT>Fvg`=z4s++mfUn^1$EA; zh$vwd(NZ2$wI!A+R$By1#ZoPsz~(Znoed0}r7C9uMS@lDybU?$A{C+0VU>(Q!CbPn z*4dCHzZ;tk!!X4Z<0A>Gsl?hnvMdWIrIbq%B2t7FmBVe(nSQW)OVd!e=j&E|@MEkKd!O1bX0NswQ71z@=Yef#%=TH8v z;fsG?&PyH=WWOJNxwxH5;hZ2NWCcB$cpp*-MX7`u6PO}{J3E>YE`C9aUPV$uQLU=X zq)1SO*of&+NQ!DLLX4y!l~MuwUT*dOd-Z-i@V?&HU#)7LJyg*O`Znlwb8%U7DtW0X zGk^;o$;4Eiz507@dC;f7cmCvWzRtx>uhyqGZ`|cq7pv{=L$&_dmw)PB|1S>XLtJt^ z^%eGxowqj5=_fz=36va{1+-f4N{QBT=fkrnPdDrJJRXLjyPuEFFe^AVVuY+&YA&hv zp^tG62G#SD%90mrH^y{`M`q5sB9iysT4OAgQl30{vVV1Vu?oNZOTY5f>o30g5=+KY zJr)6w^5r+LF1IV@ts@%a=)uo| z7G@s0PF0}_A`;tn(nVzGx^Wz(lu`;?6NVw9Ru#WnHKDV0Y=fW1r4$i?d7d8g=TN)e zBDz{@l}72+hC*$ViS{c_lQqh!o*Xd&aBvQ6XSk$V@l00-`Yh}|`eys#(tY$4#|0(U zvw$JPNhdW9!2{O4NS{Z$dS=!i%@04y2Ci2ZD`cp5vk@Xsl#xWlOBE?(6iTUW;b^UO z?orrSiZm>2t*t3Wss#W<9`#=(rYR=FR+;CV-Ox=bnF8KhV;vLJs?$7n#t3W8l1m1q zQc{dLSl1+7t=#}js!Ae6l=Z|0`W0@(Z7+-vRn4Uo?>sZ3c@&GKTBTN1P4#>~_E5il zf&Vizo07WIX{~jZkgajGW(V9`VQLN`QXy#18r85T=Fu6L1S>+t(!mjr#c8N?tK6PN- z{^of7E8l$jKQfya{`A%2ZD63vfW}iri$uMuB8osJsQ?P1RSJrRqG*h;OJp-sL?9^W zpo=X=9Y|57!eeVyfZoxo1@&=40013uWUfq+Knyki2|)(^|Lo!YzW!@k4EQ!vSOI`l z34xS|l@J+#t5(obDgYo8z$125KvmTdR?x>0H;@VdfFQK)C;zASR*0xeo+bQa`sGbI zqC9NNEx4V5jj?L=(%^p4s4jrXi{7C2tKxcdd6@Q7p8H{Kz>1`ebqK~Id$xoW0y^8R 
z*XxoBK)JrU!kmX;7&>1|!Ucxic89CyPvHJ=w#2Vrzj^xXgRTpROi7b97(Aph9>*WO z{HCgW`1FHCD-peV^(yBqphBojieg_xp#br1g3=9#3VCm%H6|z0oR&SP19CzVC;AHK5^?mL<&_vj!mN z%recYy4&njwMv20V=P&{MP2#U6z;li9FIBIK6J6B*vIaWwxM{mx?yMySm;DjNe^yRpecMz`OjUoHHr6uI{51 zFD>Lfp|1O(wXOgl#@LZ--=TtrnUGW!1(mSP!n8suxwWbRKpIS$Oo}tWBgdbq<^Bo) z7&N7nrb{IvRU}|zjOOHBh*hLi!xZ`snU<0#sy7d(T=K^se>@nM&T~z1=)%ItW?;5e z@kv;HzI&RCaTLBi-cQT?C+D9(zk2dw_arTmJ()g0EaNz)l$s#}5tn6Z2?Xt^zO!F2 z^OGk}PCp(W9v(_5t@&<@>H8i5N>w7L6_#ZwCADwpIv?|6o&htfAR>W(_MEddzKvBv2*whO(ilyTL0s*OGdKIMMLUisYnv{Fn@K`MR_op- zQ)>;@G4q^a&KW>zC1XsR(5R|NDN^4~K9Go*t*X95{VNp@LTF`_Me5 z?N_%+KpRc9QB8Xn?PYuK)BKoC-TvMhtEx4>)m*>5B-(owz0AP>+6p$EDOD$42 ztAVJ0^Dsl!sidom%}1Yn9E|PBn$8(RIVCo>>$;rkyx$LNzP^0Y_m{u^>%V@p|Mum# zuhU|_zWLTz+hQIJnayTvjJ>(J6{*hH(7Wf)pA+HBmv5}KU;g}y%gtuB-Z8^=yKaM= zU;E`>o95%q!!f0Fd3kv_JcJOMujTsk^8fMjr+@Hg|Lnj0um5ZRrs>`YPt}j*lu5<2La2`+;V7*??F;COd4?~QRxT@;a)fF?1r}5_I zW&z2mGTCKLamjbL4>@N-)1Eo}c!fytuJd8L-73oQ&8w=qesYOKcdu_({ZOR-=*yp# zQoj4ycXNj0{#20^={zmh7nfgr@kLJQ^5XjGhfj0P^Stc$`;G5ZbsWdB${FUdASc#TY@P#=k~EV+HXR!js3HZ0+t!JiJ4bMuCg)rTeGBbt)4_m` zX(DJ?2}|3`5XKm5Q2_EjurZ}3MD#ug@%elvBI7(e$T=@@0RYoM7+9ec5vrh~&XX~S z)mm#&A=Ls^jU^-mfayH11}8<2$K#UXo4Z?Lzr9=&5h&HFITuh;Ej8t&ONoLCjG$WQ zd0f)$x-jL4V-E~rW0c~U4u^R;sl5@i|LosPyHC~Vf(HHWf@#R(r2Za?6=7W8UH&Wn z;UDzNKQ(qWZhu(&j}beQ9w;ta6$BlkQIwje&KI}E(;`ZkD-{8;z`vM{iXbpikqU}{ z1OOG(Ah|@S)clD6RfGx(n&QCzLSE-8^ph{XjBzpqtG>J2 z-wE1_^;Xj2nfu))WjW8%b*pif)q0JHr{msQ4&I80V+U1&ubZ8B-rg?v#eJ~&AN8(! 
zqVwsDh|}~CImeWiT#_PI$tX%z(1w=i zON7=M#K%phNO8`1?~OMKAf{peWST|*QLQ$!s^(mYnz*5;YAGp~YOOcc7|S9?(xV=* z?E{+}3=yqEucH=XZw=ZmA?HYANh!Y`et3NL*Q42O&R@{i|Eye%ho_(WdGWlRA!Z@# zKGvKxoqN0Lc!fMIZhMg5b;obq_yx($k%twQ&^xTm6f?0z6Z|+cKte=7g_4U_Y03^m zoJ(q3sYfa9+ZX&c-i?hhDdq^dX~$AZh+w_9*3R=-Qf*rZ5hz)hIdE@`DP^ujj7ItX;DJ4a3sNFnDqCX6Y1;AC}XL z{Kcl;f`}TM&GA%wsG(yB4(5b4o9C;3T#iI;d;M%@FfSDZsA65jxnz}V%B6xa9xb2d zX`UAw+&s@${ouVnpU){R#+cJI0@Hb3Lg;_^!yh)?YpFWVr#BD#o$r(y1N=e8Rb4{@rW4w3X^I<>7 zX|7(J(zD$YCRkAJPq(6cKE?ILDloYaa?aDVlq}A(we$zw3r)tKFN^y%yPCT-)dF&4Zj z0gNes@5RT?+E_|m3L+Zfd7drtcD3$97mr`_${XXV;4-KE={!wSE}By{?0|Tg)A@Yf zY}S2nODSHl5mzi5BJ#!%P)Z5t(XG;`5br>X&bfI`IVTZt)-577Ha9aDG(ZM&WCaw&T#aI%=^w^7P~&fW=lea`JT2ZERH(Ib zMJ5n5OU{&PPy#R>=!4yr2K(ml@ao~d8+vc719nQ_T%&maKuT$z=bUp>$^$?{YCI|l zQUZXhtE*SveB1s>jE{ttT1x1;)`lL(6C#EXh#&+PMBBGLrW9D)fF$Sh830;idY)$h z2q7%Xa&d9db>2Cb@=~i3(R#fB0K*}ttWf(Qgb)Z#JD7dnuU0E&zP-K84Ms%Nx{Ux} zh?goAt3hf(>DSj!$8mgkxbxn(#D!AI{oTHNM#U+=vK!3It(HOXA2X_8W$wXJU$oXa`K7~8oXhJle{i~!Q?5p8q<0A1Hj)6`Oe zLACR?wpd`GMrp4#ivR%vQfzp103sl=?GkD+!rnPX$XZb<;9smZgL97;q4CbSwjD4? 
zB_?BxL3)Q3Y7P?bJ(mL3F>?iNf@oC@AvE`BlXbW0LW`tsU$>V*q^fmUN)TM`4<^{_ zi_2Gs`+xeQKe_pX|Jbiqf9(A)ee&sVeCK6`ey&;{{F@qw_We{ zOFqmwJ9rrHKYa1Q<>lp);`w}DVmyxLlv0jqSg)P2+hO(U)vFdxzAQ17OrXbUdGd5K z^msTN9v%*;R8wT8p$qeP4k7e?V8(f#$9ewIw||g*#k}})-R&;_;MLFmy}$Jz{+qx1 z-}xW^tG|b+1Y!+?;*v^?afx|6AG^+<4#!dx5dm_XmK@XVGA+wuj9Zokrt_PJJ4B3m zIZoqh=t2kxa&@_DQy=G?>wPeUC=tKi1E zkQlsr>X2W(zS-YD80V#e_u;Z13~|m`N+Cm=&5oJxZ|@YT>$;20&Js(ljh~ZJifLhH zZ(T}hN_pt|%iRStd#08@EV)#GuIu`)i!m-Ss%kBzeaJSH5CBYbETvfMALqXI(L)oL z#aOr9cI}Z1x!PgyWDYsDK=|oAc0*qb9**Oi`IzzufTiS=mNz#K_os({<@e7bGRLKY z=2F^&xA`HoRQBu)8)63ZHVj=yfN4pL#7NGNfMx;AMWxo9vh|No{i(AAn_OyhZ0MHB@fLJlD; z%S1#iv%~~zotdVo&ZC1Jn_T_kU~|2NargOa^KZtdPaSfp8qI2D%&H_R84D$60%We| zLiEk{Z~Wl!4^8F!^{KcxC#EidfDB?1TaSg+~zo=hiz5 zU?XL!DvBb35vq*U5Gff|QUoHg<`?ub-`77^y&n&}ulMy=s8B)Qu2z8wwPEd5=h0i| z`ZZgbb4}btH|ErR65Q7&Ps=z>>EPbH?rKzstL=_u+KB1;&t9&7DU5ts#j;^$)ab17 z{ct*;OG#SubRPR*{rqBUnONa!yXl11!QGFOoafDYHSaEhv15+5N;(|vFt8bZ_{ry4 zk}he4zGi&?B+c;BWmz<2at5okYjA zBxa7Gttx4l5jceCop;Vv%vDH1J$L{`HNyun2RcS3Anf~f({wrKVIE2;^E{(t5ougw zW{PTLfJ>exO+Kj;qQrP3$nnNCak(rjf`Q0vNta7BXh6ra)LtopZ%hN;Odvd2{u~ zd){u>O}i?kh$=JXT$k7cA02x&GqJo(M0u*qAiX^7do~jo1T3Q9v}E#7uCC+ys&{Xn z{>_B_`S4$yzx_Yx<`>iXugCKzM`LcDv87NG=7~!-Wd(yegNXRCPKSN>jDP9hTHpSI zWBOT!?sfRYMQd)qUh9_yd{tYN^3xVVfp%vaVLE9l27bx=fWtY{6XEyItbVoWHwzW8L*U z7duArmyv3LUA7{MQOP;iIA>7M1Y09_^i&ZXXt*zK&KDY<>_7R-dU!!q_u-e7Rv%pb z$9(+q>$ATU`;{S+0h*~6fu)V6JQqhwM4&zckwL@~s6tgV1p@;^WF%mA(J|S}5LT+% z?f(Ad7ypZXcn$5z+&tMt$;l9WXb4e|)VBfAG#A6(H)^dIY}K?_D%o$TL{Njm#tm|4 zeCHS>OSjsjLqmsM=zjLCf4kN6Yya?n%y#)lzwqBl82l*gcgdd;6Pc=IFa|)OC1cr0 z3`|k2pn?m=yg1BK1yQtWBPtVmVqPfPDh?a~C@LF}D(gGaHV}ZRQ8CJ(vso1swF-zp z4FAN_y}qe`{)*util87anXar$Vg?X^GomGuZJC(~fxW6K*s)eaPy_=M)n&Ipj!Bm( z?BT+wswS$crqtGHg}jaYA^yIVJYtXGz{lFrl%==A?54G*Qke|WbXd`eOer?JUb*0? 
zW@#^04McYBSjOEj&a)kdAxSo~t=Vd|_Rf9&`RC{7=iO>uw7h)%>i#&qc=6)m{L!OF zkEUtj7`m=IPGj%mh=PCtsA*T$gfCvd>|L0wE}$d;EZ8VBmMPzsnb?iT<1mdN=*vJA@-{crc|vS<|!B0r@UHq?P-%t(9to^(_ttn-yMeKr(o-t zg8~VFAS*d$RDk3i5jM;>!|2(89#Wd;c@E9`?DS%{+kf`O*UmL6u-Lnw_x*>@Ke8%U zS664}Cyy`B=6Qbe>NOBL;--z$VOYRx@4cn+`qi7u%gdDVet$qS6qQ^Yk&g2^G;Q0G z+Bi>$*mbK~DSIacdBav&nXx!y8PSb&?Ev3#=_Kr%no9lZy z-SmBba^^0MHNuAl8&qO3s^iutWj9Kg{nfhEx^k z0jLGk_ss(#a{$r0u&QH>h+J#+zR?Q0#2g@+lH&-XQkUH=0e}I@Vgd88dbb6Q>#u zu?b7qQWGKoEcbdaLM2rxmW8O)8sDiLbm@|DiJY&EU|g-$x&1JNYM%IR9=7NlAf_6b z$hpOYJ}3$)lPR$}I;!l;)I{!pxaiaE+4c4H>)q|6)APVl#!(h>z&kqm^8CB*Aughn zay%Zr_ifuAkH^nH|9rLGz!IvwY*EY*F@&(Zw%&U}i!pl7^E}sDySA57OUm@F+NOD* z%yfEss;c8Sdhh$b_uiYyk`)}n#ogWAaT=MIJ(I=g5y?5XTCGYcwTSmorGOd|^le8y zE$1QUtRQU{hry+kR;$gkXCE+|4E31i5Spf0zBEXZ?EnThk;D5 z-oDzbn$wdtA>ZHKHcfMWcDcMJIcF8EC9A4xHS>!Ty{azTl*O;y4#1lTyu)3;@wF z5xWpV6XN?_6ujf6r~^zT*#t&11si~QmssRD& zdrPS0EmkdK?@u{IebqKm!5&?l9}c_2VYth81ZZYe zWz3UvZmuOV?+*L6KXW0@IZxx z*Zzj&o<=ejPnx=8VPdN?!1{q40I!x>B z84$2SDJ6}^)6E4U2In2|Vp_Ev#l_D`OZDC_Pt1ovb89spL5&7ef=fS}P(ubFqsbUwqW` zoeO@6A+dLY7z6+{0^|jh5Y0uMl51D1*k+852#G*}ykoV78)ER@JuL8;h{%IOU_@j> zRc+gL@m5;aREX9z%~X=Aiq?mj1i(<8qrUI6t8*?7@+JZRnCCfky$BkBN?EQa-aGF- zxlBZ)7SK8Lh=?6{AE#*w><9@E7#$KWMnmYplC^!e*=&~AjnvIGACC6`fJUW=8G3Yt zfXJdc=Y(jU8Sr7HO~`)3xg3Ur!tGSI-ERAXtG>PHA!;^Gt8;!b@VV7z4uw-K5M@4b^vurrffF5?Q>*k$A2=&k+A~AY3w4RRS~O z1+zc|G_3@}XpTtD73C_3s6_gW9Q)5;eX}0;roO3vhU&q+7)%j>5e>)y)I0^LV2}fm zZ@qCCa~e`wJ^D-I%X*T3r}dMhde*I0+vnGJ_dOgA^{l`_>frPbR_EW@HjpN0ymPD7 zaX*Y@Jnr_}lQo*Iw*BSV`L~`t`S9tpKE{p%)?}(|@Zm?#Lu{Trd9ptqzIyRuv)O#- z(IX<2ZfA0@|G|ee8lDS-`4AG`*%s|Cd zYGo6{8PHSdorG%XIE`3%wFv;Uu>1rn0#-H0h)wgX`=N+G9_!@SLa`a%6k1JSZ46BZ zP_xJc>;PR=YBJ|CiDvJu;n&aUOCEpBwWedyt08!&P~CL1J6~_SV~=f1T{%qVqoW2Xs2*!A zS@Jy3!y!+3>H)g0J6o+t4b{#s9=RAQ^JlMKeEI6ldUJC6?8C!wc=5yk;_~vMKiRy! 
zyLtKg4L(w0PGkceE0?%1yBOB&n)poho;M`#z=ae_AEftyO zGy~E!BRNG{9F0NrWV2SN(=HD(1*V$IdbLV9SCyQzgW>G-QWeHIsT5NZQ|DaMwO7ZT zM*ya#X~vrGk2_PzMKfX(LbvK`t)e1X57RJ#6@v)Whvvz}hr7c;!2~p*bI!4M)pXJs zJr5->0hQ#P5#~~ekqLzWJU6_VMmg^9=ajlt->y~{7Z+xBdp*tbd~1V7E{Kz#cS(IcE_QDToL}xz^?DXjV#bAp}7)%|%{~yU*UdY|x*D?n!7r zeEirOA_xIN6?2?T*+@)`7#WmM8G(ttbB1Y7B<{mUPp-A!Q<9)2PvTvSl&sM9=Znw+xu-om}SqQF_qzWN~oa+L+FA(-T z&%51jNd;g}3wda@TCG;A3UE9g>#``c6JfERdoLD_D1I917Dt= zAd@I|UH9zSv#YDC>+9>yX7m2HTe{O2*8s4%qo`H@b>u4`sC8ZUzCkUe7ziU7m{#f9 z*8AATrf<6iGbN&wruwjeQX-0tH;|gMArg_QBAN5#y`Sg#;W5n+ImWnz8zV9hsMd!V z3pF!C)8LyW9=TRUqc(bGu9CfT%Q9-&3>B4H>+x`4=0$4)E{wSFr-{#QQ!-a+U8;c+11ORzWVyv z#iPrUi|^7C*Yt^*Nb8(y%~mUETEaqL|hJ1sI{(FD@K~)oGg0U^pDQ*sR)q7zPH^TvlDzk}p)!ID?cy=Iwf` zU>w*?u3ld`@4x%uC-wmh(~wdEwUf;z##lsxi{Y79mgzW+`?28=eF)JbL#msz?L{eN z(fuqrE{y0ySZ$YhY-M|vOp{h-YP17tFibXo0ssy+gcHJHK`TmXo zopaF#3~@^HaTvZn9R830(ck%VKls(Z@ZkqnZ{JF(Oe84-xe!81DK@@ewG{*zrkqT4 zp%yhUk^zY^IYjfpqX9E}wCH2!+s1d!%9M{%iy(=CsxZ62ObC-^^3E+raLxfPS{cf@ zF4kP0*tsR1y_6zL9FB;Pcgrw{h`x!=xhm$=9@Iy$7~Bzyd9WB`8&=Ddt*W4lAcsm3 zXU?Jbh;Zf;G6oU{u!!;MqR<|U23QSAQ(YFt8CSo zAdu)(a^Za3$=&6}ufg?;G5x57_U-*}J>+QkwQoXBrTrj8y12iKD<6*aW=f9v{&ojI zO%vMok`akuv)vHjG#uO5bWIbzuhTpX$ENA~bvLC_t4J!B>+PfSb4I8oy=ms#+gtN^ zx;pvn#fx-z&jg!w+ceE|9GHDcnTeXFnWl+=&(7P3>60yD=uh1jzxQKP^xW-WkIoSQ z?e6Zoi$_GRCNYEiB`onNM3^eZXAb{rp1W1pDVTnXR1XS51Vomt&i-#N;b8h;{o)OvKZ zbzq>^owFmHgsKjZeM5EN=EKxKBguirIM4n3rrlqgYby#clDA0B`?R-(W%j_T(}(b9 z{1akQW+vzJoV<6zaVb;Dxg^J{_Uv?>b3Go$p-!}rP7zh1nt5+c+m=$s;Sho$Qmv(J zx`n3Kv|(YEEl31_BEjNkP%WDXU}g66oE(t{T7Wu6dSDKM_vVUfX0X;phRz(Vg2#|^ z7A-`;ftxnUpv3t2iNBvhK4kP|ZlFw z1hE$R>G!Vwy{NCX&9Wl1CojTZ{wjXz?UqYtX3Ka$eo+xvttIwOi>ekt_5fZh4|}L)S55Cc}L#(kWV*n#$VlznY5p%{rdU0n&up>0~1WwE?>1aWx8#B zS?ArB*N1Ta#;=O+n%(O#|F{{yxR1|wC%3B#fK*di zh89H3wPw*?78VN{LPY~4B_d)_0|pT#1YXi(Q2=;vl9ZqV6`*WVbXFZr`#O(Z~d&`%_ zd(B;<_%J6b^H00K()0eC?;VAKRPf;m55ChS{~TK|f|~NTtpmy~oPLMFz#8 z8t)FfF(2kEL~hfsPPe{q`c2b_2m$Yh1D5>7&t84;>QzCzn=6_$(SP)vPnqy%Uwl3f z<3}HTuQRbZHl9{>fJfdUrO^jOd 
zJRToEy11T46;6|^RvlAz-qKux4^8WZ``7z@zjA?VAL0JyNE(NF?3#Ai9e1~fDa$aX zan69ehU{W!+LacYhS9V(L}{AOR@-41BKZQiX*WkZ3W9S%v(&1NL-5U1su(bPF&O9h z55N5R`XtuiUaX##D+$ zm(31~Nef3b1l3Z?LDS;F5}XfBv#_DBZ*PuMZCBm%XPf=;a2$sH?Owpzw)e!F^NZVC zLPhTyXqX73!ZeIs*LfdR^{^kzY`Z-PA=H%rP-uM=)9Z(+dEK|E z1W8I+4p?x`rBvSUe%`;!%XR5+IE>@Cv|cHt4Db+#(5>cqULf_Z>xk&9ufE#d?;m~e zk>+%|+6;O-O~+~qm5bOg-8D_)o0w`5u(oXkY?!C;?scfO7Fm)Qz|7`(UXEqoc49h? z`}_MC<955fzP<(k=g3M?umwV0b}g#vV_f2tagiS=0>HAo z$vH34FN+Tq1M7lwfxZ{Yq?DH56cLZdk?29xviy_91?$1=h%kg;_>kSIW()g!5lk#^ zFc>eV1`T6;n zXV*2iyZd39*dMO%UjO2EHZF!DmD$a?bgIiBvvlUW-L7exrfa`*@?Y8DdOv<~{pRa( z65Mtjb94Oa`1hxu|Lgy?zYeGQVJt>K9bn=&D9LaQ+vAUL%*u~ zTvW!xs8&Du;3G1~yU`H=!b!g|QLL8hEM~dn!{Oi>d~*3H^{L@tW^IVTASGYFy*fEL zd61_tEgM`jOF8ZLdlA`gx63xMZCe*Z-}g)C$Z|ZLg;&%6AMZya9TFl^ib!{-5-QC| z>5%U36zK+uF}k~R3Az-DDyXN8oVOSA3hJb|~+g7V-(E$R`>}a+Z z!Y#8>Luq&zwfEY&n$S*M6>6in>2VpyY1-vU$j}YUtP=vkC_)AW(~jA}B1kk9nea8@ z!S3%p6*V2ANiyVZA6zO}*?>gZ6l-Ozkb~Zgyl3AzLz(VBeuER8P0brc)Ds7T5?MQJ zgl!m~6oNb+#P6;@r{*Yub%*C#?^;c<(D~i|y@>9|Ni6t=Lu6{J%SMSiWm`r)KF;7a{)j5wZ{G-MZ^+R`UrPIK~XJ@+#a1lc1F*ATsr zvA1_>C&c9UN$!q0EEw4KIpT&ZQunt5&eC`3lRWe{nmM1AG}(xb@3(6dG6Wv_CROQi z{vkz{6Jgm8n>1tcb>}})Tg&K?k5^VLQZql42U9=uc$VwC>sYqdT-`{ggPRoy1IfM{7r(d_AY6`y?S+D z^NYh9ruSlELJh0ivf+#437b5{zU8d6aPh<_N0e`< z%@Ql!@j{qtq&bFSN%hJ6jqA$c`6E2|@5-auzVOWoG0_tJ7M!#X>!zGU1U#C&Lt?Mc#+9A$ zP3tOUj6KLH@5$hC$D-#shoPD&Q+DXLUZ}fvxhNB++#fa#W- z!Z%%?gm)ejQs>Oow$<&tnB+dxClf%O5M{Fy;zG-!eMZ{?%1Y{YnF0%je=x74u^=Jg zkKZauV5Oj#{NmmKD)B6FTuGTPt(3Q$;OB31ghWw*@ngRSr~=sN7-{CtpYbY^jOtJ2eSMYM+S9Lbs#(3MfgoA{ngOMm<~q>Nn)|xe z$b0DR$>kjIF>@bU)!ZT42i>m#Vzn~-=&PuDtHN>8e9>K6!Hr-;&a-7)4bRl<&;-8y z6r+i~q?0l8*XWOU1tvTG)A8{0K%FDe{P!>hUqb?BS_~vQ;8?Cw(KDzdQnaZk_qChb zM4O4>+VjxIpp=S9>pUs?;2FASjjR*9%euMVojqHse{XIPJXgZOESsa-nW`qwYLhT) z>EqxQr`Lt4a?bh$@YHFs0kkZ%SAe8Z^2!-4a@QXtzvHm+%-JxfFqP%ALb6B}UZcGt zBl*u}219<71N|jmhk3|vvM#L#(T@#_S!9{$6tAb6DsVilVy_^KxD3H>Q;?-ka*RE_ zUoij@F5nDlQ0P3Fcf%Hxqf#c^`|H5<(?^m9CK;S5-mW5jR+%2+{7PUmC?B;P$b~4T 
z&aa2$u(U3O%oJW|2uHZMdagfwWT5Z#4{GI9<38~hb@PF$d%3lF9n;Vlp1ZtEN)x;} z#CP83d1H-M1*xk$+4V#@B5R$AI$ploS7&jcRvG3?w|)ExbM`zcXB)o0FrZ&MK3O}e z8v;h8ZTYO-Any=khXZKEst^xf*YmCE?THC(E`(t$Ss4fBp3!AeH%6pIL&LjvUrDR~ zzSK({D{2qNTi5ZvNr?3M)4yq30rgp)Zg|N#bMJ5yz<*40UDh?xw%L{&SpPZ!G9Y!` zKkoL*+@^#N1o63id~%W=_g>$a#+j6QzKES`n`1JU;Z@+n??GF)RXm~v-G#^{HC(GG zZ_XGS7TR=Xi(gk9gtXEV@lF8LfzS@aTnDf_y#u%EVvt91xXJOV-zmBny&)89Lba0) zAonhV33H~N_Op4M3PXtgp}@p=0Jza!-6OXQuYAeTpg3Uwy!*pVh3UBRO(Zy##vHZ0 zFH`B>U-DJ^rwGjQVXyI!=d-6!QkUCg?H+*#VpyjO6 z3qj1aG+Xl_K0@mugm#k`Bv~P=);VZ#k8A5r&FcrCNuCOrTM+ zL$SU7uY)z&L^q7Bq zF%AD!pTw$Xn{8!h)O{6bfKO3zd{8*N0ZhoTUMgRcbkt;#HEW>J$pf~p&TXiNq-)bUi7kYh>@HVMi68NdJ zg4}P}zk0;Bon5zkTaU21-Ls~)Hu`Jv7f4jXbrQ<;S0MQIazRAxuyyUtHTP4b)?6?iXyJ(%r+#b;PAH{i!r+&eR#MsW>fM*kryi#$571evd zsszOvjcn)Bm@J6LCQXks2k5=Th%~EXf{N-tI)*7B+Sfjw)f&{1@MaXF*Hb&N)nZ2P zC0SvMe5;||;2|#lC{d;L?&U{W7wF)8qdP@>S1d2VK)yDp)K+(8!v*)?)o2!C9oI#< zDeF#Ku7Y7IiN#|f<}}P63AXN7<5%bOgkP7uq*tHc2B&UOzXjM-Tta2X$#rYw^d^+6 zK&)Y*>=aGEY;(~e@XG!64)L9|rdM@~?*-5RA#VU%=HpTs%jJi%RI(#lfcz+0*t>i} zCQ+*hfyr2Zgv1WXr<}c-ICruLAL>L|3OkU1B^Kt(mOopxskV&iwfIRa2F^PY(N}N2 zMEb6`TtLr_9X~uRxm_QAUb__Wjr(jz*MR1yAq(p>6{rIK#yyb_NuB%6rS&%;DH<&wr9hx@0$lz9Xl49seTcA6w~L=M1FPG-8jI}C_Zk={ zw%DoKsM_P8dX2N+UQOeSmJngIaJs&GM`HRNKq4sn3Dc8UZ75olB*a;vRJ!W_O+Uve z?cyiE$2Pl5JLF}EuWQ91l89W1g4bR|{Nn$qNd={sBKP<2{S@`9SiHv55B=cYytMYn z4QpwY;XC0VW{LA#FEN)_q(4cemvgo1o1Q#e+mUW&a~ z&=}1M{dfO@2LK2Ge3kpCMTfx}M@Go0J9Q@ticcYQhberxv%OqDH> zGuCPCvTmqwdzYAVv5QwW_5gq4=DKawa7O?*i_KNZfa21b5Tg0RU4sgA!QBKh)Qii3yZRoD{kRyVEmO< zBmX*)XE(L>`J^l_WPqwocMc-X|80aXR(|JVMDC@9794o!_FjoqbZ*-gfO>yUCsBRo zZNoqbvx0%Z%**qJ;4}lgr*I`|OW+#i@+a^}Km|(-Z*?GO;RTJufI2iXkB@fsR_!xE z4$sibe0ka`6KkjjNWY!gl>MD{Jrl`792>5pGP=%SJs%k_Kfh?Ce#9r0a1ApKQ7!z< zYaFt_SKGadJ4o)w%V5M_RWHH~acB~J*5-PJ3ze>5dkVUR=ONXW&#jMgRgCJc;Xf~O z!hMmulH`k~U8Gv{EtVMPr*jywu4$aug-~U=*vyw{e{fet)xaQ`Es}DY@=8T4S$PH* zOuVr&q6&0M0Dxx{vyX_B$HyEvRj9)^j3dfX$nSsi`Joh!I+5Dy5EldB$qaa*A8^aj zNs~y)#ol=VB8tPUO4~hq)cLjAvNVNNe#2e|NJK*-uXpeB17_GFA 
z{x4IIX$<^9Nh`r|6O8W^O*R18mPtlnz9~5o5z#J|FH* zu1^jQVvdVEm!QlUK%FY>%I$-vSzKB2VMCD!{VE`6rMXI*FAI~$BnJHZRk9|{eGs9X z_khuwH?6I=bm#=IIaVwY11&etSb6hy@95D*g!U#fc5Ezz+hlcZO~`0T&Sal#M9rk|BIsy?ENeVl{(f&g-CY$Lb7x>`uX*hutGxgWC+b5otX7M zv7|FbMKk`))XG9Tz8K9fj~$z~=#x&L`@Fks`A=dTa6o-}O5E=pd49`OR+cykRn>7^ zBI{u5P;>%WHR$TQgqu+VTw3%?(mqeQBf9S{YYI83 z-0Z!KE=NgP8oREQPLnI`4}>F3LS4q*l8ImQ$6!m52o69=b%&{Sq=T!XLoHA0Oq36J0wc7ICyByaZ|a*> zbTv;tjRCstW4s-VJrC|Do?OfiHySIm9)}bl^AUM6)xIEVU6>O4^3`=@tu2qP-0Iw@@6~n?jT5Ip zpto51H^mhJ_BZ5cZsa1jzI0S!0W}oGT-}tUZ-^p+u)jjwc0ba9xMt(2Hw5bw$}O`X zO%NboCz_YbAQY&mxEG*4|*y96|_86Bt z)zmI?b%Vn7ms%xJvoiXW0*1JF8f#cm_+MW7lg^WUg@)u4r|0)huM9&PUm4S*&t}Dg zal&|Yd6>?O-vFW^pH_YZgT^o|?J#t21L*0w8CS+qbwf*?-JrZf>Ls;8Cd(!T>{`Q1 z+|r*3myS0jzrsBF@6!GT^>WKU{{7DQyLErADzyog22A(!_xJbow8Yh%V8ld^5MGX$ z7ns<;-V(cwpKdx0yw6)%K}rYRv`M2TK4bLHDXEZ^LsVJbWm#3N_=1D{cRH5O9*qlp zo3RyZ4<$u8KS~6)k-yx@dZ*O);t_bi(eSwOKI9mZ5 zvlxdatOlU*o43PIE!Y;=piU>&a#mpSeJ8__(!Y_ExQP?9Btsx9!_^1wybI={GcsN= zev>e)MJB$pZT{;J?A~%OC2x^yjqcD16~`$&tQBtPt0g4lSEAgV;=FRQ`NA{G_h8>pfR=BTw9ni@9&gW4;7^$;W`P;e`*4_e7(oyQ7D++GgHO zZ*O1pmu&~P;>?K`F?0262`aGy(efP)eqkzTk7IG8w*2pvY^d^5Csp9?-~~QF4pZAb zlzEVtK)!yq(hhT`*|8F%|9ymLJmh2ROtAu%aKOrA337+q?zVjF6`-Xg_vaKE186#h z{qDq@c+)b>qjh>?tIp&2`IjJ-SS(+)^OLvt0OO#hy&gWLD9|BVyc{7tEd?W?ZG*~_ zS9mh;xY_S`>N=HerFhSHm#YuIf^|oV4<skmRgb`-;nX1QgP<{+=*dGZMd;(CF*v0xA8Ab&e48wa8t`0FPCjFUzC3}YK zA%3T51j(6bp<3E9#abE~+KKD0s_>R%J2ft)BIRM8WF}5c7Mt#7vKNZ z`)(5Zi|n25JAOC8Z-)TV3I33nP#f*4kw?l^7s+qmj=v=!xsgYgCQEfeX7c1~K^lqQ zJgs`kwj}Rf!`cpHKzQ&;TmGQy@6W_sWs}z%7sRd4NaVJ&SPkPT`^X!$-y~7!x?e&# ztXjLc^7lCNT{uE|EYrA<{Z@YQiZrTmnzgU07+!^tvWl9kt#)a)`QVeJklJy5B?pvq zYJ@4sB^dp3uo@S@pEvAS7op(C@A-Jvf(MaiwVLscGd>GV9+&@KI!F(mh${b&eprZW zXd*usQ_D7&JXy?Idq%W3Gn8vMf*LS0F2@!!NL?zy+tJi?yuBY?*yDpd@C&X>K<`xx zl;dY-#sHA243h%plW$bwT(ii{<6;M0I2)B0j~?CR9+tl=4)cn3#6G5)Wfma&>^wS# z?9MsR}?#{=a$8EeDHa*j(}E!>5kPSzofcXBGqjoYXO z2)A6>{4IHk7_HB8eR4Kd`B()2z?X`hkm>j9sSs&5%O2yvhtz@Kp8J2nl!5N&2hzxn 
z=M(F^$Lt2_VX(Tz{@=Uo=dLkb6HjRuDIqcKhqF?s_e9%plQa$>ZRelbgaZGtCk`bE z>1wZ_UH*`b`JgyKe{!MaU$A4YHldI`x%xYGfHsD_Ji}WCbxnp(n!oY1NYFx?!_fXS zce4aG+RFcR80oVKDGwnoM7uA=c`AL{*D13Z`BlwF@xu-TudMj4BO}l}cttSH_+nA9 z`u?dt$ZE=z&QbdGGb?;ZS67W*o=BOz+K@H(W9OEb?$3oV?^;`f+(qrj>o#dVLUhf! zWUF zN4XeEi$8446Y`9$s1B4K5ph{f91%LykwO#HdG*IXue8YOyVa)|Z#lHc@2lh@8qB|# zDEeC*{oRB4OYTJ8EcQfC>#A%U(HuWe8+_tPg&mJ>xCGo?&5ieqvehVgKB;6o#Q=)PHv@Q<*qUe!ktdeE5(=}?;jSrRRr_Xlz+kE$Du?1pBb@d~XW zXUn+PjJta}$lznJrrAX|+U|`^etz9lfk~+7Fy?z(I%bQ_02|QNS9-Bt`pCSVKnQ)L zfYDct^8bA*)8ji}$dy8st>QRsAr+|>3NUi&uid@2qH3YPKEix!x1gQLsV7=@Cz#&pi>aKXFu!WBwqjr%C&Nqn|+zE$M zO&kVk?|1}z3%lH1ZaP!tgAof03p^^=s2(6!e}D1q=7%FhnOdITBr5%avTnSgMPGPaaRV5PF=r{862*`B?8(;%fauPq-`wx17;Yc>EXibF*D zvBnwF>Ns7VMYCI8SqyA%%rLUDF_osca~9Y-9S9^AMdv!I&3<>MQDo&b z)&BUK1Y`?3IFE{|f>#Hj zZf6?WOR}pOa@qj(?~vvszMPYisk9WbNKb z77&ucJ?>8aHDdq{uR0IK^oBI7e645iSKn~K$w0V(mf5AHrEMVg_apUUrXPttW`zA! z+DzEI6h@wbm#a;(g1fYz6M9%-VBo$Q6w*2&s>{Pn7x}KET4&`e)viSRuw+;(Z{b2? zo56`W2f7{nrFAFmt{P{FI82MxB4in^cGQX0JH4^+c2fB#u7?wz^2s->#c z9xd)R;`pm;KJ52Y6fsoMuz0B| zv|4d}+-;KxP<48{bgKDgZB1-Vya{P5JhnFToOhUg)hB$Vf_(KsfQKJThR461EBh@$ z0YBUfk9{v^8<1v2=UzHbn%3l)J#0a~SCzrNV?H-Lusv8rEd6-a_S7%!0QR=3Nu=ZD z`xcrC-|0FP9}w)I*5^Nu`1w0BiTDQ!!4)$bL6YP2TXdL)8T|zOY`)Qr_fIFG zp+$!PNHzj7jH>wD4ds*An21@uh^M2f@onN6aq4-@Q zTKsrir>NaSXxNqu`7#;%Q>yddV}1(BjY=kcF|32Ioj#s{io8v82340fA8pDzAK&)#Ds1G)zIQ`#q_YBmhYWisYo(J2j z)J~+Lss+7}p#Tq^BRWh0?aPZApg}pa;BZY{KnL6Wz3Eqj!g&!CFRrfC29O2+W@KvwxHMlGRT4_7ZIbY17uz5~QA zy-%c7#Mbr0CKB0ms*nD97Y#69;JxZ!$Al^}MRpp5>M2$f6UboNn2$^e#BX8)&_(3% zjR?#ECwAUPx{fYAAfbj$tvO_ArFn=sb}_i#WE@x+w?IYY&TQ*T|t3|m#0q4pGNw}q@v{#RPy^%_ELn=?)NbyV)C$fT2- z|D7In#j?xU>YSl`TGng&^oU9u=F&}(ayCBB>upx%Z=b@`%bEO15S)3UR>o1d_#uN^ zq^`Di>c=o*sm|;K)Cv9kdPhtw*sD=7GIT*uMe|)o;i|uRZYgU?bM(^jomK5Hcdg0t zHgf6i?4Z5()pd7<E~=*c$}ccD*hDioE61J5&D&ddXGUOR)UPGK96(~>Ap5&Clx@?j zS1)|cWDTV`>o>VW)s^0;cJ4@o>#P4hhZ?5|4}-WSp=vyG7W%@G64_h6xm9T(H)x0B zW{RG=y-|%$$2g$gxK94v(7fjg66x5W9Nt0`GPFle`*tFVvJO$73_j&ZIp!u3RCbRje1QsUxE 
z@yGSO`W?-L^fT1be4+D}iIKh%bUS@jwkY!JijGXUVp#H0tL^{7e0y1-bqAws=cR#P zb5_?ZDj57yza`2KcDA-2`A)nbTJM^3C}ctu`v1ENReQRc|UnVDkKR#SJIr`9hJ>B*U zJ$;_#t_>>qS{`p~Y`o(ubwa!7e!FG>MgK;Mlq*3Ubrt*$!ifc>U{ZP9Be;Y z!jWn*&;^z>{lvp`QL1g}EkZkt0z};oMUoJ0(oMOs9aZ%Wn3@;Tl>@|fJ4-7%L_Xv3 z5pRy}`_45Fttt|(7SXU_UodO^QGj|LCd_9~WIv&1-~uIlDw>zMQ9xE;<%TR6k`>Cv z)m7;YQ@LCf)m52^>y}l1I{`p1WCF@yy{oqm(EGVFz2lL*%;_uY9;Ejbl*LhnqoFSU zUe@qhyASLpMF2~)(st}LCW$mFcjvp{9b#@uic;2a|MngqOPd_vnVZfW`Q;;q8#zhw zGTAon9UZr^5d3H^o^xXdwgB@OSG7jRJ+C{|KM37iurQK;bO8xXHl6cqj{cqeRn_r`ZiaY%2 z>iCUkCi3>aMeLu%_LRHB5+e1u^>qHSd0$xNlt3^8x7m$%E{(8i9%;5#q{KQgK-W)T z$0ce){}yCJxeL{qsFM!#D8HBgJ=cbFDhoN_gV-uEp>N@mBx{1YVu4%mF<}-KD&Rqo z_ZiWos_H{eF!JB_z8BOMc-m~{I-q)tk$`KYmR z8~611Ar1ztCug%KTtpPPV3~Q6K^I%2sZs*+GvZWB*Z@1#-$zm1h!M<^e7isfI=Cge z^-js80mJ%wx3(cjS2ITC4*TCl7WY7JL z+UgS;4t8ml)pw!K1*|N$FrR?zKgV}zpKtPdJf8nJK5(<<;p+Vq67|p#jhCVqrLR|A zB}GBo&LF?9#wJVx8-kuRI{Z7Ii^LMk^?wLwQdCu9?8j7oV*H}8ic6YI9q1w@!gR5! zgmv2e<(rV0<7+A&BfF2ES3gn(DB20WO~&s@vz%+(lVA1^thIR`ag$BWkD(4o;GMM% zdLGng5B0H+BTbzR(5A;-Z%- zYQH38BZGH^SE6w|`papeJG@bb^AHoJ2R?kfN_0ft5<`0Td#}tY8v76{mkI)!hT?1# zpghAz?=pC#rvF{{X0P-BOTUI~u4-bB(v@*Eyd`&J5EuJEt>$IOpn8l7yA~Ed6QUVi zj-Q0-il9f3j|mU){foKY(T3*dQq!GGJ;Kqxvp|WrmEW=4bF+9Eu!M-hsv|;^&`8-9 zWfiXTuD74F?{_<%dS7UU&ybK%Yg31Y5{&dQgey9{k6{SG?ju(9QsmHiS@jVMO__@< z@5zO$E2j5Too;|TiP`=_(agudF3Hs&rZ697JflOJ_Lbu<4E`nJVHB%t9A=b7>F1d) zHrW(a3^sbr>f;%di-<77HEYqo3OvlTuk=+7(Xv+&B(`-?AqdQlRYoj(Rzc=WjHK?@ zw=T`OzFmpfcG7zjQ12;6#vMRAhl9`Op3OmjQC2p_Uw0yFT^+!$Rma#`YHejELFa=K z-_`x^E-YaJE0xcX;G0u)Sy;pxvU}~CVCne!;EJLEKZqGVVe!#H>iH3Y$~l;A%+%>D z!7J^TLj@J?b|0_rE6t>dK2M%F z1+NLQ1DVlx&JxIEtXeZwNf-d-cASGGg~U*Jsijk=xoN0LcL4unN6d`~w{RVG*bUuQ ze@4G(hSI@-Mb`J9xPGHZ zCg(nJ1%#{GO+Y+b&t+Z&_EA#VpPiM?lAanue+6sD!J$ZtWIA+`G{ffbO^=u1Oro`& zh*Zt@f7u2Egrus+i%Y+;3vzv(DDjf|srlQ@5r1>F>f*{jA<%}b$^yor z)?^9Wnlr8vua7z7aRKVuT80U~!(7`MEO+S~*ouOhn zkpBiZJP>=+Me0d3RB>;;j#eZlfA-gjBOcAWp6=kfCd9fOX0B*zYHD2c-c_)4TH=L* z)4wBhbmwiVoWa*cdC%RG!IzKXlMi^A>!U@x?}gTmI}*#XkAr%H&;JcdOd8F-nEx-! 
zXfXmAlvh_5pA?mzW?@Aafd)vYvtT@;CyoRvz8<(69ffC05Mq$HIq3JnCI*o{%k+OX zo8YHUx86D|uAcvNk6WR7gTUBU+eFbC*{T=$^2wEQ_}D0M)j;V*|FIVY-f6!?i4sH& ziwUaWcJ<7pe<-6!*1p<}(-r>6Q2S$!`HqR%@;&IHSp@o)sS=(xEP%5s331JJgBs0o zjO8ACEwvt>`xu7Bk|xv6p+F`pZj46YHMIQMMS%%1aK|~OO;Tr@acJ?#jT84w2)Oj~K%8i_-KkAJ{j_L5 ztL>P(t2=#8MV*|%hz%Ej8!EaC?La=^h8IQiD-Za1%?gRv;C@n!Pzzr^{2W23*C$9&Vxg&Z(MMigdjvMpMVPJB82J}|on0C$?Rb10 z{8U#r=St^fva$@Wo>hTme&CGPD5~nQ=E+wt_)?+2eZF0^cQ1fH@@VXEzMWwrwwpe2 z*m-;pKlq=(|9JGU_8g>GF%CQT0lRBx8`s!#5%@H&bk_Gx{kY-XrZJZ$h@|2+sS^&= z4y}}Hp1${p*Lg~0C_l1pi55pOW|Fe7$0o}+8GfVaR zlohe2*B=AF*7f$}MQmENEY=FM9IBk52EeZlm#Wz1B&4NHyy(VsZ0TS>7}Y50F-RLV z$PKk)8BsJWF0MM2sHiU?j6yT+P|*^BNq22QZ2)yd!RbR#Q&#DIwYAnUzc_y=M8Ad-LjdrhgoC|r+b`4Z+i%&D zG-Ock5pii+!sgU<&P_EykVU;pJFZ2i2KAvv{rCxSP9@R&{=F*ooU1w@D2h%P-dym5 zEaob4pwtG~B-ub+b#K%XZ)Cv_z0-d3F%BQwNZ9S__^@WoD&= z?0C_>lWBee?G$6pe>YL zbr3L$&L*;cR$->n>yuxvWjSb0N=KuJCncnO4t#4$8Dd_je6u;;=>e0u%h9Jsc=m;WR9ze6-H>>cHh{K^E;;~Az~O{L!g=+Xav zhSS}*V1#Qhl1LFK1TS|yZGL75(0o>g|KErVzxt(#SGNx4_b?%sKJ_nl6O|%cuc&FL z9LZv;_0+h8ZysjqgcpD^mTLj$+jGa2ESRT(oZBA$VLD!$#&VBVuiL8Ty`cdS`Y{FN z=-(+gzg|=`kbSE3gi{p<4{QLIxsDI zvvm%elnzFjF(AfQs2cvYm6g?;++4@DCEq_LJ-7G7!H=ls{@ea-Kg2l>{mIxPBJcUJ zQQ8+(*cB&>>E-I@)@Y~eI5)JD%0gfM--p5RMUotR;#+w|QR^DZ4HJ^&3J%VU8)G|0 zCOLA}N@9u9pZ-6izJy~JvbBJs5%F+l+V(`jeL*MQC zeu$>VJ&4;N6_ZzCFfR`$;w?WA%r46a4GQ|E?#_=UuJ24Uw#s!tJV`7a`gZ#t_t-PT zh>rEojBhD_tx%Jp5LjEnzq;48Bva%ug_ zL5%~!`~$v<13Z^TiRElr$%}h7yOn=HFgxkw8c%K=D6hxT!oMa^H@j|k>W+HIz>kz#O|M*;Bw7)f6^ zOL*4LeX4J{WLs&O~t_O=v~*-&w*#U zjr|%a(e|;1d#Au5genTq2YbanG#<1al1tCzR2%Ho`QS)2=e;rdCir3184o{%g~=;C zgrRgttGl7{1DW_PRqOTu42VHCCBaT}`PWgo^42MT_>_MsQgCou3(PBj>>*D1zp0j2cC!{KAh|0u8E2t=nOneJSp~?2CXKRv?(~wsW)t``?4k3I?U9gct7K?`+5u%{v zg9=EiAYrV`m<)n%1pzX!ZJKG35p45v)5=?}2J8iefqX90Pz9WNCL-+luG!am0yw5fv3e!sk>IZYI6O$YiywZE>9|RFxWYeeELM#K~pnt|G-#h}g4?05;a` zWD57Z*pq0IH&GI$Bf^IZD52C73JjJ;6{gFmjoKS9n0mW=q*M|6+ZNZM{725jTQP5iFis6Uf;v`H z(Bw}g$5|cK=C;nRvNOg}9I8vDHsJ>t3}#CcGb2g?Tcnw-%rycjS;{w+aK3;-;GkbK 
z)cFd#l^PWlr5sFTN8?u|GY~Ws3iu?3Ht13RWVN;z+03|VWF)z?R=YGLPrsx9I#Zg= zGcqxuALhkO#Af*G`38eb?-v1Ip@_2QYvJ9p#M;%>r$}k!TXp*YRORH#svk7|^bU5p*J>OCl&2&izT<;LS?yUq)#J%Bj#~K0rdWnUlJVk<;ocxj0PWx06!*7?4 zmL-43kS7VBj#OJt`{N8QT1DkO&U3W1f*563hlN5&p}vx90*FMLV#Yju6=H&Nv!UPim!YOIp}*W=?O z;`|05L}T-CSF_&ziJeUOP$CsD1VGzlaIjJ5|==0Lr-H+az$lm9wwz~ssljpJC z`y90rUlcj!Qa`kZC3Vxf7lNFSUJQcF`SPDi2R#Hs zdXbIaOt`+XbR6&yG#sl=PL|p#9{bTIqPR?VO@~z3BwTG}th&3ORBWvJ4x;Z8IW>1u z*qBaEh^=;}JR9)2@(V+WHp?qfGr^`sAx1k2R=9V;zJ=oc@uRt)`@`iH*nyKh(fQKF zf3lxDvf#YH7PR3bAt^0NBpC1BbRdgcQp_dv{Trd-86c^O0m3?=37UjLcSO_1w=&(uMP2qvyL}$_uCMQXmdk*%jx0=45v8_& z1-PBTH$mXCR!4W}@Fpqg8`p_JIZ4QX)2YQ-9L!bbw`cDk_fGnRu?6EBNng~^4*N~Z zpC-7i|8?;WS<&edB_U9;1{mz>>gU%94jmLPD+U9}LbsVVlXI+bj2XCr7FM4_!cEz1 zTBp1z@#%3w0g%$G)zg~?S6X|nz%`|cNH^1+^}sW!T=14?w{8RX>pzz4QGG2di4d=o zn%XCFw9A8ogN0O7Z|9~64Q0hYT#i_b6GOeLD$tlng3=@bfD3yUoCZ{E7|LAeII9n# zB_=n0o!xUPh*9Jmn31Ze@i_=q*Emyao8vxWXn$OkSNc_JqWK`@!|r&s4yRyAO5;o+ zQz_mEsyKX_7Y)RQc2#gLv*|-?PqXU_-S?3w)$iP5kI|=2yoiTa!MiX7nH8+zX1JofF?a-;-X+ zdsIm37p}R1RtYgxzc2k2qv@wd$N&gqiu%SoMWA3SxmFXwO`yhV*cH|rRPO3|PIc%N zH(Zxj`fQV;>wGEvE!(~Nx%fdjls$ZPAVx;pl7oC+`LAjei}_LO!%hIYqb89IjbCW< zzdBcC#!_u$zXEG($*xbC~-;4|XjbE+Q1-sm=Q&>ckjafA4I zx$a3Hz>#l64xyGf7l`v6U;Qk16lUUgUX|SzIq*?g78lx)I^pMP|IHU-^k25LzA`QPX`o)xe^j)bH z|KPt<6nkaRPMh@C-oJ|>ex)?SnupWrH6mOnu?>p&ErO<^MMtXeflLYRvX4j zme)EAi%Z^_9g`AYm$%n3t3{%01MP%5gjoV z_3n&GxTQ=g`8U3Cm27AEyYTHCpplp~4}Up!l6&io#@{F?@7@osmNbI$qbXGIBreov zZ3|5Ju1q3978fP)8OodjjW28An&YA`J~$&idgT3!oe(oYI#%cV9uoZgTs#zbreic*8p;r|ZMveoyYA)M!H5 z=iuv>=aUa=Mkm()sjfris$%$C_2w@ zw%;}k$E>PZTWzJN+C@lFo7%)yv$0oWZ>>#jwfBzI@;77et@bLaMr{rFatRzv@2zrCyMIE?Y@TZxL(2J(G$y3V*lM4t;`(CeBdq^5*tuIcC_-B7RJ&O)?-Vhx0FaA}pGxKs-!!teLRMhC{YC?zzE1RaLYNiF;tx&Z$O%i(|kH zQ@-h3LxC^t$urT(d^4BNbmk1C#^ibA zkE&Kd36Dwn*goeGfs%OEYL5TPTEN=DZjLn((?aW_h8s)Cf5L2iOcJEFuFipuwWrw| zrq=g^*XW?UcW)c(@o;CU=_87L0)ZKh%A2yOhreZ;Rd%-+%|(b5>2< z?u^l}d4VvN5vBx9rY;;AXE=-}T2NIG!gNjmm>h!Drt4vmkL6eq5a1K=;bUKb)p#5@ 
z5|&UMlfc`5Mh(j}cnGfBe~^c6i=QL8h|GX-{MEJ6HtP)(q_+ZIwU+a$v3~na-qN_% zM5J3zqS^o7I9!S)F>89Z)-pn4TB>^3^GV}&mIAEMEbhHtaXoF)bv&9t!y_BOnfS%* z1IgTi^J3$h6|3r+;r{i^Oz-$O+&|FDl+uw4V0GLu27njEB5*^N14V3#?EGg`XZyzc z4zA-%Z_hCM;3k1#V3xV(e9%ToJ=rMXy*uplmE8cD&Y!Z>dcZi{2^BN%9mXn%tTI|v z7pKf~F|8KWiuX(LYj;U_n2FX{j_LnCyZ&%$W)}E($6V7y`p~e* z|3JzweH*h9{LkQ_rk&Y`r&F8!%*?yO&VWoxGKtLMj47iGGYviDqX2T3rp}zF$MQ@f z<~nkO0W9>w*uQ#LumJKOjU@+CJE{(|GBcMjCL6Isugs!~L;6frZh#&*l$`8W_>YkD z532>vt2==v%0&4l39tjgtGbPw>@G9(z2k4H^aZ4nc^0B-^F&hmUiv}lakq={CnRN& zU)q_skzwz8bD0k-_PyV9mv{;A*H^6UpWUeA4taQUNU$@hJ z=VK98xov|Dz}8V6O_sX3*mv;c7j=K%F)LEDQuW8G0tC3|qXT?z`j(laQU2#zBp2)7 z?rvG?Og}6R+2N03xM@1UjCY$A`?KMz;dnD|opqZJFK>5u`AX-SUJJh$!lRfCVi@!t zg<9f`bFpE}>iL z4ze$^Ey3W!O9}B{OiE(u&0PxCJGe_TcF5LQi`_&AtK#g7QhG)DG9z#Um;a)O*#_!v zD~O!e3WE5Zx?8%~Xviq8^kJ0+k#qU2qv!9tb&-jpq$-$|YQN_ym*M;zDHBMXR8E>? zadFYpB%Dk&Z!8B4U<9wfJ@f;+>R<8<6+eLp8R_j)KYoJXI9MS9(mr$cgW_Q;o@;GK zy|qTC3+s0@cQjdzt<2u=6SoXN$h1+WTt&YgIJyu9%W2p#eNadUqjglUv#xkg+6UC6 z5TNTV2`68!_6mZ3tf?9t4G#S_AK#fuec5@Pj zgeipZXac}^9!~m#$eVz?GO^Z)nSY5^JTZzq1yRp_eTj^mN;_NV(ueIk13tZ`h zuA|pp{K!ps3;l{MTKpbMROU&KRLYl+Ffuu=h$4qJlC+pHqb!N;!`)nypj7ffKB1T1 zL2jPcC7(54pi4$cC5g))`ESo{r614cUXn;EiokL3l&bb5X6OXOT!ar)oIR)+7ykY& z=@sajK0-HKSH?>_lHMC(UXFCn@(cLogkG9g!EqhR=TbJrj^wz8iLbqaL376^K7oO! 
zJtl%ZsG*Ng-_x)Bc_?qeTkeUC+lA)NE2z2e-MU7y_B>mK+DJ9-U$bV7^7GqUFSyZL zv(@>xp#g}#)B7t_7y9Ct5Rzsi=x)tFz0MUib&|*aS28_ey={qZPtA)<_lwTrPoZ?p zSrCmu&sHSqY(_40Dqq^5p$OwnBk}e|@nKqjJN&W3mzSPkFyFtOfeweY;}bIva7{A* ziP=%4^^3KdJyL1(_HwM|Mj)cbteIGC@{a9G-*yLoYr%?3vq{7NnR}=?%|l}^bJUKn z*Oqvek=f#=uu5onMIIEw@^Nr7G*j@CAk`@yAS!W~^?7gh`;zcKHxQo`I;yvhRMuHC zqt!)rL^jW?yV+SAQf2YSdB<;!j!P8!*Si&Bx)Itj&4!Hq}G9kaF=0ms;LurA&nY z5`R9=p1~XSSm9qHD-r+jozhq)MA-4`XepPzwyfmSBi{XG`<%2~-xp?~!kPJHtMYS$ zw}tPPPUZa-dw`~$b#E#>gD^teJy4nsNhG@Xjx;nk=8Lk){JMW5HtaJ~8sLlWS;|vCgRidBphiwKvmYf7NhA@`d8D+1TY+q{L=kJW^l8^t&MU=ID8;cJ9to_jX%yV2o<0oI&U!#MV544y z?8y%q+_)c6NSn(zg8nf1AHz1}Y|ri8tA3pprjJWF=q%!{)QW5Ng_W#tneHx|h6QBi zom#J!30t0^c{G;AMcqPLNgEiO4+S8nym+BD+a2eI$$Wr`b4FG+MFQ3wZM9FEg-D@Q zJbXxzmP|iyRV6EJ;v0<_W1rapnH7IOAeGuG;aUgK$bJ8B11jUd!vD z)Nhaf|EfJwamBiiW(#RO1#|CVtSI^C(ki!m?|Mm-+w^bL3(@< zt;XJ;Gj@Hyvkpm+O>g?NOXe}X3;3;fnNw~$ify->nza;g*1;dLG&_5lR9Bo}kDt4y zGHAOY9eDQP^!`r@vr{uo-RZ|D3^sZaxM6yQ=zxNQ>1PJe>HHr_k%^zBHOlkR=Zk~B z4hD=q@t!^p_sg*3kDURnChruZJZGaOp(CVBAGwuh_1tj0y~S8@1sR7hq`FtGUExvX zfy}a0vYx)aM)hpHHv#%1GKS#!L{6#-SrHlTzq$D}adJGR0?3SA7!12*-eLaq?$Ut@ zmesFOo@thZXp z8ot<#Dv-|vHl>t^9CJ|q=|}TwFJ;jkkHEl=b&oVsKnV_d+}%*Nw&c%*@dPm{NiWN+cnTn7SlBv3M^Rnb2dk3qpldJ`lMlx*IYEcln4|RuG1k@afZM7rr8>oEH z-GxX_nFy3h|B$*=a#pW^;H#h11oGID^pF19ZfMHc)rnVDGTw%h+;8W@C)t;Bv6aqk z)e%ux9hpxFt`p1q>K+<`&kcfiYc3_q4m0V67Sb1|OpXEw)Mqh?II`X|pK+jT^D1U5 zy>YtS`u`~x2Cp6)JhShSg=mHGG#JTKZ!&`I9P?-9z$T|_h7qlr;66k}!?+oePtCD}^$T9~#V0wY8i* zpqQ5<{pdB&tuUBBjb!Xz6g2O{XysCy$=lOK%xO!$+#aKi_mCjg4fgeYYJV_vHodCZ zUw!KDF8{IZ^7~Lry*~zkfhylj&8EH`iL`*IR|im)+Y2>}RaI@-OP2|XN>yQ4Lrup5 zM7TMlFZx@RKZao1=!=J!n{JD!j@Zzok%E%@zqbp5L$FFYn_YC!sdbC@DU-jZyn577 z-~J6U-P6o_P4Nu^B<26`-(j1Wm99cmh2sis2Wp*&qw5TSk;5NFMD;TO!n81#2sMlC zoDVv%vNe71VhwZ>LnJBtVRrg?l33x5s*M?AL&?A&XAoncX_`*Et?uDn9_>ogo6>4A zMqvYRBjGW3{@YM!ynG@fo8PX);DEi;`vaP&{LCQ?v9i$_zU-6{s5+ZG$iGTEw*-%k zefD8TeMp;_;5!K>tKuOg1l9G1=*yS+Dzcn@$>6PNEUNcdSyx{4ixoi| zz;DK@>BvvtqcY9@)ozzI5&_olFn_Yl`koet{P7FMa0dG}P?po}&VmzZ3Y7jv;cpPG 
z93Saxwabea_aDKFa6{pSglYeL1*WxN>2z%sm#aF`NO)6iw1Ttg=%yI;1%Ts|Oat5>lt2GcD6~5@>J8EUq3UrY1xamVh-GC9WD&A%?#$NS=c^s{P@pd1vXYNRQPIl z-bhheQ1txUq0Z*rL9o8%-G(%ZO(Hg{hX@q`ZS$>8$l?( zLjU{;V-9D+WOzr)gGTT!7})>@6k6I4+Kp{s&WkL2@o4iDmoKn&+&gmi)J{t5?6rJu zxo6YLDD+R`KMeuNV{)uvZnCU^b=K>z)AQ1YboUx8 zE9H%i6SSpF6b8`h23WGuH`b4l#>2gP*HG(H4_hzR)=v&ZVF?zx?$FAEczQEbJ`%?c?>q)8eSoG%k z!w!ai>%Bz33+K$v=0pTh_sn{ zd|WH1+&VK(q{&kwB}+h3B#u=L*&g?G(g@QyPoIs8!V6)reT#=z^b!EY-`@NNnts8- zAWWf$=jdoVXynx4K<%urRhxA%a-b0#@}-=|jdCV+raBxkF8>Y>5J%aw)2b$7PZDPA zIf~2yqjcFd3ROCJRb#Cj=wzMkQX`8xDazya1jsHHB!jymI&Bw4`Sy<;7(Sp&G>zK) zE~;owT>B+lHJb$=bzgfhbowv{Z8(C&D`7F38*RB8oh0VV+I^0(c~i>I?b=k0B- z8}z${9QLc+GcfTmIxC=2qTIST13!PnZv75ruCT(G3Uz~IND<+HFxyY4{C|5=RZ+AR zqc8MKzWpkOa?oYqWPhy#Mr97bxFNHlZM5YNS_&{)ez9X6lkcW{`m2J(81KdeY^?R7 z4=_CbJD=2SQBbarmu+yDA%LWZg^e|On@pDABbc#1lRWWh9NGrb_zd!0MmA1`xSK*N zOE~kn&-(&0_^2M0jukfMe^ZVcEBC(mH^2K~BNg?`6U-#(W0Wr0d&=YnU2WI>n^JY| zKBcef`}i;a^sls3dUJpT!*3c#uXSnYW>Z%1KY`%wkBqZfD9HfO7vof>V>hr+LAqzbsCm2BSD$S630Xx4!HNk7|GDQn*2r$ z^Nkq9(%Fd~el7I%%hT*cHeqezqE^Fyf2I*4Z(?TfCO|w}M~6D9+#@m!U)U`Kaov5f z2gmvNmAGsK09X3eI$As$!_TwoRETpm{$r`nPl_Q6YWoz@H1i7{UrbzaQkUTmW$Qml~|0peG>M4pVgBcFe+wy}B2gkkWd2W`2Ck+)8ELGeTk z1hyl_A|I${{Tg^8e>%fyvFUhKm&ZW~@4RY|QWM`Z`(zGC1 zTJ?H(YBDT|ou~(^f}W*H!cyNFY$waGgS!o|s0XTZuccuYgfm+c9lm*u1mu}fFWsX& zorVW^g(dh*b5fFDDQM$T8qZ0uz~Tkt?WOWGpgf}S2oUof5{Oas;sy8yzU;V+X$#B=j6&j72T0~#^+}&SYdglk- z@59#HJum-t?W`vgMXnT_`-gh3=3?05hTZkEcu&0c+%2Di@vkE9;LFq&GJeNKd#zpqrj zN_8AGkJF0)AbVh$iPbC9nObCQe44f#=|CrnI(n(nY^2#22cZxRohp>+J%EVUAa9CV zZw(h7mw&orq@4zjL`U})4Q5oINtvp$Pqx*N-8Mm>8yt`Q}4){rr@lmc!U1n#tM0o70 zlj2v(?FaE$4>zb_O@;RJ+WqyQbN!z;KH{0nzY>f`XOb(e=igpNN?&iLyuy^z`OGT3 z*Wz!MJk5@OA0L{W1~$nG=GkVJGqel*J3jtMW@Y9)+*nA*nZ!C^6EARCz?$Bw*U5N} z<{OzZX)?DP>&7>j?%z*s%M2bI6j&^`Jyfbbt`z)ukv0dJ2KeOhfHy z+Q6+totXC!98RW}jn0zs)*Gbc)+#U<*v%9C=FJFO>-NliCq`*Es}*DWJP8m67`KD4 zij&jrR%Q^sr*ic0@m`1T@nv}u6xJRV&7cA&jylBVdjpsv^y&)_s&zHM6hlI+&%Fm#*IL|>FuLQ^`zmx1H! 
zR&!v+0%IhE_h$Ln8DJa^gJ|#yDysNO*k&OJS@B)oHLu<&lmSt7lZ$9zTB{QrGT(nk zS_@#I!kOw1;7ZwRU-M)z)!6r7$4g0;5gpRSZp>^?5Yo0FB9ARV%DZQ&;%k(uDJ^%d zKja?z2U~+jtm%~;h)3f9B*Qr&IJm!BGZy4=%UVO~*+t|+NI8z*VdQ>JxOjYXxktfMo|@0oSwpDM&?hHPVNacYHR78tE_ zU;~U98np1flbcLY#@x3UJk*J36wX;OS>@eDyw_er0M z?bA1=zU$dmW}#1gPBtODuy6%BR@q@`Bh;Am-|e*q@(d-lVZ1VJ&7OhSrmVLwX6;+< zW=u}60)yY>xW+5xCDQ?MaYA*P)Y0M1>kjMBM8E$KY9jOsY&l!6Ggu{XW4c8w-lG#R)7#znHw%T+ zrrj9LLmM=&gJ(N+7r4FoAqqHHw~o&Gp;IT0I_!!CoR zYjKrR5Awe{aPWT|S#<>>FchBmY;Twh;|JYyM%_S+NM{Xhu)4bZ>Sb0APKfU$M3guM z^|L0sGGE4$X;)P)uIs+^3b(1jFgB z2E=-i8W&ot&u(ud>R$N1n=`-GzmHg!yLzg8aQk)_UUnqrJ!KUXtr5_z8uUmey;PE< z7I|!tzhEwP`=saNf^SzANy4sc<8Ad{?CpWSFNq|6QUZqcd|Io6ZkSt&`j9z=o8K+DcgA+fQZCZp*HQ1dmd*s z52w->otO5LNBDwzM!#%ul-XJ-6XJ&@BVk!HsSUhaN(213zLwwyF&;U9OvcE2t+&GD zx;S!5NALy5Oof5x%7dbMD7dHT)+|~*yie}7XTgPYRjp~H7Wn6GgMq2AG;1|p+G+Gv zrD;ava-Fu=w{h>Qc}=)9D(`M>*W;btJkq=~-rSD47Qe!V&vWOSt&Cl(!Z)k>#I6az zTU)KyPEyYZSOi!FGVGG8jb6|&{E!O^|L)j5ye&?5kc~wlWQaT8LnI>4S{Frl^#qrG zTSrk|>5mp|Mgbc%>)m@)GCe`PA z^Y^kj=+K+*#5wqK@lhMh!QG4!eUmgleF^1YgFQEX^)hnyeWL)fx$ALXeDJtzHiy*K z{ZAVqp@^A*LPGX&S^nYl=otF_=b-i#+_>Xg2tO%v&*iId1os_sbL*zIj37R#!lZ5& zC<)UKN@_%A(ef#{{`*&zPa%LwA(IEL0g@-}=x z(r`dW!encAUhXqR^=7|rf;`(4qzFlh)k0HkTh*}*H-@x}Ljr>hkINA1v$9wjyp8jb zO0ap(C<>=C`#ol4-qD6%y!Tph#YO)R!U)9ZJABQVH-`gjHk2TXb0Dg5i5Oc+`pwL|=xFXmq&9$l1xYv)o51 zqUJr*Dg3Gl-zwKWB^e*h!T)JbTP{MQ%Pd=KU0768OmaIn<$L}X)we&Oq}r{2{X-hC zGxmyxb||=nSXv{A@^??2W8Z^=;!}_9GHg-}=|Hi_lMa%xw-n>w75f8{+2+ zf@C)ZEOiB)_Y7b}_wJ)Tu*~xEDd{YhPkfKtVvqa&uE7G;yH#UKX7G=LNI4D&j{=ct z^=q1f)a?%M>73?NLLiSZS&_ADBGO)@qa*mV$IIUmg0{5zH{J=$sy$Ry!9ack+h33j zM`22w*w2A5>i17Q@l;~D$=Dj?lyG4%f^D+USeOiVqSYaulk)))m6<@^wB_&LdZ^$% z=Gu=W9b)dtukhKf$8TMJl4`Re^Q{UBxZ^tVam%G5C_?cC@n4GQcZy>6h=R01o;zo04Rz}x-dI)@Qg@bO0bIO%?7%S+g?19}9R z&~12)l80C-Y6HVU zTYFu~5Wk3z7kw@wmIn831^R*VH60P~CdE@qR)o<^#_oHa-P(8gcQ0q=t7L!V501;I zGg5xygJUijCx35{G&|zXrn4It>Ski;Nl|5ms3Y06wB#Hp+LG3#tm0>>fF?JB0(%P9 zyr1}ny=UL!c767A-m?RvS|v#EHbA(u?3UHku3X;us0QXk%bND5icxtZ?7XJHqbX~G 
zn(>rR(awy4hdFWR#6fOAnvUopaV%CM%87-P&ujP+G*vn-UqVl<8Cz!bN(FD^=T;1; zAWE$WXlwg1S64QMDhV&%i~mDZEDjA zYC(zTg7NcuHaxPdw{7p`Ra6ouxq%4zb6Gb`2FAEXZgoJAA9=I|x(t-o439D`0jiIy zZG*l>*)8n-y|sFmD(eXf&5M9ae-l(B&H8XcX^f!9#-k7(h2rh4xL^|}Ayeh3?vSR< z?pgR&l)SohLphVPED$h(9yqFnlcX@&Gy=_--oCFdh>HjivMjyvnVhBkwPP>EH08;t ziReHc70D&%AYfi>H*PI(;0bgRLT3H@#H&mo9T#wx8XXlPM+N2f(D-Jwx^UTde0Fs; zw{Z#0E315(wB){X2Tv)fcaVf!9j$o1-4Q`Sla>`SCd&mUSZn#jvcYD8X)2WD^Wl}- zA`{(5ZO+Nbr-M9$$9{(`xueT)MQ1Oy?ZF_B$uHPRvJcHInY&b2cO;*>oawvO`^%di z^PvAC^mj!JLLcxB|4b#_0>iBEL|%VQo+$X*mSpx~kVkv!NAP3%(9lLA-=O_)8H6tB z*PCQVmV(T4o7vHA=qMgr&`BwSrFPnXj|qqRE|RP2=A#SPTIrU<)8C9|0jAQ_b(T$3 zOJpYq3e(^BLyoXJ{>S-QnofM)wfihuRNLfw4fJtK!}HPR9FY|X67hU@A%_{G>L|}G z=ZpT%-s0lwEw+?>{gI03Jr<~&jjse)oDkLB4tUK2ta>JWlJ@xe8#U26Z`q%}sc&rM z=A7+jxlH9j51V6AAwI zk5)^}Xvcca*&Z26$09|v1HB)RIRI?_u8k&>UUT&>ef9&U0|;Kr5?5Gv=J)ozJn5)X zfz1qZ=`m%4y zyt>)bEI+~aZB=0V{$0XHkQ|Kz&HMhs=PS2X9N~vr9X>%FC;G7SO;Tz6zx)B4&uFR+ z7K0DP0(Y>_b_N{QBtA)&uWno)o8PtaqsLbhmfuV@7DGfg9P&U!RcD zF86lFSq{9T9QKYbHK~P z7mA{-&TBI-7zxL9evOm+Us06V=RM7A9|ORK;~{z(+@Bm@7`)Bv4ygXD{Zc8OOhC(q zA86}A`-Qz1N(-;MA*_8vO#c7R#{m0-to?yLuvGV=jz0D+NvBNaN zofnux^PT_A3EKPWkvrAaDCbF(0J#!?jQll0NCjuv_r}k$#JCyU^oe~kv<%3B(QzAE zW#y;!HdS23f{8d$#8%4T_@Aklywf(o21f#lU0S>0b4qoDO1bq3%UaRnyyV$pR1Uxt zDyqP(2xt6)cwWsvIp!j6iE8q^p<5Nn991yFqD%=kJsbcIe>hbpLp@GHbYceUD{>re z5b<+v)BjK*qpz3)K{XipqMMU5^M_>^P*L-D`T@1*#J^4Ojm@)Wv5PeJPTlF(zoGHj zo}Ip@*C zOuFuvG}F&moI^*3Sj1|$-!PlCNhb(FNem=kGAP5E4MzZs_fl2{n+?ZCL>>SU(Bp~; zIRbKhPyIB?Rz|?5XeBnqLB%S==_T(ZZ%j)Q`tpH&YAO|)@$>0qWR^JRk7OIFDrx5% zO>n;@*O5X^p@k~TjzHY+dyh_1;)s#z&tbAcX-2vV)0P>Fb_t1K0~O$iGH0BzGJ@ph@M`{Aooz1P!%dQ1Y0lVa9C z<7R=;^^Byp0Ky=C5Db!KxsiU{GLxVZvUH-)d;3QVQP9JnmxN8w#;M- zBKxxF-CQ-Ve?0#2lmrc$Qe5_bUAn>i@!|FonVp?owtvv}(7`O#mw-D0?3Ob`3tdq@9Y;$?=#t!TV9=J+g_p*b;?QMe>wX1$<-m*9OtbeE5*ESiUUsJGWLQXj|uUS9|N5(lD~r zK)d}bKc*q~y*q{8B9j_%3AWxMOc5D@93g)faIH_|o>jIrw{b$s!5*4DDQsX_U-G8V zJ#!vt8>8^eaf+3w&uzafG{O*k!%Miey~rFabpL`Ky;Z`mxBP?hQS8y%RXNfjf48_fMyJ28LIlyBv<+e0+w 
zH&1kTbM@=Po8e2?dgxyl#CniL_DTwEwxU;edxbqthdm?zK`<+>kV2V-jC|e_PB@uB zAs03ah5Qp6nQ$%lxoyma00%&uTCJO@t4a>Uck9WfQgnC9Q`hmT5X7Gt#27C16(PwY zkuA_f>_xFVgSu0fXVfwnU(c(Nr>fdWo z=McFK#(5@_XUV%jdh?t4^uWk<>^PkneRL;{96O#*sbmhiq6sQxL+|ySg3x>Lts6yK zI*qO3SFQ<_IUhTRDJD5I`__TY*F<{NhgFg0x2^f8_2BExNa_2FOgkPzRz3aLCwE;B z(m}S!$$`i1jRzEi!DGYYc`))36nrmze_(E6)`VESpU=O+z#h95CDu+*aIedMo9Oh# z)>VD`oZC%4e6hHm%wYBGeF8a}=(Cpw>8HEA+*E85O!?}w-SJ|p z)ZH$%`Pki-!8nvc*ohd>*(FBLcoap4(A^OE7W&iuUk~{wK~6qh_wA*hRCGf>mCx7x zPqFmsD+mB~3lF~}5y}%XC6L= zb+QDogK4m**cN7zzP|A|${%p-X#QHkl~jqltxNK(=OK2n(a znO+0pj_9jz8KQ8iURLUP>cNuBdC}Z`J@&|tq(fCG;=45QSA>F`M8 zUm6t{(yYuofEPVgF>zKGP}Sd(Ph2V-;OW@?eYe%@0a|irLFU@3n8XM@J1KDJ67YOU z+hZ~Q3;(3z5+2YR7ao`tWI$B=FWqf>pP!v7*7&!@{q6b{o`)( zp##_e0Fc5(@F2dh?c_Y6Vh8N5|Cc`~xMK!)Fo5@6Zk}9rPE`Vgpi~Z;Ik89JLHMd! z(jE{I7Z08X@L5&BVU_Jsz}lCw)SH0NUeJ`xl3Q-_Z$`}`dTYx0t$=<|)_9mue8tu2`F6!?6hfbYc%!H}3r zoG+#CWbPp%%m2R7nCmKBKh4g2NFm{5dHGVTY5Mg`7v8 zfEI0#D(Wlu@JkzGcNW3S=l(IBS;4(0D5a++PHycBv)r$#TT*|z9Hy25-CtZ_9?@!1 z{4RCC+Vzg#2Xny}w^S&c{v6&+M^e0`u`g_Jhxg=GUkw?^iN3`c?Cs5w|`dNG3mD zlR`4PC6A{<>xJFI+A!UfZ^mKu;X#hGTGzEzvN)K;jnfs@G)}3U`p%` z-Z$~^;WT*jL_F~RY-mn*`SJ5FZ7&T_J=he$PT~)Jim=ej}%Ldt0;mMLCW+o zoJ@sXiEH~&fCX2PaI%2mckV&1?-S#a|F1SSprrICM5YuttvK|j$CaI_p5#stX}zD9 zp=QtrXLe*3J!RwJ3gN{b5-OiME{TecN{NR3C>6CZQ=<7R(2YrI_DlX99QYN1A#O## z+38#jgYs9t6!hd{d7eZcQc;$plD5O6WQ;t+7u_&^^megt?!05_rFr#eSFFTsUCG9W+rJxE2tNFd&^}EDwr$VXpp56A)rif*ngQ1xR_XW%?Pp;~1}F zv5+igR^?zd@QdL6$Uz$+YsiA3(6{<$hp!HwL>S+=fPTjfEv`dN%;;1?#|fOPClOGI zTvMVXJq5kD1?0N*)f8{66&aq=fr&%3I^0AlP5yiT;!6=5$1*KOBnZf$W@BdklF5oY zkx1O`CljtB6KlN5)yJafS+S^7mf$J{V+iMz@#N-S20aO1FwnA894ng`=t*;=BBW)| zM_m>_yOF1IsY2cT(|Y0GcssV_BqCe2sV67KK%ec|@$;wFphe-SZT)kHVHG_r?ysnI zUriDKp3_f89Dr*?l%n$br4B;@4!$B@B3_?-&Ham4mU4LV@_NF6Vc?smKj|%8=)HJWwWY^- zvst@$uqo-?jMLyf{f+;0uvP1&poJ}@%&>ma&W@m8AMd$#iO%PwR0iA$Uqxb?><3Ig z;Mm~U_+wg_tpAzIq8YRRe7-k%rV$sjGWEJ&>N20@a?KSJUm==4Mw0~ zgR*9gW~Ih3j1AJD@yKab6bv>q-+)a*yc!Sr#4xi#t%{Wvr8k}~K0z8zTDKyyt!=+a zwp#bwl6sZX{L3 
zIsJbP)#@oOi0tRCki(boP5F>mJa>zD@Bq?xG?3hDQqY1sJPHQ0QqrdcQ(;W10?)a2 z(({9i8z)2;+SfFo*b3&nxsg_?>)QV+Q+*lAk^_lYulIf81t*cbYz77fm~~WS9~Sv5 zASS|S6ix>F6EN`K$F~H}K?Iqo0(`a@DO&-kj-41+-m9lab&<0Z<#z~vr~h1Bm9Za}KRz}lcKua-`5{EC%trTE{6otjqBH54=f@kN+b4$- zRu6B)b6=5U!tY!^?A1wBXZx%H^G3_O7Wr$X-*tKIooqniR0YXxgt-&wBTx4|RscE0 zOY0#b`GSfsHus9hGI(0^!jjxZ^5hw0RDf>s!dNvfJW2qWU>I2yEL;^xvSqJRxe8&+ zW4OfKQ;~1xv%#WLH>$UEvmE8lxdKyN>P>nFX4+4BfQYzDb0Q4ICgEXjOp!>SI^D`d zxMw!DnpUx!+jJ!CSRe11aG8)Mh|H0m^g9BOI90yZ8i@Eyn9?Cp>H~!ifP>-rB;Ff= zw-cDCvhVfw=AkC13QjCI3bB9Gl&-&s)jcjt0A|df`>?#x{!4^0VaPY{u+59e44gZK z_;VyC?d$G&vmJcz{cvawZ)vCUk;pAa?mu3AHFsqWy3G5}T~7poZB#$+wfJY?Xu=`}O(NwpzV1Ij5Ib#zatLUircc`~)Nwzdh%gI|tjm=dt+d7`nV$8-pV zsa5XatfkS*c=cND+)%#OXtn;>y@0@<+Vi|AnbDR!u2PLW{g~cXaa4vb0+Wdaij`Vs zrkzQ#bqiclfnmn2^Po>|It=g#(~-jfw2f;W{1=1k;|uA#%LIwrWAv*RDFM*HfJ+9? z3#qL1WTCI~KO=+~BfffrCw(=FVBdLVWO{OtMy{~3N(bqwPiofR$H(&mz;%?YDtwhC zIj&bj7z^2V9m24(og%r_U~cfK!!x$nX62QbeOVT zEXh(LpLD3{V|r!F5$u;?ofQ1;7>5FNesTk8$V7-7P4Rg6$9)mT`+%X@mg-!Hw^FVi zSOvZwhYW0cmMxmgFaV-Tq7S z-kL3qi*4-R!WUi|NzEI(Si%uk9%quz)n<)u(q~qB8_CjNL;WH-wucAlII2v9Hh}Dao;*1^f?f72|}b{Nv2zlZF_t$^?--l z`8Bs@nEqnqtV`qifFBdSSIde^(Dg-My4Ye}T|VpI{eX}6-P6TClQK77JmRZ)oSg0o zd7F7lwVU@Z)W>9^~v3){qg=e7viFELtP=`{lFP%8Xb z{*NRi(uBV*@3_;w{knPgPP(hsIoGu^V-t4SaZk`*p>*mbaEfvM`_^KLdg zrtc^$1-GAX0V+*k=w*@j=T#NYJq2kbu0zo>K^zunN=K0^K1c%YFy5Iu0tEC4a<~JM z@H4sYZKC-exyI6kh_kqtB;2+>NPo}4r&u=i$`c_ew>}gTF^^h9e!>(4?-t1!x!6cFRz0#dmA11}R zmIG_hD^tOhaX82w^eahr$LWp$UvzBO#ih_BMmt>wobrM~{ZIe?oi2{#cdZ)|$v(KU zG8sBJaic}ttatbu9Ofc}*1!nl4ZI7LyXNA3nk-OcZVVD>l(VH7ZZ+j907#Yh~}&k4c+{nku!ZgQHq0 zoe$Kx31x~PSu(^)OZO|`SXo)F<0~`|{EcM%L*^J8Qghc!@XjF9n6kJAnJ~V==+wkx zT~l0q69;a3?5M+=JJp9XiAUr#=8*ff{LDaS_NSwk+Yj5rt&T9^o7<~tjPO0cRT%ZO zF!V_)LpuPvJKcF*f!grMM2C6+#NYqlp1NCzsFzi>jp8?E|gv)gKO z9aow-ia@VU-(l|x#B#V8#b_js4Dl??pB!AB7)Rm%4?#h`zR@fIp;m+e@FWW6(;zu# z4nBkm07OJ`76t$`19Dan1j-wgNOOj$ispzz1QyM6LNf7Lz|7bP5z&y5)D*dZDadRI zOjMHqM*u#_7ncXY2C;0GND?wK5WOHW5vgQzBwCG+C6mjLZ=1N3gG;Oqo#vTcIgWtG3Oiqf_^% 
zj5XMsRF0zOQi|3*NI~b)Jeuc!`4|5Q3^h~6tXYv_pne{+6b->Vs>dRdB?}sodH^K0 zpRjz`md>XmGaxWiP&5EwHYOrKMnE8yY+?$+?3M2Z3m){idXVVBS6_bgNqPF{tq)Sp z({V7wah?SPh#gXJD>G9BFe*h9jG3L))6{dICbq~7Ob`PBA5U`|FK#W>)z%AYi8rr@ z>+9S19zJOz7Bdsq598s~k8?U7hV^Z`yl!Se@youimiIv<6Z8N2N+dhZ?KFbvU$Sl5hrKA(4+$I~?BT+AGY=mUbP6q>UVIV3W( zqNV^MG7MA7J{?O8mFHEmaTx-1cv+={(LeGmjxpV*B%JgaJ8!2Sp@(VZJ|OrI`6%Yl4Ik= z?A#w3)`~Q19>>(wE6hSgJu{JGvdRT#IGGz1e(krv^U=%auBxXwS5;H8Utf0pbV{NV zl0jE@kK3w2=H5qd#uzAP&J$NT1Q$bWUVZPuYV~&4-HRTY_B0o z=>^`NPp9LlKm5+8zl$yW`7eL%z2+DD+kGxms8(^D?{&DeuaFD?QcBZl5Xsuhtb>ka z*KLSs90rI~V?4i>|ND3I-~Q{r{EM51uZP#%AOG85`CIt=|NGni=1y*R&ea}umNo?N zO)5FhSwX6{yLtBdcs#as%r_6{Le5hiW=ef+{y=Ue!p^7=Kz4uGTi-fcF?Nnzljf`P{G0W?SHu2$uV4MUfBwJr z!Rf{C{@%BebKk#y-hcbs@4ol;qeqWo&38Aq^S+NUZmPA)$S6`0s(M{?;mbVt2?(ZA z6iYEfk4y?MX5|WAym)bQcbif|=F@Q8Znux$dGhMz%cx4J4Y2+CFMR#GAAkS#?MHWq z6zEG2FRq$(HRp1ihat^6YcN00R+1+S1T31Hs=2y&d$qc}yS_P}&vn&~Q<4SV|fC zNyZYj4lb}U%*8ne00!d0H!*nc1#B3G{xE8$hBisnIcVCp>FTNqo6QEWynJ?dx;d0n z2~|YkoXcLW`|E=#&Sl6sJLl?Hoetw1Lff_#hM_+>fW`%-y7W5XTvw|{>rJe6nx>CG z`u_2_Z`yjbYJ=x;K6_rf;9}eq(|MlfVJtZ(qR00hAY!E0bd7X!I-Nu$#`xskgL$5{ zFAX<3YR*l!m3%%voUa8*8tET`&3dABqIP& zRaPbeO{wEHC|j{6Mor=%dgWBiGdpl#9uWadflb8796JFP5jBNe%*+7=s1`PDpn!-1 zMnoK#vWN&|Wuld9*XwSf5r~Ks(NvI)Kxqa5MYM*c+O8PqUam|4LP5SOh!sEbIhl^;3=1)Hl}$>`CL^1IHQrpX5!gd*{VlMXKJnb+b`ey zJ5a5vD%E7pMIcd=0`nr%{s-yVXXOKGLBhmt09GiKO`Hxk7PtP|gQtK0z4Bk={cYI( z#(w+XyQ%L16}3@70C1V2QUb6t<*DUGXv+Z5KmmXN0RcSzk=Ko-n;nL7eUi1_{NT|? 
z|H<9`zliO_%NPIaQLDN60I7>Sj_Z&P5AgNJx~bRhD@CRWOV`#^vbQ(`n)^|O)4$%~ zITV#tQc{E9CQGgjLrL%xmgoOTN1;s;O>+RLK*B5lWaw}~dmR9Ph!U}L2B0}1z;3<1 zSN~+eqkqzob&g$RtVz;E*R;+GA>pjr!QWA z@5S@Mrh6CnEV#M~oBFMY=T>!{M-7 zUF~)|$2iYpmQvTXa4<(oTtL{7wIp=!S zee}^s?XGUS)&N$uk4=5=@~TWT5jkx|M5H>kda3{bfB;EEK~w{P$}s>eJl0_>mluy= zMpG2dfUfJ-9=IKclu{`wBBHv0&Z^46sj3$SfJIDZQBHRPT5?5Aa2!HVkgPJLvF56X zbVLA)|xJvxR&-ut#~jgW{K7!AzK%A~6DrVcV>SgzYN-9nqC&yt}{)J;oWy0Z#QkyWQt~(^P6!G z@7a4^$5^mLd+=?27v!y9qh%Bypj`CtEuq;Vc)gk~Cii@S_(nUZrW(KuAL{H{aoKQ)gtIjtMmD*LaiN2P3 zs`%x-b*WzD#Mt&{bI>1`8h400;;bp#Z2U#R@;`SuOAI!m5$RaM`g2 z0J;GFi1;~qbNTV}kA_8v(C81q=;Z|gv<@{hnPehx8!y1Q%=f3p%#bMrZ*O`G7`xb% zf@o|&K%fplaWVI{KN{v{-{F{)1!CN-qoiRQ{0>|FQ^p!JBLhz++cwr@5n|Gbo4Cw! z{`3V3WHx8YRC(u8T@g@ix^Z+FJ#mAf7^{FdB{4$!$!e2`*&FpVP*D2=@mZn*n1KXA zL8X8g3c2hwHXCOV)9FmJuc_{Mpcx62C6zN0uWAJ*Btpj`VrGF| zs+!Bo9V2$J9@8{QUaeLghjJPotsnThTCdmAPfcv@rrw{ccC~7v8R4Pti2zWaJbK*L zP209z^_7xxxsKYU=a(oafY5n^m&{fVOJA z_h@8hnhPS{UpFfXQAHY@MYYcUslaC<+IHWOLP7kg=$|l3~13bLI z4avLj=EGOqb$fZ~r1q33x5l>?Je#+Y$tN1-?) 
z^yJV04V+^vO2ppB=mU`hHAD=~RTjtrIbN?6#aX7n&HCyd2$=w)tE(mko#xp)@0=wX z_QwNzc=%}7tXHBrgdn+mI-$&tJ$aq8lw!jirtz)H-D|hN1fmUBdN-XvzJ2uelZW@$pML4XoBqXJe)oQNU*|}I zeIB_9HLdEp7Ljr6^PE(JLjovzISYHLxHkpR>Z(&vQ~JT6PgPo#r+QOwR>SewR-1X% zJ&gtM9njN_4v&{AO;;Z-XpAUOi*Pw{pB2mthW+ex!5WV-#=`jJ z02W;-5p|B8_h}q~&DYTpFUfY!NlGCiE9#gb>q2^J+6^L#r=G{aQrC_GfN=3O#096xUv1O=%@cM2#4ffXCKjXY#)Pe3leDM19>#zUp-=X>Z@yFjS(ZX83^3K(< z=D6CdTNmo8Tdl@<@zY0>vr8{IoO?ZV!B$bw%hIbcyxib7aI}DW%S(}WnNX-X+M!;9^9nup`*j+xc!T;`NgXgv{m?^%4G&k=3AYMl#rcXuH)@4WZ( z#4t_6;c%FXY%VTUMg=e=Qy}o0Rl921D#SvNbG|#D@6L1Ab$qcZ^E{~fB1Y_^N8};` zr0Bh>I>M@|og-#6Q)ai>tbA~QF6Ye5;3y?2Y%_35Nm0O~W6#V>hDk(Lz{t+|?3z%D z0F(EgtSCU;t(ZL`FNKAHfD#cft^oigp$Gu@>P-hLI9WC=lXK1~(!zQHP!TDmhzJ5K z4}y8lK;VK$Geq+~x;BakGV{bz5;<!j_hGoK>snQZ zVOVaQh_b+Jb78Ezj)+RhAu&6bp?J@;OkDYda-PmbbCDD)zuWEVx}K(~r+&%Lo4Rf5 zT8qXQ0WlR}@2jc-fZfGKO`EQ5d1JfXZkh*97&$w?%2zs9JZq5){rc0_bMpOImEZ?|1| z8OUdP!}y1DLvxHaj%jYlrhhqf+(nFOPt@g2ks;@5Z zZz@;xNyUf_8qY`poTgcST5;f_o2mw7M#>~>$JY0cwwoe9onC7{Dy&RgRk@*0A*toS z0KjH%Or}xIASdtvp$NGD{(txH{qO$c|Lx!YkN&6sgMaV8^5b6lztRwI(fiwD{+sL5 zKen|c^e*aj^!U>`HUFdH5#L?Y>Q;DFN6NVfiCJ)H>8BGWm>n^rlo4HPv%KZbO!~2` zgXq8fi~ljkk75rvr1N|SSaaB|?tWVkQ(@owC!wCSF!OYtlIodo5(q9Tiz}r?-qbae zyil+i_(XFX+$n1eZw;sKs-derjz}m9c${We1rY%$tGa5u@3ZBc2@n7Q5gixPtkNYUz(=hF9s%!S$Z_$s#;1)?} z6<|`8EXDg!R2T7|RPq8nAk%`SE+Roh5Sk0-w>VEFf|Io~YGLbU_6%cs? zbH^C#*~b9KGu$8$&YMQh`ksRd;c(BEl04Tk{DfF8X@DJc16my;g zL?olW=}9c}S#!?IkElots_GmenyNa_W@csSTfCVVS{5lH7{Y?U5)p_3=<+&#T7h%I zSiH-C7y`$TbAfqqWI>ZFGe~g+h>$gttBVye6);3(U{*o{wgI3UO@s@(?abSZR0Bbr zPPW>no5U4MEWzjLqsP3fqkKo&ZM1yY`)XCJC{|UL6Bz&yya6Q@K*0-0Fd#HQCUmACS#oFf(hypj zF3a~wL@*jDE-Hy;W{V*O0{n>c{NVLMy<@l=OSR=M=U3mHAFiC0pHfooj|z_NSm+z! z**{AcMC2xb&Yg>_@=xdVP$nlH*oKnWBXZsJ=kd=~=d*2p|9ptmDk!2uCE-G%WLSRO zOa2!EKUbmsL&kK=(edU##6SoDvixQq%qRw)0cXvH;6eXGu&AnCyGyxe=e4xMa13=a zAV*G2iz*PRDgj3CQ%XvRqIImyEU7Rv74>F7d@l25y_0dCi%cMdu4b>fuj_g$Ib|tD ztEySL*Q>fpDS39|Jh>Q-=L34T+N^0t07#{*R;ziQgY%?@i0tuvK0D_eF*9pP%b}6! 
zY`A7Cj+1b0nMiYCOj#6_xh@B~-%JGY?m+jObhzi% z=l*vS)fU>E)2i}Q$qS0)y*JgIbBvLhP2D_C&bd+w16Ea)b4D_6GUo(D3KpCXA!y1X zg2>BrXn7tnGckzBEMTI{9GzFyQW7CL=hPU8Qpx1q(j}FgqYp0lVHp09dHbR?$UcCG zEGYr1nVBP3N&y09M?~{H#~2OybUM{_?NZKdGo;e!JnM0e7w0?t^1}zu_IolC4!Mq| zOfvw0c&gM!Z-u~3$s^>(i)lM5Ff@8TNUV!JUY!ta+&c>e$1znnBVe)Ux@`49P-3&0 zUwOZoP-k{oTZ%Z(uLC&JW<}5abcaxJRnIo5aoucle_XJzH-J~pOEXLmmdCf5IbuX! zKrxX-L`B8S5D5(kI67aFFtZ~vMKmBFB4Wgu3OY%uv92oj&fW3*fSJ*=b0z1`UVKR~ zU{0xmcpUnnq-5F0z|8YBI3%`~VoU}ptBV0m^4K@g5p|Jq9FAh@+g01Pm@gK`;Q)T$URIljTc9%&Ne_FMkGO zTDFI(4%1Xr>bh>%D~FWxXVX83r&I5U&8)JN+gc49RZA&(4QRW9niH&GE;z91>35W-Bbv=5!*ManYgd70Q)5>D?UB9<(wno?2` z?|tyiFdlcS4_@DWtL^Hs*QTxJd0LJ>L_ z_Vb(}keO9g1!nU+6X9}dwZ=t6!4yH%R0I({u{rbJr<9i4S(Js$EK&e?Q;C?F$eS!0 zDs?VQQ>v>s=7>eoLO1ywlwXY5n0cBeL{Fc|crLH(y%!M&Hg<2j>+i;PJx^z$7=uF< zBQ!%PZ_Zm)H4tXBH`6Z$pbT$rJmtxeN5mqAh|UE>6aykkXelu z>blNZ7G26Yr?zdSn8IR}_vYJbAmp5L&de0db10=u{jeCQq?CXugrI!E#?36u@*26oJGuYGI2;dx9(CPwOo}0oR<3Q|IOZi1zWam=V90w4d^6UrjC5tVXBq)-3|i@x7M9owlIpct=gw#Azgvg!*j zQ4!`lQ<}LI`xqVo(WrC=%A$-?6%|NBj^m;Bi=dCwHxJTa#tMg5;HIO?cDPd^+%(;x7mMl9A>TITMPgH7y=~sIxHVJBc00-IixM5&1k^z- znh0X(5#4+hm#%;wN9DuFM%R}#-2U@!b^kjl(K